author     ArutyunovG <arutyunovg@yandex.ru>  2018-11-16 12:06:21 -0800
committer  Facebook Github Bot <facebook-github-bot@users.noreply.github.com>  2018-11-16 12:16:28 -0800
commit     8e91da4cb3d645b178b515bab54331c917340cd5 (patch)
tree       055fe2360dd23e2c0435ee3769b83e9596699167 /binaries
parent     2c21de2007b3185231abfd41ffb49ce632f52f04 (diff)
Windows shared build (#13550)
Summary: Hi guys, I'd like to build Caffe2 with more supported options on Windows with Microsoft Visual Studio. This is the first pull request. Running scripts/build_windows_shared.bat builds Caffe2 with both CMAKE_BUILD_TYPE=Debug and CMAKE_BUILD_TYPE=Release using the Visual Studio 14 2015 generator. CUDA is 9.0, cuDNN is 7.0.5, and glog, gflags, and lmdb are supported on my system. Python is 3.5, and Detectron works from the Python interface as well. It was even possible to debug Detectron code and step into caffe2_gpu.dll with the PDBs built. Unfortunately, the c10/experimental ops don't build with this Visual Studio generator, so I added a dedicated option, INCLUDE_EXPERIMENTAL_C10_OPS (default ON), to build_windows_shared.bat to deal with that. After this pull request, the next step is to add Visual Studio 2017 support to the script.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/13550
Reviewed By: ezyang
Differential Revision: D13042597
Pulled By: orionr
fbshipit-source-id: f313f909f599cd582a1d000eff766eef3a9fc4fc
Diffstat (limited to 'binaries')
-rw-r--r--  binaries/benchmark_helper.cc         2
-rw-r--r--  binaries/convert_image_to_tensor.cc  9
-rw-r--r--  binaries/speed_benchmark.cc          2
3 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/binaries/benchmark_helper.cc b/binaries/benchmark_helper.cc
index f5be44dd34..1020dc1bc6 100644
--- a/binaries/benchmark_helper.cc
+++ b/binaries/benchmark_helper.cc
@@ -141,7 +141,7 @@ void loadInput(
vector<string> input_dims_str = caffe2::split(',', input_dims_list[i]);
vector<int> input_dims;
for (const string& s : input_dims_str) {
- input_dims.push_back(caffe2::stoi(s));
+ input_dims.push_back(c10::stoi(s));
}
caffe2::Blob* blob = workspace->GetBlob(input_names[i]);
if (blob == nullptr) {
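For context, the loop in this hunk turns a comma-separated dimension string such as "1,3,224,224" into a vector<int>; the only change is that the stoi helper now lives in the c10 namespace rather than caffe2. Below is a minimal standalone sketch of the same parsing using only the standard library (parseDims is an illustrative name, not part of the patch):

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Standalone sketch: split a comma-separated dims string on ',' and convert
// each token to int, mirroring what caffe2::split + c10::stoi do in loadInput().
std::vector<int> parseDims(const std::string& dims_str) {
  std::vector<int> dims;
  std::stringstream ss(dims_str);
  std::string token;
  while (std::getline(ss, token, ',')) {
    dims.push_back(std::stoi(token));
  }
  return dims;
}

int main() {
  for (int d : parseDims("1,3,224,224")) {
    std::cout << d << ' ';  // prints: 1 3 224 224
  }
  std::cout << std::endl;
  return 0;
}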
diff --git a/binaries/convert_image_to_tensor.cc b/binaries/convert_image_to_tensor.cc
index fa6b298017..31f1edacdc 100644
--- a/binaries/convert_image_to_tensor.cc
+++ b/binaries/convert_image_to_tensor.cc
@@ -99,9 +99,9 @@ std::vector<float> convertToVector(cv::Mat& img) {
} else if (step == "normalize") {
normalize = {255, 255, 255};
} else if (step == "mean") {
- mean = {0.406, 0.456, 0.485};
+ mean = {0.406f, 0.456f, 0.485f};
} else if (step == "std") {
- std = {0.225, 0.224, 0.229};
+ std = {0.225f, 0.224f, 0.229f};
} else if (step == "bgrtorgb") {
bgrtorgb = true;
} else {
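The f suffixes added above are presumably there because assigning plain double literals into vector<float> elements draws double-to-float truncation warnings under MSVC, which the Windows build handles more strictly. A minimal sketch of the difference (variable name is illustrative):

#include <vector>

int main() {
  // Plain double literals assigned into vector<float> elements can draw
  // "truncation from 'double' to 'float'" warnings on MSVC:
  //   std::vector<float> mean = {0.406, 0.456, 0.485};
  // Explicit float literals avoid the implicit double-to-float conversion:
  std::vector<float> mean = {0.406f, 0.456f, 0.485f};
  (void)mean;
  return 0;
}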
@@ -143,9 +143,14 @@ std::vector<float> convertOneImage(std::string& filename) {
assert(filename[0] != '~');
std::cout << "Converting " << filename << std::endl;
+
// Load image
cv::Mat img = cv::imread(
+#if CV_MAJOR_VERSION <= 3
filename, FLAGS_color ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
+#else
+ filename, FLAGS_color ? cv::IMREAD_COLOR : cv::IMREAD_GRAYSCALE);
+#endif
cv::Mat crop = cropToSquare(img);
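OpenCV 4 dropped the legacy CV_LOAD_IMAGE_* constants in favor of the cv::IMREAD_* enum, so the patch selects the flag by major version at compile time. A minimal standalone sketch of the same guard, assuming the usual OpenCV headers are available (loadImage and the bool parameter stand in for the FLAGS_color gflag and are illustrative, not part of the patch):

#include <opencv2/opencv.hpp>
#include <string>

// Pick the imread flag that exists for the OpenCV version being compiled
// against; the CV_LOAD_IMAGE_* macros were removed in OpenCV 4.
// (On OpenCV <= 3 the legacy macros come from the imgcodecs/highgui C headers.)
cv::Mat loadImage(const std::string& filename, bool color) {
  return cv::imread(
#if CV_MAJOR_VERSION <= 3
      filename, color ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
#else
      filename, color ? cv::IMREAD_COLOR : cv::IMREAD_GRAYSCALE);
#endif
}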
diff --git a/binaries/speed_benchmark.cc b/binaries/speed_benchmark.cc
index 89151546e8..00f93f4743 100644
--- a/binaries/speed_benchmark.cc
+++ b/binaries/speed_benchmark.cc
@@ -127,7 +127,7 @@ int main(int argc, char** argv) {
vector<string> input_dims_str = caffe2::split(',', input_dims_list[i]);
vector<int> input_dims;
for (const string& s : input_dims_str) {
- input_dims.push_back(caffe2::stoi(s));
+ input_dims.push_back(c10::stoi(s));
}
caffe2::Blob* blob = workspace->GetBlob(input_names[i]);
if (blob == nullptr) {