From eb88098e1157d17421d4202d37f1d7f3f8e21809 Mon Sep 17 00:00:00 2001
From: Teng Li
Date: Wed, 7 Nov 2018 17:07:14 -0800
Subject: Kill c10d/private/CUDAUtils.hpp (#13681)

Summary:
Use AT_CUDA_CHECK instead
Pull Request resolved: https://github.com/pytorch/pytorch/pull/13681

Differential Revision: D12966607

Pulled By: teng-li

fbshipit-source-id: da0431f588969791a19519368edb909b9c3dc5ab
---
 torch/lib/c10d/ProcessGroupGloo.cpp               |  2 --
 torch/lib/c10d/ProcessGroupNCCL.cpp               |  3 +--
 torch/lib/c10d/private/CUDAUtils.hpp              | 23 -----------------------
 torch/lib/c10d/test/CUDATest.cu                   |  5 ++---
 torch/lib/c10d/test/CUDATest.hpp                  |  3 ---
 torch/lib/c10d/test/ProcessGroupGlooAsyncTest.cpp |  1 -
 torch/lib/c10d/test/ProcessGroupNCCLTest.cpp      |  1 -
 7 files changed, 3 insertions(+), 35 deletions(-)
 delete mode 100644 torch/lib/c10d/private/CUDAUtils.hpp

diff --git a/torch/lib/c10d/ProcessGroupGloo.cpp b/torch/lib/c10d/ProcessGroupGloo.cpp
index cd226f0fff..6b6a77932d 100644
--- a/torch/lib/c10d/ProcessGroupGloo.cpp
+++ b/torch/lib/c10d/ProcessGroupGloo.cpp
@@ -21,8 +21,6 @@
 #include
 #include
 #include
-
-#include
 #endif

 #include

diff --git a/torch/lib/c10d/ProcessGroupNCCL.cpp b/torch/lib/c10d/ProcessGroupNCCL.cpp
index 9e64fe7fae..a3ede7b1cc 100644
--- a/torch/lib/c10d/ProcessGroupNCCL.cpp
+++ b/torch/lib/c10d/ProcessGroupNCCL.cpp
@@ -10,7 +10,6 @@
 #include
 #include
-#include

 namespace c10d {

@@ -103,7 +102,7 @@ bool ProcessGroupNCCL::WorkNCCL::finishedGPUExecution() const {
     // Checking the work's corresponding CUDA events' status
     auto ret = cudaEventQuery(cudaEvents_[i]);
     if (ret != cudaSuccess && ret != cudaErrorNotReady) {
-      C10D_CUDA_CHECK(ret);
+      AT_CUDA_CHECK(ret);
     }
     if (ret == cudaErrorNotReady) {
       return false;

diff --git a/torch/lib/c10d/private/CUDAUtils.hpp b/torch/lib/c10d/private/CUDAUtils.hpp
deleted file mode 100644
index 6b31263add..0000000000
--- a/torch/lib/c10d/private/CUDAUtils.hpp
+++ /dev/null
@@ -1,23 +0,0 @@
-#pragma once
-
-#include
-#include
-
-#include
-#include
-
-// TODO: Use AT_CHECK or similar here
-#define C10D_CUDA_CHECK(condition)        \
-  do {                                    \
-    cudaError_t error = (condition);      \
-    if (error != cudaSuccess) {           \
-      std::stringstream ss;               \
-      ss << "Error at: ";                 \
-      ss << __FILE__;                     \
-      ss << ":";                          \
-      ss << __LINE__;                     \
-      ss << ": ";                         \
-      ss << cudaGetErrorString(error);    \
-      throw std::runtime_error(ss.str()); \
-    }                                     \
-  } while (0)

diff --git a/torch/lib/c10d/test/CUDATest.cu b/torch/lib/c10d/test/CUDATest.cu
index ef8a2780c6..b6efe6fceb 100644
--- a/torch/lib/c10d/test/CUDATest.cu
+++ b/torch/lib/c10d/test/CUDATest.cu
@@ -1,6 +1,5 @@
 #include "CUDATest.hpp"
-
-#include
+#include

 namespace c10d {
 namespace test {
@@ -22,7 +21,7 @@ void cudaSleep(at::cuda::CUDAStream& stream, uint64_t clocks) {

 int cudaNumDevices() {
   int n = 0;
-  C10D_CUDA_CHECK(cudaGetDeviceCount(&n));
+  AT_CUDA_CHECK(cudaGetDeviceCount(&n));
   return n;
 }

diff --git a/torch/lib/c10d/test/CUDATest.hpp b/torch/lib/c10d/test/CUDATest.hpp
index d2275c128e..5e02e317fb 100644
--- a/torch/lib/c10d/test/CUDATest.hpp
+++ b/torch/lib/c10d/test/CUDATest.hpp
@@ -1,8 +1,5 @@
 #pragma once

-#include
-#include
-
 #include

 namespace c10d {

diff --git a/torch/lib/c10d/test/ProcessGroupGlooAsyncTest.cpp b/torch/lib/c10d/test/ProcessGroupGlooAsyncTest.cpp
index 5c4912706c..10b7ab1c10 100644
--- a/torch/lib/c10d/test/ProcessGroupGlooAsyncTest.cpp
+++ b/torch/lib/c10d/test/ProcessGroupGlooAsyncTest.cpp
@@ -4,7 +4,6 @@
 #include
 #include
-#include
 #include
 #include

diff --git a/torch/lib/c10d/test/ProcessGroupNCCLTest.cpp b/torch/lib/c10d/test/ProcessGroupNCCLTest.cpp
index 4f0f0c33f7..20ae4dda56 100644
--- a/torch/lib/c10d/test/ProcessGroupNCCLTest.cpp
+++ b/torch/lib/c10d/test/ProcessGroupNCCLTest.cpp
@@ -2,7 +2,6 @@
 #include
 #include
-#include
 #include
 #include
--
cgit v1.2.3
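For context, a minimal sketch (not part of the patch) of what a call site looks like after this change: the locally defined C10D_CUDA_CHECK macro from the deleted CUDAUtils.hpp is replaced by ATen's AT_CUDA_CHECK, which likewise throws when a CUDA call returns anything other than cudaSuccess. The header path <ATen/cuda/Exceptions.h> and the helper name cudaNumDevicesSketch are assumptions for illustration; the actual include targets were stripped from this rendering of the patch.

// Sketch only: mirrors the pattern used in ProcessGroupNCCL.cpp and CUDATest.cu above.
// Assumption: AT_CUDA_CHECK is provided by <ATen/cuda/Exceptions.h>.
#include <ATen/cuda/Exceptions.h>
#include <cuda_runtime.h>

// Hypothetical helper modeled on cudaNumDevices() in CUDATest.cu.
int cudaNumDevicesSketch() {
  int n = 0;
  // Before this patch: C10D_CUDA_CHECK(cudaGetDeviceCount(&n)) threw a
  // std::runtime_error built from __FILE__, __LINE__, and cudaGetErrorString().
  // After this patch the same call site uses ATen's checker instead:
  AT_CUDA_CHECK(cudaGetDeviceCount(&n));
  return n;
}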