author | Zach DeVito <zdevito@fb.com> | 2017-06-20 16:49:13 -0700
committer | Edward Z. Yang <ezyang@mit.edu> | 2017-11-02 19:53:36 -0400
commit | 7a5987123fe8b758a45fd83cf9fc6407ae44ce95
tree | 1ce8a996e32868f11e50dfcefa179934c85f5893 /aten
parent | 2c2648ea385e68389ad6a38ac2c3d0a442915042
rename TensorLib -> ATen
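For context, the user-visible effect of this rename shows up in the updated test aten/src/ATen/test/atest.cpp further down in the diff: the umbrella header moves from "TensorLib/TensorLib.h" to "ATen/ATen.h", the namespace moves from tlib to at, and the API itself is unchanged. A minimal sketch of a caller after this commit (assuming the ATen library is built and on the include/link path):

```cpp
// Minimal sketch of an ATen caller after the rename, mirroring the updated
// aten/src/ATen/test/atest.cpp in the diff below.
#include "ATen/ATen.h"   // previously: #include "TensorLib/TensorLib.h"
#include <iostream>

using namespace at;      // previously: using namespace tlib;

int main() {
  auto foo = CPU(kFloat).rand({12, 6});   // same API, only the namespace changed
  std::cout << foo.sizes() << "\n";       // IntList printing comes from ATen/Formatting.h
  return 0;
}
```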
Diffstat (limited to 'aten')
-rw-r--r-- | aten/CMakeLists.txt | 6
-rw-r--r-- | aten/src/ATen/ATen.h | 11
-rw-r--r-- | aten/src/ATen/ArrayRef.h (renamed from aten/src/aten/ArrayRef.h) | 6
-rw-r--r-- | aten/src/ATen/CMakeLists.txt (renamed from aten/src/aten/CMakeLists.txt) | 28
-rw-r--r-- | aten/src/ATen/CPUGenerator.cpp (renamed from aten/src/aten/CPUGenerator.cpp) | 6
-rw-r--r-- | aten/src/ATen/CUDAGenerator.cpp (renamed from aten/src/aten/CUDAGenerator.cpp) | 10
-rw-r--r-- | aten/src/ATen/Context.cpp (renamed from aten/src/aten/Context.cpp) | 14
-rw-r--r-- | aten/src/ATen/Context.h (renamed from aten/src/aten/Context.h) | 8
-rw-r--r-- | aten/src/ATen/Formatting.cpp (renamed from aten/src/aten/Formatting.cpp) | 8
-rw-r--r-- | aten/src/ATen/Formatting.h (renamed from aten/src/aten/Formatting.h) | 4
-rw-r--r-- | aten/src/ATen/Generator.h (renamed from aten/src/aten/Generator.h) | 4
-rw-r--r-- | aten/src/ATen/HalfConvert.h (renamed from aten/src/aten/HalfConvert.h) | 2
-rw-r--r-- | aten/src/ATen/Local.cwrap (renamed from aten/src/aten/Local.cwrap) | 0
-rw-r--r-- | aten/src/ATen/Scalar.cpp (renamed from aten/src/aten/Scalar.cpp) | 6
-rw-r--r-- | aten/src/ATen/Scalar.h (renamed from aten/src/aten/Scalar.h) | 26
-rw-r--r-- | aten/src/ATen/Storage.h (renamed from aten/src/aten/Storage.h) | 8
-rw-r--r-- | aten/src/ATen/THLongStorageView.h (renamed from aten/src/aten/THLongStorageView.h) | 2
-rw-r--r-- | aten/src/ATen/TensorAccessor.h (renamed from aten/src/aten/TensorAccessor.h) | 4
-rw-r--r-- | aten/src/ATen/TensorImpl.h (renamed from aten/src/aten/TensorImpl.h) | 6
-rw-r--r-- | aten/src/ATen/TensorOperators.h (renamed from aten/src/aten/TensorOperators.h) | 19
-rw-r--r-- | aten/src/ATen/Utils.cpp (renamed from aten/src/aten/Utils.cpp) | 6
-rw-r--r-- | aten/src/ATen/Utils.h (renamed from aten/src/aten/Utils.h) | 8
-rw-r--r-- | aten/src/ATen/code_template.py (renamed from aten/src/aten/code_template.py) | 0
-rw-r--r-- | aten/src/ATen/common_with_cwrap.py (renamed from aten/src/aten/common_with_cwrap.py) | 4
-rw-r--r-- | aten/src/ATen/copy_wrapper.py (renamed from aten/src/aten/copy_wrapper.py) | 10
-rw-r--r-- | aten/src/ATen/cwrap_parser.py (renamed from aten/src/aten/cwrap_parser.py) | 0
-rw-r--r-- | aten/src/ATen/dispatch_macros.py (renamed from aten/src/aten/dispatch_macros.py) | 2
-rw-r--r-- | aten/src/ATen/extract_cwrap.py (renamed from aten/src/aten/extract_cwrap.py) | 0
-rw-r--r-- | aten/src/ATen/function_wrapper.py (renamed from aten/src/aten/function_wrapper.py) | 2
-rw-r--r-- | aten/src/ATen/gen.py (renamed from aten/src/aten/gen.py) | 10
-rw-r--r-- | aten/src/ATen/nn_parse.py (renamed from aten/src/aten/nn_parse.py) | 0
-rw-r--r-- | aten/src/ATen/preprocess_declarations.py (renamed from aten/src/aten/preprocess_declarations.py) | 0
-rw-r--r-- | aten/src/ATen/scratch.py (renamed from aten/src/aten/scratch.py) | 0
-rw-r--r-- | aten/src/ATen/templates/Functions.h (renamed from aten/src/aten/templates/Functions.h) | 12
-rw-r--r-- | aten/src/ATen/templates/GeneratorDerived.h (renamed from aten/src/aten/templates/GeneratorDerived.h) | 4
-rw-r--r-- | aten/src/ATen/templates/StorageDerived.cpp (renamed from aten/src/aten/templates/StorageDerived.cpp) | 10
-rw-r--r-- | aten/src/ATen/templates/StorageDerived.h (renamed from aten/src/aten/templates/StorageDerived.h) | 6
-rw-r--r-- | aten/src/ATen/templates/Tensor.h (renamed from aten/src/aten/templates/Tensor.h) | 22
-rw-r--r-- | aten/src/ATen/templates/TensorDerived.cpp (renamed from aten/src/aten/templates/TensorDerived.cpp) | 6
-rw-r--r-- | aten/src/ATen/templates/TensorDerived.h (renamed from aten/src/aten/templates/TensorDerived.h) | 8
-rw-r--r-- | aten/src/ATen/templates/Type.cpp (renamed from aten/src/aten/templates/Type.cpp) | 6
-rw-r--r-- | aten/src/ATen/templates/Type.h (renamed from aten/src/aten/templates/Type.h) | 12
-rw-r--r-- | aten/src/ATen/templates/TypeDerived.cpp (renamed from aten/src/aten/templates/TypeDerived.cpp) | 20
-rw-r--r-- | aten/src/ATen/templates/TypeDerived.h (renamed from aten/src/aten/templates/TypeDerived.h) | 8
-rw-r--r-- | aten/src/ATen/test/CMakeLists.txt (renamed from aten/src/aten/test/CMakeLists.txt) | 4
-rw-r--r-- | aten/src/ATen/test/atest.cpp (renamed from aten/src/aten/test/atest.cpp) | 4
-rw-r--r-- | aten/src/ATen/test/basic.cpp (renamed from aten/src/aten/test/basic.cpp) | 6
-rw-r--r-- | aten/src/ATen/test/scalar_test.cpp (renamed from aten/src/aten/test/scalar_test.cpp) | 16
-rw-r--r-- | aten/src/aten/TensorLib.h | 11
-rw-r--r-- | aten/src/data/BatchDataset.cc | 6
-rw-r--r-- | aten/src/data/BatchDataset.h | 4
-rw-r--r-- | aten/src/data/CMakeLists.txt | 2
-rw-r--r-- | aten/src/data/ConcatDataset.cc | 2
-rw-r--r-- | aten/src/data/ConcatDataset.h | 2
-rw-r--r-- | aten/src/data/Dataset.cc | 2
-rw-r--r-- | aten/src/data/Dataset.h | 6
-rw-r--r-- | aten/src/data/MergeDataset.cc | 2
-rw-r--r-- | aten/src/data/MergeDataset.h | 2
-rw-r--r-- | aten/src/data/ResampleDataset.cc | 4
-rw-r--r-- | aten/src/data/ResampleDataset.h | 6
-rw-r--r-- | aten/src/data/ShuffleDataset.cc | 2
-rw-r--r-- | aten/src/data/TensorDataset.cc | 4
-rw-r--r-- | aten/src/data/TensorDataset.h | 8
-rw-r--r-- | aten/src/data/TransformDataset.cc | 6
-rw-r--r-- | aten/src/data/TransformDataset.h | 4
-rw-r--r-- | aten/src/data/test/basic.cc | 2
-rw-r--r-- | aten/src/meter/APMeter.cc | 2
-rw-r--r-- | aten/src/meter/APMeter.h | 2
-rw-r--r-- | aten/src/meter/AUCMeter.cc | 4
-rw-r--r-- | aten/src/meter/AUCMeter.h | 2
-rw-r--r-- | aten/src/meter/CMakeLists.txt | 2
-rw-r--r-- | aten/src/meter/ClassErrorMeter.cc | 4
-rw-r--r-- | aten/src/meter/ClassErrorMeter.h | 2
-rw-r--r-- | aten/src/meter/MAPMeter.cc | 2
-rw-r--r-- | aten/src/meter/MAPMeter.h | 2
-rw-r--r-- | aten/src/meter/MSEMeter.cc | 2
-rw-r--r-- | aten/src/meter/MSEMeter.h | 2
-rw-r--r-- | aten/src/meter/Meter.h | 4
-rw-r--r-- | aten/src/meter/test/basic.cc | 2
79 files changed, 236 insertions, 235 deletions
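The diffstat also shows that the in-tree consumers under aten/src/data and aten/src/meter only needed to switch the include path, the tlib::/at:: prefix, and the CMake link target. As a hedged illustration (the header and struct below are hypothetical; the typedef is taken from aten/src/data/Dataset.h in the diff), a consumer header after the rename looks like this:

```cpp
// Hypothetical consumer header; only the include path and the at:: prefix
// differ from the pre-rename version (TensorLib/TensorLib.h and tlib::Tensor).
#ifndef EXAMPLE_FIELDS_H
#define EXAMPLE_FIELDS_H

#include "ATen/ATen.h"          // was: #include "TensorLib/TensorLib.h"
#include <map>
#include <string>

typedef std::map<std::string, at::Tensor> Fields;   // was: tlib::Tensor

struct ExampleRecord {
  Fields fields;                                     // named tensors for one sample
  at::Tensor & get(const std::string & key) { return fields[key]; }
};

#endif
```

The raw unified diff follows below.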
diff --git a/aten/CMakeLists.txt b/aten/CMakeLists.txt index 6bc37e7dee..4eb2e2481a 100644 --- a/aten/CMakeLists.txt +++ b/aten/CMakeLists.txt @@ -48,7 +48,7 @@ endif() set(cwrap_files ${CMAKE_SOURCE_DIR}/tools/Declarations.cwrap - ${CMAKE_SOURCE_DIR}/src/aten/Local.cwrap + ${CMAKE_SOURCE_DIR}/src/ATen/Local.cwrap ${CMAKE_SOURCE_DIR}/lib/THNN/generic/THNN.h ${CMAKE_SOURCE_DIR}/lib/THCUNN/generic/THCUNN.h ) @@ -57,10 +57,10 @@ include_directories( ${CMAKE_SOURCE_DIR}/lib/THNN ${CMAKE_SOURCE_DIR}/lib/THCUNN) -add_subdirectory(src/aten) +add_subdirectory(src/ATen) include_directories( ${CMAKE_SOURCE_DIR}/src -${CMAKE_BINARY_DIR}/src/aten) +${CMAKE_BINARY_DIR}/src/ATen) add_subdirectory(src/data) add_subdirectory(src/meter) diff --git a/aten/src/ATen/ATen.h b/aten/src/ATen/ATen.h new file mode 100644 index 0000000000..12ebe86930 --- /dev/null +++ b/aten/src/ATen/ATen.h @@ -0,0 +1,11 @@ +#pragma once + +#include "ATen/Scalar.h" +#include "ATen/Type.h" +#include "ATen/Generator.h" +#include "ATen/Context.h" +#include "ATen/Storage.h" +#include "ATen/Tensor.h" +#include "ATen/Functions.h" +#include "ATen/Formatting.h" +#include "ATen/TensorOperators.h" diff --git a/aten/src/aten/ArrayRef.h b/aten/src/ATen/ArrayRef.h index 043eba4d5d..a20716102c 100644 --- a/aten/src/aten/ArrayRef.h +++ b/aten/src/ATen/ArrayRef.h @@ -7,7 +7,7 @@ // //===----------------------------------------------------------------------===// -// TensorLib: modified from llvm::ArrayRef. +// ATen: modified from llvm::ArrayRef. // removed llvm-specific functionality // removed some implicit const -> non-const conversions that rely on // complicated std::enable_if meta-programming @@ -18,7 +18,7 @@ #include <array> #include <vector> -namespace tlib { +namespace at { /// ArrayRef - Represent a constant reference to an array (0 or more elements /// consecutively in memory), i.e. a start pointer and a length. It allows /// various APIs to take consecutive elements easily and conveniently. @@ -172,4 +172,4 @@ namespace tlib { /// @} }; -} // end namespace tlib +} // end namespace at diff --git a/aten/src/aten/CMakeLists.txt b/aten/src/ATen/CMakeLists.txt index 5c0984388c..3f21a45016 100644 --- a/aten/src/aten/CMakeLists.txt +++ b/aten/src/ATen/CMakeLists.txt @@ -55,7 +55,7 @@ IF(NO_CUDA) MESSAGE(STATUS "ignoring CUDA") SET(CUDA_FLAG -n) ELSE() - ADD_DEFINITIONS(-DTENSORLIB_CUDA_ENABLED) + ADD_DEFINITIONS(-DAT_CUDA_ENABLED) FIND_PACKAGE(CUDA 5.5) IF(CUDA_FOUND) INCLUDE_DIRECTORIES(${CUDA_INCLUDE_DIRS}) @@ -146,7 +146,7 @@ endif() FILE(GLOB_RECURSE all_templates "templates/*") -FILE(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/TensorLib) +FILE(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/ATen) ADD_CUSTOM_COMMAND(OUTPUT ${generated_cpp} COMMAND python ${CMAKE_CURRENT_SOURCE_DIR}/gen.py ${CUDA_FLAG} -s ${CMAKE_CURRENT_SOURCE_DIR} ${cwrap_files} @@ -158,40 +158,40 @@ filter_list(generated_h generated_cpp "\\.h$") INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/..) 
# so the build can find the generated header files INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) -ADD_LIBRARY(TensorLib SHARED ${all_cpp}) -SET_TARGET_PROPERTIES(TensorLib PROPERTIES VERSION 1 SOVERSION 1) +ADD_LIBRARY(ATen SHARED ${all_cpp}) +SET_TARGET_PROPERTIES(ATen PROPERTIES VERSION 1 SOVERSION 1) if(NOT ${CMAKE_VERSION} VERSION_LESS "3.1") - SET_PROPERTY(TARGET TensorLib PROPERTY CXX_STANDARD 11) + SET_PROPERTY(TARGET ATen PROPERTY CXX_STANDARD 11) endif(NOT ${CMAKE_VERSION} VERSION_LESS "3.1") -TARGET_LINK_LIBRARIES(TensorLib ${TH_LIBRARIES} ${THNN_LIBRARIES}) +TARGET_LINK_LIBRARIES(ATen ${TH_LIBRARIES} ${THNN_LIBRARIES}) IF(CUDA_FOUND) - TARGET_LINK_LIBRARIES(TensorLib ${THC_LIBRARIES} ${THCUNN_LIBRARIES}) - TARGET_LINK_LIBRARIES(TensorLib ${CUDA_LIBRARIES}) + TARGET_LINK_LIBRARIES(ATen ${THC_LIBRARIES} ${THCUNN_LIBRARIES}) + TARGET_LINK_LIBRARIES(ATen ${CUDA_LIBRARIES}) ENDIF() -INSTALL(TARGETS TensorLib +INSTALL(TARGETS ATen RUNTIME DESTINATION "${TENSOR_LIB_INSTALL_BIN_DIR}" LIBRARY DESTINATION "${TENSOR_LIB_INSTALL_LIB_DIR}" ARCHIVE DESTINATION "${TENSOR_LIB_INSTALL_LIB_DIR}") # ADD_EXECUTABLE(scalar_test test/scalar_test.cpp) -# TARGET_LINK_LIBRARIES(scalar_test TensorLib) +# TARGET_LINK_LIBRARIES(scalar_test ATen) # TARGET_LINK_LIBRARIES(scalar_test ${CUDA_LIBRARIES}) # ADD_EXECUTABLE(basic test/basic.cpp) -# TARGET_LINK_LIBRARIES(basic TensorLib) +# TARGET_LINK_LIBRARIES(basic ATen) # TARGET_LINK_LIBRARIES(basic ${CUDA_LIBRARIES}) add_executable(atest test/atest.cpp) -target_link_libraries(atest TensorLib) +target_link_libraries(atest ATen) target_link_libraries(atest ${CUDA_LIBRARIES}) FOREACH(HEADER ${base_h}) - INSTALL(FILES ${HEADER} DESTINATION ${TENSOR_LIB_INSTALL_INCLUDE_DIR}/TensorLib) + INSTALL(FILES ${HEADER} DESTINATION ${TENSOR_LIB_INSTALL_INCLUDE_DIR}/ATen) ENDFOREACH() FOREACH(HEADER ${generated_h}) INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/${HEADER} - DESTINATION ${TENSOR_LIB_INSTALL_INCLUDE_DIR}/TensorLib) + DESTINATION ${TENSOR_LIB_INSTALL_INCLUDE_DIR}/ATen) ENDFOREACH() diff --git a/aten/src/aten/CPUGenerator.cpp b/aten/src/ATen/CPUGenerator.cpp index 9bea365057..3572ffdd86 100644 --- a/aten/src/aten/CPUGenerator.cpp +++ b/aten/src/ATen/CPUGenerator.cpp @@ -1,9 +1,9 @@ -#include "TensorLib/CPUGenerator.h" +#include "ATen/CPUGenerator.h" #define const_generator_cast(generator) \ dynamic_cast<const CPUGenerator&>(generator) -namespace tlib { +namespace at { CPUGenerator::CPUGenerator(Context * context_) : context(context_), generator(THGenerator_new()) @@ -33,4 +33,4 @@ CPUGenerator& CPUGenerator::manualSeed(unsigned long seed) { return *this; } -} // namespace tlib +} // namespace at diff --git a/aten/src/aten/CUDAGenerator.cpp b/aten/src/ATen/CUDAGenerator.cpp index 05af8938ce..7fe2738bb2 100644 --- a/aten/src/aten/CUDAGenerator.cpp +++ b/aten/src/ATen/CUDAGenerator.cpp @@ -1,13 +1,13 @@ -#ifdef TENSORLIB_CUDA_ENABLED +#ifdef AT_CUDA_ENABLED -#include "TensorLib/CUDAGenerator.h" -#include "TensorLib/Context.h" +#include "ATen/CUDAGenerator.h" +#include "ATen/Context.h" #include <stdexcept> #define const_generator_cast(generator) \ dynamic_cast<const CUDAGenerator&>(generator) -namespace tlib { +namespace at { CUDAGenerator::CUDAGenerator(Context * context_) : context(context_) @@ -41,4 +41,4 @@ CUDAGenerator& CUDAGenerator::manualSeed(unsigned long seed) { } } // namespace thpp -#endif //TENSORLIB_CUDA_ENABLED +#endif //AT_CUDA_ENABLED diff --git a/aten/src/aten/Context.cpp b/aten/src/ATen/Context.cpp index 90056a01fd..4070f7ded1 100644 --- 
a/aten/src/aten/Context.cpp +++ b/aten/src/ATen/Context.cpp @@ -3,13 +3,13 @@ #include <mutex> #include <sstream> -#ifdef TENSORLIB_CUDA_ENABLED +#ifdef AT_CUDA_ENABLED #include "THC/THC.h" -#include "TensorLib/CUDAGenerator.h" +#include "ATen/CUDAGenerator.h" #endif -#include "TensorLib/CPUGenerator.h" +#include "ATen/CPUGenerator.h" -namespace tlib { +namespace at { static inline void errorHandler(const char * msg, void * data) { throw std::runtime_error(msg); @@ -25,7 +25,7 @@ Context::Context() { THSetDefaultErrorHandler(errorHandler,nullptr); THSetDefaultArgErrorHandler(argErrorHandler,nullptr); -#ifdef TENSORLIB_CUDA_ENABLED +#ifdef AT_CUDA_ENABLED thc_state = THCState_alloc(); THCState_setDeviceAllocator(thc_state, THCCachingAllocator_get()); thc_state->cudaHostAllocator = &THCCachingHostAllocator; @@ -41,7 +41,7 @@ Context::Context() { } Context::~Context() { -#ifdef TENSORLIB_CUDA_ENABLED +#ifdef AT_CUDA_ENABLED THCState_free(thc_state); #endif } @@ -52,7 +52,7 @@ Context & globalContext() { } bool Context::hasCUDA() const { -#ifdef TENSORLIB_CUDA_ENABLED +#ifdef AT_CUDA_ENABLED return true; #else return false; diff --git a/aten/src/aten/Context.h b/aten/src/ATen/Context.h index e9f02a494b..371181b9c6 100644 --- a/aten/src/aten/Context.h +++ b/aten/src/ATen/Context.h @@ -1,13 +1,13 @@ #pragma once #include <memory> -#include "TensorLib/Generator.h" -#include "TensorLib/Type.h" -#include "TensorLib/Utils.h" +#include "ATen/Generator.h" +#include "ATen/Type.h" +#include "ATen/Utils.h" class THCState; -namespace tlib { +namespace at { class Context { public: diff --git a/aten/src/aten/Formatting.cpp b/aten/src/ATen/Formatting.cpp index 4af05f110f..14e7e72638 100644 --- a/aten/src/aten/Formatting.cpp +++ b/aten/src/ATen/Formatting.cpp @@ -1,6 +1,6 @@ -#include "TensorLib/Formatting.h" -#include "TensorLib/Tensor.h" -#include "TensorLib/Context.h" +#include "ATen/Formatting.h" +#include "ATen/Tensor.h" +#include "ATen/Context.h" #include <cmath> #include <iostream> @@ -16,7 +16,7 @@ inline std::ios_base& defaultfloat(std::ios_base& __base) } #endif -namespace tlib { +namespace at { std::ostream& operator<<(std::ostream & out, IntList list) { int i = 0; diff --git a/aten/src/aten/Formatting.h b/aten/src/ATen/Formatting.h index 896c72f86e..28b1a58036 100644 --- a/aten/src/aten/Formatting.h +++ b/aten/src/ATen/Formatting.h @@ -1,9 +1,9 @@ #pragma once #include <iostream> -#include "TensorLib/Type.h" +#include "ATen/Type.h" -namespace tlib { +namespace at { std::ostream& operator<<(std::ostream & out, IntList list); diff --git a/aten/src/aten/Generator.h b/aten/src/ATen/Generator.h index 3e94644da0..a99da0e406 100644 --- a/aten/src/aten/Generator.h +++ b/aten/src/ATen/Generator.h @@ -1,6 +1,6 @@ #pragma once -namespace tlib { +namespace at { struct Generator { Generator() {}; @@ -15,4 +15,4 @@ struct Generator { virtual Generator& manualSeed(unsigned long seed) = 0; }; -} // namespace tlib +} // namespace at diff --git a/aten/src/aten/HalfConvert.h b/aten/src/ATen/HalfConvert.h index 38c2e5a13d..6e272a43b8 100644 --- a/aten/src/aten/HalfConvert.h +++ b/aten/src/ATen/HalfConvert.h @@ -1,6 +1,6 @@ #pragma once -namespace tlib { +namespace at { template<typename To, typename From> static inline To HalfFix(From h) { diff --git a/aten/src/aten/Local.cwrap b/aten/src/ATen/Local.cwrap index 805148509f..805148509f 100644 --- a/aten/src/aten/Local.cwrap +++ b/aten/src/ATen/Local.cwrap diff --git a/aten/src/aten/Scalar.cpp b/aten/src/ATen/Scalar.cpp index c78e161dde..4904f48cb4 100644 --- 
a/aten/src/aten/Scalar.cpp +++ b/aten/src/ATen/Scalar.cpp @@ -1,7 +1,7 @@ -#include "TensorLib/Scalar.h" +#include "ATen/Scalar.h" #include <TH/TH.h> -namespace tlib { +namespace at { template<> Half convert(double f) { float t = static_cast<float>(f); @@ -22,7 +22,7 @@ template<> int64_t convert(Half f) { } -#ifdef TENSORLIB_CUDA_ENABLED +#ifdef AT_CUDA_ENABLED template<> half convert(double d) { return half { convert<Half,double>(d).x }; } diff --git a/aten/src/aten/Scalar.h b/aten/src/ATen/Scalar.h index 1382fcb1cd..b31711902c 100644 --- a/aten/src/aten/Scalar.h +++ b/aten/src/ATen/Scalar.h @@ -3,33 +3,33 @@ #include<stdint.h> #include <stdexcept> #include <string> -#include "TensorLib/HalfConvert.h" +#include "ATen/HalfConvert.h" -#ifdef TENSORLIB_CUDA_ENABLED +#ifdef AT_CUDA_ENABLED #include <cuda_runtime.h> #include <cuda_fp16.h> #endif #if defined(__GNUC__) -#define TLIB_ALIGN(n) __attribute__((aligned(n))) +#define AT_ALIGN(n) __attribute__((aligned(n))) #elif defined(_WIN32) -#define TLIB_ALIGN(n) __declspec(align(n)) +#define AT_ALIGN(n) __declspec(align(n)) #else -#define TLIB_ALIGN(n) +#define AT_ALIGN(n) #endif -namespace tlib { +namespace at { template<typename To, typename From> To convert(From f) { return static_cast<To>(f); } -typedef struct TLIB_ALIGN(2) { +typedef struct AT_ALIGN(2) { unsigned short x; -#ifdef TENSORLIB_CUDA_ENABLED +#ifdef AT_CUDA_ENABLED operator half() { return half { x }; } #endif operator double(); @@ -43,11 +43,11 @@ template<> int64_t convert(Half f); inline Half::operator double() { return convert<double,Half>(*this); } -#ifdef TENSORLIB_CUDA_ENABLED +#ifdef AT_CUDA_ENABLED template<> half convert(double d); #endif -#define TLIB_FORALL_SCALAR_TYPES(_) \ +#define AT_FORALL_SCALAR_TYPES(_) \ _(uint8_t,Byte,i) \ _(int8_t,Char,i) \ _(double,Double,d) \ @@ -65,9 +65,9 @@ public: v . member = convert<decltype(v.member),type>(vv); \ } - TLIB_FORALL_SCALAR_TYPES(DEFINE_IMPLICIT_CTOR) + AT_FORALL_SCALAR_TYPES(DEFINE_IMPLICIT_CTOR) -#ifdef TENSORLIB_CUDA_ENABLED +#ifdef AT_CUDA_ENABLED Scalar(half vv) : tag(Tag::HAS_d) { v.d = convert<double,Half>(Half{vv.x}); @@ -93,7 +93,7 @@ public: } \ } - TLIB_FORALL_SCALAR_TYPES(DEFINE_ACCESSOR) + AT_FORALL_SCALAR_TYPES(DEFINE_ACCESSOR) #undef DEFINE_ACCESSOR bool isFloatingPoint() { diff --git a/aten/src/aten/Storage.h b/aten/src/ATen/Storage.h index f668608aab..7ea9ab55f2 100644 --- a/aten/src/aten/Storage.h +++ b/aten/src/ATen/Storage.h @@ -1,9 +1,9 @@ #pragma once -#include "TensorLib/Scalar.h" -#include "TensorLib/Type.h" +#include "ATen/Scalar.h" +#include "ATen/Type.h" -namespace tlib { +namespace at { struct Storage { Storage() {} @@ -32,4 +32,4 @@ struct Storage { }; -} // namespace tlib +} // namespace at diff --git a/aten/src/aten/THLongStorageView.h b/aten/src/ATen/THLongStorageView.h index cca71d36d0..cee9bf1dca 100644 --- a/aten/src/aten/THLongStorageView.h +++ b/aten/src/ATen/THLongStorageView.h @@ -2,7 +2,7 @@ #include "TH/TH.h" -namespace tlib { +namespace at { // make a fake storage out of a size, pointer pair... 
class THLongStorageView { diff --git a/aten/src/aten/TensorAccessor.h b/aten/src/ATen/TensorAccessor.h index bb1fd45a40..6d9cd83595 100644 --- a/aten/src/aten/TensorAccessor.h +++ b/aten/src/ATen/TensorAccessor.h @@ -2,9 +2,9 @@ #include <cstddef> #include <stdint.h> -#include "TensorLib/Type.h" +#include "ATen/Type.h" -namespace tlib { +namespace at { template<typename T, size_t N> diff --git a/aten/src/aten/TensorImpl.h b/aten/src/ATen/TensorImpl.h index 992562136b..6c4f86bdd9 100644 --- a/aten/src/aten/TensorImpl.h +++ b/aten/src/ATen/TensorImpl.h @@ -2,10 +2,10 @@ #include <atomic> -#include "TensorLib/Scalar.h" -#include "TensorLib/Type.h" +#include "ATen/Scalar.h" +#include "ATen/Type.h" #include <iostream> -namespace tlib { +namespace at { class Type; struct TensorImpl { diff --git a/aten/src/aten/TensorOperators.h b/aten/src/ATen/TensorOperators.h index 86e9ad9fac..27cfd67a33 100644 --- a/aten/src/aten/TensorOperators.h +++ b/aten/src/ATen/TensorOperators.h @@ -1,12 +1,12 @@ #pragma once -#include "TensorLib/Tensor.h" -#include "TensorLib/Scalar.h" +#include "ATen/Tensor.h" +#include "ATen/Scalar.h" -namespace tlib { +namespace at { -#define TLIB_FORALL_BINARY_OPS(_) \ +#define AT_FORALL_BINARY_OPS(_) \ _(+,x.add(y), y.add(x)) \ _(*,x.mul(y), y.mul(x)) \ _(-,x.sub(y), y.type().tensor().resize_(y.sizes()).fill_(x).sub_(y)) \ @@ -20,18 +20,19 @@ _(==,x.eq(y), y.eq(x)) \ _(!=,x.ne(y), y.ne(x)) #define DEFINE_OPERATOR(op,body,reverse_scalar_body) \ -Tensor operator op(const Tensor & x, const Tensor & y) { \ +static inline Tensor operator op(const Tensor & x, const Tensor & y) { \ return body; \ } \ -Tensor operator op(const Tensor & x, Scalar y) { \ +static inline Tensor operator op(const Tensor & x, Scalar y) { \ return body; \ } \ -Tensor operator op(const Scalar & x, Tensor y) { \ +static inline Tensor operator op(Scalar x, const Tensor & y) { \ return reverse_scalar_body; \ } -TLIB_FORALL_BINARY_OPS(DEFINE_OPERATOR) + +AT_FORALL_BINARY_OPS(DEFINE_OPERATOR) #undef DEFINE_OPERATOR -#undef TLIB_FORALL_BINARY_OPS +#undef AT_FORALL_BINARY_OPS } diff --git a/aten/src/aten/Utils.cpp b/aten/src/ATen/Utils.cpp index 65106d0f95..fbd84eb25f 100644 --- a/aten/src/aten/Utils.cpp +++ b/aten/src/ATen/Utils.cpp @@ -1,9 +1,9 @@ -#include "TensorLib/Utils.h" +#include "ATen/Utils.h" #include <stdarg.h> #include <stdexcept> #include <typeinfo> -namespace tlib { +namespace at { void runtime_error(const char *format, ...) { static const size_t ERROR_BUF_SIZE = 1024; @@ -17,4 +17,4 @@ void runtime_error(const char *format, ...) { throw std::runtime_error(error_buf); } -} // tlib +} // at diff --git a/aten/src/aten/Utils.h b/aten/src/ATen/Utils.h index 9865a4a5e2..319e23a541 100644 --- a/aten/src/aten/Utils.h +++ b/aten/src/ATen/Utils.h @@ -1,10 +1,10 @@ #pragma once -#include "TensorLib/CPUGenerator.h" +#include "ATen/CPUGenerator.h" -namespace tlib { +namespace at { -#define TLIB_ASSERT(cond, ...) if (! (cond) ) { tlib::runtime_error(__VA_ARGS__); } +#define AT_ASSERT(cond, ...) if (! 
(cond) ) { at::runtime_error(__VA_ARGS__); } [[noreturn]] void runtime_error(const char *format, ...); @@ -25,4 +25,4 @@ static inline CPUGenerator * check_generator(Generator* expr) { runtime_error("Expected a 'CPUGenerator' but found 'CUDAGenerator'"); } -} // tlib +} // at diff --git a/aten/src/aten/code_template.py b/aten/src/ATen/code_template.py index f5563440c3..f5563440c3 100644 --- a/aten/src/aten/code_template.py +++ b/aten/src/ATen/code_template.py diff --git a/aten/src/aten/common_with_cwrap.py b/aten/src/ATen/common_with_cwrap.py index f451f4c93d..e0038949fc 100644 --- a/aten/src/aten/common_with_cwrap.py +++ b/aten/src/ATen/common_with_cwrap.py @@ -1,4 +1,4 @@ -# this code should be common among cwrap and TensorLib preprocessing +# this code should be common among cwrap and ATen preprocessing # for now, I have put it in one place but right now is copied out of cwrap from copy import deepcopy @@ -43,7 +43,7 @@ def set_declaration_defaults(declaration): for option in declaration['options']: for k, v in declaration.items(): # TODO(zach): why does cwrap not propagate 'name'? I need it - # propagaged for TensorLib + # propagaged for ATen if k != 'options': option.setdefault(k, v) diff --git a/aten/src/aten/copy_wrapper.py b/aten/src/ATen/copy_wrapper.py index 1c5fe9cdbe..bec87ad937 100644 --- a/aten/src/aten/copy_wrapper.py +++ b/aten/src/ATen/copy_wrapper.py @@ -2,14 +2,14 @@ from code_template import CodeTemplate FILE = CodeTemplate("""\ #include "TH/TH.h" -#ifdef TENSORLIB_CUDA_ENABLED +#ifdef AT_CUDA_ENABLED #undef THNN_ #include "THC/THC.h" #endif -#include "TensorLib/Utils.h" +#include "ATen/Utils.h" ${copy_includes} -namespace tlib { +namespace at { ${copy_functions} @@ -62,8 +62,8 @@ def create(all_types): } for dst_type in all_types: top_env['copy_includes'].append( - '#include "TensorLib/{}.h"'.format(dst_type['Type'])) + '#include "ATen/{}.h"'.format(dst_type['Type'])) top_env['copy_includes'].append( - '#include "TensorLib/{}.h"'.format(dst_type['Tensor'])) + '#include "ATen/{}.h"'.format(dst_type['Tensor'])) top_env['copy_functions'].append(create_one(dst_type, all_types)) return FILE.substitute(top_env) diff --git a/aten/src/aten/cwrap_parser.py b/aten/src/ATen/cwrap_parser.py index f020dd0304..f020dd0304 100644 --- a/aten/src/aten/cwrap_parser.py +++ b/aten/src/ATen/cwrap_parser.py diff --git a/aten/src/aten/dispatch_macros.py b/aten/src/ATen/dispatch_macros.py index d3393fd13c..1fb0b45a32 100644 --- a/aten/src/aten/dispatch_macros.py +++ b/aten/src/ATen/dispatch_macros.py @@ -8,7 +8,7 @@ case ${TypeID}: MACRO_TEMPLATE = CodeTemplate("""\ #pragma once -namespace tlib { +namespace at { template<template <typename> class F, typename ... Args> auto dispatch(const Type & the_type, Args&&... args) diff --git a/aten/src/aten/extract_cwrap.py b/aten/src/ATen/extract_cwrap.py index 0fab951ce0..0fab951ce0 100644 --- a/aten/src/aten/extract_cwrap.py +++ b/aten/src/ATen/extract_cwrap.py diff --git a/aten/src/aten/function_wrapper.py b/aten/src/ATen/function_wrapper.py index 13a80c9cb8..7b91944851 100644 --- a/aten/src/aten/function_wrapper.py +++ b/aten/src/ATen/function_wrapper.py @@ -3,7 +3,7 @@ import yaml from code_template import CodeTemplate # temporary things we cannot handle -EXCLUDE_PATTERN = "bernoulli.*|normal.*|exponential.*|random.*" +EXCLUDE_PATTERN = "bernoulli.*|normal.*|exponential.*|random.*|arange.*" # what has to be done to add a Operation ... # 1. 
add virtual dispatch declaration to Type.h and default impl to Type.cpp TYPE_METHOD_DECLARATION = CodeTemplate("""\ diff --git a/aten/src/aten/gen.py b/aten/src/ATen/gen.py index 78d211fe3e..1417a2ad16 100644 --- a/aten/src/aten/gen.py +++ b/aten/src/ATen/gen.py @@ -83,7 +83,7 @@ top_env = { def write(filename, s): - filename = "TensorLib/" + filename + filename = "ATen/" + filename if options.print_dependencies: sys.stderr.write(filename + ";") return @@ -132,14 +132,14 @@ def generate_storage_type_and_tensor(backend, scalar_type, declarations): if scalar_name == "Half": if backend == "CUDA": env['to_th_half'] = 'HalfFix<__half,Half>' - env['to_tlib_half'] = 'HalfFix<Half,__half>' + env['to_at_half'] = 'HalfFix<Half,__half>' env['AS_REAL'] = 'convert<half,double>' else: env['to_th_half'] = 'HalfFix<THHalf,Half>' - env['to_tlib_half'] = 'HalfFix<Half,THHalf>' + env['to_at_half'] = 'HalfFix<Half,THHalf>' else: env['to_th_half'] = '' - env['to_tlib_half'] = '' + env['to_at_half'] = '' declarations, definitions = function_wrapper.create_derived( env, declarations) @@ -160,7 +160,7 @@ def generate_storage_type_and_tensor(backend, scalar_type, declarations): .format(backend, scalar_name, env['Type'])) top_env['type_registrations'].append(type_register) top_env['type_headers'].append( - '#include "TensorLib/{}.h"'.format(env['Type'])) + '#include "ATen/{}.h"'.format(env['Type'])) return env diff --git a/aten/src/aten/nn_parse.py b/aten/src/ATen/nn_parse.py index 9daa058da7..9daa058da7 100644 --- a/aten/src/aten/nn_parse.py +++ b/aten/src/ATen/nn_parse.py diff --git a/aten/src/aten/preprocess_declarations.py b/aten/src/ATen/preprocess_declarations.py index 6e34e9b675..6e34e9b675 100644 --- a/aten/src/aten/preprocess_declarations.py +++ b/aten/src/ATen/preprocess_declarations.py diff --git a/aten/src/aten/scratch.py b/aten/src/ATen/scratch.py index e239c65d0a..e239c65d0a 100644 --- a/aten/src/aten/scratch.py +++ b/aten/src/ATen/scratch.py diff --git a/aten/src/aten/templates/Functions.h b/aten/src/ATen/templates/Functions.h index bc0c9f12e1..f8288bfeb9 100644 --- a/aten/src/aten/templates/Functions.h +++ b/aten/src/ATen/templates/Functions.h @@ -1,14 +1,14 @@ #pragma once -#include "TensorLib/Scalar.h" -#include "TensorLib/Type.h" -#include "TensorLib/Tensor.h" -#include "TensorLib/Storage.h" -#include "TensorLib/Generator.h" +#include "ATen/Scalar.h" +#include "ATen/Type.h" +#include "ATen/Tensor.h" +#include "ATen/Storage.h" +#include "ATen/Generator.h" -namespace tlib { +namespace at { static inline Tensor & copy_out(const Tensor & src, Tensor & dst) { dst.resize_(src.sizes()); diff --git a/aten/src/aten/templates/GeneratorDerived.h b/aten/src/ATen/templates/GeneratorDerived.h index 3188fe34aa..addf95c0c7 100644 --- a/aten/src/aten/templates/GeneratorDerived.h +++ b/aten/src/ATen/templates/GeneratorDerived.h @@ -2,9 +2,9 @@ #include <$header> -#include "TensorLib/Generator.h" +#include "ATen/Generator.h" -namespace tlib { +namespace at { class Context; struct ${name}Generator : public Generator { diff --git a/aten/src/aten/templates/StorageDerived.cpp b/aten/src/ATen/templates/StorageDerived.cpp index 06a7e4c678..31c1801eee 100644 --- a/aten/src/aten/templates/StorageDerived.cpp +++ b/aten/src/ATen/templates/StorageDerived.cpp @@ -1,7 +1,7 @@ -#include "TensorLib/${Storage}.h" -#include "TensorLib/HalfConvert.h" +#include "ATen/${Storage}.h" +#include "ATen/HalfConvert.h" -namespace tlib { +namespace at { ${Storage}::${Storage}(Context* context): storage(${THStorage}_new(${state})), 
context(context) {} @@ -63,13 +63,13 @@ auto ${Storage}::fast_set(std::size_t ind, Scalar value) -> ${Storage}& { auto ${Storage}::get(std::size_t ind) -> Scalar { // static cast to fix long -> int64_t issues - return static_cast<${ScalarType}>(${to_tlib_half}(${THStorage}_get(${state,} storage, ind))); + return static_cast<${ScalarType}>(${to_at_half}(${THStorage}_get(${state,} storage, ind))); } auto ${Storage}::fast_get(std::size_t ind) -> Scalar { if(${isCUDA}) throw std::runtime_error("unsupported operation 'fast_get'"); - return static_cast<${ScalarType}>(${to_tlib_half}(storage->data[ind])); + return static_cast<${ScalarType}>(${to_at_half}(storage->data[ind])); } int ${Storage}::getDevice() const { diff --git a/aten/src/aten/templates/StorageDerived.h b/aten/src/ATen/templates/StorageDerived.h index f543b84956..9cda8364d4 100644 --- a/aten/src/aten/templates/StorageDerived.h +++ b/aten/src/ATen/templates/StorageDerived.h @@ -2,10 +2,10 @@ $th_headers -#include "TensorLib/Storage.h" -#include "TensorLib/Context.h" +#include "ATen/Storage.h" +#include "ATen/Context.h" -namespace tlib { +namespace at { struct ${Storage} : public Storage { public: diff --git a/aten/src/aten/templates/Tensor.h b/aten/src/ATen/templates/Tensor.h index 7a366e26d1..79b1de33a8 100644 --- a/aten/src/aten/templates/Tensor.h +++ b/aten/src/ATen/templates/Tensor.h @@ -1,12 +1,12 @@ #pragma once -#include "TensorLib/Scalar.h" -#include "TensorLib/Type.h" -#include "TensorLib/TensorImpl.h" -#include "TensorLib/Utils.h" -#include "TensorLib/TensorAccessor.h" +#include "ATen/Scalar.h" +#include "ATen/Type.h" +#include "ATen/TensorImpl.h" +#include "ATen/Utils.h" +#include "ATen/TensorAccessor.h" -namespace tlib { +namespace at { class Type; struct Tensor { @@ -104,7 +104,7 @@ struct Tensor { template<typename T, size_t N> TensorAccessor<T,N> accessor() { static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data<T>()"); - TLIB_ASSERT(dim() == N, "expected %d dims but tensor has %d",N,dim()); + AT_ASSERT(dim() == N, "expected %d dims but tensor has %d",N,dim()); return TensorAccessor<T,N>(data<T>(),sizes().data(),strides().data()); } @@ -160,13 +160,13 @@ inline T* Tensor::data() const { #define DEFINE_CAST(T,name,_) \ template<> \ inline T* Tensor::data() const { \ - TLIB_ASSERT(type().scalarType() == ScalarType::name, \ + AT_ASSERT(type().scalarType() == ScalarType::name, \ "expected scalar type % s but found %s", #name, \ - tlib::toString(type().scalarType())); \ + at::toString(type().scalarType())); \ return static_cast<T*>(this->data_ptr()); \ } -TLIB_FORALL_SCALAR_TYPES(DEFINE_CAST) +AT_FORALL_SCALAR_TYPES(DEFINE_CAST) #undef DEFINE_CAST -} //namespace tlib +} //namespace at diff --git a/aten/src/aten/templates/TensorDerived.cpp b/aten/src/ATen/templates/TensorDerived.cpp index f790382d14..5fc0a1c46d 100644 --- a/aten/src/aten/templates/TensorDerived.cpp +++ b/aten/src/ATen/templates/TensorDerived.cpp @@ -1,5 +1,5 @@ -#include "TensorLib/${Tensor}.h" -#include "TensorLib/HalfConvert.h" +#include "ATen/${Tensor}.h" +#include "ATen/HalfConvert.h" //sketch: @@ -36,7 +36,7 @@ Tensor add(Tensor a, Tensor b) { */ -namespace tlib { +namespace at { ${Tensor}::${Tensor}(Context* context) : ${Tensor}(context,${THTensor}_new(${state})) {} diff --git a/aten/src/aten/templates/TensorDerived.h b/aten/src/ATen/templates/TensorDerived.h index 7d9a273b73..ea033b886d 100644 --- a/aten/src/aten/templates/TensorDerived.h +++ b/aten/src/ATen/templates/TensorDerived.h @@ -2,11 +2,11 @@ $th_headers -#include 
"TensorLib/Tensor.h" -#include "TensorLib/TensorImpl.h" -#include "TensorLib/Context.h" +#include "ATen/Tensor.h" +#include "ATen/TensorImpl.h" +#include "ATen/Context.h" -namespace tlib { +namespace at { struct ${Tensor} : public TensorImpl { public: diff --git a/aten/src/aten/templates/Type.cpp b/aten/src/ATen/templates/Type.cpp index 52d7dd7cfa..7cc8d1963f 100644 --- a/aten/src/aten/templates/Type.cpp +++ b/aten/src/ATen/templates/Type.cpp @@ -1,9 +1,9 @@ -#include "TensorLib/Type.h" -#include "TensorLib/Tensor.h" +#include "ATen/Type.h" +#include "ATen/Tensor.h" ${type_headers} -namespace tlib { +namespace at { void Type::registerAll(Context * context) { ${type_registrations} diff --git a/aten/src/aten/templates/Type.h b/aten/src/ATen/templates/Type.h index e928dacd23..a60f4db524 100644 --- a/aten/src/aten/templates/Type.h +++ b/aten/src/ATen/templates/Type.h @@ -2,10 +2,10 @@ #include <memory> -#include "TensorLib/Scalar.h" -#include "TensorLib/ArrayRef.h" +#include "ATen/Scalar.h" +#include "ATen/ArrayRef.h" -namespace tlib { +namespace at { class Context; class Storage; @@ -16,7 +16,7 @@ class Generator; enum class ScalarType { #define DEFINE_ENUM(_1,n,_2) \ n, - TLIB_FORALL_SCALAR_TYPES(DEFINE_ENUM) + AT_FORALL_SCALAR_TYPES(DEFINE_ENUM) #undef DEFINE_ENUM NumOptions }; @@ -41,7 +41,7 @@ static inline const char * toString(Backend b) { #define DEFINE_CONSTANT(_,name,_2) \ constexpr ScalarType k##name = ScalarType::name; -TLIB_FORALL_SCALAR_TYPES(DEFINE_CONSTANT) +AT_FORALL_SCALAR_TYPES(DEFINE_CONSTANT) #undef DEFINE_CONSTANT static inline const char * toString(ScalarType t) { @@ -49,7 +49,7 @@ static inline const char * toString(ScalarType t) { case ScalarType:: name : return #name; switch(t) { - TLIB_FORALL_SCALAR_TYPES(DEFINE_CASE) + AT_FORALL_SCALAR_TYPES(DEFINE_CASE) default: return "UNKNOWN_SCALAR_TYPE"; } diff --git a/aten/src/aten/templates/TypeDerived.cpp b/aten/src/ATen/templates/TypeDerived.cpp index 246fbbddf7..ee091f8420 100644 --- a/aten/src/aten/templates/TypeDerived.cpp +++ b/aten/src/ATen/templates/TypeDerived.cpp @@ -1,15 +1,15 @@ -#include "TensorLib/${Type}.h" -#include "TensorLib/${Storage}.h" -#include "TensorLib/${Tensor}.h" -#include "TensorLib/${Backend}Generator.h" -#include "TensorLib/${Backend}ByteTensor.h" -#include "TensorLib/${Backend}IntTensor.h" -#include "TensorLib/${Backend}LongTensor.h" -#include "TensorLib/Utils.h" -#include "TensorLib/THLongStorageView.h" +#include "ATen/${Type}.h" +#include "ATen/${Storage}.h" +#include "ATen/${Tensor}.h" +#include "ATen/${Backend}Generator.h" +#include "ATen/${Backend}ByteTensor.h" +#include "ATen/${Backend}IntTensor.h" +#include "ATen/${Backend}LongTensor.h" +#include "ATen/Utils.h" +#include "ATen/THLongStorageView.h" #include <iostream> -namespace tlib { +namespace at { ${Type}::${Type}(Context* context) : Type(context) {} diff --git a/aten/src/aten/templates/TypeDerived.h b/aten/src/ATen/templates/TypeDerived.h index 6c8cc38c80..b269999a80 100644 --- a/aten/src/aten/templates/TypeDerived.h +++ b/aten/src/ATen/templates/TypeDerived.h @@ -1,8 +1,8 @@ #pragma once -#include "TensorLib/Type.h" -#include "TensorLib/Context.h" +#include "ATen/Type.h" +#include "ATen/Context.h" -namespace tlib { +namespace at { struct ${Type} : public Type { ${Type}(Context* context); @@ -24,4 +24,4 @@ struct ${Type} : public Type { ${type_derived_method_declarations} }; -} // namespace tlib +} // namespace at diff --git a/aten/src/aten/test/CMakeLists.txt b/aten/src/ATen/test/CMakeLists.txt index 941c564e3b..c9fb47f7dd 100644 --- 
a/aten/src/aten/test/CMakeLists.txt +++ b/aten/src/ATen/test/CMakeLists.txt @@ -1,7 +1,7 @@ ADD_EXECUTABLE(scalar_test scalar_test.cpp) -TARGET_LINK_LIBRARIES(scalar_test TensorLib) +TARGET_LINK_LIBRARIES(scalar_test ATen) TARGET_LINK_LIBRARIES(scalar_test ${CUDA_LIBRARIES}) ADD_EXECUTABLE(basic basic.cpp) -TARGET_LINK_LIBRARIES(scalar_test TensorLib) +TARGET_LINK_LIBRARIES(scalar_test ATen) TARGET_LINK_LIBRARIES(scalar_test ${CUDA_LIBRARIES}) diff --git a/aten/src/aten/test/atest.cpp b/aten/src/ATen/test/atest.cpp index 95e18b2482..d7695f6b5f 100644 --- a/aten/src/aten/test/atest.cpp +++ b/aten/src/ATen/test/atest.cpp @@ -1,8 +1,8 @@ -#include "TensorLib/TensorLib.h" +#include "ATen/ATen.h" #include<iostream> using namespace std; -using namespace tlib; +using namespace at; int main() { auto foo = CPU(kFloat).rand({12,6}); diff --git a/aten/src/aten/test/basic.cpp b/aten/src/ATen/test/basic.cpp index 4023a82999..38cfe51464 100644 --- a/aten/src/aten/test/basic.cpp +++ b/aten/src/ATen/test/basic.cpp @@ -1,10 +1,10 @@ -#include "TensorLib/TensorLib.h" +#include "ATen/ATen.h" #include <iostream> #include <chrono> -using namespace tlib; +using namespace at; static void test(Type & type) { { @@ -137,7 +137,7 @@ static void test(Type & type) { { //TODO(zach): 0-dim //std::cout << "abs(value):" << std::endl; - //std::cout << tlib::abs(-3); + //std::cout << at::abs(-3); } //TODO(zach): operator overloads diff --git a/aten/src/aten/test/scalar_test.cpp b/aten/src/ATen/test/scalar_test.cpp index 9d4118fae4..3fd57399cc 100644 --- a/aten/src/aten/test/scalar_test.cpp +++ b/aten/src/ATen/test/scalar_test.cpp @@ -1,9 +1,9 @@ #include <iostream> -#include "TensorLib/TensorLib.h" -#include "TensorLib/Dispatch.h" +#include "ATen/ATen.h" +#include "ATen/Dispatch.h" using std::cout; -using namespace tlib; +using namespace at; constexpr auto Float = ScalarType::Float; constexpr auto Double = ScalarType::Float; @@ -30,9 +30,9 @@ int main() { Half h = bar.toHalf(); Scalar h2 = h; cout << "H2: " << h2.toDouble() << " " << what.toFloat() << " " << bar.toDouble() << " " << what.isIntegral() << "\n"; - Generator & gen = tlib::globalContext().defaultGenerator(Backend::CPU); + Generator & gen = at::globalContext().defaultGenerator(Backend::CPU); cout << gen.seed() << "\n"; - auto && C = tlib::globalContext(); + auto && C = at::globalContext(); auto & CUDAFloat = C.getType(Backend::CPU,ScalarType::Float); auto t2 = CUDAFloat.zeros({4,4}); cout << &t2 << "\n"; @@ -50,14 +50,14 @@ int main() { cout << t.sizes() << " " << t.strides() << "\n"; auto output = CPU(Float).ones(3); - tlib::Abs_updateOutput(t,output); + at::Abs_updateOutput(t,output); Type & T = CPU(Float); Tensor x = T.randn({1,10}); Tensor prev_h = T.randn({1,20}); Tensor W_h = T.randn({20,20}); Tensor W_x = T.randn({20,10}); - Tensor i2h = tlib::mm(W_x, x.t()); - Tensor h2h = tlib::mm(W_h, prev_h.t()); + Tensor i2h = at::mm(W_x, x.t()); + Tensor h2h = at::mm(W_h, prev_h.t()); Tensor next_h = i2h.add(h2h); next_h = next_h.tanh(); diff --git a/aten/src/aten/TensorLib.h b/aten/src/aten/TensorLib.h deleted file mode 100644 index d3397f08f5..0000000000 --- a/aten/src/aten/TensorLib.h +++ /dev/null @@ -1,11 +0,0 @@ -#pragma once - -#include "TensorLib/Scalar.h" -#include "TensorLib/Type.h" -#include "TensorLib/Generator.h" -#include "TensorLib/Context.h" -#include "TensorLib/Storage.h" -#include "TensorLib/Tensor.h" -#include "TensorLib/Functions.h" -#include "TensorLib/Formatting.h" -#include "TensorLib/TensorOperators.h" diff --git 
a/aten/src/data/BatchDataset.cc b/aten/src/data/BatchDataset.cc index 02e5944409..77250a5680 100644 --- a/aten/src/data/BatchDataset.cc +++ b/aten/src/data/BatchDataset.cc @@ -1,11 +1,11 @@ #include "BatchDataset.h" #include "Dataset.h" -#include "TensorLib/TensorLib.h" +#include "ATen/ATen.h" #include <vector> #include <cassert> #include <math.h> -using namespace tlib; +using namespace at; BatchDataset::BatchDataset(Dataset& dataset, uint64_t batchsize) { BatchDataset(dataset, batchsize, true); @@ -18,7 +18,7 @@ BatchDataset::BatchDataset(Dataset& dataset, uint64_t batchsize, bool fullbatche fullbatches_ = fullbatches; } -void BatchDataset::getField(uint64_t idx, std::string& fieldkey, tlib::Tensor& field) { +void BatchDataset::getField(uint64_t idx, std::string& fieldkey, at::Tensor& field) { // assertions: assert(idx < size()); diff --git a/aten/src/data/BatchDataset.h b/aten/src/data/BatchDataset.h index 435da4c3e2..8363bb2d43 100644 --- a/aten/src/data/BatchDataset.h +++ b/aten/src/data/BatchDataset.h @@ -2,14 +2,14 @@ #define XT_BATCH_DATASET_H #include "Dataset.h" -#include "TensorLib/TensorLib.h" +#include "ATen/ATen.h" class BatchDataset : public Dataset { public: BatchDataset(Dataset& dataset, uint64_t batchsize); BatchDataset(Dataset& dataset, uint64_t batchsize, bool fullbatches); - virtual void getField(uint64_t idx, std::string& fieldkey, tlib::Tensor& field); + virtual void getField(uint64_t idx, std::string& fieldkey, at::Tensor& field); virtual uint64_t size(); private: Dataset* dataset_; diff --git a/aten/src/data/CMakeLists.txt b/aten/src/data/CMakeLists.txt index 0e70f36b96..43d2659b66 100644 --- a/aten/src/data/CMakeLists.txt +++ b/aten/src/data/CMakeLists.txt @@ -23,7 +23,7 @@ set(src ) add_library(xtdata SHARED ${src}) -target_link_libraries(xtdata TensorLib) +target_link_libraries(xtdata ATen) include_directories(.) 
# add_executable(test-data test/basic.cc) diff --git a/aten/src/data/ConcatDataset.cc b/aten/src/data/ConcatDataset.cc index 5330068f79..89ae3c1662 100644 --- a/aten/src/data/ConcatDataset.cc +++ b/aten/src/data/ConcatDataset.cc @@ -3,7 +3,7 @@ #include <vector> #include <cassert> -using namespace tlib; +using namespace at; ConcatDataset::ConcatDataset(std::vector<Dataset*>& datasets) { datasets_ = &datasets; diff --git a/aten/src/data/ConcatDataset.h b/aten/src/data/ConcatDataset.h index 19b4fb79c0..bb4a105a73 100644 --- a/aten/src/data/ConcatDataset.h +++ b/aten/src/data/ConcatDataset.h @@ -7,7 +7,7 @@ class ConcatDataset : public Dataset { public: ConcatDataset(std::vector<Dataset*>& datasets); - virtual void getField(uint64_t idx, std::string& fieldkey, tlib::Tensor &field); + virtual void getField(uint64_t idx, std::string& fieldkey, at::Tensor &field); virtual uint64_t size(); private: uint64_t binarySearch(uint64_t idx); diff --git a/aten/src/data/Dataset.cc b/aten/src/data/Dataset.cc index 637d93509c..e210be5863 100644 --- a/aten/src/data/Dataset.cc +++ b/aten/src/data/Dataset.cc @@ -1,7 +1,7 @@ #include "Dataset.h" #include <cassert> -typedef std::map<std::string, tlib::Tensor> Fields; +typedef std::map<std::string, at::Tensor> Fields; void Dataset::get(int64_t idx, Fields& fields) { for(auto& field : fields) { diff --git a/aten/src/data/Dataset.h b/aten/src/data/Dataset.h index 987070647e..1774811d1a 100644 --- a/aten/src/data/Dataset.h +++ b/aten/src/data/Dataset.h @@ -1,18 +1,18 @@ #ifndef XT_DATASET_H #define XT_DATASET_H -#include "TensorLib/TensorLib.h" +#include "ATen/ATen.h" #include <string> #include <map> #include <set> -typedef std::map<std::string, tlib::Tensor> Fields; +typedef std::map<std::string, at::Tensor> Fields; class Dataset { std::set<std::string> fieldkeys_; public: virtual uint64_t size() = 0; // pure virtual function - virtual void getField(uint64_t idx, std::string& fieldkey, tlib::Tensor& field) = 0; + virtual void getField(uint64_t idx, std::string& fieldkey, at::Tensor& field) = 0; virtual bool hasField(std::string& fieldkey); virtual std::set<std::string>& fieldKeys(); virtual void addFieldKey(std::string& fieldkey); diff --git a/aten/src/data/MergeDataset.cc b/aten/src/data/MergeDataset.cc index 871e24a0d7..a3cab619c3 100644 --- a/aten/src/data/MergeDataset.cc +++ b/aten/src/data/MergeDataset.cc @@ -1,7 +1,7 @@ #include "MergeDataset.h" #include <cassert> -using namespace tlib; +using namespace at; MergeDataset::MergeDataset(std::vector<Dataset*>& datasets) { datasets_ = &datasets; diff --git a/aten/src/data/MergeDataset.h b/aten/src/data/MergeDataset.h index a1f75e2622..220b5ad6aa 100644 --- a/aten/src/data/MergeDataset.h +++ b/aten/src/data/MergeDataset.h @@ -9,7 +9,7 @@ class MergeDataset : public Dataset { public: MergeDataset(std::vector<Dataset*>& datasets); - virtual void getField(uint64_t idx, std::string& fieldkey, tlib::Tensor& field); + virtual void getField(uint64_t idx, std::string& fieldkey, at::Tensor& field); virtual uint64_t size(); private: std::vector<Dataset*>* datasets_; diff --git a/aten/src/data/ResampleDataset.cc b/aten/src/data/ResampleDataset.cc index 6276abd36c..f5d6cce418 100644 --- a/aten/src/data/ResampleDataset.cc +++ b/aten/src/data/ResampleDataset.cc @@ -3,7 +3,7 @@ #include <vector> #include <cassert> -using namespace tlib; +using namespace at; ResampleDataset::ResampleDataset(Dataset& dataset) { dataset_ = &dataset; @@ -28,7 +28,7 @@ ResampleDataset::ResampleDataset(Dataset& dataset, std::function<uint64_t(uint64 
resample(); } -void ResampleDataset::getField(uint64_t idx, std::string& fieldkey, tlib::Tensor& field) { +void ResampleDataset::getField(uint64_t idx, std::string& fieldkey, at::Tensor& field) { assert(idx < size()); assert(hasField(fieldkey)); dataset_->getField(perm_[idx], fieldkey, field); diff --git a/aten/src/data/ResampleDataset.h b/aten/src/data/ResampleDataset.h index 481ad9c997..1d34e69dad 100644 --- a/aten/src/data/ResampleDataset.h +++ b/aten/src/data/ResampleDataset.h @@ -4,7 +4,7 @@ #include <string> #include <vector> #include <functional> -#include "TensorLib/TensorLib.h" +#include "ATen/ATen.h" #include "Dataset.h" class ResampleDataset : public Dataset @@ -13,7 +13,7 @@ public: ResampleDataset(Dataset& dataset); ResampleDataset(Dataset& dataset, std::vector<uint64_t>& perm); ResampleDataset(Dataset& dataset, std::function<uint64_t(uint64_t)> perm); - virtual void getField(uint64_t idx, std::string& fieldkey, tlib::Tensor& field); + virtual void getField(uint64_t idx, std::string& fieldkey, at::Tensor& field); virtual uint64_t size(); virtual void resample(); protected: @@ -22,7 +22,7 @@ protected: private: Dataset* dataset_; std::function<uint64_t(uint64_t)> permfunc_; - std::vector<tlib::Tensor> fields_; + std::vector<at::Tensor> fields_; }; #endif diff --git a/aten/src/data/ShuffleDataset.cc b/aten/src/data/ShuffleDataset.cc index c7cad61c4a..f9ce91242c 100644 --- a/aten/src/data/ShuffleDataset.cc +++ b/aten/src/data/ShuffleDataset.cc @@ -2,7 +2,7 @@ #include "Dataset.h" #include <algorithm> -using namespace tlib; +using namespace at; ShuffleDataset::ShuffleDataset(Dataset& dataset) : ResampleDataset(dataset) { resample(); diff --git a/aten/src/data/TensorDataset.cc b/aten/src/data/TensorDataset.cc index ade8c8fafe..fc31e6eb6a 100644 --- a/aten/src/data/TensorDataset.cc +++ b/aten/src/data/TensorDataset.cc @@ -1,8 +1,8 @@ #include "TensorDataset.h" -#include "TensorLib/TensorLib.h" +#include "ATen/ATen.h" #include <cassert> -using namespace tlib; +using namespace at; TensorDataset::TensorDataset(Tensor& t, std::string& fieldkey) { t_ = t; diff --git a/aten/src/data/TensorDataset.h b/aten/src/data/TensorDataset.h index 53039ae85d..ac8b8c975d 100644 --- a/aten/src/data/TensorDataset.h +++ b/aten/src/data/TensorDataset.h @@ -2,17 +2,17 @@ #define XT_TENSOR_DATASET_H #include "Dataset.h" -#include "TensorLib/TensorLib.h" +#include "ATen/ATen.h" #include <string> class TensorDataset : public Dataset { public: - TensorDataset(tlib::Tensor& t, std::string& fieldkey); - virtual void getField(uint64_t idx, std::string& fieldkey, tlib::Tensor& field); + TensorDataset(at::Tensor& t, std::string& fieldkey); + virtual void getField(uint64_t idx, std::string& fieldkey, at::Tensor& field); virtual uint64_t size(); private: - tlib::Tensor t_; + at::Tensor t_; std::string fieldkey_; }; diff --git a/aten/src/data/TransformDataset.cc b/aten/src/data/TransformDataset.cc index 5f2436deff..80bdc7e84e 100644 --- a/aten/src/data/TransformDataset.cc +++ b/aten/src/data/TransformDataset.cc @@ -1,9 +1,9 @@ #include "TransformDataset.h" -#include "TensorLib/TensorLib.h" -#include "TensorLib/TensorLib.h" +#include "ATen/ATen.h" +#include "ATen/ATen.h" #include <cassert> -using namespace tlib; +using namespace at; TransformDataset::TransformDataset(Dataset& dataset, std::string& fieldkey, std::function<Tensor(Tensor)>& transform) { assert(hasField(fieldkey)); diff --git a/aten/src/data/TransformDataset.h b/aten/src/data/TransformDataset.h index 6dc6e59da2..b611ba52d6 100644 --- 
a/aten/src/data/TransformDataset.h +++ b/aten/src/data/TransformDataset.h @@ -2,11 +2,11 @@ #define XT_TRANSFORM_DATASET_H #include "Dataset.h" -#include "TensorLib/TensorLib.h" +#include "ATen/ATen.h" #include <functional> #include <string> -using namespace tlib; +using namespace at; class TransformDataset : public Dataset { diff --git a/aten/src/data/test/basic.cc b/aten/src/data/test/basic.cc index f5d98edd61..a52b817fdd 100644 --- a/aten/src/data/test/basic.cc +++ b/aten/src/data/test/basic.cc @@ -3,7 +3,7 @@ #include "TensorDataset.h" #include <iostream> -using namespace tlib; +using namespace at; int main() { diff --git a/aten/src/meter/APMeter.cc b/aten/src/meter/APMeter.cc index 4be57f2edc..2233b3b584 100644 --- a/aten/src/meter/APMeter.cc +++ b/aten/src/meter/APMeter.cc @@ -2,7 +2,7 @@ #include <math.h> #include <cassert> -using namespace tlib; +using namespace at; APMeter::APMeter() { reset(); diff --git a/aten/src/meter/APMeter.h b/aten/src/meter/APMeter.h index 12faa9219e..e93281d420 100644 --- a/aten/src/meter/APMeter.h +++ b/aten/src/meter/APMeter.h @@ -2,7 +2,7 @@ #define XT_AP_METER_H #include "Meter.h" -#include "TensorLib/TensorLib.h" +#include "ATen/ATen.h" class APMeter : public Meter { diff --git a/aten/src/meter/AUCMeter.cc b/aten/src/meter/AUCMeter.cc index b133405daa..f045a40390 100644 --- a/aten/src/meter/AUCMeter.cc +++ b/aten/src/meter/AUCMeter.cc @@ -2,7 +2,7 @@ #include "APMeter.h" #include <cassert> -using namespace tlib; +using namespace at; AUCMeter::AUCMeter() { reset(); @@ -44,7 +44,7 @@ void AUCMeter::value(Tensor& val) { } } tpr.div_(sum(targets)); - fpr.div_(sum(tlib::add(mul(targets, -1.), 1.))); + fpr.div_(sum(at::add(mul(targets, -1.), 1.))); /** local auc = torch.cmul( diff --git a/aten/src/meter/AUCMeter.h b/aten/src/meter/AUCMeter.h index 2db468d909..524284c647 100644 --- a/aten/src/meter/AUCMeter.h +++ b/aten/src/meter/AUCMeter.h @@ -3,7 +3,7 @@ #include "Meter.h" #include "APMeter.h" -#include "TensorLib/TensorLib.h" +#include "ATen/ATen.h" class AUCMeter : public Meter { diff --git a/aten/src/meter/CMakeLists.txt b/aten/src/meter/CMakeLists.txt index d994caa842..8fef567609 100644 --- a/aten/src/meter/CMakeLists.txt +++ b/aten/src/meter/CMakeLists.txt @@ -18,7 +18,7 @@ set(src ) add_library(xtmeter SHARED ${src}) -target_link_libraries(xtmeter TensorLib) +target_link_libraries(xtmeter ATen) add_executable(test-meter test/basic.cc ${BACKWARD_ENABLE}) # add_backward(test-meter) diff --git a/aten/src/meter/ClassErrorMeter.cc b/aten/src/meter/ClassErrorMeter.cc index e70f9da97c..1412b42f6c 100644 --- a/aten/src/meter/ClassErrorMeter.cc +++ b/aten/src/meter/ClassErrorMeter.cc @@ -1,8 +1,8 @@ #include "ClassErrorMeter.h" -#include "TensorLib/TensorLib.h" +#include "ATen/ATen.h" #include <cassert> -using namespace tlib; +using namespace at; ClassErrorMeter::ClassErrorMeter() { ClassErrorMeter(1); diff --git a/aten/src/meter/ClassErrorMeter.h b/aten/src/meter/ClassErrorMeter.h index a81663cca5..c61cd13d97 100644 --- a/aten/src/meter/ClassErrorMeter.h +++ b/aten/src/meter/ClassErrorMeter.h @@ -2,7 +2,7 @@ #define XT_CLASS_ERROR_METER_H #include "Meter.h" -#include "TensorLib/TensorLib.h" +#include "ATen/ATen.h" class ClassErrorMeter : public Meter { diff --git a/aten/src/meter/MAPMeter.cc b/aten/src/meter/MAPMeter.cc index 807fe640d2..a73c541d5e 100644 --- a/aten/src/meter/MAPMeter.cc +++ b/aten/src/meter/MAPMeter.cc @@ -1,6 +1,6 @@ #include "MAPMeter.h" -using namespace tlib; +using namespace at; MAPMeter::MAPMeter() { reset(); diff --git 
a/aten/src/meter/MAPMeter.h b/aten/src/meter/MAPMeter.h index 898929b524..3ec8e94a31 100644 --- a/aten/src/meter/MAPMeter.h +++ b/aten/src/meter/MAPMeter.h @@ -3,7 +3,7 @@ #include "Meter.h" #include "APMeter.h" -#include "TensorLib/TensorLib.h" +#include "ATen/ATen.h" class MAPMeter : public Meter { diff --git a/aten/src/meter/MSEMeter.cc b/aten/src/meter/MSEMeter.cc index 195233bb56..012b6b8b0c 100644 --- a/aten/src/meter/MSEMeter.cc +++ b/aten/src/meter/MSEMeter.cc @@ -2,7 +2,7 @@ #include <cassert> #include <math.h> -using namespace tlib; +using namespace at; MSEMeter::MSEMeter() { reset(); diff --git a/aten/src/meter/MSEMeter.h b/aten/src/meter/MSEMeter.h index 867ccef41b..19c92a8c23 100644 --- a/aten/src/meter/MSEMeter.h +++ b/aten/src/meter/MSEMeter.h @@ -2,7 +2,7 @@ #define XT_MSE_METER_H #include "Meter.h" -#include "TensorLib/TensorLib.h" +#include "ATen/ATen.h" class MSEMeter : public Meter { diff --git a/aten/src/meter/Meter.h b/aten/src/meter/Meter.h index 9de23fd242..55ba34e6f0 100644 --- a/aten/src/meter/Meter.h +++ b/aten/src/meter/Meter.h @@ -1,9 +1,9 @@ #ifndef XT_METER_H #define XT_METER_H -#include "TensorLib/TensorLib.h" +#include "ATen/ATen.h" -using namespace tlib; +using namespace at; class Meter { diff --git a/aten/src/meter/test/basic.cc b/aten/src/meter/test/basic.cc index 7f3523bc76..8a39cabeda 100644 --- a/aten/src/meter/test/basic.cc +++ b/aten/src/meter/test/basic.cc @@ -1,7 +1,7 @@ #include "APMeter.h" #include <iostream> -using namespace tlib; +using namespace at; int main() { |
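A recurring pattern in the renamed templates above (Scalar.h, templates/Type.h, templates/Tensor.h) is the X-macro AT_FORALL_SCALAR_TYPES, formerly TLIB_FORALL_SCALAR_TYPES, which stamps out per-scalar-type enums, constants, and accessors. Below is a standalone sketch of the same pattern with a deliberately truncated scalar list (illustrative names, not the library's actual macro):

```cpp
// Standalone sketch of the X-macro pattern used by AT_FORALL_SCALAR_TYPES in
// this diff. The scalar list is truncated for illustration; see Scalar.h in
// the diff for the real one.
#include <cstdint>
#include <iostream>

#define EXAMPLE_FORALL_SCALAR_TYPES(_) \
  _(uint8_t, Byte, i)                  \
  _(double, Double, d)                 \
  _(float, Float, d)

// Expand the list into an enum, the same way the Type.h template builds ScalarType.
enum class ScalarType {
#define DEFINE_ENUM(_1, n, _2) n,
  EXAMPLE_FORALL_SCALAR_TYPES(DEFINE_ENUM)
#undef DEFINE_ENUM
  NumOptions
};

// Expand it again into a name table, mirroring toString() in the Type.h template.
static const char * toString(ScalarType t) {
#define DEFINE_CASE(_, name, _2) \
  case ScalarType::name:         \
    return #name;
  switch (t) {
    EXAMPLE_FORALL_SCALAR_TYPES(DEFINE_CASE)
    default:
      return "UNKNOWN_SCALAR_TYPE";
  }
#undef DEFINE_CASE
}

int main() {
  std::cout << toString(ScalarType::Float) << "\n";  // prints "Float"
  return 0;
}
```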