| author | Peter Goldsborough <psag@fb.com> | 2018-09-05 19:41:28 -0700 |
|---|---|---|
| committer | Facebook Github Bot <facebook-github-bot@users.noreply.github.com> | 2018-09-05 19:55:50 -0700 |
| commit | dccd0f2de69396de99f45cf6792c684b5a095c49 (patch) | |
| tree | 20c300726e1f4582e38a3071178ca394ae6a17d2 | |
| parent | 83a1ab213652ffab27bd68569d6d28fead86e7bd (diff) | |
| download | pytorch-dccd0f2de69396de99f45cf6792c684b5a095c49.tar.gz pytorch-dccd0f2de69396de99f45cf6792c684b5a095c49.tar.bz2 pytorch-dccd0f2de69396de99f45cf6792c684b5a095c49.zip | |
Bag of clang tidy fixes for torch/csrc/ and torch/csrc/autograd (#11050)
Summary:
Linted `torch/csrc/` and `torch/csrc/autograd` (both non-recursively).
Fixed things like:
- Replace `typedef` with `using`
- Use `.empty()` instead of comparing with an empty string or checking `.size() == 0`
- Use range-based for loops instead of old-style index loops (`modernize-*`)
- Remove redundant `virtual` from declarations that are already marked `override`
- Replace `stdint.h` with `cstdint`
- Replace `return Type(x, y)` with `return {x, y}`
- Use boolean literals (`true`/`false`) instead of integers (`1`/`0`)
- More ... (a representative before/after sketch of a few of these is shown below)
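
To make the categories above concrete, here is a small self-contained sketch. It is not code from this PR; `IndexList`, `Pair`, `Base`, and `Derived` are made-up names for illustration, with the older pattern kept in `// Before:` comments next to the clang-tidy-preferred form:

```cpp
#include <cstdint>   // instead of <stdint.h>
#include <iostream>
#include <vector>

// Before: typedef std::vector<std::int64_t> IndexList;
using IndexList = std::vector<std::int64_t>;

struct Pair {
  std::int64_t x;
  std::int64_t y;
};

Pair makePair(std::int64_t x, std::int64_t y) {
  // Before: return Pair{x, y};
  return {x, y};
}

struct Base {
  virtual ~Base() = default;
  virtual void run() const { std::cout << "Base::run\n"; }
};

struct Derived : public Base {
  // Before: virtual void run() const override { ... }
  // `override` already implies `virtual`, so the extra keyword is redundant.
  void run() const override { std::cout << "Derived::run\n"; }
};

int main() {
  IndexList indices = {1, 2, 3};

  // Before: if (indices.size() == 0) return 1;
  if (indices.empty()) {
    return 1;
  }

  // Before: for (size_t i = 0; i < indices.size(); ++i) ...
  for (const auto index : indices) {
    std::cout << index << "\n";
  }

  // Before: bool enabled = 1;
  bool enabled = true;

  Derived d;
  const Base& b = d;
  b.run();

  Pair p = makePair(4, 5);
  std::cout << std::boolalpha << enabled << " " << p.x << " " << p.y << "\n";
  return 0;
}
```

The `.empty()` and range-for forms state intent directly and avoid signed/unsigned and off-by-one pitfalls, which is why the `modernize-*` and readability checks prefer them.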
cc: ezyang, apaszke, cpuhrsch
Pull Request resolved: https://github.com/pytorch/pytorch/pull/11050
Differential Revision: D9597505
Pulled By: goldsborough
fbshipit-source-id: cb0fb4793ade885a8dbf4b10484487b84c64c7f2
31 files changed, 138 insertions(+), 122 deletions(-)
diff --git a/.clang-tidy b/.clang-tidy
index 3cb409f928..aa2bf56185 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -4,6 +4,7 @@ Checks: '
  *
  ,clang-analyzer-*
  ,modernize-*
+ ,-cert-dcl21-cpp
  ,-cert-err58-cpp
  ,-cert-err60-cpp
  ,-clang-diagnostic-*
@@ -12,10 +13,12 @@ Checks: '
  ,-cppcoreguidelines-pro-bounds-constant-array-index
  ,-cppcoreguidelines-pro-type-member-init
  ,-cppcoreguidelines-pro-type-static-cast-downcast
+ ,-cppcoreguidelines-pro-type-union-access
  ,-cppcoreguidelines-pro-type-vararg
  ,-cppcoreguidelines-special-member-functions
  ,-fuchsia-*
  ,-google-build-using-namespace
+ ,-google-default-arguments
  ,-google-explicit-constructor
  ,-google-readability-braces-around-statements
  ,-google-readability-namespace-comments
@@ -24,6 +27,7 @@ Checks: '
  ,-google-runtime-references
  ,-hicpp-braces-around-statements
  ,-hicpp-explicit-conversions
+ ,-hicpp-member-init
  ,-hicpp-no-array-decay
  ,-hicpp-signed-bitwise
  ,-hicpp-special-member-functions
diff --git a/aten/src/ATen/function_wrapper.py b/aten/src/ATen/function_wrapper.py
index 9c6c52891e..ac7b212199 100644
--- a/aten/src/ATen/function_wrapper.py
+++ b/aten/src/ATen/function_wrapper.py
@@ -93,7 +93,7 @@ ${return_type} TypeDefault::${api_name}(${type_method_formals}) const {
     return at::native::${api_name}(${type_method_actuals}, options());
 }
 """)
-# 4. add virtual override to TypeDerived.h
+# 4. add override to TypeDerived.h
 TYPE_DERIVED_DECLARATION = CodeTemplate("""\
 ${return_type} ${method_prefix_derived}${api_name}(${type_method_formals}) const override;
 """)
diff --git a/test/cpp/api/optim.cpp b/test/cpp/api/optim.cpp
index c7047a6c8d..ab278180b1 100644
--- a/test/cpp/api/optim.cpp
+++ b/test/cpp/api/optim.cpp
@@ -35,8 +35,7 @@ bool test_optimizer_xor(Options options) {
   const int64_t kBatchSize = 4;
   const int64_t kMaximumNumberOfEpochs = 3000;

-  auto optimizer = OptimizerClass(std::vector<torch::Tensor>(), options);
-  optimizer.add_parameters(model->parameters());
+  OptimizerClass optimizer(model->parameters(), options);

   float running_loss = 1;
   int epoch = 0;
@@ -152,6 +151,9 @@ TEST_CASE("Optim/BasicInterface") {
     REQUIRE(optimizer.size() == 0);
     optimizer.add_parameters(parameters);
     REQUIRE(optimizer.size() == parameters.size());
+    for (size_t p = 0; p < parameters.size(); ++p) {
+      REQUIRE(optimizer.parameters()[p].allclose(parameters[p]));
+    }
   }
   {
     Linear linear(3, 4);
diff --git a/tools/autograd/gen_variable_type.py b/tools/autograd/gen_variable_type.py
index 49b0944566..05affcbaa6 100644
--- a/tools/autograd/gen_variable_type.py
+++ b/tools/autograd/gen_variable_type.py
@@ -80,7 +80,7 @@ DONT_REQUIRE_DERIVATIVE = {
 }

 METHOD_DECLARATION = CodeTemplate("""\
-virtual ${return_type} ${method_prefix_derived}${api_name}(${type_method_formals}) const override;
+${return_type} ${method_prefix_derived}${api_name}(${type_method_formals}) const override;
 """)

 METHOD_DEFINITION = CodeTemplate("""\
diff --git a/tools/autograd/templates/VariableType.h b/tools/autograd/templates/VariableType.h
index 3b5f9a2eaf..b307a1459d 100644
--- a/tools/autograd/templates/VariableType.h
+++ b/tools/autograd/templates/VariableType.h
@@ -34,30 +34,30 @@ void register_variable_type_for(at::Type* baseType);
 struct TORCH_API VariableType final : public at::TypeDefault {
   VariableType(Context* context, at::Type* baseType);
-  virtual at::ScalarType scalarType() const override;
-  virtual at::Backend backend() const override;
-  virtual at::Allocator* allocator() const override;
-  virtual at::Device getDeviceFromPtr(void * data) const override;
-  virtual Storage storage(bool resizable = false) const override;
-  virtual Storage storage(size_t size, bool resizable = false) const override;
-  virtual Storage storageFromBlob(void * data, int64_t size, const std::function<void(void*)> & deleter) const override;
-  virtual Storage storageWithAllocator(int64_t size, at::Allocator* allocator) const override;
-  virtual std::unique_ptr<at::Generator> generator() const override;
-  virtual const char * toString() const override;
-  virtual at::TypeID ID() const override;
-  virtual size_t elementSizeInBytes() const override;
-  virtual at::Type & toBackend(at::Backend b) const override;
-  virtual at::Type & toScalarType(at::ScalarType s) const override;
-  virtual Storage unsafeStorageFromTH(void * th_pointer, bool retain) const override;
-  virtual at::Tensor unsafeTensorFromTH(void * th_pointer, bool retain) const override;
+  at::ScalarType scalarType() const override;
+  at::Backend backend() const override;
+  at::Allocator* allocator() const override;
+  at::Device getDeviceFromPtr(void * data) const override;
+  Storage storage(bool resizable = false) const override;
+  Storage storage(size_t size, bool resizable = false) const override;
+  Storage storageFromBlob(void * data, int64_t size, const std::function<void(void*)> & deleter) const override;
+  Storage storageWithAllocator(int64_t size, at::Allocator* allocator) const override;
+  std::unique_ptr<at::Generator> generator() const override;
+  const char * toString() const override;
+  at::TypeID ID() const override;
+  size_t elementSizeInBytes() const override;
+  at::Type & toBackend(at::Backend b) const override;
+  at::Type & toScalarType(at::ScalarType s) const override;
+  Storage unsafeStorageFromTH(void * th_pointer, bool retain) const override;
+  at::Tensor unsafeTensorFromTH(void * th_pointer, bool retain) const override;
   static at::Type* getVariableTypeFromBaseType(const at::Type& baseType);
   static bool isVariableType(const at::Type& type);
   static std::vector<at::Type*> allCUDATypes();
   static std::vector<at::Type*> allCPUTypes();
-  virtual Tensor & s_copy_(Tensor & self, const Tensor & src, bool non_blocking) const override;
-  virtual Tensor & _s_copy_from(const Tensor & self, Tensor & dst, bool non_blocking) const override;
+  Tensor & s_copy_(Tensor & self, const Tensor & src, bool non_blocking) const override;
+  Tensor & _s_copy_from(const Tensor & self, Tensor & dst, bool non_blocking) const override;
   ${type_derived_method_declarations}
 private:
diff --git a/tools/clang_tidy.py b/tools/clang_tidy.py
index 77b101dedf..22c2accb6b 100644
--- a/tools/clang_tidy.py
+++ b/tools/clang_tidy.py
@@ -210,7 +210,7 @@ def parse_options():

 def main():
     options = parse_options()
-    paths = map(normalize_directory_path, options.paths)
+    paths = list(map(normalize_directory_path, options.paths))
     if options.revision:
         files = get_changed_files(options.revision, paths, options.verbose)
     else:
diff --git a/torch/csrc/Size.h b/torch/csrc/Size.h
index 03a4c48a1c..2ce259248a 100644
--- a/torch/csrc/Size.h
+++ b/torch/csrc/Size.h
@@ -2,7 +2,7 @@

 #include "torch/csrc/python_headers.h"
 #include "torch/csrc/autograd/variable.h"
-#include "stdint.h"
+#include "cstdint"

 extern PyTypeObject THPSizeType;
diff --git a/torch/csrc/Types.h b/torch/csrc/Types.h
index 8342c7a94b..f1bc466dc0 100644
--- a/torch/csrc/Types.h
+++ b/torch/csrc/Types.h
@@ -5,7 +5,7 @@
 #include <TH/TH.h>

 #ifndef INT64_MAX
-#include "stdint.h"
+#include "cstdint"
 #endif

 template <typename T> struct THPTypeInfo {};
diff --git a/torch/csrc/api/include/torch/detail/ordered_dict.h b/torch/csrc/api/include/torch/detail/ordered_dict.h
index 1b576922da..cb7cc59a3d 100644
--- a/torch/csrc/api/include/torch/detail/ordered_dict.h
+++ b/torch/csrc/api/include/torch/detail/ordered_dict.h
@@ -72,7 +72,9 @@ class OrderedDict {
   // Move works by default, because you can move-construct vectors of const
   // values..
-  OrderedDict(OrderedDict&& other) = default;
+  OrderedDict(OrderedDict&& other) noexcept(
+      noexcept(std::unordered_map<Key, size_t>()) &&
+      noexcept(std::vector<Item>())) = default;
   OrderedDict& operator=(OrderedDict&& other) = default;

   ~OrderedDict() = default;
diff --git a/torch/csrc/api/include/torch/nn/pimpl.h b/torch/csrc/api/include/torch/nn/pimpl.h
index 334212f688..af83f2e0ed 100644
--- a/torch/csrc/api/include/torch/nn/pimpl.h
+++ b/torch/csrc/api/include/torch/nn/pimpl.h
@@ -155,31 +155,31 @@ class ModuleHolder : torch::detail::ModuleHolderIndicator {
 } // namespace nn
 } // namespace torch

-#define TORCH_ARG(T, name) \
-  auto name(const T& new_##name)->decltype(*this) { \
-    this->name##_ = new_##name; \
-    return *this; \
-  } \
-  auto name(T&& new_##name)->decltype(*this) { \
-    this->name##_ = std::move(new_##name); \
-    return *this; \
-  } \
-  const T& name() const noexcept { \
-    return this->name##_; \
-  } \
-  T name##_
+#define TORCH_ARG(T, name) \
+  auto name(const T& new_##name)->decltype(*this) { /* NOLINT */ \
+    this->name##_ = new_##name; \
+    return *this; \
+  } \
+  auto name(T&& new_##name)->decltype(*this) { /* NOLINT */ \
+    this->name##_ = std::move(new_##name); \
+    return *this; \
+  } \
+  const T& name() const noexcept { /* NOLINT */ \
+    return this->name##_; \
+  } \
+  T name##_ /* NOLINT */

 /// Defines a class `Name` which inherits from `nn::ModuleHolder` to provide a
 /// wrapper over a `std::shared_ptr<Impl>`.
-#define TORCH_MODULE_IMPL(Name, Impl) \
-  class Name : public torch::nn::ModuleHolder<Impl> { \
-   public: \
-    using torch::nn::ModuleHolder<Impl>::ModuleHolder; \
-    Name(const Name&) = default; \
-    Name(Name&&) = default; \
-    Name(Name& other) : Name(static_cast<const Name&>(other)) {} \
-    Name& operator=(const Name&) = default; \
-    Name& operator=(Name&&) = default; \
+#define TORCH_MODULE_IMPL(Name, Impl) \
+  class Name : public torch::nn::ModuleHolder<Impl> { /* NOLINT */ \
+   public: \
+    using torch::nn::ModuleHolder<Impl>::ModuleHolder; \
+    Name(const Name&) = default; /* NOLINT */ \
+    Name(Name&&) = default; /* NOLINT */ \
+    Name(Name& other) : Name(static_cast<const Name&>(other)) {} /* NOLINT */ \
+    Name& operator=(const Name&) = default; /* NOLINT */ \
+    Name& operator=(Name&&) = default; /* NOLINT */ \
   }

 /// Like `TORCH_MODULE_IMPL`, but defaults the `Impl` name to `<Name>Impl`.
diff --git a/torch/csrc/api/include/torch/optim/optimizer.h b/torch/csrc/api/include/torch/optim/optimizer.h
index cb86c84284..feb8ef617f 100644
--- a/torch/csrc/api/include/torch/optim/optimizer.h
+++ b/torch/csrc/api/include/torch/optim/optimizer.h
@@ -31,21 +31,20 @@ class OptimizerBase {
   virtual ~OptimizerBase() = default;

   /// Adds the given vector of parameters to the optimizer's parameter list.
-  /// Override this method if you want to modify the way parameters are added to
-  /// the `Optimizer`.
-  virtual void add_parameters(const std::vector<Tensor>& parameters);
+  void add_parameters(const std::vector<Tensor>& parameters);

   /// Adds the `ParameterCursor`'s parameters to the optimizer's parameter list.
-  /// NOTE: Calls the `vector<Tensor>` overload of `add_parameters` -- override
-  /// that method if you want to modify the behavior of `add_parameters`.
-  virtual void add_parameters(const ParameterCursor& cursor);
+  void add_parameters(const ParameterCursor& cursor);

   /// Zeros out the gradients of all parameters.
   virtual void zero_grad();

-  /// Provides a reference to the parameters this optimizer holds.
+  /// Provides a const reference to the parameters this optimizer holds.
   const std::vector<Tensor>& parameters() const noexcept;

+  /// Provides a reference to the parameters this optimizer holds.
+  std::vector<Tensor>& parameters() noexcept;
+
   /// Returns the number of parameters referenced by the optimizer.
   size_t size() const noexcept;
diff --git a/torch/csrc/api/src/optim/optimizer.cpp b/torch/csrc/api/src/optim/optimizer.cpp
index 9eed489f1c..b57bf67461 100644
--- a/torch/csrc/api/src/optim/optimizer.cpp
+++ b/torch/csrc/api/src/optim/optimizer.cpp
@@ -36,6 +36,14 @@ void OptimizerBase::zero_grad() {
   }
 }

+const std::vector<Tensor>& OptimizerBase::parameters() const noexcept {
+  return parameters_;
+}
+
+std::vector<Tensor>& OptimizerBase::parameters() noexcept {
+  return parameters_;
+}
+
 size_t OptimizerBase::size() const noexcept {
   return parameters_.size();
 }
diff --git a/torch/csrc/autograd/anomaly_mode.cpp b/torch/csrc/autograd/anomaly_mode.cpp
index 7392286671..edf3476b81 100644
--- a/torch/csrc/autograd/anomaly_mode.cpp
+++ b/torch/csrc/autograd/anomaly_mode.cpp
@@ -2,6 +2,6 @@

 namespace torch { namespace autograd {

-bool AnomalyMode::_enabled = 0;
+bool AnomalyMode::_enabled = false;

 }}
diff --git a/torch/csrc/autograd/engine.cpp b/torch/csrc/autograd/engine.cpp
index cb024a0296..a8dc01fa5e 100644
--- a/torch/csrc/autograd/engine.cpp
+++ b/torch/csrc/autograd/engine.cpp
@@ -63,7 +63,7 @@ struct FunctionTask {

   FunctionTask(GraphTask* base, std::shared_ptr<Function> fn, InputBuffer inputs)
     : base(base)
-    , fn(fn)
+    , fn(std::move(fn))
     , inputs(std::move(inputs)) {}
 };
@@ -170,15 +170,10 @@ struct GraphTask {
   }

   GraphTask(bool keep_graph, bool grad_mode)
-    : exception()
-    , has_error(false)
+    : has_error(false)
     , outstanding_tasks(0)
     , keep_graph(keep_graph)
     , grad_mode(grad_mode)
-    , mutex()
-    , not_done()
-    , not_ready()
-    , dependencies()
     , owner(NO_DEVICE) {}
 };
@@ -194,12 +189,12 @@ auto ReadyQueue::push(FunctionTask item) -> void {
 auto ReadyQueue::pop() -> FunctionTask {
   std::unique_lock<std::mutex> lock(mutex);
   not_empty.wait(lock, [this]{ return !heap.empty(); });
+  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
   auto task = std::move(const_cast<FunctionTask&>(heap.top())); heap.pop();
   return task;
 }

-Engine::Engine() : ready_queues() {
-}
+Engine::Engine() = default;

 // This Engine's ReadyQueues and their corresponding threads are leaked here
 Engine::~Engine() = default;
@@ -376,6 +371,7 @@ static variable_list call_function(FunctionTask& task) {
   checkpoint_valid = prev_checkpoint_valid_state;

   if(has_post_hooks){
+    // NOLINTNEXTLINE(bugprone-use-after-move)
     return call_post_hooks(fn, std::move(outputs), std::move(inputs));
   }
   return outputs;
@@ -478,7 +474,7 @@ auto Engine::compute_dependencies(Function* root, GraphTask& task) -> void {
   // Queue contains all nodes that will start propagating gradients.
   // We no longer have to expand functions that don't require grad.
   auto& dependencies = task.dependencies;
-  while (queue.size() > 0) {
+  while (!queue.empty()) {
     auto fn = queue.back(); queue.pop_back();
     for (const auto& edge : fn->next_edges()) {
       if (auto next_ptr = edge.function.get()) {
@@ -513,6 +509,7 @@ auto Engine::execute(const edge_list& roots,
                      const edge_list& outputs) -> variable_list {
   std::call_once(start_threads_flag, &Engine::start_threads, this);

+  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
   validate_outputs(roots, const_cast<variable_list&>(inputs), [](const std::string& msg) {
     return msg;
   });
@@ -559,6 +556,9 @@ auto Engine::execute(const edge_list& roots,
   // more callbacks (or they can be registered from other threads
   // while it's waiting.
   std::unique_lock<std::mutex> cb_lock(post_callbacks_lock);
+  // WARNING: Don't use a range-for loop here because more callbacks may be
+  // added in between callback calls, so iterators may become invalidated.
+  // NOLINTNEXTLINE(modernize-loop-convert)
   for (size_t i = 0; i < final_callbacks.size(); ++i) {
     cb_lock.unlock();
     final_callbacks[i]();
diff --git a/torch/csrc/autograd/engine.h b/torch/csrc/autograd/engine.h
index 94490303cc..0c7a5829da 100644
--- a/torch/csrc/autograd/engine.h
+++ b/torch/csrc/autograd/engine.h
@@ -67,7 +67,7 @@ protected:
 };

 // allow python_engine to override the default engine when it loads
-typedef Engine& (*EngineStub)(void);
+using EngineStub = Engine& (*)();
 TORCH_API void set_default_engine_stub(EngineStub stub);

 }} // namespace torch::autograd
diff --git a/torch/csrc/autograd/function.cpp b/torch/csrc/autograd/function.cpp
index 56ea7f7d29..e077fadcb8 100644
--- a/torch/csrc/autograd/function.cpp
+++ b/torch/csrc/autograd/function.cpp
@@ -114,14 +114,14 @@ void deleteFunction(Function* function) {

   delete function;

-  if (deleteFunctionQueue.size() == 0) {
+  if (deleteFunctionQueue.empty()) {
     return;
   }
   if (recursion_depth.value() != kDeleteFunctionMaxRecursionDepth) {
     AT_ERROR("Only one deleter per thread should be able to process "
              "the delete queue. Please open an issue.");
   }
-  while (deleteFunctionQueue.size() > 0) {
+  while (!deleteFunctionQueue.empty()) {
     auto queued_function = deleteFunctionQueue.front();
     deleteFunctionQueue.pop_front();
     delete queued_function;
diff --git a/torch/csrc/autograd/grad_mode.cpp b/torch/csrc/autograd/grad_mode.cpp
index fc438dfad3..8f5745668a 100644
--- a/torch/csrc/autograd/grad_mode.cpp
+++ b/torch/csrc/autograd/grad_mode.cpp
@@ -2,7 +2,7 @@

 namespace torch { namespace autograd {

-thread_local bool GradMode_enabled = 1;
+thread_local bool GradMode_enabled = true;

 bool GradMode::is_enabled() {
   return GradMode_enabled;
diff --git a/torch/csrc/byte_order.h b/torch/csrc/byte_order.h
index 0b34730825..9699556c69 100644
--- a/torch/csrc/byte_order.h
+++ b/torch/csrc/byte_order.h
@@ -1,7 +1,7 @@
 #ifndef THP_BYTE_ORDER_H
 #define THP_BYTE_ORDER_H

-#include <stdint.h>
+#include <cstdint>
 #include <stddef.h>
 #include <THHalf.h>
diff --git a/torch/csrc/jit/attributes.h b/torch/csrc/jit/attributes.h
index 7199e0ae57..0a9f7e3fd2 100644
--- a/torch/csrc/jit/attributes.h
+++ b/torch/csrc/jit/attributes.h
@@ -1,6 +1,6 @@
 #pragma once
 #include <vector>
-#include <stdint.h>
+#include <cstdint>
 #include <string>
 #include <memory>
 #include <vector>
@@ -36,7 +36,7 @@ struct ScalarAttributeValue : public AttributeValue {
   using ConstructorType = T;
   using ValueType = T;
   ScalarAttributeValue(Symbol name, ConstructorType value_)
-  : AttributeValue(name), value_(value_) {}
+  : AttributeValue(name), value_(std::move(value_)) {}
   ValueType & value() {
     return value_;
   }
@@ -222,7 +222,7 @@ private:
   typename T::ValueType & get(Symbol name) const {
     JIT_ASSERT(name.is_attr());
     auto it = find(name, true);
-    T* child = dynamic_cast<T*>(it->get());
+    auto* child = dynamic_cast<T*>(it->get());
     if(child == nullptr) {
       throw AttributeError(name, true);
     }
diff --git a/torch/csrc/jit/function_schema.h b/torch/csrc/jit/function_schema.h
index ee5c5e5ca2..c7b53abf46 100644
--- a/torch/csrc/jit/function_schema.h
+++ b/torch/csrc/jit/function_schema.h
@@ -18,8 +18,8 @@ struct Argument {
       bool kwarg_only = false)
   : name(std::move(name)),
     type(type? type : DynamicType::get()),
-    N(N),
-    default_value(default_value),
+    N(std::move(N)),
+    default_value(std::move(default_value)),
     kwarg_only(kwarg_only) {}
   std::string name;
   TypePtr type;
diff --git a/torch/csrc/jit/interned_strings.cpp b/torch/csrc/jit/interned_strings.cpp
index c1f018e611..bf487484b2 100644
--- a/torch/csrc/jit/interned_strings.cpp
+++ b/torch/csrc/jit/interned_strings.cpp
@@ -1,5 +1,5 @@
 #include "torch/csrc/jit/interned_strings.h"
-#include <stdint.h>
+#include <cstdint>
 #include <iostream>
 #include <mutex>
 #include <sstream>
diff --git a/torch/csrc/jit/interned_strings.h b/torch/csrc/jit/interned_strings.h
index 12a74e2b37..3221697183 100644
--- a/torch/csrc/jit/interned_strings.h
+++ b/torch/csrc/jit/interned_strings.h
@@ -1,6 +1,6 @@
 #pragma once
 #include <vector>
-#include <stdint.h>
+#include <cstdint>
 #include <string>
 #include <unordered_map>
 #include <algorithm>
diff --git a/torch/csrc/jit/interned_strings_class.h b/torch/csrc/jit/interned_strings_class.h
index 7b82c8034a..7dbf497d27 100644
--- a/torch/csrc/jit/interned_strings_class.h
+++ b/torch/csrc/jit/interned_strings_class.h
@@ -1,4 +1,4 @@
-#include <stdint.h>
+#include <cstdint>
 #include <iostream>
 #include <mutex>
 #include <sstream>
diff --git a/torch/csrc/jit/ir.h b/torch/csrc/jit/ir.h
index 4d55392d73..9ac5059a2d 100644
--- a/torch/csrc/jit/ir.h
+++ b/torch/csrc/jit/ir.h
@@ -150,6 +150,7 @@ public:
   }
   Scope* parent = this->parent_;
   while (!parent->isRoot()) {
+    // NOLINTNEXTLINE(performance-inefficient-string-concatenation)
     out = std::string(parent->name_.toUnqualString()) + separator + out;
     parent = parent->parent_;
   }
@@ -181,7 +182,7 @@ private:
   std::string unique_name_;
   TypePtr type_;
 public:
-  Value* setType(const TypePtr type);
+  Value* setType(TypePtr type);
   void inferTypeFrom(const at::Tensor& output) {
     setType(CompleteTensorType::create(output));
   }
@@ -368,7 +369,7 @@ public:
   }
   bool hasUses() const {
     for(auto o : outputs()) {
-      if(o->uses().size() > 0)
+      if(!o->uses().empty())
         return true;
     }
     return false;
@@ -890,7 +891,7 @@ public:
   Graph(std::shared_ptr<Scope> scope_root)
   : next_unique_(0)
   , new_node_stage_(0)
-  , scope_root_(scope_root)
+  , scope_root_(std::move(scope_root))
   , current_scope_(scope_root_.get())
   , block_(new Block(this, nullptr))
   , insert_before_(return_node()) {}
@@ -1261,7 +1262,7 @@ inline Node::Node(Graph * graph_, NodeKind kind_) :

 inline void Node::eraseOutput(size_t i) {
   JIT_ASSERT(i < outputs_.size());
-  JIT_ASSERT(outputs_[i]->uses().size() == 0);
+  JIT_ASSERT(outputs_[i]->uses().empty());
   schema_ = nullptr;
   Value * n = outputs_[i];
   outputs_.erase(outputs_.begin() + i);
@@ -1286,9 +1287,9 @@ inline void Node::eraseBlock(size_t i) {
 }

 inline void Node::destroy() {
-  while(outputs().size() > 0)
+  while(!outputs().empty())
     eraseOutput(outputs().size() - 1);
-  while(blocks().size() > 0)
+  while(!blocks().empty())
     eraseBlock(blocks().size() - 1);
   removeAllInputs();
   if(inBlockList())
@@ -1422,13 +1423,13 @@ inline Node* Graph::createPythonOp(
 }

 inline graph_node_list_iterator Node::iterator() {
-  return graph_node_list_iterator(this, 0);
+  return {this, 0};
 }
 inline graph_node_list_iterator Node::reverseIterator() {
   return iterator().reverse();
 }
 inline const_graph_node_list_iterator Node::iterator() const {
-  return const_graph_node_list_iterator(this, 0);
+  return {this, 0};
 }
 inline const_graph_node_list_iterator Node::reverseIterator() const {
   return iterator().reverse();
diff --git a/torch/csrc/jit/ivalue.h b/torch/csrc/jit/ivalue.h
index fff0abdea0..c8475b7ff8 100644
--- a/torch/csrc/jit/ivalue.h
+++ b/torch/csrc/jit/ivalue.h
@@ -17,8 +17,8 @@ struct TORCH_API ConstantString : c10::intrusive_ptr_target {
  private:
   const std::string str_;
  public:
-  ConstantString(const std::string & str)
-  : str_(str) {}
+  ConstantString(std::string str)
+  : str_(std::move(str)) {}
   static c10::intrusive_ptr<ConstantString> create(const std::string str_) {
     return c10::make_intrusive<ConstantString>(str_);
   }
@@ -88,7 +88,7 @@ struct TORCH_API IValue {
       c10::raw::intrusive_ptr::decref(as_intrusive_ptr);
     }
   }
-  IValue & operator=(IValue && rhs) & {
+  IValue & operator=(IValue && rhs) & noexcept {
     rhs.swap(*this);
     return *this;
   }
@@ -96,7 +96,7 @@ struct TORCH_API IValue {
     IValue(rhs).swap(*this);
     return *this;
   }
-  void swap(IValue & rhs) {
+  void swap(IValue & rhs) noexcept {
     std::swap(payload, rhs.payload);
     std::swap(is_intrusive_ptr, rhs.is_intrusive_ptr);
     std::swap(tag, rhs.tag);
diff --git a/torch/csrc/jit/source_location.h b/torch/csrc/jit/source_location.h
index ec55ce8f23..6b86391a8f 100644
--- a/torch/csrc/jit/source_location.h
+++ b/torch/csrc/jit/source_location.h
@@ -17,7 +17,7 @@ struct SourceLocation {
   void wrapAndRethrowException(const std::exception & e, const std::string & additional = "") {
     std::stringstream msg;
     msg << "\n" << e.what() << ":\n";
-    if(additional.size() != 0) {
+    if(!additional.empty()) {
       msg << additional << ":\n";
     }
     highlight(msg);
diff --git a/torch/csrc/jit/source_range.h b/torch/csrc/jit/source_range.h
index b84729f5dd..0139c25275 100644
--- a/torch/csrc/jit/source_range.h
+++ b/torch/csrc/jit/source_range.h
@@ -10,10 +10,10 @@ namespace jit {
 // range.
 struct SourceRange : public SourceLocation {
   SourceRange(
-      const std::shared_ptr<std::string>& file_,
+      std::shared_ptr<std::string> file_,
       size_t start_,
       size_t end_)
-      : file_(file_), start_(start_), end_(end_) {}
+      : file_(std::move(file_)), start_(start_), end_(end_) {}
   const std::string text() const {
     return file().substr(start(), end() - start());
   }
@@ -22,7 +22,7 @@ struct SourceRange : public SourceLocation {
   }
   static const size_t CONTEXT = 10;
-  virtual void highlight(std::ostream& out) const override {
+  void highlight(std::ostream& out) const override {
     const std::string& str = file();
     size_t begin_line = start(); // beginning of line to highlight
     size_t end_line = start(); // end of line to highlight
@@ -57,7 +57,7 @@ struct SourceRange : public SourceLocation {
     out << std::string(len, '~')
         << (len < size() ? "... <--- HERE" : " <--- HERE");
     out << str.substr(end_line, end_highlight - end_line);
-    if (str.size() > 0 && str.back() != '\n')
+    if (!str.empty() && str.back() != '\n')
       out << "\n";
   }
   const std::string& file() const {
diff --git a/torch/csrc/jit/type.h b/torch/csrc/jit/type.h
index ed1a920528..2c9c1d550a 100644
--- a/torch/csrc/jit/type.h
+++ b/torch/csrc/jit/type.h
@@ -129,7 +129,7 @@ struct TORCH_API DynamicType : public Type {
   static constexpr bool is_singleton = true;
   template<typename ... T>
   static DynamicTypePtr create( T&& ... all ) {
-    return DynamicTypePtr(new DynamicType( std::forward<T>(all)... ));
+    return DynamicTypePtr(new DynamicType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
   }

   bool operator==(const Type& rhs) const override {
@@ -156,7 +156,7 @@ struct TORCH_API TensorType : public Type {

   template<typename ... T>
   static TensorTypePtr create( T&& ... all ) {
-    return TensorTypePtr(new TensorType( std::forward<T>(all)... ));
+    return TensorTypePtr(new TensorType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
   }

   at::ScalarType scalarType() const { return scalar_type_; }
@@ -215,15 +215,15 @@ struct TORCH_API CompleteTensorType : public TensorType {
   friend struct Type;
   template<typename ... T>
   static CompleteTensorTypePtr create( T&& ... all ) {
-    return CompleteTensorTypePtr(new CompleteTensorType( std::forward<T>(all)... ));
+    return CompleteTensorTypePtr(new CompleteTensorType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
   }

   // overloaded create variadic template argument as it could not distinguish initializer list
   static CompleteTensorTypePtr create(at::ScalarType scalar_type, int device, at::IntList sizes) {
-    return CompleteTensorTypePtr(new CompleteTensorType(scalar_type, device, sizes));
+    return CompleteTensorTypePtr(new CompleteTensorType(scalar_type, device, sizes)); // NOLINT(modernize-make-shared)
   }
   static CompleteTensorTypePtr create(at::ScalarType scalar_type, int device, at::IntList sizes, at::IntList strides) {
-    return CompleteTensorTypePtr(new CompleteTensorType(scalar_type, device, sizes, strides));
+    return CompleteTensorTypePtr(new CompleteTensorType(scalar_type, device, sizes, strides)); // NOLINT(modernize-make-shared)
   }

   static const TypeKind Kind = TypeKind::CompleteTensorType;
@@ -295,7 +295,7 @@ private:
   static std::vector<int64_t> contiguousStridesOf(at::IntList sizes) {
     std::vector<int64_t> strides(sizes.size());
-    if(sizes.size() == 0) // zero-dim case
+    if(sizes.empty()) // zero-dim case
       return strides;
     strides.back() = 1;
     for(size_t i = strides.size() - 1; i > 0; i--) {
@@ -318,7 +318,7 @@ struct TORCH_API ListType : public Type {
   friend struct Type;
   template<typename ... T>
   static ListTypePtr create( T&& ... all ) {
-    return ListTypePtr(new ListType( std::forward<T>(all)... ));
+    return ListTypePtr(new ListType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
   }
   bool operator==(const Type& rhs) const override {
     if(auto rhs_ = rhs.cast<ListType>()) {
@@ -340,7 +340,7 @@ struct TORCH_API ListType : public Type {
   static ListTypePtr ofFloats();
 private:
   ListType(TypePtr elem)
-  : Type(TypeKind::ListType), elem(elem) {}
+  : Type(TypeKind::ListType), elem(std::move(elem)) {}
   static const TypeKind Kind = TypeKind::ListType;
   TypePtr elem;
 };
@@ -352,7 +352,7 @@ struct TORCH_API TupleType : public Type {
   static constexpr bool is_singleton = false;
   friend struct Type;
   static TupleTypePtr create(std::vector<TypePtr> types) {
-    return TupleTypePtr(new TupleType( std::move(types) ));
+    return TupleTypePtr(new TupleType( std::move(types) )); // NOLINT(modernize-make-shared)
   }
   at::ArrayRef<TypePtr> elements() const {
     return elements_;
@@ -408,7 +408,7 @@ struct TORCH_API NumberType : public Type {
   static constexpr bool is_singleton = true;
   template<typename ... T>
   static NumberTypePtr create( T&& ... all ) {
-    return NumberTypePtr(new NumberType( std::forward<T>(all)... ));
+    return NumberTypePtr(new NumberType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
   }
   bool operator==(const Type& rhs) const override {
     return rhs.kind() == kind();
@@ -431,7 +431,7 @@ struct TORCH_API FloatType : public Type {
   static constexpr bool is_singleton = true;
   template<typename ... T>
   static FloatTypePtr create( T&& ... all ) {
-    return FloatTypePtr(new FloatType( std::forward<T>(all)... ));
+    return FloatTypePtr(new FloatType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
   }
   bool operator==(const Type& rhs) const override {
     return rhs.kind() == kind();
@@ -457,7 +457,7 @@ struct TORCH_API IntType : public Type {
   static constexpr bool is_singleton = true;
   template<typename ... T>
   static IntTypePtr create( T&& ... all ) {
-    return IntTypePtr(new IntType( std::forward<T>(all)... ));
+    return IntTypePtr(new IntType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
   }
   bool operator==(const Type& rhs) const override {
     return rhs.kind() == kind();
@@ -483,7 +483,7 @@ struct TORCH_API StringType : public Type {
   static constexpr bool is_singleton = true;
   template<typename ... T>
   static StringTypePtr create( T&& ... all ) {
-    return StringTypePtr(new StringType( std::forward<T>(all)... ));
+    return StringTypePtr(new StringType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
   }
   bool operator==(const Type& rhs) const override {
     return rhs.kind() == kind();
@@ -509,12 +509,12 @@ struct NoneType : public Type {
   static constexpr bool is_singleton = true;
   template<typename ... T>
   static NoneTypePtr create( T&& ... all ) {
-    return NoneTypePtr(new NoneType( std::forward<T>(all)... ));
+    return NoneTypePtr(new NoneType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
   }
-  virtual bool operator==(const Type& rhs) const override {
+  bool operator==(const Type& rhs) const override {
     return rhs.kind() == kind();
   }
-  virtual std::string str() const override {
+  std::string str() const override {
     return "None";
   }
   static const TypeKind Kind = TypeKind::NoneType;
@@ -531,12 +531,12 @@ struct GeneratorType : public Type {
   static constexpr bool is_singleton = true;
   template<typename ... T>
   static GeneratorTypePtr create( T&& ... all) {
-    return GeneratorTypePtr(new GeneratorType( std::forward<T>(all)... ));
+    return GeneratorTypePtr(new GeneratorType( std::forward<T>(all)... )); // NOLINT(modernize-make-shared)
   }
-  virtual bool operator==(const Type& rhs) const override {
+  bool operator==(const Type& rhs) const override {
     return rhs.kind() == kind();
   }
-  virtual std::string str() const override {
+  std::string str() const override {
     return "Generator";
   }
   static const TypeKind Kind = TypeKind::GeneratorType;
diff --git a/torch/csrc/utils/object_ptr.h b/torch/csrc/utils/object_ptr.h
index 14991b8779..8a3b362d20 100644
--- a/torch/csrc/utils/object_ptr.h
+++ b/torch/csrc/utils/object_ptr.h
@@ -6,16 +6,16 @@
 template<class T>
 class THPPointer {
 public:
   THPPointer(): ptr(nullptr) {};
-  explicit THPPointer(T *ptr): ptr(ptr) {};
-  THPPointer(THPPointer &&p) { free(); ptr = p.ptr; p.ptr = nullptr; };
+  explicit THPPointer(T *ptr) noexcept : ptr(ptr) {};
+  THPPointer(THPPointer &&p) noexcept { free(); ptr = p.ptr; p.ptr = nullptr; };

   ~THPPointer() { free(); };
   T * get() { return ptr; }
   const T * get() const { return ptr; }
   T * release() { T *tmp = ptr; ptr = nullptr; return tmp; }
   operator T*() { return ptr; }
-  THPPointer& operator =(T *new_ptr) { free(); ptr = new_ptr; return *this; }
-  THPPointer& operator =(THPPointer &&p) { free(); ptr = p.ptr; p.ptr = nullptr; return *this; }
+  THPPointer& operator =(T *new_ptr) noexcept { free(); ptr = new_ptr; return *this; }
+  THPPointer& operator =(THPPointer &&p) noexcept { free(); ptr = p.ptr; p.ptr = nullptr; return *this; }
   T * operator ->() { return ptr; }
   explicit operator bool() const { return ptr != nullptr; }
@@ -35,4 +35,4 @@ private:
  * out the GIL itself.  Easiest way to avoid this problem is to
  * not use THPPointer in this situation.
  */
-typedef THPPointer<PyObject> THPObjectPtr;
+using THPObjectPtr = THPPointer<PyObject>;
diff --git a/torch/csrc/utils/python_numbers.h b/torch/csrc/utils/python_numbers.h
index 729c62b65e..69f952bc6e 100644
--- a/torch/csrc/utils/python_numbers.h
+++ b/torch/csrc/utils/python_numbers.h
@@ -1,7 +1,7 @@
 #pragma once

 #include "torch/csrc/python_headers.h"
-#include <stdint.h>
+#include <cstdint>
 #include <stdexcept>
 #include "torch/csrc/Exceptions.h"
 #include "torch/csrc/utils/tensor_numpy.h"
diff --git a/torch/csrc/utils/python_stub.h b/torch/csrc/utils/python_stub.h
index 485b3b5f9e..336c530d2b 100644
--- a/torch/csrc/utils/python_stub.h
+++ b/torch/csrc/utils/python_stub.h
@@ -1,4 +1,4 @@
 #pragma once

 struct _object;
-typedef _object PyObject;
+using PyObject = _object;