author | Gu, Jinghui <jinghui.gu@intel.com> | 2019-02-22 10:32:07 -0800
---|---|---
committer | Facebook Github Bot <facebook-github-bot@users.noreply.github.com> | 2019-02-22 10:47:53 -0800
commit | 60de0b885f031b4e30f9c068932137148e29744e |
tree | 2059b3eabb89573e73858906f857eb97f51e44bf |
parent | 4778a4089ece23e1ead0e414d1b4d206ce60a192 |
fallback operators to CPU for onnx support (#15270)
Summary:
fallback operators to CPU for onnx support
Pull Request resolved: https://github.com/pytorch/pytorch/pull/15270
Differential Revision: D14099496
Pulled By: yinghai
fbshipit-source-id: 52b744aa5917700a802bdf19f7007cdcaa6e640a
-rw-r--r-- | caffe2/ideep/operators/operator_fallback_ideep.cc | 71
-rw-r--r-- | caffe2/ideep/utils/ideep_operator.h | 8

2 files changed, 71 insertions, 8 deletions
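All of the registrations in this patch go through Caffe2's IDEEP-to-CPU fallback wrapper, IDEEPFallbackOp (declared in caffe2/ideep/operators/operator_fallback_ideep.h, included at the top of the .cc below): the wrapper presents an IDEEP operator interface but converts its inputs to CPU tensors, runs the wrapped CPU operator, and converts the outputs back. The sketch below is a minimal standalone illustration of that pattern only; the names in it (CpuTensor, IdeepTensor, CpuAbsOp, FallbackOp) are toy stand-ins, not the actual caffe2 classes.

```cpp
// Minimal standalone sketch of the CPU-fallback pattern (toy types only; the
// real wrapper is IDEEPFallbackOp in operator_fallback_ideep.h).
#include <cmath>
#include <iostream>
#include <vector>

struct CpuTensor   { std::vector<float> data; };  // CPU-side storage
struct IdeepTensor { std::vector<float> data; };  // stand-in for ideep::tensor

// An existing "CPU operator" that only understands CpuTensor.
struct CpuAbsOp {
  void Run(const CpuTensor& in, CpuTensor* out) const {
    out->data.clear();
    for (float v : in.data) out->data.push_back(std::fabs(v));
  }
};

// Fallback wrapper: exposes an IDEEP-tensor interface but delegates to the
// wrapped CPU operator, converting inputs and outputs at the boundary.
template <class CpuOp>
struct FallbackOp {
  CpuOp op;
  void Run(const IdeepTensor& in, IdeepTensor* out) const {
    CpuTensor cpu_in{in.data};   // 1. IDEEP input  -> CPU tensor
    CpuTensor cpu_out;
    op.Run(cpu_in, &cpu_out);    // 2. run the existing CPU implementation
    out->data = cpu_out.data;    // 3. CPU output   -> IDEEP tensor
  }
};

int main() {
  IdeepTensor in{{-1.5f, 2.0f, -3.0f}}, out;
  FallbackOp<CpuAbsOp> abs_fallback;
  abs_fallback.Run(in, &out);
  for (float v : out.data) std::cout << v << ' ';  // prints: 1.5 2 3
  std::cout << '\n';
  return 0;
}
```

The SkipIndices<0> argument on the GivenTensor*Fill registrations in the patch appears to tell the real wrapper to skip converting output 0 back to an IDEEP tensor, since those fills produce non-float data that the IDEEP format does not hold.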
```diff
diff --git a/caffe2/ideep/operators/operator_fallback_ideep.cc b/caffe2/ideep/operators/operator_fallback_ideep.cc
index 864605f8aa..04e87a7c75 100644
--- a/caffe2/ideep/operators/operator_fallback_ideep.cc
+++ b/caffe2/ideep/operators/operator_fallback_ideep.cc
@@ -1,11 +1,14 @@
 #include <caffe2/ideep/operators/operator_fallback_ideep.h>
 #include <caffe2/ideep/utils/ideep_operator.h>
 
+#include <caffe2/operators/abs_op.h>
+#include <caffe2/operators/atan_op.h>
 #include <caffe2/operators/accuracy_op.h>
 #include <caffe2/operators/affine_channel_op.h>
 #include <caffe2/operators/batch_matmul_op.h>
 #include "caffe2/operators/bbox_transform_op.h"
 #include "caffe2/operators/box_with_nms_limit_op.h"
+#include <caffe2/operators/cast_op.h>
 #include <caffe2/operators/clip_op.h>
 #include <caffe2/operators/collect_and_distribute_fpn_rpn_proposals_op.h>
 #include <caffe2/operators/cross_entropy_op.h>
@@ -17,6 +20,7 @@
 #include <caffe2/operators/elementwise_div_op.h>
 #include <caffe2/operators/elementwise_mul_op.h>
 #include <caffe2/operators/elementwise_ops.h>
+#include <caffe2/operators/elementwise_sub_op.h>
 #include <caffe2/operators/expand_op.h>
 #include <caffe2/operators/filler_op.h>
 #include <caffe2/operators/flatten_op.h>
@@ -32,12 +36,15 @@
 #include <caffe2/operators/roi_align_op.h>
 #include <caffe2/operators/roi_align_rotated_op.h>
 #include <caffe2/operators/scale_op.h>
+#include <caffe2/operators/slice_op.h>
+#include <caffe2/operators/sqrt_op.h>
 #include <caffe2/operators/softmax_op.h>
 #include <caffe2/operators/softmax_with_loss_op.h>
 #include <caffe2/operators/stop_gradient.h>
 #include <caffe2/operators/tanh_op.h>
 #include <caffe2/operators/tensor_protos_db_input.h>
 #include <caffe2/operators/transpose_op.h>
+#include <caffe2/operators/utility_ops.h>
 #include <caffe2/queue/queue_ops.h>
 #include <caffe2/sgd/iter_op.h>
 #include <caffe2/sgd/learning_rate_op.h>
@@ -54,6 +61,14 @@
 // can add more non-IDEEP operators if needed
 namespace caffe2 {
 
+// Boolean operators
+REGISTER_IDEEP_COMPARE_OPERATOR(EQ);
+REGISTER_IDEEP_COMPARE_OPERATOR(GT);
+REGISTER_IDEEP_COMPARE_OPERATOR(GE);
+REGISTER_IDEEP_COMPARE_OPERATOR(LT);
+REGISTER_IDEEP_COMPARE_OPERATOR(LE);
+REGISTER_IDEEP_COMPARE_OPERATOR(NE);
+
 REGISTER_IDEEP_OPERATOR(Softmax, IDEEPFallbackOp<SoftmaxOp<float, CPUContext>>);
 REGISTER_IDEEP_OPERATOR(
     LabelCrossEntropy,
@@ -64,6 +79,14 @@ REGISTER_IDEEP_OPERATOR(
 REGISTER_IDEEP_OPERATOR(Flatten, IDEEPFallbackOp<FlattenOp<CPUContext>>);
 REGISTER_IDEEP_OPERATOR(ResizeLike, IDEEPFallbackOp<ResizeLikeOp<CPUContext>>);
 REGISTER_IDEEP_OPERATOR(Transpose, IDEEPFallbackOp<TransposeOp<CPUContext>>);
+REGISTER_IDEEP_OPERATOR(Slice, IDEEPFallbackOp<SliceOp<CPUContext>>);
+REGISTER_IDEEP_OPERATOR(Clip, IDEEPFallbackOp<ClipOp<float, CPUContext>>);
+REGISTER_IDEEP_OPERATOR(
+    ScatterAssign,
+    IDEEPFallbackOp<ScatterAssignOp<CPUContext>>);
+REGISTER_IDEEP_OPERATOR(
+    Cast,
+    IDEEPFallbackOp<CastOp<CPUContext>>);
 
 // filter operators
 REGISTER_IDEEP_OPERATOR(
@@ -81,6 +104,22 @@ REGISTER_IDEEP_OPERATOR(
 REGISTER_IDEEP_OPERATOR(
     GivenTensorFill,
     IDEEPFallbackOp<GivenTensorFillOp<float, CPUContext>>);
+// Not supported tensor types in below FillOp
+REGISTER_IDEEP_OPERATOR(
+    GivenTensorDoubleFill,
+    IDEEPFallbackOp<GivenTensorFillOp<double, CPUContext>, SkipIndices<0>>);
+REGISTER_IDEEP_OPERATOR(
+    GivenTensorBoolFill,
+    IDEEPFallbackOp<GivenTensorFillOp<bool, CPUContext>, SkipIndices<0>>);
+REGISTER_IDEEP_OPERATOR(
+    GivenTensorIntFill,
+    IDEEPFallbackOp<GivenTensorFillOp<int, CPUContext>, SkipIndices<0>>);
+REGISTER_IDEEP_OPERATOR(
+    GivenTensorInt64Fill,
+    IDEEPFallbackOp<GivenTensorFillOp<int64_t, CPUContext>, SkipIndices<0>>);
+REGISTER_IDEEP_OPERATOR(
+    GivenTensorStringFill,
+    IDEEPFallbackOp<GivenTensorFillOp<std::string, CPUContext>, SkipIndices<0>>);
 REGISTER_IDEEP_OPERATOR(Load, IDEEPFallbackOp<LoadOp<CPUContext>>);
 REGISTER_IDEEP_OPERATOR(Save, IDEEPFallbackOp<SaveOp<CPUContext>>);
 
@@ -143,10 +182,30 @@ REGISTER_IDEEP_OPERATOR(
     LearningRate,
     IDEEPFallbackOp<LearningRateOp<float, CPUContext>>);
 REGISTER_IDEEP_OPERATOR(
+    Abs,
+    IDEEPFallbackOp<UnaryElementwiseOp<
+        TensorTypes<float>, CPUContext, AbsFunctor<CPUContext>>>);
+REGISTER_IDEEP_OPERATOR(
+    Atan,
+    IDEEPFallbackOp<UnaryElementwiseOp<
+        TensorTypes<float>, CPUContext, AtanFunctor<CPUContext>>>);
+REGISTER_IDEEP_OPERATOR(
+    Sqrt,
+    IDEEPFallbackOp<UnaryElementwiseOp<
+        TensorTypes<float>, CPUContext, SqrtFunctor<CPUContext>>>);
+REGISTER_IDEEP_OPERATOR(
+    Div,
+    IDEEPFallbackOp<BinaryElementwiseOp<
+        NumericTypes, CPUContext, DivFunctor<CPUContext>>>);
+REGISTER_IDEEP_OPERATOR(
     Mul,
     IDEEPFallbackOp<
         BinaryElementwiseOp<NumericTypes, CPUContext, MulFunctor<CPUContext>>>);
 REGISTER_IDEEP_OPERATOR(
+    Sub,
+    IDEEPFallbackOp<BinaryElementwiseOp<
+        NumericTypes, CPUContext, SubFunctor<CPUContext>>>);
+REGISTER_IDEEP_OPERATOR(
     Tanh,
     IDEEPFallbackOp<UnaryElementwiseOp<
         TensorTypes<float>,
@@ -210,18 +269,14 @@ REGISTER_IDEEP_OPERATOR(
         TensorTypes<std::int32_t, std::int64_t, float, double>,
         CPUContext,
         SumReducer<CPUContext>>>);
-
+REGISTER_IDEEP_OPERATOR(
+    ReduceMean,
+    IDEEPFallbackOp<ReduceOp<
+        TensorTypes<float>, CPUContext, MeanReducer<CPUContext>>>);
 REGISTER_IDEEP_OPERATOR(
     BatchMatMul,
     IDEEPFallbackOp<BatchMatMulOp<CPUContext>>);
 
-REGISTER_IDEEP_OPERATOR(
-    Div,
-    IDEEPFallbackOp<
-        BinaryElementwiseOp<NumericTypes, CPUContext, DivFunctor<CPUContext>>>);
-
-REGISTER_IDEEP_OPERATOR(Clip, IDEEPFallbackOp<ClipOp<float, CPUContext>>);
-
 #ifdef CAFFE2_USE_GLOO
 namespace gloo {
 // gloo operators
diff --git a/caffe2/ideep/utils/ideep_operator.h b/caffe2/ideep/utils/ideep_operator.h
index f9b6a83106..e21aa56274 100644
--- a/caffe2/ideep/utils/ideep_operator.h
+++ b/caffe2/ideep/utils/ideep_operator.h
@@ -18,6 +18,14 @@ C10_DECLARE_REGISTRY(
   C10_REGISTER_CLASS(IDEEPOperatorRegistry, name, __VA_ARGS__)
 #define REGISTER_IDEEP_OPERATOR_STR(str_name, ...) \
   C10_REGISTER_TYPED_CLASS(IDEEPOperatorRegistry, str_name, __VA_ARGS__)
+#define REGISTER_IDEEP_COMPARE_OPERATOR(Op)                    \
+  REGISTER_IDEEP_OPERATOR(                                     \
+      Op,                                                      \
+      IDEEPFallbackOp<BinaryElementwiseOp<                     \
+          TensorTypes<bool, int32_t, int64_t, float, double>,  \
+          CPUContext,                                          \
+          Op##Functor<CPUContext>,                             \
+          FixedType<bool>>>)
 
 #define REGISTER_IDEEP_OPERATOR_WITH_ENGINE(name, engine, ...) \
   C10_REGISTER_CLASS(IDEEPOperatorRegistry, name##_ENGINE_##engine, __VA_ARGS__)
```
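For reference, substituting Op = EQ into the REGISTER_IDEEP_COMPARE_OPERATOR macro added in ideep_operator.h above gives roughly the following registration. This is an illustration of the expansion only, not code to add (the macro call already performs the registration), and it assumes the EQFunctor comparison functor comes from caffe2/operators/elementwise_ops.h, which the fallback .cc already includes.

```cpp
#include <caffe2/ideep/operators/operator_fallback_ideep.h>
#include <caffe2/ideep/utils/ideep_operator.h>
#include <caffe2/operators/elementwise_ops.h>

namespace caffe2 {

// Expansion of REGISTER_IDEEP_COMPARE_OPERATOR(EQ): the CPU comparison
// operator is wrapped in IDEEPFallbackOp, accepts bool/int32/int64/float/
// double inputs, and always produces a bool output (FixedType<bool>).
REGISTER_IDEEP_OPERATOR(
    EQ,
    IDEEPFallbackOp<BinaryElementwiseOp<
        TensorTypes<bool, int32_t, int64_t, float, double>,
        CPUContext,
        EQFunctor<CPUContext>,
        FixedType<bool>>>);

} // namespace caffe2
```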