#include "caffe2/operators/sinh_op.h" #include #include namespace caffe2 { template <> template bool SinhGradientFunctor::Forward( const std::vector& /* dY_dims */, const std::vector& X_dims, const T* dY, const T* X, T* dX, CPUContext* /* context */) const { const int size = std::accumulate( X_dims.cbegin(), X_dims.cend(), 1, std::multiplies()); ConstEigenVectorArrayMap dY_arr(dY, size); ConstEigenVectorArrayMap X_arr(X, size); EigenVectorMap(dX, size) = dY_arr * (X_arr.exp() + (-X_arr).exp()) / 2; return true; } REGISTER_CPU_OPERATOR( Sinh, UnaryElementwiseOp< TensorTypes, CPUContext, SinhFunctor>); REGISTER_CPU_OPERATOR( SinhGradient, BinaryElementwiseOp< TensorTypes, CPUContext, SinhGradientFunctor>); OPERATOR_SCHEMA(Sinh) .NumInputs(1) .NumOutputs(1) .IdenticalTypeAndShape() .SetDoc(R"DOC( Calculates the hyperbolic sine of the given input tensor, element-wise. Github Links: - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sinh_op.cc
Example

**Code**

```
workspace.ResetWorkspace()

op = core.CreateOperator(
    "Sinh",
    ["X"],
    ["Y"]
)

workspace.FeedBlob("X", np.random.rand(5).astype(np.float32))
print("X:", workspace.FetchBlob("X"))
workspace.RunOperatorOnce(op)
print("Y:", workspace.FetchBlob("Y"))
```

**Result**

```
X: [0.98907769 0.52907848 0.03216429 0.94983935 0.47881418]
Y: [1.15841695 0.5541099  0.03216984 1.09924557 0.49732079]
```
)DOC") .Input(0, "input", "Input tensor") .Output( 0, "output", "The hyperbolic sine values of the input tensor, computed " "element-wise") .InheritOnnxSchema(); OPERATOR_SCHEMA(SinhGradient) .NumInputs(2) .NumOutputs(1) .IdenticalTypeAndShape(); namespace { class GetSinhGradient : public GradientMakerBase { using GradientMakerBase::GradientMakerBase; std::vector GetGradientDefs() override { return SingleGradientDef( "SinhGradient", "", std::vector{GO(0), I(0)}, std::vector{GI(0)}); } }; } // namespace REGISTER_GRADIENT(Sinh, GetSinhGradient); } // namespace caffe2