diff options
author | Chunseok Lee <chunseok.lee@samsung.com> | 2020-07-30 11:32:26 +0900 |
---|---|---|
committer | Chunseok Lee <chunseok.lee@samsung.com> | 2020-07-30 11:32:26 +0900 |
commit | 05e0ec30a632339a8533082476f27bda31ccde16 (patch) | |
tree | 5f220ac83084fe133ffb08a6a17e99f9bb36ec1c /runtime/onert/backend/cpu/ops/SoftMaxLayer.h | |
parent | e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e (diff) | |
download | nnfw-05e0ec30a632339a8533082476f27bda31ccde16.tar.gz nnfw-05e0ec30a632339a8533082476f27bda31ccde16.tar.bz2 nnfw-05e0ec30a632339a8533082476f27bda31ccde16.zip |
Imported Upstream version 1.7.0upstream/1.7.0
Diffstat (limited to 'runtime/onert/backend/cpu/ops/SoftMaxLayer.h')
-rw-r--r-- | runtime/onert/backend/cpu/ops/SoftMaxLayer.h | 59 |
1 file changed, 59 insertions, 0 deletions
diff --git a/runtime/onert/backend/cpu/ops/SoftMaxLayer.h b/runtime/onert/backend/cpu/ops/SoftMaxLayer.h new file mode 100644 index 000000000..d0c704c2c --- /dev/null +++ b/runtime/onert/backend/cpu/ops/SoftMaxLayer.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __ONERT_BACKEND_CPU_OPS_SOFTMAXLAYER_H__ +#define __ONERT_BACKEND_CPU_OPS_SOFTMAXLAYER_H__ + +#include <backend/IPortableTensor.h> + +#include <exec/IFunction.h> + +namespace onert +{ +namespace backend +{ +namespace cpu +{ +namespace ops +{ + +class SoftMaxLayer : public ::onert::exec::IFunction +{ +public: + SoftMaxLayer(); + +public: + void softmaxFloat32(); + + void softmaxQuant8(); + + void configure(const IPortableTensor *input, const float beta, IPortableTensor *output); + + void run() override; + +private: + const IPortableTensor *_input; + IPortableTensor *_output; + + float _beta; +}; + +} // namespace ops +} // namespace cpu +} // namespace backend +} // namespace onert + +#endif // __ONERT_BACKEND_CPU_OPS_SOFTMAXLAYER_H__ |