summaryrefslogtreecommitdiff
path: root/runtimes/logging
diff options
context:
space:
mode:
author서상민/동작제어Lab(SR)/Senior Engineer/삼성전자 <sangmin7.seo@samsung.com>2018-04-27 18:42:03 +0900
committerGitHub Enterprise <noreply-CODE@samsung.com>2018-04-27 18:42:03 +0900
commit970b693b63843d0cce77dedb7ca7c133de73deee (patch)
treead6ae7ea42654238514e079e2f13ea1866dec613 /runtimes/logging
parentf204957b1ceb30acc444732b9d0ed767838625c4 (diff)
downloadnnfw-970b693b63843d0cce77dedb7ca7c133de73deee.tar.gz
nnfw-970b693b63843d0cce77dedb7ca7c133de73deee.tar.bz2
nnfw-970b693b63843d0cce77dedb7ca7c133de73deee.zip
Rename src as runtimes (#956)
For issue #831 and #925 This patch renames `src` as `runtimes`. Signed-off-by: Sangmin Seo <sangmin7.seo@samsung.com>
Diffstat (limited to 'runtimes/logging')
-rw-r--r--runtimes/logging/CMakeLists.txt5
-rw-r--r--runtimes/logging/include/operand.def12
-rw-r--r--runtimes/logging/include/operation.def14
-rw-r--r--runtimes/logging/src/nnapi_logging.cc381
4 files changed, 412 insertions, 0 deletions
diff --git a/runtimes/logging/CMakeLists.txt b/runtimes/logging/CMakeLists.txt
new file mode 100644
index 000000000..2df3e90f5
--- /dev/null
+++ b/runtimes/logging/CMakeLists.txt
@@ -0,0 +1,5 @@
+# Build a logging-only stand-in for the Android NNAPI runtime: everything
+# under src/ is compiled into a shared library named "neuralnetworks" so it
+# can be loaded in place of the real libneuralnetworks.
+file(GLOB_RECURSE NNAPI_LOGGING_SRCS "src/*.cc")
+
+add_library(neuralnetworks SHARED ${NNAPI_LOGGING_SRCS})
+# The NNAPI headers are part of the public interface; the local include/
+# directory (operand.def / operation.def) is an implementation detail.
+target_include_directories(neuralnetworks PUBLIC ${NNAPI_INCLUDE_DIR})
+target_include_directories(neuralnetworks PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
diff --git a/runtimes/logging/include/operand.def b/runtimes/logging/include/operand.def
new file mode 100644
index 000000000..c570cf026
--- /dev/null
+++ b/runtimes/logging/include/operand.def
@@ -0,0 +1,12 @@
+// Extracted from tensorflow/contrib/lite/nnapi/NeuralNetworksShim.h
+//
+// NNAPI_OPERAND(NAME, CODE)
+//
+// X-macro list of NNAPI operand type codes: define NNAPI_OPERAND(NAME, CODE)
+// before including this file to expand one entry per operand type.
+// NOTE(review): codes presumably mirror the Android NNAPI OperandCode enum --
+// confirm against the NNAPI headers before extending this list.
+#ifndef NNAPI_OPERAND
+#error NNAPI_OPERAND should be defined
+#endif
+NNAPI_OPERAND(ANEURALNETWORKS_FLOAT32, 0)
+NNAPI_OPERAND(ANEURALNETWORKS_INT32, 1)
+NNAPI_OPERAND(ANEURALNETWORKS_UINT32, 2)
+NNAPI_OPERAND(ANEURALNETWORKS_TENSOR_FLOAT32, 3)
+NNAPI_OPERAND(ANEURALNETWORKS_TENSOR_INT32, 4)
+NNAPI_OPERAND(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, 5)
diff --git a/runtimes/logging/include/operation.def b/runtimes/logging/include/operation.def
new file mode 100644
index 000000000..32e684daf
--- /dev/null
+++ b/runtimes/logging/include/operation.def
@@ -0,0 +1,14 @@
+// Extracted from tensorflow/contrib/lite/nnapi/NeuralNetworksShim.h
+//
+// NNAPI_OPERATION(NAME, CODE)
+//
+// X-macro list of NNAPI operation codes: define NNAPI_OPERATION(NAME, CODE)
+// before including this file to expand one entry per operation.
+// NOTE(review): the codes are sparse (1..25 with gaps) -- presumably a subset
+// of the Android NNAPI OperationCode enum; confirm before extending.
+#ifndef NNAPI_OPERATION
+#error NNAPI_OPERATION should be defined
+#endif
+NNAPI_OPERATION(ANEURALNETWORKS_AVERAGE_POOL_2D, 1)
+NNAPI_OPERATION(ANEURALNETWORKS_CONCATENATION, 2)
+NNAPI_OPERATION(ANEURALNETWORKS_CONV_2D, 3)
+NNAPI_OPERATION(ANEURALNETWORKS_DEPTHWISE_CONV_2D, 4)
+NNAPI_OPERATION(ANEURALNETWORKS_FULLY_CONNECTED, 9)
+NNAPI_OPERATION(ANEURALNETWORKS_MAX_POOL_2D, 17)
+NNAPI_OPERATION(ANEURALNETWORKS_RESHAPE, 22)
+NNAPI_OPERATION(ANEURALNETWORKS_SOFTMAX, 25)
diff --git a/runtimes/logging/src/nnapi_logging.cc b/runtimes/logging/src/nnapi_logging.cc
new file mode 100644
index 000000000..978412584
--- /dev/null
+++ b/runtimes/logging/src/nnapi_logging.cc
@@ -0,0 +1,381 @@
+#include <nnapi.h>
+
+#include <stdexcept>
+#include <iostream>
+
+#include <string>
+#include <map>
+
+#include <cassert>
+
+#include <boost/format.hpp>
+
+namespace
+{
+
+// Maps an NNAPI operation code to its symbolic name (e.g. 3 ->
+// "ANEURALNETWORKS_CONV_2D") for logging.  The table is filled once, in the
+// constructor, from the X-macro list in operation.def.
+class OperationCodeResolver
+{
+public:
+  OperationCodeResolver();
+
+public:
+  // Returns the symbolic name for 'code', or "unknown(<code>)" when the
+  // code is not in the table.
+  std::string resolve(int code) const;
+
+private:
+  // Registers one code/name pair; asserts the code was not already set.
+  void setName(int code, const std::string &name);
+
+private:
+  std::map<int, std::string> _table;
+
+public:
+  // Meyers-singleton accessor; the instance is immutable after construction.
+  static const OperationCodeResolver &access()
+  {
+    static const OperationCodeResolver resolver;
+
+    return resolver;
+  }
+};
+
+OperationCodeResolver::OperationCodeResolver()
+{
+// Expand operation.def: each NNAPI_OPERATION(NAME, CODE) entry becomes a
+// setName(CODE, "NAME") call.
+#define NNAPI_OPERATION(NAME, CODE) setName(CODE, #NAME);
+#include "operation.def"
+#undef NNAPI_OPERATION
+}
+
+void OperationCodeResolver::setName(int code, const std::string &name)
+{
+  // Duplicate codes in operation.def are a programming error.
+  assert(_table.find(code) == _table.end());
+  _table[code] = name;
+}
+
+std::string OperationCodeResolver::resolve(int code) const
+{
+  auto it = _table.find(code);
+
+  if (it == _table.end())
+  {
+    // Unknown codes are rendered as "unknown(<code>)" rather than failing,
+    // since this library only logs calls.
+    return boost::str(boost::format("unknown(%d)") % code);
+  }
+
+  return it->second;
+}
+
+// Maps an NNAPI operand type code to its symbolic name (e.g. 3 ->
+// "ANEURALNETWORKS_TENSOR_FLOAT32") for logging, filled from operand.def.
+// NOTE(review): structurally identical to OperationCodeResolver above; a
+// shared implementation (template or common base) would remove the
+// duplication.
+class OperandCodeResolver
+{
+public:
+  OperandCodeResolver();
+
+public:
+  // Returns the symbolic name for 'code', or "unknown(<code>)" when the
+  // code is not in the table.
+  std::string resolve(int code) const;
+
+private:
+  // Registers one code/name pair; asserts the code was not already set.
+  void setName(int code, const std::string &name);
+
+private:
+  std::map<int, std::string> _table;
+
+public:
+  // Meyers-singleton accessor; the instance is immutable after construction.
+  static const OperandCodeResolver &access()
+  {
+    static const OperandCodeResolver resolver;
+
+    return resolver;
+  }
+};
+
+OperandCodeResolver::OperandCodeResolver()
+{
+// Expand operand.def: each NNAPI_OPERAND(NAME, CODE) entry becomes a
+// setName(CODE, "NAME") call.
+#define NNAPI_OPERAND(NAME, CODE) setName(CODE, #NAME);
+#include "operand.def"
+#undef NNAPI_OPERAND
+}
+
+void OperandCodeResolver::setName(int code, const std::string &name)
+{
+  // Duplicate codes in operand.def are a programming error.
+  assert(_table.find(code) == _table.end());
+  _table[code] = name;
+}
+
+std::string OperandCodeResolver::resolve(int code) const
+{
+  auto it = _table.find(code);
+
+  if (it == _table.end())
+  {
+    // Unknown codes are rendered as "unknown(<code>)" rather than failing.
+    return boost::str(boost::format("unknown(%d)") % code);
+  }
+
+  return it->second;
+}
+}
+
+//
+// Asynchronous Event
+//
+// Empty placeholder: no real asynchronous work is tracked, the object only
+// exists so create/wait/free calls can be logged and paired.
+struct ANeuralNetworksEvent
+{
+};
+
+// Logging stub: the event never represents in-flight work, so waiting
+// succeeds immediately.
+ResultCode ANeuralNetworksEvent_wait(ANeuralNetworksEvent *event)
+{
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Releases the placeholder event allocated by
+// ANeuralNetworksExecution_startCompute.
+ResultCode ANeuralNetworksEvent_free(ANeuralNetworksEvent *event)
+{
+  delete event;
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+//
+// Memory
+//
+// Empty placeholder for a shared-memory region; the design options below are
+// the author's notes for a future real implementation.
+struct ANeuralNetworksMemory
+{
+  // 1st approach - Store all the data inside ANeuralNetworksMemory object
+  // 2nd approach - Store metadata only, and defer data loading as much as possible
+};
+
+// Logging stub: size/protect/fd/offset are ignored; only an empty
+// placeholder is allocated so a later *_free call has something to delete.
+ResultCode ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset,
+                                              ANeuralNetworksMemory **memory)
+{
+  std::cout << __FUNCTION__ << "()" << std::endl;
+  *memory = new ANeuralNetworksMemory;
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Logs and releases a memory placeholder created by
+// ANeuralNetworksMemory_createFromFd.
+ResultCode ANeuralNetworksMemory_free(ANeuralNetworksMemory *memory)
+{
+  // Log before deleting: the original logged the pointer after 'delete',
+  // i.e. used an invalid pointer value, and disagreed with the log-first
+  // order of every other stub in this file.
+  std::cout << __FUNCTION__ << "(" << memory << ")" << std::endl;
+  delete memory;
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+//
+// Model
+//
+// Placeholder model: the only state kept is a running count of operands,
+// used by ANeuralNetworksModel_addOperand to assign sequential ids.
+struct ANeuralNetworksModel
+{
+  // ANeuralNetworksModel should be a factory for Graph IR (a.k.a ISA Frontend)
+  // TODO Record # of operands
+  uint32_t numOperands;
+
+  ANeuralNetworksModel() : numOperands(0)
+  {
+    // DO NOTHING
+  }
+};
+
+// Logs the call and hands back an empty placeholder model.
+ResultCode ANeuralNetworksModel_create(ANeuralNetworksModel **model)
+{
+  std::cout << __FUNCTION__ << "(" << model << ")" << std::endl;
+
+  *model = new ANeuralNetworksModel;
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Logs the call and releases the placeholder model.
+ResultCode ANeuralNetworksModel_free(ANeuralNetworksModel *model)
+{
+  std::cout << __FUNCTION__ << "(" << model << ")" << std::endl;
+
+  delete model;
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Logs the operand's type name, assigned id, rank, and each dimension.
+// The id is simply the current operand count; nothing about the operand is
+// actually stored except that count.
+ResultCode ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
+                                           const ANeuralNetworksOperandType *type)
+{
+  std::cout << __FUNCTION__ << "(model: " << model
+            << ", type: " << ::OperandCodeResolver::access().resolve(type->type) << ")"
+            << std::endl;
+
+  // Operand ids are assigned sequentially in call order.
+  auto id = model->numOperands;
+
+  std::cout << "  id: " << id << std::endl;
+  std::cout << "  rank: " << type->dimensionCount << std::endl;
+  for (uint32_t dim = 0; dim < type->dimensionCount; ++dim)
+  {
+    std::cout << "    dim(" << dim << "): " << type->dimensions[dim] << std::endl;
+  }
+
+  model->numOperands += 1;
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Logging stub: records which operand index was given a value; the buffer
+// contents and length are ignored.
+ResultCode ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index,
+                                                const void *buffer, size_t length)
+{
+  std::cout << __FUNCTION__ << "(model: " << model << ", index: " << index << ")" << std::endl;
+
+  // TODO Implement this!
+  // NOTE buffer becomes invalid after ANeuralNetworksModel_setOperandValue returns
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Logging stub: records which operand index was bound; the memory region,
+// offset, and length are ignored.
+ResultCode ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model,
+                                                          int32_t index,
+                                                          const ANeuralNetworksMemory *memory,
+                                                          size_t offset, size_t length)
+{
+  std::cout << __FUNCTION__ << "(model: " << model << ", index: " << index << ")" << std::endl;
+
+  // TODO Implement this!
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Logs the operation's symbolic name (via OperationCodeResolver) and every
+// input/output operand index; the operation itself is not recorded.
+ResultCode ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
+                                             ANeuralNetworksOperationType type, uint32_t inputCount,
+                                             const uint32_t *inputs, uint32_t outputCount,
+                                             const uint32_t *outputs)
+{
+  std::cout << __FUNCTION__ << "(model: " << model
+            << ", type: " << ::OperationCodeResolver::access().resolve(type)
+            << ", inputCount: " << inputCount << ", outputCount: " << outputCount << ")"
+            << std::endl;
+
+  for (uint32_t input = 0; input < inputCount; ++input)
+  {
+    std::cout << "  input(" << input << "): " << inputs[input] << std::endl;
+  }
+  for (uint32_t output = 0; output < outputCount; ++output)
+  {
+    std::cout << "  output(" << output << "): " << outputs[output] << std::endl;
+  }
+
+  // TODO Implement this!
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Logs the operand indices declared as the model's overall inputs and
+// outputs; nothing is stored.
+ResultCode ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model,
+                                                         uint32_t inputCount,
+                                                         const uint32_t *inputs,
+                                                         uint32_t outputCount,
+                                                         const uint32_t *outputs)
+{
+  std::cout << __FUNCTION__ << "(model: " << model << ")" << std::endl;
+
+  for (uint32_t input = 0; input < inputCount; ++input)
+  {
+    std::cout << "  input(" << input << "): " << inputs[input] << std::endl;
+  }
+  for (uint32_t output = 0; output < outputCount; ++output)
+  {
+    std::cout << "  output(" << output << "): " << outputs[output] << std::endl;
+  }
+
+  // TODO Implement this!
+  // NOTE It seems that this function identifies the input and output of the whole model
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Logging stub: marks no state; simply records that the model was finished.
+ResultCode ANeuralNetworksModel_finish(ANeuralNetworksModel *model)
+{
+  std::cout << __FUNCTION__ << "(model: " << model << ")" << std::endl;
+
+  // TODO Implement this!
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+//
+// Compilation
+//
+// Empty placeholder for a compiled model; exists only so compilation calls
+// can be logged and paired.
+struct ANeuralNetworksCompilation
+{
+  // ANeuralNetworksCompilation should hold a compiled IR
+};
+
+// Logs the call and hands back an empty placeholder compilation; the model
+// is not inspected.
+ResultCode ANeuralNetworksCompilation_create(ANeuralNetworksModel *model,
+                                             ANeuralNetworksCompilation **compilation)
+{
+  std::cout << __FUNCTION__ << "(model: " << model << ")" << std::endl;
+
+  *compilation = new ANeuralNetworksCompilation;
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Logging stub: no compilation work is performed.
+ResultCode ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation *compilation)
+{
+  std::cout << __FUNCTION__ << "(compilation: " << compilation << ")" << std::endl;
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+//
+// Execution
+//
+// Empty placeholder for one execution of a compiled model.
+struct ANeuralNetworksExecution
+{
+  // ANeuralNetworksExecution corresponds to NPU::Interp::Session
+};
+
+// Logs the call and hands back an empty placeholder execution; the
+// compilation is not inspected.
+ResultCode ANeuralNetworksExecution_create(ANeuralNetworksCompilation *compilation,
+                                           ANeuralNetworksExecution **execution)
+{
+  std::cout << __FUNCTION__ << "(compilation: " << compilation << ")" << std::endl;
+
+  *execution = new ANeuralNetworksExecution;
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+// ANeuralNetworksExecution_setInput and ANeuralNetworksExecution_setOutput specify HOST buffer for
+// input/output
+// Logging stub: records only the operand type name (or "nullptr" when the
+// caller passes no type override); index, buffer, and length are ignored.
+ResultCode ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32_t index,
+                                             const ANeuralNetworksOperandType *type,
+                                             const void *buffer, size_t length)
+{
+  std::cout << __FUNCTION__ << "(execution: " << execution << ", type: ";
+
+  // 'type' is optional in the NNAPI signature; guard before dereferencing.
+  if (type == nullptr)
+    std::cout << "nullptr)" << std::endl;
+  else
+    std::cout << ::OperandCodeResolver::access().resolve(type->type) << ")" << std::endl;
+
+  // Q: Should we transfer input from HOST to DEVICE here, or in
+  // ANeuralNetworksExecution_startCompute?
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Logging stub: mirrors ANeuralNetworksExecution_setInput -- records only
+// the operand type name; index, buffer, and length are ignored.
+ResultCode ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int32_t index,
+                                              const ANeuralNetworksOperandType *type, void *buffer,
+                                              size_t length)
+{
+  std::cout << __FUNCTION__ << "(execution: " << execution << ", type: ";
+
+  // 'type' is optional in the NNAPI signature; guard before dereferencing.
+  if (type == nullptr)
+    std::cout << "nullptr)" << std::endl;
+  else
+    std::cout << ::OperandCodeResolver::access().resolve(type->type) << ")" << std::endl;
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Logging stub: no computation happens; a placeholder event is allocated so
+// the caller's ANeuralNetworksEvent_wait/_free sequence works.  The caller
+// owns the event and must release it with ANeuralNetworksEvent_free.
+ResultCode ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution,
+                                                 ANeuralNetworksEvent **event)
+{
+  std::cout << __FUNCTION__ << "(execution: " << execution << ")" << std::endl;
+
+  *event = new ANeuralNetworksEvent;
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Logs the call and releases the placeholder execution.
+ResultCode ANeuralNetworksExecution_free(ANeuralNetworksExecution *execution)
+{
+  std::cout << __FUNCTION__ << "(execution: " << execution << ")" << std::endl;
+
+  delete execution;
+
+  return ANEURALNETWORKS_NO_ERROR;
+}