summary | refs | log | tree | commit | diff
path: root/tests/nnapi/include/NeuralNetworksWrapper.h
diff options
context:
space:
mode:
Diffstat (limited to 'tests/nnapi/include/NeuralNetworksWrapper.h')
-rw-r--r--tests/nnapi/include/NeuralNetworksWrapper.h208
1 files changed, 37 insertions, 171 deletions
diff --git a/tests/nnapi/include/NeuralNetworksWrapper.h b/tests/nnapi/include/NeuralNetworksWrapper.h
index af19008fe..71468e212 100644
--- a/tests/nnapi/include/NeuralNetworksWrapper.h
+++ b/tests/nnapi/include/NeuralNetworksWrapper.h
@@ -20,17 +20,10 @@
#ifndef __NNFW_RT_NEURAL_NETWORKS_WRAPPER_H__
#define __NNFW_RT_NEURAL_NETWORKS_WRAPPER_H__
-// Fix for onert:
-// NeuralNetworks.h => NeuralNetworksShim.h
-// Additional include NeuralNetworksExShim.h
#include "NeuralNetworksShim.h"
#include "NeuralNetworksExShim.h"
#include <math.h>
-// Fix for onert: use boost::optional instead of std::optional
-// TODO in onert: introduce and use internal optional library
-#include <boost/optional.hpp>
-#include <string>
#include <vector>
namespace nnfw {
@@ -44,14 +37,6 @@ enum class Type {
TENSOR_FLOAT32 = ANEURALNETWORKS_TENSOR_FLOAT32,
TENSOR_INT32 = ANEURALNETWORKS_TENSOR_INT32,
TENSOR_QUANT8_ASYMM = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
- BOOL = ANEURALNETWORKS_BOOL,
- TENSOR_QUANT16_SYMM = ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
- TENSOR_FLOAT16 = ANEURALNETWORKS_TENSOR_FLOAT16,
- TENSOR_BOOL8 = ANEURALNETWORKS_TENSOR_BOOL8,
- FLOAT16 = ANEURALNETWORKS_FLOAT16,
- TENSOR_QUANT8_SYMM_PER_CHANNEL = ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL,
- TENSOR_QUANT16_ASYMM = ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
- TENSOR_QUANT8_SYMM = ANEURALNETWORKS_TENSOR_QUANT8_SYMM,
};
enum class ExecutePreference {
@@ -66,102 +51,32 @@ enum class Result {
INCOMPLETE = ANEURALNETWORKS_INCOMPLETE,
UNEXPECTED_NULL = ANEURALNETWORKS_UNEXPECTED_NULL,
BAD_DATA = ANEURALNETWORKS_BAD_DATA,
- OP_FAILED = ANEURALNETWORKS_OP_FAILED,
- UNMAPPABLE = ANEURALNETWORKS_UNMAPPABLE,
- BAD_STATE = ANEURALNETWORKS_BAD_STATE,
- OUTPUT_INSUFFICIENT_SIZE = ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE,
- UNAVAILABLE_DEVICE = ANEURALNETWORKS_UNAVAILABLE_DEVICE,
-};
-
-struct SymmPerChannelQuantParams {
- ANeuralNetworksSymmPerChannelQuantParams params;
- std::vector<float> scales;
-
- SymmPerChannelQuantParams(std::vector<float> scalesVec, uint32_t channelDim)
- : scales(std::move(scalesVec)) {
- params = {
- .channelDim = channelDim,
- .scaleCount = static_cast<uint32_t>(scales.size()),
- .scales = scales.size() > 0 ? scales.data() : nullptr,
- };
- }
-
- SymmPerChannelQuantParams(const SymmPerChannelQuantParams& other)
- : params(other.params), scales(other.scales) {
- params.scales = scales.size() > 0 ? scales.data() : nullptr;
- }
-
- SymmPerChannelQuantParams& operator=(const SymmPerChannelQuantParams& other) {
- if (this != &other) {
- params = other.params;
- scales = other.scales;
- params.scales = scales.size() > 0 ? scales.data() : nullptr;
- }
- return *this;
- }
};
struct OperandType {
ANeuralNetworksOperandType operandType;
+ // int32_t type;
std::vector<uint32_t> dimensions;
- // Fix for onert:
- // Use boost::optional instead of std::optional
- // Default value: std::nullopt -> boost::none
- boost::optional<SymmPerChannelQuantParams> channelQuant;
-
- OperandType(const OperandType& other)
- : operandType(other.operandType),
- dimensions(other.dimensions),
- channelQuant(other.channelQuant) {
- operandType.dimensions = dimensions.size() > 0 ? dimensions.data() : nullptr;
- }
-
- OperandType& operator=(const OperandType& other) {
- if (this != &other) {
- operandType = other.operandType;
- dimensions = other.dimensions;
- channelQuant = other.channelQuant;
- operandType.dimensions = dimensions.size() > 0 ? dimensions.data() : nullptr;
- }
- return *this;
- }
- OperandType(Type type, std::vector<uint32_t> d, float scale = 0.0f, int32_t zeroPoint = 0)
- : dimensions(std::move(d)), channelQuant(boost::none) {
- operandType = {
- .type = static_cast<int32_t>(type),
- .dimensionCount = static_cast<uint32_t>(dimensions.size()),
- .dimensions = dimensions.size() > 0 ? dimensions.data() : nullptr,
- .scale = scale,
- .zeroPoint = zeroPoint,
- };
- }
+ OperandType(Type type, const std::vector<uint32_t>& d, float scale = 0.0f,
+ int32_t zeroPoint = 0)
+ : dimensions(d) {
+ operandType.type = static_cast<int32_t>(type);
+ operandType.scale = scale;
+ operandType.zeroPoint = zeroPoint;
- OperandType(Type type, std::vector<uint32_t> data, float scale, int32_t zeroPoint,
- SymmPerChannelQuantParams&& channelQuant)
- : dimensions(std::move(data)), channelQuant(std::move(channelQuant)) {
- operandType = {
- .type = static_cast<int32_t>(type),
- .dimensionCount = static_cast<uint32_t>(dimensions.size()),
- .dimensions = dimensions.size() > 0 ? dimensions.data() : nullptr,
- .scale = scale,
- .zeroPoint = zeroPoint,
- };
+ operandType.dimensionCount = static_cast<uint32_t>(dimensions.size());
+ operandType.dimensions = dimensions.data();
}
};
class Memory {
- public:
+public:
Memory(size_t size, int protect, int fd, size_t offset) {
mValid = ANeuralNetworksMemory_createFromFd(size, protect, fd, offset, &mMemory) ==
ANEURALNETWORKS_NO_ERROR;
}
- Memory(AHardwareBuffer* buffer) {
- mValid = ANeuralNetworksMemory_createFromAHardwareBuffer(buffer, &mMemory) ==
- ANEURALNETWORKS_NO_ERROR;
- }
-
~Memory() { ANeuralNetworksMemory_free(mMemory); }
// Disallow copy semantics to ensure the runtime object can only be freed
@@ -176,7 +91,6 @@ class Memory {
Memory(Memory&& other) { *this = std::move(other); }
Memory& operator=(Memory&& other) {
if (this != &other) {
- ANeuralNetworksMemory_free(mMemory);
mMemory = other.mMemory;
mValid = other.mValid;
other.mMemory = nullptr;
@@ -188,13 +102,13 @@ class Memory {
ANeuralNetworksMemory* get() const { return mMemory; }
bool isValid() const { return mValid; }
- private:
+private:
ANeuralNetworksMemory* mMemory = nullptr;
bool mValid = true;
};
class Model {
- public:
+public:
Model() {
// TODO handle the value returned by this call
ANeuralNetworksModel_create(&mModel);
@@ -213,7 +127,6 @@ class Model {
Model(Model&& other) { *this = std::move(other); }
Model& operator=(Model&& other) {
if (this != &other) {
- ANeuralNetworksModel_free(mModel);
mModel = other.mModel;
mNextOperandId = other.mNextOperandId;
mValid = other.mValid;
@@ -224,30 +137,13 @@ class Model {
return *this;
}
- Result finish() {
- if (mValid) {
- auto result = static_cast<Result>(ANeuralNetworksModel_finish(mModel));
- if (result != Result::NO_ERROR) {
- mValid = false;
- }
- return result;
- } else {
- return Result::BAD_STATE;
- }
- }
+ Result finish() { return static_cast<Result>(ANeuralNetworksModel_finish(mModel)); }
uint32_t addOperand(const OperandType* type) {
if (ANeuralNetworksModel_addOperand(mModel, &(type->operandType)) !=
ANEURALNETWORKS_NO_ERROR) {
mValid = false;
}
- if (type->channelQuant) {
- if (ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
- mModel, mNextOperandId, &type->channelQuant.value().params) !=
- ANEURALNETWORKS_NO_ERROR) {
- mValid = false;
- }
- }
return mNextOperandId++;
}
@@ -275,7 +171,6 @@ class Model {
}
}
- // Fix for onert: addOperationEx for operation support extension (NeuralNetworksEx.h)
void addOperationEx(ANeuralNetworksOperationTypeEx type, const std::vector<uint32_t>& inputs,
const std::vector<uint32_t>& outputs) {
if (ANeuralNetworksModel_addOperationEx(mModel, type, static_cast<uint32_t>(inputs.size()),
@@ -288,34 +183,24 @@ class Model {
void identifyInputsAndOutputs(const std::vector<uint32_t>& inputs,
const std::vector<uint32_t>& outputs) {
if (ANeuralNetworksModel_identifyInputsAndOutputs(
- mModel, static_cast<uint32_t>(inputs.size()), inputs.data(),
- static_cast<uint32_t>(outputs.size()),
- outputs.data()) != ANEURALNETWORKS_NO_ERROR) {
+ mModel, static_cast<uint32_t>(inputs.size()), inputs.data(),
+ static_cast<uint32_t>(outputs.size()),
+ outputs.data()) != ANEURALNETWORKS_NO_ERROR) {
mValid = false;
}
}
-
- void relaxComputationFloat32toFloat16(bool isRelax) {
- if (ANeuralNetworksModel_relaxComputationFloat32toFloat16(mModel, isRelax) ==
- ANEURALNETWORKS_NO_ERROR) {
- mRelaxed = isRelax;
- }
- }
-
ANeuralNetworksModel* getHandle() const { return mModel; }
bool isValid() const { return mValid; }
- bool isRelaxed() const { return mRelaxed; }
- protected:
+private:
ANeuralNetworksModel* mModel = nullptr;
// We keep track of the operand ID as a convenience to the caller.
uint32_t mNextOperandId = 0;
bool mValid = true;
- bool mRelaxed = false;
};
class Event {
- public:
+public:
Event() {}
~Event() { ANeuralNetworksEvent_free(mEvent); }
@@ -331,7 +216,6 @@ class Event {
Event(Event&& other) { *this = std::move(other); }
Event& operator=(Event&& other) {
if (this != &other) {
- ANeuralNetworksEvent_free(mEvent);
mEvent = other.mEvent;
other.mEvent = nullptr;
}
@@ -346,12 +230,12 @@ class Event {
mEvent = newEvent;
}
- private:
+private:
ANeuralNetworksEvent* mEvent = nullptr;
};
class Compilation {
- public:
+public:
Compilation(const Model* model) {
int result = ANeuralNetworksCompilation_create(model->getHandle(), &mCompilation);
if (result != 0) {
@@ -361,19 +245,12 @@ class Compilation {
~Compilation() { ANeuralNetworksCompilation_free(mCompilation); }
- // Disallow copy semantics to ensure the runtime object can only be freed
- // once. Copy semantics could be enabled if some sort of reference counting
- // or deep-copy system for runtime objects is added later.
Compilation(const Compilation&) = delete;
Compilation& operator=(const Compilation&) = delete;
- // Move semantics to remove access to the runtime object from the wrapper
- // object that is being moved. This ensures the runtime object will be
- // freed only once.
Compilation(Compilation&& other) { *this = std::move(other); }
Compilation& operator=(Compilation&& other) {
if (this != &other) {
- ANeuralNetworksCompilation_free(mCompilation);
mCompilation = other.mCompilation;
other.mCompilation = nullptr;
}
@@ -382,27 +259,19 @@ class Compilation {
Result setPreference(ExecutePreference preference) {
return static_cast<Result>(ANeuralNetworksCompilation_setPreference(
- mCompilation, static_cast<int32_t>(preference)));
- }
-
- Result setCaching(const std::string& cacheDir, const std::vector<uint8_t>& token) {
- if (token.size() != ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN) {
- return Result::BAD_DATA;
- }
- return static_cast<Result>(ANeuralNetworksCompilation_setCaching(
- mCompilation, cacheDir.c_str(), token.data()));
+ mCompilation, static_cast<int32_t>(preference)));
}
Result finish() { return static_cast<Result>(ANeuralNetworksCompilation_finish(mCompilation)); }
ANeuralNetworksCompilation* getHandle() const { return mCompilation; }
- private:
+private:
ANeuralNetworksCompilation* mCompilation = nullptr;
};
class Execution {
- public:
+public:
Execution(const Compilation* compilation) {
int result = ANeuralNetworksExecution_create(compilation->getHandle(), &mExecution);
if (result != 0) {
@@ -424,7 +293,6 @@ class Execution {
Execution(Execution&& other) { *this = std::move(other); }
Execution& operator=(Execution&& other) {
if (this != &other) {
- ANeuralNetworksExecution_free(mExecution);
mExecution = other.mExecution;
other.mExecution = nullptr;
}
@@ -434,25 +302,25 @@ class Execution {
Result setInput(uint32_t index, const void* buffer, size_t length,
const ANeuralNetworksOperandType* type = nullptr) {
return static_cast<Result>(
- ANeuralNetworksExecution_setInput(mExecution, index, type, buffer, length));
+ ANeuralNetworksExecution_setInput(mExecution, index, type, buffer, length));
}
Result setInputFromMemory(uint32_t index, const Memory* memory, uint32_t offset,
uint32_t length, const ANeuralNetworksOperandType* type = nullptr) {
return static_cast<Result>(ANeuralNetworksExecution_setInputFromMemory(
- mExecution, index, type, memory->get(), offset, length));
+ mExecution, index, type, memory->get(), offset, length));
}
Result setOutput(uint32_t index, void* buffer, size_t length,
const ANeuralNetworksOperandType* type = nullptr) {
return static_cast<Result>(
- ANeuralNetworksExecution_setOutput(mExecution, index, type, buffer, length));
+ ANeuralNetworksExecution_setOutput(mExecution, index, type, buffer, length));
}
Result setOutputFromMemory(uint32_t index, const Memory* memory, uint32_t offset,
uint32_t length, const ANeuralNetworksOperandType* type = nullptr) {
return static_cast<Result>(ANeuralNetworksExecution_setOutputFromMemory(
- mExecution, index, type, memory->get(), offset, length));
+ mExecution, index, type, memory->get(), offset, length));
}
Result startCompute(Event* event) {
@@ -462,23 +330,21 @@ class Execution {
return result;
}
- Result compute() { return static_cast<Result>(ANeuralNetworksExecution_compute(mExecution)); }
-
- Result getOutputOperandDimensions(uint32_t index, std::vector<uint32_t>* dimensions) {
- uint32_t rank = 0;
- Result result = static_cast<Result>(
- ANeuralNetworksExecution_getOutputOperandRank(mExecution, index, &rank));
- dimensions->resize(rank);
- if ((result != Result::NO_ERROR && result != Result::OUTPUT_INSUFFICIENT_SIZE) ||
- rank == 0) {
+ Result compute() {
+ ANeuralNetworksEvent* event = nullptr;
+ Result result =
+ static_cast<Result>(ANeuralNetworksExecution_startCompute(mExecution, &event));
+ if (result != Result::NO_ERROR) {
return result;
}
- result = static_cast<Result>(ANeuralNetworksExecution_getOutputOperandDimensions(
- mExecution, index, dimensions->data()));
+ // TODO: it is unclear how the event's lifetime should be managed when
+ // there are multiple waiters.
+ result = static_cast<Result>(ANeuralNetworksEvent_wait(event));
+ ANeuralNetworksEvent_free(event);
return result;
}
- private:
+private:
ANeuralNetworksExecution* mExecution = nullptr;
};