Diffstat (limited to 'externals/nnapi_test_generator')
-rw-r--r--  externals/nnapi_test_generator/README.md | 11
-rw-r--r--  externals/nnapi_test_generator/include/TestHarness.h | 209
-rwxr-xr-x  externals/nnapi_test_generator/slicing.py | 167
-rwxr-xr-x  externals/nnapi_test_generator/test_generator.py | 775
-rw-r--r--  externals/nnapi_test_generator/tests/P_conv/conv_1_h3_w2_SAME.mod.py | 11
-rw-r--r--  externals/nnapi_test_generator/tests/P_conv/stderr.txt.expect | 2
-rw-r--r--  externals/nnapi_test_generator/tests/P_conv/stdout.txt.expect | 41
-rw-r--r--  externals/nnapi_test_generator/tests/P_depthwise_conv/depthwise_conv.bin.mod.py | 11
-rw-r--r--  externals/nnapi_test_generator/tests/P_depthwise_conv/stderr.txt.expect | 2
-rw-r--r--  externals/nnapi_test_generator/tests/P_depthwise_conv/stdout.txt.expect | 43
-rw-r--r--  externals/nnapi_test_generator/tests/P_explicit/explicit_add.mod.py | 7
-rw-r--r--  externals/nnapi_test_generator/tests/P_explicit/stderr.txt.expect | 2
-rw-r--r--  externals/nnapi_test_generator/tests/P_explicit/stdout.txt.expect | 21
-rw-r--r--  externals/nnapi_test_generator/tests/P_float/addfloat.mod.py | 8
-rw-r--r--  externals/nnapi_test_generator/tests/P_float/stderr.txt.expect | 2
-rw-r--r--  externals/nnapi_test_generator/tests/P_float/stdout.txt.expect | 23
-rw-r--r--  externals/nnapi_test_generator/tests/P_full/addfloat.mod.py | 22
-rw-r--r--  externals/nnapi_test_generator/tests/P_full/stderr.txt.expect | 2
-rw-r--r--  externals/nnapi_test_generator/tests/P_full/stdout.txt.expect | 46
-rw-r--r--  externals/nnapi_test_generator/tests/P_lstm/lstm.mod.py | 161
-rw-r--r--  externals/nnapi_test_generator/tests/P_lstm/stderr.txt.expect | 2
-rw-r--r--  externals/nnapi_test_generator/tests/P_lstm/stdout.txt.expect | 75
-rw-r--r--  externals/nnapi_test_generator/tests/P_quantized_avgpool/averpoolfloat.mod.py | 20
-rw-r--r--  externals/nnapi_test_generator/tests/P_quantized_avgpool/stderr.txt.expect | 2
-rw-r--r--  externals/nnapi_test_generator/tests/P_quantized_avgpool/stdout.txt.expect | 48
-rw-r--r--  externals/nnapi_test_generator/tests/P_quantized_conv/quantized.mod.py | 11
-rw-r--r--  externals/nnapi_test_generator/tests/P_quantized_conv/stderr.txt.expect | 2
-rw-r--r--  externals/nnapi_test_generator/tests/P_quantized_conv/stdout.txt.expect | 40
-rw-r--r--  externals/nnapi_test_generator/tests/P_vts_full/stderr.txt.expect | 2
-rw-r--r--  externals/nnapi_test_generator/tests/P_vts_full/stdout.txt.expect | 93
-rw-r--r--  externals/nnapi_test_generator/tests/P_vts_full/vts_full.mod.py | 19
-rw-r--r--  externals/nnapi_test_generator/tests/P_vts_operands/addfloat.mod.py | 12
-rw-r--r--  externals/nnapi_test_generator/tests/P_vts_operands/stderr.txt.expect | 2
-rw-r--r--  externals/nnapi_test_generator/tests/P_vts_operands/stdout.txt.expect | 103
-rw-r--r--  externals/nnapi_test_generator/tests/P_weird/stderr.txt.expect | 2
-rw-r--r--  externals/nnapi_test_generator/tests/P_weird/stdout.txt.expect | 51
-rw-r--r--  externals/nnapi_test_generator/tests/P_weird/weird_add.mod.py | 29
-rwxr-xr-x  externals/nnapi_test_generator/tests/test.py | 328
-rwxr-xr-x  externals/nnapi_test_generator/vts_generator.py | 247
39 files changed, 0 insertions, 2654 deletions
diff --git a/externals/nnapi_test_generator/README.md b/externals/nnapi_test_generator/README.md
deleted file mode 100644
index 37d3af0c7..000000000
--- a/externals/nnapi_test_generator/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# nnapi test generator
-
-_nnapi test generator_ aims at generating NN public C API tests.
-
-To generate tests, run the following command.
-
-```
-$ runtimes/tests/neural_networks_test/specs/generate_test.sh
-```
-
-Original code is at https://android.googlesource.com/platform/frameworks/ml/+/efd22b6.
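
The `*.mod.py` specs that script feeds to the generator are small Python files; a trimmed copy of the `tests/P_full/addfloat.mod.py` fixture deleted further down in this diff shows the shape of one:

```
# model: a single ADD of two 2-element float32 vectors
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{2}")   # first input vector
i2 = Input("op2", "TENSOR_FLOAT32", "{2}")   # second input vector
b0 = Int32Scalar("b0", 0)                    # an int32_t scalar operand
i3 = Output("op3", "TENSOR_FLOAT32", "{2}")
model = model.Operation("ADD", i1, i2, b0).To(i3)

# One (inputs, expected outputs) pair for the generated CTS test
Example(({i1: [1.0, 2.0], i2: [3.0, 4.0]},
         {i3: [4.0, 6.0]}))
```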
diff --git a/externals/nnapi_test_generator/include/TestHarness.h b/externals/nnapi_test_generator/include/TestHarness.h
deleted file mode 100644
index 1fcb0d661..000000000
--- a/externals/nnapi_test_generator/include/TestHarness.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Header-only library with various test harness helpers.
- * See frameworks/ml/nn/runtime/test/TestGenerated.cpp for how this is used.
- */
-#ifndef ANDROID_ML_NN_TOOLS_TEST_GENERATOR_TEST_HARNESS_H
-#define ANDROID_ML_NN_TOOLS_TEST_GENERATOR_TEST_HARNESS_H
-
-#include <gtest/gtest.h>
-
-#include <cmath>
-#include <functional>
-#include <map>
-#include <tuple>
-#include <vector>
-
-namespace generated_tests {
-
-constexpr const size_t gMaximumNumberOfErrorMessages = 10;
-
-typedef std::map<int, std::vector<float>> Float32Operands;
-typedef std::map<int, std::vector<int32_t>> Int32Operands;
-typedef std::map<int, std::vector<uint8_t>> Quant8Operands;
-typedef std::tuple<Float32Operands, // ANEURALNETWORKS_TENSOR_FLOAT32
- Int32Operands, // ANEURALNETWORKS_TENSOR_INT32
- Quant8Operands // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
- >
- MixedTyped;
-typedef std::pair<MixedTyped, MixedTyped> MixedTypedExampleType;
-
-template <typename T>
-struct MixedTypedIndex {};
-
-template <>
-struct MixedTypedIndex<float> {
- static constexpr size_t index = 0;
-};
-template <>
-struct MixedTypedIndex<int32_t> {
- static constexpr size_t index = 1;
-};
-template <>
-struct MixedTypedIndex<uint8_t> {
- static constexpr size_t index = 2;
-};
-
-// Go through all index-value pairs of a given input type
-template <typename T>
-inline void for_each(const MixedTyped& idx_and_data,
- std::function<void(int, const std::vector<T>&)> execute) {
- for (auto& i : std::get<MixedTypedIndex<T>::index>(idx_and_data)) {
- execute(i.first, i.second);
- }
-}
-
-// non-const variant of for_each
-template <typename T>
-inline void for_each(MixedTyped& idx_and_data,
- std::function<void(int, std::vector<T>&)> execute) {
- for (auto& i : std::get<MixedTypedIndex<T>::index>(idx_and_data)) {
- execute(i.first, i.second);
- }
-}
-
-// internal helper for for_all
-template <typename T>
-inline void for_all_internal(
- MixedTyped& idx_and_data,
- std::function<void(int, void*, size_t)> execute_this) {
- for_each<T>(idx_and_data, [&execute_this](int idx, std::vector<T>& m) {
- execute_this(idx, static_cast<void*>(m.data()), m.size() * sizeof(T));
- });
-}
-
-// Go through all index-value pairs of all input types
-// expects a functor that takes (int index, void *raw data, size_t sz)
-inline void for_all(MixedTyped& idx_and_data,
- std::function<void(int, void*, size_t)> execute_this) {
- for_all_internal<float>(idx_and_data, execute_this);
- for_all_internal<int32_t>(idx_and_data, execute_this);
- for_all_internal<uint8_t>(idx_and_data, execute_this);
-}
-
-// Const variant of internal helper for for_all
-template <typename T>
-inline void for_all_internal(
- const MixedTyped& idx_and_data,
- std::function<void(int, const void*, size_t)> execute_this) {
- for_each<T>(idx_and_data, [&execute_this](int idx, const std::vector<T>& m) {
- execute_this(idx, static_cast<const void*>(m.data()), m.size() * sizeof(T));
- });
-}
-
-// Go through all index-value pairs (const variant)
-// expects a functor that takes (int index, const void *raw data, size_t sz)
-inline void for_all(
- const MixedTyped& idx_and_data,
- std::function<void(int, const void*, size_t)> execute_this) {
- for_all_internal<float>(idx_and_data, execute_this);
- for_all_internal<int32_t>(idx_and_data, execute_this);
- for_all_internal<uint8_t>(idx_and_data, execute_this);
-}
-
-// Helper template - resize test output per golden
-template <typename ty, size_t tuple_index>
-void resize_accordingly_(const MixedTyped& golden, MixedTyped& test) {
- std::function<void(int, const std::vector<ty>&)> execute =
- [&test](int index, const std::vector<ty>& m) {
- auto& t = std::get<tuple_index>(test);
- t[index].resize(m.size());
- };
- for_each<ty>(golden, execute);
-}
-
-inline void resize_accordingly(const MixedTyped& golden, MixedTyped& test) {
- resize_accordingly_<float, 0>(golden, test);
- resize_accordingly_<int32_t, 1>(golden, test);
- resize_accordingly_<uint8_t, 2>(golden, test);
-}
-
-template <typename ty, size_t tuple_index>
-void filter_internal(const MixedTyped& golden, MixedTyped* filtered,
- std::function<bool(int)> is_ignored) {
- for_each<ty>(golden,
- [filtered, &is_ignored](int index, const std::vector<ty>& m) {
- auto& g = std::get<tuple_index>(*filtered);
- if (!is_ignored(index)) g[index] = m;
- });
-}
-
-inline MixedTyped filter(const MixedTyped& golden,
- std::function<bool(int)> is_ignored) {
- MixedTyped filtered;
- filter_internal<float, 0>(golden, &filtered, is_ignored);
- filter_internal<int32_t, 1>(golden, &filtered, is_ignored);
- filter_internal<uint8_t, 2>(golden, &filtered, is_ignored);
- return filtered;
-}
-
-// Compare results
-#define VECTOR_TYPE(x) \
- typename std::tuple_element<x, MixedTyped>::type::mapped_type
-#define VALUE_TYPE(x) VECTOR_TYPE(x)::value_type
-template <size_t tuple_index>
-void compare_(
- const MixedTyped& golden, const MixedTyped& test,
- std::function<void(VALUE_TYPE(tuple_index), VALUE_TYPE(tuple_index))>
- cmp) {
- for_each<VALUE_TYPE(tuple_index)>(
- golden,
- [&test, &cmp](int index, const VECTOR_TYPE(tuple_index) & m) {
- const auto& test_operands = std::get<tuple_index>(test);
- const auto& test_ty = test_operands.find(index);
- ASSERT_NE(test_ty, test_operands.end());
- for (unsigned int i = 0; i < m.size(); i++) {
- SCOPED_TRACE(testing::Message()
- << "When comparing element " << i);
- cmp(m[i], test_ty->second[i]);
- }
- });
-}
-#undef VALUE_TYPE
-#undef VECTOR_TYPE
-inline void compare(const MixedTyped& golden, const MixedTyped& test, float fpRange = 1e-5f) {
- size_t totalNumberOfErrors = 0;
- compare_<0>(golden, test, [&totalNumberOfErrors, fpRange](float g, float t) {
- if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
- EXPECT_NEAR(g, t, fpRange);
- }
- if (std::abs(g - t) > fpRange) {
- totalNumberOfErrors++;
- }
- });
- compare_<1>(golden, test, [&totalNumberOfErrors](int32_t g, int32_t t) {
- if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
- EXPECT_EQ(g, t);
- }
- if (g != t) {
- totalNumberOfErrors++;
- }
- });
- compare_<2>(golden, test, [&totalNumberOfErrors](uint8_t g, uint8_t t) {
- if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
- EXPECT_NEAR(g, t, 1);
- }
- if (std::abs(g - t) > 1) {
- totalNumberOfErrors++;
- }
- });
- EXPECT_EQ(size_t{0}, totalNumberOfErrors);
-}
-
-}; // namespace generated_tests
-
-#endif // ANDROID_ML_NN_TOOLS_TEST_GENERATOR_TEST_HARNESS_H
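
The header comment points at frameworks/ml/nn/runtime/test/TestGenerated.cpp as the consumer of these helpers. A minimal sketch of that flow, assuming a placeholder `execute` callback for the real NNAPI compile-and-run code (neither `CheckOneExample` nor `execute` exists in this header):

```
#include <functional>

#include "TestHarness.h"

using namespace generated_tests;

// Sketch of how one generated example is checked. `execute` stands in for
// the real NNAPI model execution code, which is not part of TestHarness.h.
void CheckOneExample(
    const MixedTypedExampleType& example, std::function<bool(int)> is_ignored,
    std::function<void(const MixedTyped&, MixedTyped*)> execute) {
  const MixedTyped& inputs = example.first;   // golden inputs
  const MixedTyped& golden = example.second;  // golden outputs

  MixedTyped test;
  resize_accordingly(golden, test);  // size each test output like its golden

  // A real driver would use for_all(inputs, ...) / for_all(test, ...) to hand
  // each raw (index, pointer, byte size) buffer to the NNAPI execution object.
  execute(inputs, &test);

  // Drop outputs flagged by the generated is_ignored(), then compare with the
  // default float tolerance of 1e-5.
  compare(filter(golden, is_ignored), filter(test, is_ignored));
}
```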
diff --git a/externals/nnapi_test_generator/slicing.py b/externals/nnapi_test_generator/slicing.py
deleted file mode 100755
index f08e9d1a1..000000000
--- a/externals/nnapi_test_generator/slicing.py
+++ /dev/null
@@ -1,167 +0,0 @@
-#!/usr/bin/python3
-
-# Copyright 2017, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Slicing the input Model file
-
-Invoked by ml/nn/runtime/test/specs/slicing.sh; this Python code is
-not intended to be invoked directly by users. See that script for
-details on how the slicing tool is used.
-
-This script does the following work:
-
-Perform a topological sort similar to the test generator, except that it:
-* stops at the N-th operation it encounters,
-* renames the output of that N-th operation to a model output,
-* marks that operand as the output of the model, and
-* emits only the inputs and weights used by the submodel.
-
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-import argparse
-from functools import reduce
-import math
-import os
-import struct
-import sys
-import contextlib
-import test_generator
-import pprint
-# Stuff from test generator
-from test_generator import Example
-from test_generator import Float32Scalar
-from test_generator import Input
-from test_generator import Int32Scalar
-from test_generator import Internal
-from test_generator import Model
-from test_generator import Output
-from test_generator import Parameter
-from test_generator import smart_open
-
-
-# Take a model from command line
-def import_source():
- parser = argparse.ArgumentParser()
- parser.add_argument("spec", help="the spec file")
- parser.add_argument(
- "-n", "--number",
- help="number of operations in the sliced model. Default = 1",
- default=1)
- parser.add_argument(
- "-m", "--model", help="the output model file", default="-")
- parser.add_argument(
- "-e", "--example", help="the output example file", default="-")
- args = parser.parse_args()
-
- if os.path.exists(args.spec):
- test_generator.FileNames.SpecFile = os.path.basename(args.spec)
- exec (open(args.spec).read())
-
- return (args.model, args.example, args.number)
-
-
-# Slice up to the Nth op found by the topological sort;
-# the output of that op becomes the output of the model.
-class slicing:
-
- def __init__(self, threshold):
- self.__nr_op_seen = 0
- self.__threshold = threshold
- self.__last_outs = []
- self.__all_formatted_ops = []
- self.__referenced_operands = set()
-
- def format_as_py_op(self, op):
- try:
- fmt = op.PyDefinition()
- except AttributeError: # not an op, but things like weights
- return True
- if fmt is not None:
- self.__nr_op_seen += 1
- if self.__nr_op_seen > self.__threshold:
- return False
- self.__last_outs = op.outs
- for o in op.ins:
- self.__referenced_operands.add(o)
- for o in op.outs:
- self.__referenced_operands.add(o)
- self.__all_formatted_ops.append("model = model.%s" % fmt)
- return True
-
- def dump(self, model_file):
- for x in self.__all_formatted_ops:
- print(x, file=model_file)
-
- def dump_example(self, example_file):
- override = {}
- # Make alias for the output variable
- for lo in self.__last_outs:
- override[lo.get_name()] = lo.type.get_nr_elements()
- alias_def = """\
-# Alias for the output variable {operand_name}
-aliased_output{number} = {operand_name}
-"""
- op = {
- 'operand_name': lo.get_name(),
- 'number': 0 # only support one output as of now
- }
- print (alias_def.format(**op), file=example_file)
- Example.py_dump(example_file, override, self.__referenced_operands)
-
- def format_operands(self):
- # Dump operand definitions
- op_definitions = []
- for o in test_generator.Operand.operands.objects():
- if o not in self.__referenced_operands:
- continue
- ty = o.type
- raw_shape = ty.get_raw_shape()
- op_def = """{op_name} = {operand}("{op_name}", "{element_type}", "{shape}" """
- if isinstance(o, test_generator.Parameter):
- op_def += """, {initializer})"""
- init = o.initializer
- py_operand_name = "Parameter"
- else:
- op_def += ")"
- init = []
- py_operand_name = "IgnoredOutput" if o in set(
- self.__last_outs) else o.__class__.__name__
-
- op = {
- "element_type": ty.get_element_type(),
- "shape": ty.get_raw_shape(),
- "op_name": o.get_name(),
- "operand": py_operand_name,
- "initializer": init
- }
- op_definitions.append(op_def.format(**op))
- return "\n".join(op_definitions)
-
-
-if __name__ == "__main__":
- (model, example, number) = import_source()
- s = slicing(int(number))
-
- with smart_open(model) as model_file:
- spec_file = " (from: %s)" % (test_generator.FileNames.SpecFile)
- print("# Generated file%s. Do not edit" % (spec_file), file=model_file)
- print("model = Model()", file=model_file)
- test_generator.TopologicalSort(lambda x: s.format_as_py_op(x))
- print(s.format_operands(), file=model_file)
- s.dump(model_file)
- with smart_open(example) as example_file:
- s.dump_example(example_file)
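
slicing.sh is the intended entry point, but the argparse setup above implies a direct invocation of roughly this shape (the spec name is borrowed from the conv fixture below; the sliced output file names are only illustrative):

```
$ ./slicing.py conv_1_h3_w2_SAME.mod.py -n 1 -m sliced.model.py -e sliced.example.py
```

With `-n 1` (the default), only the first operation found by the topological sort is kept; its output is renamed to the model output, and only operands referenced by that submodel are emitted.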
diff --git a/externals/nnapi_test_generator/test_generator.py b/externals/nnapi_test_generator/test_generator.py
deleted file mode 100755
index 922ef7754..000000000
--- a/externals/nnapi_test_generator/test_generator.py
+++ /dev/null
@@ -1,775 +0,0 @@
-#!/usr/bin/python3
-
-# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
-# Copyright 2017, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""NN model compiler
-
-Compile models and examples into NDK-based CTS unit tests
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-import argparse
-from functools import reduce
-import math
-import os
-import struct
-import sys
-import contextlib
-import pprint
-import re
-
-@contextlib.contextmanager
-def smart_open(filename=None):
- if filename and filename != '-':
- fh = open(filename, 'w')
- else:
- fh = sys.stdout
-
- try:
- yield fh
- finally:
- if fh is not sys.stdout:
- fh.close()
-
-class Phase(object):
- def __init__(self):
- self.__objects = []
- self.__contents = []
- self.__dict_of_objects = {}
-
- def append(self, obj, x):
- self.__objects.append(obj)
- self.__contents.append(x)
- self.__dict_of_objects[obj.ID()] = obj
-
- def dump(self, filename):
- for x in self.__contents:
- print (" " + x + ";", file=filename)
-
- def objects(self):
- return self.__objects
-
- def search(self, i):
- return self.__dict_of_objects[i]
-
-# Tracks objects inside a model by a not-necessarily-unique name and
-# a unique number
-class NamedObject(object):
- __serial = 0
-
- def __init__(self, name = "NamedObject"):
- self.__name = name
- self.__id = NamedObject.serial()
- NamedObject.__serial += 1
-
- def ID(self):
- return self.__id
-
- def serial():
- return NamedObject.__serial
-
- def get_name(self):
- return self.__name
-
- def __str__(self):
- return self.get_name()
-
- def __hash__(self):
- return self.__id
-
-# Object that can be traversed during topological sorting phase
-class Traversable(object):
- def traversable(self):
- return True
-
-class Nontraversable(object):
- def traversable(self):
- return False
-
-# Object that can take input from other objects
-class Uses(object):
- all_uses = set()
- def __init__(self, ins = []):
- self.ins = ins.copy()
- Uses.all_uses.add(self)
- for i in ins:
- i.outs.append(self)
-
-# Object that other objects take their definitions from
-class Definitions(object):
- def __init__(self, outs = []):
- self.outs = outs.copy()
- for o in outs:
- o.ins.append(self)
-
-class TypeLookup:
- __type_lookup = {
- "INT32": "int32_t",
- "UINT32": "uint32_t",
- "FLOAT32": "float",
- "TENSOR_INT32": "int32_t",
- "TENSOR_FLOAT32": "float",
- "TENSOR_QUANT8_ASYMM": "uint8_t",
-# "OEM_SCALAR": this is service-defined.
- "TENSOR_OEM_BYTE": "uint8_t",
- }
-
- def get_cpptype(nnapi_type):
- return TypeLookup.__type_lookup[nnapi_type]
-
- def is_float(nnapi_type):
- return TypeLookup.get_cpptype(nnapi_type) == "float"
-
- def get_size(nnapi_type):
- return 1 if TypeLookup.get_cpptype(nnapi_type) == "uint8_t" else 4
-
-
-class Type(object):
- __types = {}
- __type_serial = 0 # types have their own numbering
- def __init__(self, vt = None, shape = None):
- self.__vt = vt
- self.__shape = shape
- if vt is None or shape is None:
- self.__name = None
- return
-
- key = str(self)
- if key not in Type.__types:
- self.__id = Type.__type_serial
- Type.__types[str(self)] = self
- Type.__type_serial += 1
- else:
- self.__id = Type.__types[key].__id
- self.__name = "type" + str(self.__id)
-
- def get_shape(self):
- return self.__shape
-
- def get_element_type(self):
- return self.__vt
-
- def get_name(self):
- return self.__name
-
- def __str__(self):
- return (", ".join([self.__vt, self.__shape]))
-
- def __hash__(self):
- return self.__id
-
- def dump(filename):
- for key, value in sorted(Type.__types.items()):
- print (" OperandType " + str(value.__name) + "(Type::" + str(key) + ");", file=filename)
-
- def get_raw_shape(self):
- return self.__shape
-
- def get_parsed_shape(self):
- # Parse shape
- if (self.__shape != "" and self.__shape != "{}"):
- left, sep, right = self.__shape.partition('{')
- real_shape, sep, right = right.partition('}')
- shape = [int(x) for x in real_shape.split(",")]
- # left now looks like "0.0f, 127.5f, "
- scale, sep, zero_point = right.rpartition(',')
- if scale == "":
- if zero_point == "":
- return real_shape, "0", "0"
- return real_shape, zero_point, "0"
- left, sep, scale = scale.partition(',')
- return real_shape, scale.replace("f", ""), zero_point
- else:
- return "", "0", "0"
-
- def get_nr_elements(self):
- # Parse shape
- nr_elements = 1
- real_shape, scale, zero_point = self.get_parsed_shape()
-
- if (real_shape != "" and real_shape != "{}"):
- shape = [int(x) for x in real_shape.split(",")]
- nr_elements = reduce((lambda x, y: x*y), shape)
- return nr_elements
-
- def get_size(self):
- element_size = TypeLookup.get_size(self.__vt)
- return self.get_nr_elements() * element_size
-
-# A value is a typed, named object
-class Value(NamedObject):
- def __init__(self, name, vt):
- NamedObject.__init__(self, name)
- self.type = vt
-
-# An operand that can be fed into operations. Also, an operand is always
-# declared before operations.
-class Operand(Value):
- # All operand declarations in string
- operands = Phase()
-
- def __init__(self, name, vt):
- Value.__init__(self, name, vt)
- def_string = (
- "auto " + self.get_name() + " = "\
- "model->addOperand(&" + vt.get_name() + ")")
- Operand.operands.append(self, def_string)
-
- # By default, produce nothing (when asked by the Topological Sort phase)
- def Definition(self):
- pass
-
- def Reference(self):
- return NamedObject.__str__(self)
-
- # Print a set of operands in curly braces
- def print_operands(operands):
- return [ x.Reference() for x in operands ]
-
- # Defined with the model or not
- def is_weight(self):
- return False
-
-# A user-declared input operand
-class Input(Operand, Definitions, Traversable):
- # for enumerating inputs
- __next_number = 0
-  # Holds references to all Inputs; used by the topological sort as starting nodes.
- __inputs = set()
-
- def __init__(self, name, vt, shape, increase_next_number=True):
- Operand.__init__(self, name, Type(vt, shape))
- Definitions.__init__(self)
- Input.__inputs.add(self)
- self.number = Input.__next_number
- if increase_next_number is True:
- Input.__next_number += 1
-
- def lifetime(self):
- return "MODEL_INPUT"
-
- def is_internal(self):
- return False
-
- def get_inputs(exclude_internal = None):
- if exclude_internal is not None:
- external = { x for x in Input.__inputs if not x.is_internal() }
- return external
- else:
- return Input.__inputs
-
-# A user-declared output operand
-class Output(Operand, Uses, Nontraversable):
- # for enumerating outputs
- __next_number = 0
- __outputs = []
-
- def __init__(self, name, vt, shape):
- Operand.__init__(self, name, Type(vt, shape))
- Uses.__init__(self)
- Output.__outputs.append(self)
- self.number = Output.__next_number
- Output.__next_number += 1
-
- def lifetime(self):
- return "MODEL_OUTPUT"
-
- # return all unique outputs in the original order
- def get_outputs():
- saw = set()
- unique = [x for x in Output.__outputs if x not in saw and (saw.add(x) or True)]
- return unique
-
-# An output whose results we don't want to compare
-class IgnoredOutput(Output):
- __ignored = set()
- def __init__(self, name, vt, shape):
- Output.__init__(self, name, vt, shape)
- IgnoredOutput.__ignored.add(self)
- def gen_ignored():
- ignored_func = """
-bool is_ignored(int i) {
- static std::set<int> ignore = {%s};
- return ignore.find(i) != ignore.end();
-}""" % ", ".join([str(x.number) for x in IgnoredOutput.__ignored])
- return ignored_func
-
-class ModelArgument:
- __arguments = []
-
- def __init__(self, arg_type, arg_name):
- self.__arg_type = arg_type
- self.__arg_name = arg_name
- ModelArgument.__arguments.append(" ".join([arg_type, arg_name]))
-
- def get_arg_type(self):
- return self.__arg_type
-
- def get_arg_name(self):
- return self.__arg_name
-
- def get_arguments():
- return ModelArgument.__arguments
-
- def lifetime(self):
- return "CONSTANT_COPY"
-
-# Print in C float literal format
-def pretty_print_as_float(x):
- s = str(float(x))
- if s.find(".") >= 0 or s.find("e") >= 0:
- return s + "f"
- else:
- return s + ".0f"
-
-class Parameter(Input):
- # TODO seems wrong that's an Input.
- def __init__(self, name, vt, shape, initializer):
- Input.__init__(self, name, vt, shape, False)
- self.initializer = initializer
- self.cpptype = TypeLookup.get_cpptype(vt)
- def is_internal(self):
- return True
- def Definition(self):
- init_name = self.get_name() + "_init"
- initializer = [str(x) for x in self.initializer]
- if self.cpptype == "float":
- initializer = [ pretty_print_as_float(x) for x in initializer]
- init = self.cpptype + " " + init_name + "[]"
- init = "static " + init + " = {" + ", ".join(initializer) + "};"
- args = [ self.get_name(), init_name,
- "sizeof(" + self.cpptype + ") * " + str(len(self.initializer)) ]
- stmt = "\n ".join([init,
- "model->setOperandValue(" + ", ".join(args)+");"])
- return stmt
- def is_weight(self):
- return True
- def lifetime(self):
- if Configuration.useSHM():
- return "CONSTANT_REFERENCE"
- else:
- return "CONSTANT_COPY"
-
-class Int32Scalar(Parameter):
- def __init__(self, name, value):
- Parameter.__init__(self, name, "INT32", "{}", [value])
-
-class Float32Scalar(Parameter):
- def __init__(self, name, value):
- Parameter.__init__(self, name, "FLOAT32", "{}", [value])
-
-# A compiler-generated intermediate result from an operation
-class IntermediateResult(Operand, Definitions, Uses, Traversable):
- def __init__(self, src: Value):
- tmp_name = "tmp" + str(NamedObject.serial())
- Operand.__init__(self, tmp_name, src.type)
- Definitions.__init__(self)
- Uses.__init__(self, [src])
-
- def lifetime(self):
- return "TEMPORARY_VARIABLE"
-
-# An explicitly declared intermediate result
-class Internal(Operand, Definitions, Uses, Traversable):
- def __init__(self, name, vt, shape):
- Operand.__init__(self, name, Type(vt, shape))
- Definitions.__init__(self)
- Uses.__init__(self)
-
- def lifetime(self):
- return "TEMPORARY_VARIABLE"
-
-# An operation in a model
-class Operation(Definitions, Uses, Traversable):
- def __init__(self, optype, ins, outs):
- self.type = ins[0].type
- Definitions.__init__(self, outs)
- Uses.__init__(self, ins)
- self.optype = optype
-
- def __str__(self):
- inputs = [ str(x) for x in self.ins ]
- return "Operation:" + self.optype + " " + ", ".join(inputs)
-
- def Reference(self):
- return "operation" + str(self.ID());
-
- def Definition(self):
- inputs = Operand.print_operands(self.ins);
- outputs = Operand.print_operands(self.outs);
- if re.search('_EX$', self.optype):
- return "model->addOperationEx(ANEURALNETWORKS_"+self.optype+", " + \
- "{"+", ".join(inputs)+"}, {" + ", ".join(outputs) + "});"
- else:
- return "model->addOperation(ANEURALNETWORKS_"+self.optype+", " + \
- "{"+", ".join(inputs)+"}, {" + ", ".join(outputs) + "});"
-
- # Get Python-ish dump for the op
- def PyDefinition(self):
- py_op_string = """Operation("{optype}", {inputs}).To({outputs})"""
- inputs = [str(x) for x in Operand.print_operands(self.ins)]
- inputs = ", ".join(inputs)
- assert len(self.outs) <= 1
- outputs = str(Operand.print_operands(self.outs)[0])
- ops = {"optype": self.optype, "inputs": inputs, "outputs": outputs}
- return py_op_string.format(**ops)
-
-# Main interface
-class Model(object):
- __isRelaxed = False
-
- def __init__(self):
- self.__currentOp = None
-
- # TODO turn this into generic binary operations
- def Add(self, i1: Value, i2 = None) -> Operation:
- ins = [i1]
- if i2 is not None:
- ins.append(i2)
- if self.__currentOp is not None:
- ir = IntermediateResult(self.__currentOp)
- self.__currentOp = ir
- ins.append(self.__currentOp)
-
- op = Operation("ADD", ins, [])
-
- self.__currentOp = op
- return self
-
- def Operation(self, op_name, *args):
- ins = [i for i in args]
- outs = []
- op = Operation(op_name, ins, outs)
- self.__currentOp = op
- return self
-
- def RawAdd(self, i1: Value, i2: Value, o = None) -> Operation:
- ins = [i1, i2]
- outs = []
- if o is not None:
- outs = [o]
- op = Operation("ADD", ins, outs)
-
- self.__currentOp = op
- return self
-
- # See CpuExecutor::executeOperation() for the arguments of each op
- def AveragePool(self, input, padding, stride_width, stride_height, filter_width, filter_height, activation):
- ins = [input, padding, stride_width,
- stride_height, filter_width, filter_height, activation]
- outs = []
- op = Operation("AVERAGE_POOL_2D", ins, outs)
- self.__currentOp = op
- return self
-
- def Concatenation(self, *args):
- ins = [i for i in args]
- outs = []
- op = Operation("CONCATENATION", ins, outs)
- self.__currentOp = op
- return self
-
- def Conv(self, filter, bias, input, padding, stride_width, stride_height, activation):
- ins = [filter, bias, input, padding, stride_width,
- stride_height, activation]
- outs = []
- op = Operation("CONV_2D", ins, outs)
- self.__currentOp = op
- return self
-
- def DepthWiseConv(self, filter, bias, input, padding, stride_width, stride_height, depth_multiplier, activation):
- ins = [filter, bias, input, padding, stride_width,
- stride_height, depth_multiplier, activation]
- outs = []
- op = Operation("DEPTHWISE_CONV_2D", ins, outs)
- self.__currentOp = op
- return self
-
- def FullyConnected(self, input, weights, bias, activation):
- ins = [input, weights, bias, activation]
- outs = []
- op = Operation("FULLY_CONNECTED", ins, outs)
- self.__currentOp = op
- return self
-
- def Logistic(self, input):
- ins = [input]
- outs = []
- op = Operation("LOGISTIC", ins, outs)
- self.__currentOp = op
- return self
-
- def L2Pool(self, input, padding, stride_width, stride_height, filter_width, filter_height, activation):
- ins = [input, padding, stride_width,
- stride_height, filter_width, filter_height, activation]
- outs = []
- op = Operation("L2_POOL_2D", ins, outs)
- self.__currentOp = op
- return self
-
- def MaxPool(self, input, padding, stride_width, stride_height, filter_width, filter_height, activation):
- ins = [input, padding, stride_width,
- stride_height, filter_width, filter_height, activation]
- outs = []
- op = Operation("MAX_POOL_2D", ins, outs)
- self.__currentOp = op
- return self
-
- def SoftMax(self, input, beta):
- ins = [input, beta]
- outs = []
- op = Operation("SOFTMAX", ins, outs)
- self.__currentOp = op
- return self
-
- def Reshape(self, input, shape):
- ins = [input, shape]
- outs = []
- op = Operation("RESHAPE", ins, outs)
- self.__currentOp = op
- return self
-
- def Out(self, o):
- if (type(o) is list or type(o) is tuple):
- for i in o:
- self.__currentOp.outs.append(i)
- i.ins.append(self.__currentOp)
- else:
- self.__currentOp.outs.append(o)
- o.ins.append(self.__currentOp)
- return self
-
- def To(self, o:Value):
- ret = Model.Out(self, o)
- self.__currentOp = None
- return self
-
- def RelaxedExecution(self, isRelaxed):
- Model.__isRelaxed = isRelaxed
- return self
-
- def isRelaxed():
- return Model.__isRelaxed
-
-
-class FileNames:
- SpecFile = ""
-
-class Example():
- __examples = []
- def __init__(self, list_of_examples):
- Example.__examples.append(list_of_examples)
-
- def dump_dict(d):
- ret = []
- for k, v in d.items():
- key = str(k)
- suffix = "f"
- if type(k) is not int:
- key = str(k.number)
- if not TypeLookup.is_float(k.type.get_element_type()):
- suffix = ""
- init = ", ".join(
- [str(i) + (suffix if str(i).find(".") != -1 else "") for i in v])
- ret.append("{%s, {%s}}" % (key, init))
- return ", ".join(ret)
-
- def dump_mixed_types(d):
- ret = []
-
- float32_dict = {}
- int32_dict = {}
- uint8_dict = {}
-
- for k, v in d.items():
- key_id = k.ID() if type(k) is not int else k
- ty = Operand.operands.search(key_id).type.get_element_type()
- # find out type of the operand addressed by the key
- if (ty == "TENSOR_FLOAT32"):
- float32_dict[k] = v
- elif (ty == "TENSOR_INT32"):
- int32_dict[k] = v
- elif (ty == "TENSOR_OEM_BYTE"):
- uint8_dict[k] = v
- elif (ty == "TENSOR_QUANT8_ASYMM"):
- uint8_dict[k] = v
- else:
- print ("Unhandled type %s"%ty, file = sys.stderr)
- assert 0 and "unsupported example type"
-
- tuple_init = """\
-{{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{{float32_dict}}},
- // int -> INT32 map
- {{{int32_dict}}},
- // int -> QUANT8_ASYMM map
- {{{uint8_dict}}}
-}}"""
- tuple_contents = {
- 'float32_dict': Example.dump_dict(float32_dict),
- 'int32_dict': Example.dump_dict(int32_dict),
- 'uint8_dict': Example.dump_dict(uint8_dict)
- }
- return tuple_init.format(**tuple_contents)
-
-
- def dump(example_file):
- if len(Example.__examples) > 0:
- spec_file = " (from: %s)" % (FileNames.SpecFile)
- print ('// Generated file%s. Do not edit' % (spec_file),
- file = example_file)
- for i, o in Example.__examples:
- print ('// Begin of an example', file = example_file)
- print ('{', file = example_file)
- inputs = Example.dump_mixed_types(i)
- outputs = Example.dump_mixed_types(o)
- print ('//Input(s)\n%s,' % inputs , file = example_file)
- print ('//Output(s)\n%s' % outputs, file = example_file)
- print ('}, // End of an example', file = example_file)
-
- # Similar to dump_dict, but in python. Used by the slicing tool
- # if referenced is not None, only print operands that are present there
- def py_dump_dict(d, referenced):
- ret = []
- for k, v in d.items():
- if referenced != None and k not in referenced:
- continue
- key = str(k)
- init = pprint.pformat(v)
- ret.append("%s: %s" % (key, init))
- return ", ".join(ret)
-
- # similar to dump, but in python. Used by the slicing tool
- # if referenced is not None, only print operands that are present there
- def py_dump(example_file, override, referenced):
- if len(Example.__examples) > 0:
- example_no = 0
- example_template = """\
-input{no} = {{{inputs}}}
-# Only executed during data collection phase
-if collecting_data is True:
- Example((input{no}, {{{outputs}}}))
-"""
- for i, o in Example.__examples:
- print ('# Begin of an example', file = example_file)
- inputs = Example.py_dump_dict(i, referenced)
- output_list = []
- for k, v in override.items():
- output_list.append("%s: [0] * %d" % (k, v))
- outputs = ",".join(output_list)
-
- # TODO: handle >1 outputs
- for k, v in o.items():
- assert k.number == 0
- example_contents = {
- 'no': example_no,
- 'inputs': inputs,
- 'outputs': outputs
- }
- print (example_template.format(**example_contents), file = example_file)
-
-
-def TopologicalSort(format_op):
- start = Input.get_inputs().copy()
- deps = { x: set(x.ins) for x in Uses.all_uses }
-
- while len(start) > 0:
- cur = start.pop()
- if format_op(cur) is False:
- return
- distinct_outs = set(cur.outs)
- for o in distinct_outs:
- deps[o].remove(cur)
- if len(deps[o]) == 0 and o.traversable():
- start.add(o)
-
-class Configuration:
- use_shm_for_weights = False
- def useSHM():
- return Configuration.use_shm_for_weights
-
-# Take a model from command line
-def import_source():
- parser = argparse.ArgumentParser()
- parser.add_argument("spec", help="the spec file")
- parser.add_argument(
- "-m", "--model", help="the output model file", default="-")
- parser.add_argument(
- "-e", "--example", help="the output example file", default="-")
- args = parser.parse_args()
-
- if os.path.exists(args.spec):
- FileNames.SpecFile = os.path.basename(args.spec)
- exec (open(args.spec).read())
-
- return (args.model, args.example)
-
-
-def print_cts_op(model_file, op):
- fmt = op.Definition()
- if fmt is not None:
- print (" %s" % fmt, file = model_file)
- return True
-
-if __name__ == '__main__':
- (model, example) = import_source()
- # Boilerplate
- args = ""
- if len(ModelArgument.get_arguments()) > 0:
- args = ", " + ", ".join(ModelArgument.get_arguments())
-
- print("Output CTS model: %s" % model, file=sys.stderr)
- print("Output example:" + example, file=sys.stderr)
-
- with smart_open(model) as model_file:
- spec_file = " (from: %s)" % (FileNames.SpecFile)
-
- print ('// Generated file%s. Do not edit'%(spec_file), file = model_file)
- print ("void CreateModel(Model *model" + args + ") {", file=model_file)
-
- # Phase 0: types
- Type.dump(model_file)
- # Phase 1: add operands
- print (" // Phase 1, operands", file=model_file)
- Operand.operands.dump(model_file)
-
- # Phase 2: operations
- print (" // Phase 2, operations", file=model_file)
- TopologicalSort(lambda x: print_cts_op(model_file, x))
-
- # Phase 3: add inputs and outputs
- print (" // Phase 3, inputs and outputs", file=model_file)
- inputs = Operand.print_operands(Input.get_inputs(True));
- outputs = Operand.print_operands(Output.get_outputs());
- print (" model->identifyInputsAndOutputs(\n" +
- " {"+", ".join(inputs)+"},\n {" + ", ".join(outputs) + "});",
- file=model_file)
-
- # Phase 4: set relaxed execution if needed
- if (Model.isRelaxed()):
- print (" // Phase 4: set relaxed execution", file=model_file)
- print (" model->relaxComputationFloat32toFloat16(true);", file=model_file)
-
- # Boilerplate
- print (" assert(model->isValid());", file=model_file);
- print ("}", file=model_file)
- print (IgnoredOutput.gen_ignored(), file=model_file)
-
- with smart_open(example) as example_file:
- Example.dump(example_file)
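
From the `__main__` block above, a direct run looks roughly like this (output file names are illustrative). When `-m`/`-e` are omitted, both the model and the example go to stdout, which is exactly what the `tests/P_*/stdout.txt.expect` fixtures below record; the two status lines always go to stderr:

```
$ ./test_generator.py conv_1_h3_w2_SAME.mod.py -m conv.model.cpp -e conv.example.cpp
Output CTS model: conv.model.cpp
Output example:conv.example.cpp
```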
diff --git a/externals/nnapi_test_generator/tests/P_conv/conv_1_h3_w2_SAME.mod.py b/externals/nnapi_test_generator/tests/P_conv/conv_1_h3_w2_SAME.mod.py
deleted file mode 100644
index 8e93749e2..000000000
--- a/externals/nnapi_test_generator/tests/P_conv/conv_1_h3_w2_SAME.mod.py
+++ /dev/null
@@ -1,11 +0,0 @@
-i4 = Int32Scalar("b4", 1)
-i5 = Int32Scalar("b5", 1)
-i6 = Int32Scalar("b6", 1)
-i7 = Int32Scalar("b7", 0)
-i2 = Input("op2", "TENSOR_FLOAT32", "{1, 8, 8, 3}") # input 0
-i3 = Output("op3", "TENSOR_FLOAT32", "{1, 8, 8, 1}") # output 0
-i0 = Parameter("op0", "TENSOR_FLOAT32", "{1, 3, 2, 3}", [-0.966213, -0.467474, -0.82203, -0.579455, 0.0278809, -0.79946, -0.684259, 0.563238, 0.37289, 0.738216, 0.386045, -0.917775, 0.184325, -0.270568, 0.82236, 0.0973683, -0.941308, -0.144706]) # parameters
-i1 = Parameter("op1", "TENSOR_FLOAT32", "{1}", [0]) # parameters
-model = Model()
-model = model.Conv(i2, i0, i1, i4, i5, i6, i7).To(i3)
-
diff --git a/externals/nnapi_test_generator/tests/P_conv/stderr.txt.expect b/externals/nnapi_test_generator/tests/P_conv/stderr.txt.expect
deleted file mode 100644
index c5a6e36b9..000000000
--- a/externals/nnapi_test_generator/tests/P_conv/stderr.txt.expect
+++ /dev/null
@@ -1,2 +0,0 @@
-Output CTS model: -
-Output example:-
diff --git a/externals/nnapi_test_generator/tests/P_conv/stdout.txt.expect b/externals/nnapi_test_generator/tests/P_conv/stdout.txt.expect
deleted file mode 100644
index 47d92b6b8..000000000
--- a/externals/nnapi_test_generator/tests/P_conv/stdout.txt.expect
+++ /dev/null
@@ -1,41 +0,0 @@
-// Generated file (from: conv_1_h3_w2_SAME.mod.py). Do not edit
-void CreateModel(Model *model) {
- OperandType type0(Type::INT32, {});
- OperandType type3(Type::TENSOR_FLOAT32, {1, 3, 2, 3});
- OperandType type2(Type::TENSOR_FLOAT32, {1, 8, 8, 1});
- OperandType type1(Type::TENSOR_FLOAT32, {1, 8, 8, 3});
- OperandType type4(Type::TENSOR_FLOAT32, {1});
- // Phase 1, operands
- auto b4 = model->addOperand(&type0);
- auto b5 = model->addOperand(&type0);
- auto b6 = model->addOperand(&type0);
- auto b7 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto op0 = model->addOperand(&type3);
- auto op1 = model->addOperand(&type4);
- // Phase 2, operations
- static int32_t b4_init[] = {1};
- model->setOperandValue(b4, b4_init, sizeof(int32_t) * 1);
- static int32_t b5_init[] = {1};
- model->setOperandValue(b5, b5_init, sizeof(int32_t) * 1);
- static int32_t b6_init[] = {1};
- model->setOperandValue(b6, b6_init, sizeof(int32_t) * 1);
- static int32_t b7_init[] = {0};
- model->setOperandValue(b7, b7_init, sizeof(int32_t) * 1);
- static float op0_init[] = {-0.966213f, -0.467474f, -0.82203f, -0.579455f, 0.0278809f, -0.79946f, -0.684259f, 0.563238f, 0.37289f, 0.738216f, 0.386045f, -0.917775f, 0.184325f, -0.270568f, 0.82236f, 0.0973683f, -0.941308f, -0.144706f};
- model->setOperandValue(op0, op0_init, sizeof(float) * 18);
- static float op1_init[] = {0.0f};
- model->setOperandValue(op1, op1_init, sizeof(float) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op2, op0, op1, b4, b5, b6, b7}, {op3});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op2},
- {op3});
- assert(model->isValid());
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
diff --git a/externals/nnapi_test_generator/tests/P_depthwise_conv/depthwise_conv.bin.mod.py b/externals/nnapi_test_generator/tests/P_depthwise_conv/depthwise_conv.bin.mod.py
deleted file mode 100644
index 8738ee01b..000000000
--- a/externals/nnapi_test_generator/tests/P_depthwise_conv/depthwise_conv.bin.mod.py
+++ /dev/null
@@ -1,11 +0,0 @@
-model = Model()
-i4 = Int32Scalar("b4", 1)
-i5 = Int32Scalar("b5", 1)
-i6 = Int32Scalar("b6", 1)
-i7 = Int32Scalar("b7", 1)
-i8 = Int32Scalar("b8", 0)
-i2 = Input("op2", "TENSOR_FLOAT32", "{1, 8, 8, 3}") # input 0
-i3 = Output("op3", "TENSOR_FLOAT32", "{1, 8, 8, 3}") # output 0
-i0 = Parameter("op0", "TENSOR_FLOAT32", "{1, 1, 1, 3}", [-0.966213, -0.467474, -0.82203]) # parameters
-i1 = Parameter("op1", "TENSOR_FLOAT32", "{3}", [0, 0, 0]) # parameters
-model = model.DepthWiseConv(i2, i0, i1, i4, i5, i6, i7, i8).To(i3)
diff --git a/externals/nnapi_test_generator/tests/P_depthwise_conv/stderr.txt.expect b/externals/nnapi_test_generator/tests/P_depthwise_conv/stderr.txt.expect
deleted file mode 100644
index c5a6e36b9..000000000
--- a/externals/nnapi_test_generator/tests/P_depthwise_conv/stderr.txt.expect
+++ /dev/null
@@ -1,2 +0,0 @@
-Output CTS model: -
-Output example:-
diff --git a/externals/nnapi_test_generator/tests/P_depthwise_conv/stdout.txt.expect b/externals/nnapi_test_generator/tests/P_depthwise_conv/stdout.txt.expect
deleted file mode 100644
index 9a22cc3e3..000000000
--- a/externals/nnapi_test_generator/tests/P_depthwise_conv/stdout.txt.expect
+++ /dev/null
@@ -1,43 +0,0 @@
-// Generated file (from: depthwise_conv.bin.mod.py). Do not edit
-void CreateModel(Model *model) {
- OperandType type0(Type::INT32, {});
- OperandType type2(Type::TENSOR_FLOAT32, {1, 1, 1, 3});
- OperandType type1(Type::TENSOR_FLOAT32, {1, 8, 8, 3});
- OperandType type3(Type::TENSOR_FLOAT32, {3});
- // Phase 1, operands
- auto b4 = model->addOperand(&type0);
- auto b5 = model->addOperand(&type0);
- auto b6 = model->addOperand(&type0);
- auto b7 = model->addOperand(&type0);
- auto b8 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type1);
- auto op0 = model->addOperand(&type2);
- auto op1 = model->addOperand(&type3);
- // Phase 2, operations
- static int32_t b4_init[] = {1};
- model->setOperandValue(b4, b4_init, sizeof(int32_t) * 1);
- static int32_t b5_init[] = {1};
- model->setOperandValue(b5, b5_init, sizeof(int32_t) * 1);
- static int32_t b6_init[] = {1};
- model->setOperandValue(b6, b6_init, sizeof(int32_t) * 1);
- static int32_t b7_init[] = {1};
- model->setOperandValue(b7, b7_init, sizeof(int32_t) * 1);
- static int32_t b8_init[] = {0};
- model->setOperandValue(b8, b8_init, sizeof(int32_t) * 1);
- static float op0_init[] = {-0.966213f, -0.467474f, -0.82203f};
- model->setOperandValue(op0, op0_init, sizeof(float) * 3);
- static float op1_init[] = {0.0f, 0.0f, 0.0f};
- model->setOperandValue(op1, op1_init, sizeof(float) * 3);
- model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op2, op0, op1, b4, b5, b6, b7, b8}, {op3});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op2},
- {op3});
- assert(model->isValid());
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
diff --git a/externals/nnapi_test_generator/tests/P_explicit/explicit_add.mod.py b/externals/nnapi_test_generator/tests/P_explicit/explicit_add.mod.py
deleted file mode 100644
index b1c8f99a4..000000000
--- a/externals/nnapi_test_generator/tests/P_explicit/explicit_add.mod.py
+++ /dev/null
@@ -1,7 +0,0 @@
-i1 = Input("op1", "TENSOR_FLOAT32", "{1, 8, 8, 3}") # input 0
-i2 = Output("op2", "TENSOR_FLOAT32", "{1, 8, 8, 3}") # output 0
-i0 = Internal("op0", "TENSOR_FLOAT32", "{1, 8, 8, 3}") # intermediate result
-model = Model()
-model = model.RawAdd(i1, i1).To(i0)
-model = model.RawAdd(i0, i1).To(i2)
-
diff --git a/externals/nnapi_test_generator/tests/P_explicit/stderr.txt.expect b/externals/nnapi_test_generator/tests/P_explicit/stderr.txt.expect
deleted file mode 100644
index c5a6e36b9..000000000
--- a/externals/nnapi_test_generator/tests/P_explicit/stderr.txt.expect
+++ /dev/null
@@ -1,2 +0,0 @@
-Output CTS model: -
-Output example:-
diff --git a/externals/nnapi_test_generator/tests/P_explicit/stdout.txt.expect b/externals/nnapi_test_generator/tests/P_explicit/stdout.txt.expect
deleted file mode 100644
index 1221b7bda..000000000
--- a/externals/nnapi_test_generator/tests/P_explicit/stdout.txt.expect
+++ /dev/null
@@ -1,21 +0,0 @@
-// Generated file (from: explicit_add.mod.py). Do not edit
-void CreateModel(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 8, 8, 3});
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type0);
- auto op0 = model->addOperand(&type0);
- // Phase 2, operations
- model->addOperation(ANEURALNETWORKS_ADD, {op1, op1}, {op0});
- model->addOperation(ANEURALNETWORKS_ADD, {op0, op1}, {op2});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op2});
- assert(model->isValid());
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
diff --git a/externals/nnapi_test_generator/tests/P_float/addfloat.mod.py b/externals/nnapi_test_generator/tests/P_float/addfloat.mod.py
deleted file mode 100644
index f0e4f0430..000000000
--- a/externals/nnapi_test_generator/tests/P_float/addfloat.mod.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# model
-i1 = Input("operand1","TENSOR_FLOAT32", "{3,4}")
-i2 = Input("operand2","TENSOR_FLOAT32", "{3,4}")
-i3 = Input("operand3","TENSOR_FLOAT32", "{3,4}")
-o = Output("operand4","TENSOR_FLOAT32", "{3,4}")
-
-Model().Add(i1, i2).Add(i3).Out(o)
-
diff --git a/externals/nnapi_test_generator/tests/P_float/stderr.txt.expect b/externals/nnapi_test_generator/tests/P_float/stderr.txt.expect
deleted file mode 100644
index c5a6e36b9..000000000
--- a/externals/nnapi_test_generator/tests/P_float/stderr.txt.expect
+++ /dev/null
@@ -1,2 +0,0 @@
-Output CTS model: -
-Output example:-
diff --git a/externals/nnapi_test_generator/tests/P_float/stdout.txt.expect b/externals/nnapi_test_generator/tests/P_float/stdout.txt.expect
deleted file mode 100644
index eb8cc146b..000000000
--- a/externals/nnapi_test_generator/tests/P_float/stdout.txt.expect
+++ /dev/null
@@ -1,23 +0,0 @@
-// Generated file (from: addfloat.mod.py). Do not edit
-void CreateModel(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {3,4});
- // Phase 1, operands
- auto operand1 = model->addOperand(&type0);
- auto operand2 = model->addOperand(&type0);
- auto operand3 = model->addOperand(&type0);
- auto operand4 = model->addOperand(&type0);
- auto tmp4 = model->addOperand(&type0);
- // Phase 2, operations
- model->addOperation(ANEURALNETWORKS_ADD, {operand1, operand2}, {tmp4});
- model->addOperation(ANEURALNETWORKS_ADD, {operand3, tmp4}, {operand4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {operand1, operand2, operand3},
- {operand4});
- assert(model->isValid());
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
diff --git a/externals/nnapi_test_generator/tests/P_full/addfloat.mod.py b/externals/nnapi_test_generator/tests/P_full/addfloat.mod.py
deleted file mode 100644
index dbe7701a1..000000000
--- a/externals/nnapi_test_generator/tests/P_full/addfloat.mod.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# model
-model = Model()
-i1 = Input("op1", "TENSOR_FLOAT32", "{2}") # a vector of 2 float32s
-i2 = Input("op2", "TENSOR_FLOAT32", "{2}") # another vector of 2 float32s
-b0 = Int32Scalar("b0", 0) # an int32_t scalar bias
-i3 = Output("op3", "TENSOR_FLOAT32", "{2}")
-model = model.Operation("ADD", i1, i2, b0).To(i3)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
- [1.0, 2.0],
- i2: # input 1
- [3.0, 4.0]}
-
-output0 = {i3: # output 0
- [4.0, 6.0]}
-
-# Instantiate an example
-Example((input0, output0))
-
-
-
diff --git a/externals/nnapi_test_generator/tests/P_full/stderr.txt.expect b/externals/nnapi_test_generator/tests/P_full/stderr.txt.expect
deleted file mode 100644
index c5a6e36b9..000000000
--- a/externals/nnapi_test_generator/tests/P_full/stderr.txt.expect
+++ /dev/null
@@ -1,2 +0,0 @@
-Output CTS model: -
-Output example:-
diff --git a/externals/nnapi_test_generator/tests/P_full/stdout.txt.expect b/externals/nnapi_test_generator/tests/P_full/stdout.txt.expect
deleted file mode 100644
index e3d2af3fa..000000000
--- a/externals/nnapi_test_generator/tests/P_full/stdout.txt.expect
+++ /dev/null
@@ -1,46 +0,0 @@
-// Generated file (from: addfloat.mod.py). Do not edit
-void CreateModel(Model *model) {
- OperandType type1(Type::INT32, {});
- OperandType type0(Type::TENSOR_FLOAT32, {2});
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type0);
- auto b0 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type0);
- // Phase 2, operations
- static int32_t b0_init[] = {0};
- model->setOperandValue(b0, b0_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_ADD, {op1, op2, b0}, {op3});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op3});
- assert(model->isValid());
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-// Generated file (from: addfloat.mod.py). Do not edit
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f}}, {1, {3.0f, 4.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {4.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
diff --git a/externals/nnapi_test_generator/tests/P_lstm/lstm.mod.py b/externals/nnapi_test_generator/tests/P_lstm/lstm.mod.py
deleted file mode 100644
index cb1bf6010..000000000
--- a/externals/nnapi_test_generator/tests/P_lstm/lstm.mod.py
+++ /dev/null
@@ -1,161 +0,0 @@
-#
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# LSTM Test: No Cifg, No Peephole, No Projection, and No Clipping.
-
-model = Model()
-
-n_batch = 1
-n_input = 2
-# n_cell and n_output have the same size when there is no projection.
-n_cell = 4
-n_output = 4
-
-input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input))
-
-input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_input))
-input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_input))
-input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_input))
-input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_input))
-
-recurrent_to_input_weights = Input("recurrent_to_intput_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_output))
-recurrent_to_forget_weights = Input("recurrent_to_forget_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_output))
-recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_output))
-recurrent_to_output_weights = Input("recurrent_to_output_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_output))
-
-cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT32", "{0}")
-cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT32", "{0}")
-cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT32", "{0}")
-
-input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT32", "{%d}"%(n_cell))
-forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT32", "{%d}"%(n_cell))
-cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT32", "{%d}"%(n_cell))
-output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT32", "{%d}"%(n_cell))
-
-projection_weights = Input("projection_weights", "TENSOR_FLOAT32", "{0,0}")
-projection_bias = Input("projection_bias", "TENSOR_FLOAT32", "{0}")
-
-output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output))
-cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell))
-
-activation_param = Input("activation_param", "TENSOR_INT32", "{1}")
-cell_clip_param = Input("cell_clip_param", "TENSOR_FLOAT32", "{1}")
-proj_clip_param = Input("proj_clip_param", "TENSOR_FLOAT32", "{1}")
-
-scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, (n_cell * 4)))
-output_state_out = IgnoredOutput("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output))
-cell_state_out = IgnoredOutput("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell))
-output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output))
-
-model = model.Operation("LSTM",
- input,
-
- input_to_input_weights,
- input_to_forget_weights,
- input_to_cell_weights,
- input_to_output_weights,
-
- recurrent_to_input_weights,
- recurrent_to_forget_weights,
- recurrent_to_cell_weights,
- recurrent_to_output_weights,
-
- cell_to_input_weights,
- cell_to_forget_weights,
- cell_to_output_weights,
-
- input_gate_bias,
- forget_gate_bias,
- cell_gate_bias,
- output_gate_bias,
-
- projection_weights,
- projection_bias,
-
- output_state_in,
- cell_state_in,
-
- activation_param,
- cell_clip_param,
- proj_clip_param
-).To([scratch_buffer, output_state_out, cell_state_out, output])
-
-# Example 1. Input in operand 0,
-input0 = {input_to_input_weights: [-0.45018822, -0.02338299, -0.0870589, -0.34550029, 0.04266912, -0.15680569, -0.34856534, 0.43890524],
- input_to_forget_weights: [0.09701663, 0.20334584, -0.50592935, -0.31343272, -0.40032279, 0.44781327, 0.01387155, -0.35593212],
- input_to_cell_weights: [-0.50013041, 0.1370284, 0.11810488, 0.2013163, -0.20583314, 0.44344562, 0.22077113, -0.29909778],
- input_to_output_weights: [-0.25065863, -0.28290087, 0.04613829, 0.40525138, 0.44272184, 0.03897077, -0.1556896, 0.19487578],
-
- input_gate_bias: [0.,0.,0.,0.],
- forget_gate_bias: [1.,1.,1.,1.],
- cell_gate_bias: [0.,0.,0.,0.],
- output_gate_bias: [0.,0.,0.,0.],
-
- recurrent_to_input_weights: [
- -0.0063535, -0.2042388, 0.31454784, -0.35746509, 0.28902304, 0.08183324,
- -0.16555229, 0.02286911, -0.13566875, 0.03034258, 0.48091322,
- -0.12528998, 0.24077177, -0.51332325, -0.33502164, 0.10629296],
-
- recurrent_to_cell_weights: [
- -0.3407414, 0.24443203, -0.2078532, 0.26320225, 0.05695659, -0.00123841,
- -0.4744786, -0.35869038, -0.06418842, -0.13502428, -0.501764, 0.22830659,
- -0.46367589, 0.26016325, -0.03894562, -0.16368064],
-
- recurrent_to_forget_weights: [
- -0.48684245, -0.06655136, 0.42224967, 0.2112639, 0.27654213, 0.20864892,
- -0.07646349, 0.45877004, 0.00141793, -0.14609534, 0.36447752, 0.09196436,
- 0.28053468, 0.01560611, -0.20127171, -0.01140004],
-
- recurrent_to_output_weights: [
- 0.43385774, -0.17194885, 0.2718237, 0.09215671, 0.24107647, -0.39835793,
- 0.18212086, 0.01301402, 0.48572797, -0.50656658, 0.20047462, -0.20607421,
- -0.51818722, -0.15390486, 0.0468148, 0.39922136],
-
- cell_to_input_weights: [],
- cell_to_forget_weights: [],
- cell_to_output_weights: [],
-
- projection_weights: [],
- projection_bias: [],
-
- activation_param: [4], # Tanh
- cell_clip_param: [0.],
- proj_clip_param: [0.],
-}
-
-# Instantiate examples
-# TODO: Add more examples after fixing the reference issue
-test_inputs = [
- [2., 3.],
-# [3., 4.],[1., 1.]
-]
-golden_outputs = [
- [-0.02973187, 0.1229473, 0.20885126, -0.15358765,],
-# [-0.03716109, 0.12507336, 0.41193449, -0.20860538],
-# [-0.15053082, 0.09120187, 0.24278517, -0.12222792]
-]
-
-for (input_tensor, output_tensor) in zip(test_inputs, golden_outputs):
- output0 = {
- scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ],
- cell_state_out: [ 0 for x in range(n_batch * n_cell) ],
- output_state_out: [ 0 for x in range(n_batch * n_output) ],
- output: output_tensor
- }
- input0[input] = input_tensor
- input0[output_state_in] = [ 0 for _ in range(n_batch * n_output) ]
- input0[cell_state_in] = [ 0 for _ in range(n_batch * n_cell) ]
- Example((input0, output0))
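The LSTM spec above is the largest of the deleted `*.mod.py` files, but it follows the same pattern as the rest of this tree: declare operands, wire them into one or more operations, then hand `Example()` an `(input map, output map)` pair per test case. Below is a minimal sketch of that pattern, assuming the same `test_generator` DSL (`Model`, `Input`, `Output`, `Int32Scalar`, `Example`) that these spec files are exec'd against; the file name and values are illustrative only.

```
# Hypothetical spec (e.g. tiny_add.mod.py); the DSL names are injected by the
# generator before the spec is exec'd, so no imports are needed here.
model = Model()
a = Input("a", "TENSOR_FLOAT32", "{2}")        # first model input, 2 floats
b = Input("b", "TENSOR_FLOAT32", "{2}")        # second model input
act = Int32Scalar("act", 0)                    # fused activation: NONE
out = Output("out", "TENSOR_FLOAT32", "{2}")   # model output

model = model.Operation("ADD", a, b, act).To(out)

# One Example per (inputs, outputs) pair; keys are the operand objects.
Example(({a: [1.0, 2.0], b: [3.0, 4.0]},
         {out: [4.0, 6.0]}))
```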
diff --git a/externals/nnapi_test_generator/tests/P_lstm/stderr.txt.expect b/externals/nnapi_test_generator/tests/P_lstm/stderr.txt.expect
deleted file mode 100644
index c5a6e36b9..000000000
--- a/externals/nnapi_test_generator/tests/P_lstm/stderr.txt.expect
+++ /dev/null
@@ -1,2 +0,0 @@
-Output CTS model: -
-Output example:-
diff --git a/externals/nnapi_test_generator/tests/P_lstm/stdout.txt.expect b/externals/nnapi_test_generator/tests/P_lstm/stdout.txt.expect
deleted file mode 100644
index 2ba320d77..000000000
--- a/externals/nnapi_test_generator/tests/P_lstm/stdout.txt.expect
+++ /dev/null
@@ -1,75 +0,0 @@
-// Generated file (from: lstm.mod.py). Do not edit
-void CreateModel(Model *model) {
- OperandType type5(Type::TENSOR_FLOAT32, {0,0});
- OperandType type3(Type::TENSOR_FLOAT32, {0});
- OperandType type9(Type::TENSOR_FLOAT32, {1, 16});
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
- OperandType type6(Type::TENSOR_FLOAT32, {1, 4});
- OperandType type8(Type::TENSOR_FLOAT32, {1});
- OperandType type1(Type::TENSOR_FLOAT32, {4, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {4, 4});
- OperandType type4(Type::TENSOR_FLOAT32, {4});
- OperandType type7(Type::TENSOR_INT32, {1});
- // Phase 1, operands
- auto input = model->addOperand(&type0);
- auto input_to_input_weights = model->addOperand(&type1);
- auto input_to_forget_weights = model->addOperand(&type1);
- auto input_to_cell_weights = model->addOperand(&type1);
- auto input_to_output_weights = model->addOperand(&type1);
- auto recurrent_to_intput_weights = model->addOperand(&type2);
- auto recurrent_to_forget_weights = model->addOperand(&type2);
- auto recurrent_to_cell_weights = model->addOperand(&type2);
- auto recurrent_to_output_weights = model->addOperand(&type2);
- auto cell_to_input_weights = model->addOperand(&type3);
- auto cell_to_forget_weights = model->addOperand(&type3);
- auto cell_to_output_weights = model->addOperand(&type3);
- auto input_gate_bias = model->addOperand(&type4);
- auto forget_gate_bias = model->addOperand(&type4);
- auto cell_gate_bias = model->addOperand(&type4);
- auto output_gate_bias = model->addOperand(&type4);
- auto projection_weights = model->addOperand(&type5);
- auto projection_bias = model->addOperand(&type3);
- auto output_state_in = model->addOperand(&type6);
- auto cell_state_in = model->addOperand(&type6);
- auto activation_param = model->addOperand(&type7);
- auto cell_clip_param = model->addOperand(&type8);
- auto proj_clip_param = model->addOperand(&type8);
- auto scratch_buffer = model->addOperand(&type9);
- auto output_state_out = model->addOperand(&type6);
- auto cell_state_out = model->addOperand(&type6);
- auto output = model->addOperand(&type6);
- // Phase 2, operations
- model->addOperation(ANEURALNETWORKS_LSTM, {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param}, {scratch_buffer, output_state_out, cell_state_out, output});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param},
- {scratch_buffer, output_state_out, cell_state_out, output});
- assert(model->isValid());
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {1, 2, 0};
- return ignore.find(i) != ignore.end();
-}
-// Generated file (from: lstm.mod.py). Do not edit
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {2.0f, 3.0f}}, {1, {-0.45018822f, -0.02338299f, -0.0870589f, -0.34550029f, 0.04266912f, -0.15680569f, -0.34856534f, 0.43890524f}}, {2, {0.09701663f, 0.20334584f, -0.50592935f, -0.31343272f, -0.40032279f, 0.44781327f, 0.01387155f, -0.35593212f}}, {3, {-0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f, -0.20583314f, 0.44344562f, 0.22077113f, -0.29909778f}}, {4, {-0.25065863f, -0.28290087f, 0.04613829f, 0.40525138f, 0.44272184f, 0.03897077f, -0.1556896f, 0.19487578f}}, {5, {-0.0063535f, -0.2042388f, 0.31454784f, -0.35746509f, 0.28902304f, 0.08183324f, -0.16555229f, 0.02286911f, -0.13566875f, 0.03034258f, 0.48091322f, -0.12528998f, 0.24077177f, -0.51332325f, -0.33502164f, 0.10629296f}}, {6, {-0.48684245f, -0.06655136f, 0.42224967f, 0.2112639f, 0.27654213f, 0.20864892f, -0.07646349f, 0.45877004f, 0.00141793f, -0.14609534f, 0.36447752f, 0.09196436f, 0.28053468f, 0.01560611f, -0.20127171f, -0.01140004f}}, {7, {-0.3407414f, 0.24443203f, -0.2078532f, 0.26320225f, 0.05695659f, -0.00123841f, -0.4744786f, -0.35869038f, -0.06418842f, -0.13502428f, -0.501764f, 0.22830659f, -0.46367589f, 0.26016325f, -0.03894562f, -0.16368064f}}, {8, {0.43385774f, -0.17194885f, 0.2718237f, 0.09215671f, 0.24107647f, -0.39835793f, 0.18212086f, 0.01301402f, 0.48572797f, -0.50656658f, 0.20047462f, -0.20607421f, -0.51818722f, -0.15390486f, 0.0468148f, 0.39922136f}}, {9, {}}, {10, {}}, {11, {}}, {12, {0.0f, 0.0f, 0.0f, 0.0f}}, {13, {1.0f, 1.0f, 1.0f, 1.0f}}, {14, {0.0f, 0.0f, 0.0f, 0.0f}}, {15, {0.0f, 0.0f, 0.0f, 0.0f}}, {16, {}}, {17, {}}, {18, {0, 0, 0, 0}}, {19, {0, 0, 0, 0}}, {21, {0.0f}}, {22, {0.0f}}},
- // int -> INT32 map
- {{20, {4}}},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{1, {0, 0, 0, 0}}, {2, {0, 0, 0, 0}}, {3, {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f}}, {0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
diff --git a/externals/nnapi_test_generator/tests/P_quantized_avgpool/averpoolfloat.mod.py b/externals/nnapi_test_generator/tests/P_quantized_avgpool/averpoolfloat.mod.py
deleted file mode 100644
index 17d6e0a4f..000000000
--- a/externals/nnapi_test_generator/tests/P_quantized_avgpool/averpoolfloat.mod.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# model
-model = Model()
-i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "0.0f, 127.5f, {1, 2, 2, 1}")
-cons1 = Int32Scalar("cons1", 1)
-act = Int32Scalar("act", 0)
-o = Output("op3", "TENSOR_QUANT8_ASYMM", "0.0f, 127.5f, {1, 2, 2, 1}")
-model = model.Operation("AVERAGE_POOL", i1, cons1, cons1, cons1, cons1, cons1, act).To(o)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
- [1, 2, 3, 4]}
-
-output0 = {o: # output 0
- [1, 2, 3, 4]}
-
-# Instantiate an example
-Example((input0, output0))
-
-
-
diff --git a/externals/nnapi_test_generator/tests/P_quantized_avgpool/stderr.txt.expect b/externals/nnapi_test_generator/tests/P_quantized_avgpool/stderr.txt.expect
deleted file mode 100644
index c5a6e36b9..000000000
--- a/externals/nnapi_test_generator/tests/P_quantized_avgpool/stderr.txt.expect
+++ /dev/null
@@ -1,2 +0,0 @@
-Output CTS model: -
-Output example:-
diff --git a/externals/nnapi_test_generator/tests/P_quantized_avgpool/stdout.txt.expect b/externals/nnapi_test_generator/tests/P_quantized_avgpool/stdout.txt.expect
deleted file mode 100644
index b4632d34d..000000000
--- a/externals/nnapi_test_generator/tests/P_quantized_avgpool/stdout.txt.expect
+++ /dev/null
@@ -1,48 +0,0 @@
-// Generated file (from: averpoolfloat.mod.py). Do not edit
-void CreateModel(Model *model) {
- OperandType type1(Type::INT32, {});
- OperandType type0(Type::TENSOR_QUANT8_ASYMM, 0.0f, 127.5f, {1, 2, 2, 1});
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto cons1 = model->addOperand(&type1);
- auto act = model->addOperand(&type1);
- auto op3 = model->addOperand(&type0);
- // Phase 2, operations
- static int32_t cons1_init[] = {1};
- model->setOperandValue(cons1, cons1_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {0};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_AVERAGE_POOL, {op1, cons1, cons1, cons1, cons1, cons1, act}, {op3});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op3});
- assert(model->isValid());
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-// Generated file (from: averpoolfloat.mod.py). Do not edit
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {1, 2, 3, 4}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {1, 2, 3, 4}}}
-}
-}, // End of an example
diff --git a/externals/nnapi_test_generator/tests/P_quantized_conv/quantized.mod.py b/externals/nnapi_test_generator/tests/P_quantized_conv/quantized.mod.py
deleted file mode 100644
index 7ef623513..000000000
--- a/externals/nnapi_test_generator/tests/P_quantized_conv/quantized.mod.py
+++ /dev/null
@@ -1,11 +0,0 @@
-i4 = Int32Scalar("b4", 2)
-i5 = Int32Scalar("b5", 2)
-i6 = Int32Scalar("b6", 2)
-i7 = Int32Scalar("b7", 0)
-i2 = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 1}") # input 0
-i3 = Output("op3", "TENSOR_QUANT8_ASYMM", "{1, 1, 1, 1}") # output 0
-i0 = Parameter("op0", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 1}", [1, 1, 1, 1]) # parameters
-i1 = Parameter("op1", "TENSOR_INT32", "{1}", [0]) # parameters
-model = Model()
-model = model.Conv(i2, i0, i1, i4, i5, i6, i7).To(i3)
-
diff --git a/externals/nnapi_test_generator/tests/P_quantized_conv/stderr.txt.expect b/externals/nnapi_test_generator/tests/P_quantized_conv/stderr.txt.expect
deleted file mode 100644
index c5a6e36b9..000000000
--- a/externals/nnapi_test_generator/tests/P_quantized_conv/stderr.txt.expect
+++ /dev/null
@@ -1,2 +0,0 @@
-Output CTS model: -
-Output example:-
diff --git a/externals/nnapi_test_generator/tests/P_quantized_conv/stdout.txt.expect b/externals/nnapi_test_generator/tests/P_quantized_conv/stdout.txt.expect
deleted file mode 100644
index 6b28bdd54..000000000
--- a/externals/nnapi_test_generator/tests/P_quantized_conv/stdout.txt.expect
+++ /dev/null
@@ -1,40 +0,0 @@
-// Generated file (from: quantized.mod.py). Do not edit
-void CreateModel(Model *model) {
- OperandType type0(Type::INT32, {});
- OperandType type3(Type::TENSOR_INT32, {1});
- OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 1});
- OperandType type1(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1});
- // Phase 1, operands
- auto b4 = model->addOperand(&type0);
- auto b5 = model->addOperand(&type0);
- auto b6 = model->addOperand(&type0);
- auto b7 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto op0 = model->addOperand(&type1);
- auto op1 = model->addOperand(&type3);
- // Phase 2, operations
- static int32_t b4_init[] = {2};
- model->setOperandValue(b4, b4_init, sizeof(int32_t) * 1);
- static int32_t b5_init[] = {2};
- model->setOperandValue(b5, b5_init, sizeof(int32_t) * 1);
- static int32_t b6_init[] = {2};
- model->setOperandValue(b6, b6_init, sizeof(int32_t) * 1);
- static int32_t b7_init[] = {0};
- model->setOperandValue(b7, b7_init, sizeof(int32_t) * 1);
- static uint8_t op0_init[] = {1, 1, 1, 1};
- model->setOperandValue(op0, op0_init, sizeof(uint8_t) * 4);
- static int32_t op1_init[] = {0};
- model->setOperandValue(op1, op1_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op2, op0, op1, b4, b5, b6, b7}, {op3});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op2},
- {op3});
- assert(model->isValid());
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
diff --git a/externals/nnapi_test_generator/tests/P_vts_full/stderr.txt.expect b/externals/nnapi_test_generator/tests/P_vts_full/stderr.txt.expect
deleted file mode 100644
index 3decb4c1c..000000000
--- a/externals/nnapi_test_generator/tests/P_vts_full/stderr.txt.expect
+++ /dev/null
@@ -1,2 +0,0 @@
-Output VTS model: -
-Output example:-
diff --git a/externals/nnapi_test_generator/tests/P_vts_full/stdout.txt.expect b/externals/nnapi_test_generator/tests/P_vts_full/stdout.txt.expect
deleted file mode 100644
index 14cd4f99d..000000000
--- a/externals/nnapi_test_generator/tests/P_vts_full/stdout.txt.expect
+++ /dev/null
@@ -1,93 +0,0 @@
-// Generated code. Do not edit
-// Create the model
-Model createTestModel() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 16},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 1},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::ADD,
- .inputs = {0, 2, 1},
- .outputs = {3},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {3};
- std::vector<uint8_t> operandValues = {
- 0, 0, 0, 0, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-// Generated file (from: vts_full.mod.py). Do not edit
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 8.0f, 10.0f, 12.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
diff --git a/externals/nnapi_test_generator/tests/P_vts_full/vts_full.mod.py b/externals/nnapi_test_generator/tests/P_vts_full/vts_full.mod.py
deleted file mode 100644
index 4ad3b2e4b..000000000
--- a/externals/nnapi_test_generator/tests/P_vts_full/vts_full.mod.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Force VTS mode
-Configuration.vts = True
-# model
-model = Model()
-i0 = Input("operand0","TENSOR_FLOAT32", "{1, 2, 2, 1}")
-b0 = Int32Scalar("b0", 0)
-p0 = Parameter("p0", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [5.0, 6.0, 7.0, 8.0])
-o = Output("out","TENSOR_FLOAT32", "{1, 2, 2, 1}")
-
-model.Operation("ADD", i0, p0, b0).To(o)
-
-input0 = {i0: # input 0
- [1.0, 2.0, 3.0, 4.0]}
-
-output0 = {o: # output 0
- [6.0, 8.0, 10.0, 12.0]}
-
-# Instantiate an example
-Example((input0, output0))
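Note that `Configuration.vts = True` is what switches this spec from CTS to VTS output; the harness in `tests/test.py` below additionally selects `vts_generator.py` whenever the directory name has `vts` right after its `P_`/`F_` prefix. The following is a rough sketch of the equivalent manual invocation, mirroring what `CreateCmd()`/`ExecTest()` do in that harness; the paths are illustrative and assume the generator script is executable.

```
# Sketch only: run the VTS generator on one spec and capture its output,
# the way tests/test.py does when it records the *.expect files.
import subprocess

# Illustrative paths; adjust to wherever the generator and spec actually live.
generator = "externals/nnapi_test_generator/vts_generator.py"
spec = "externals/nnapi_test_generator/tests/P_vts_full/vts_full.mod.py"

with open("stdout.txt", "w") as out, open("stderr.txt", "w") as err:
  # "-m -" / "-e -" means "write the model and example to stdout", which is
  # what the stdout.txt.expect files in this directory were recorded against.
  ret = subprocess.call([generator, spec, "-m", "-", "-e", "-"],
                        stdout=out, stderr=err)
print("generator exit code:", ret)
```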
diff --git a/externals/nnapi_test_generator/tests/P_vts_operands/addfloat.mod.py b/externals/nnapi_test_generator/tests/P_vts_operands/addfloat.mod.py
deleted file mode 100644
index 976cb35ec..000000000
--- a/externals/nnapi_test_generator/tests/P_vts_operands/addfloat.mod.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# Force VTS mode
-Configuration.vts = True
-# model
-i1 = Input("operand1","TENSOR_FLOAT32", "{3,4}")
-i2 = Input("operand2","TENSOR_FLOAT32", "{3,4}")
-i3 = Input("operand3","TENSOR_FLOAT32", "{3,4}")
-Parameter("p1", "TENSOR_QUANT8_ASYMM", "{1, 2, 3}", [1, 2, 3, 4, 5, 6])
-Parameter("p2", "TENSOR_FLOAT32", "{}", [42.0])
-o = Output("operand4","TENSOR_FLOAT32", "{3,4}")
-
-Model().Add(i1, i2).Add(i3).Out(o)
-
diff --git a/externals/nnapi_test_generator/tests/P_vts_operands/stderr.txt.expect b/externals/nnapi_test_generator/tests/P_vts_operands/stderr.txt.expect
deleted file mode 100644
index 3decb4c1c..000000000
--- a/externals/nnapi_test_generator/tests/P_vts_operands/stderr.txt.expect
+++ /dev/null
@@ -1,2 +0,0 @@
-Output VTS model: -
-Output example:-
diff --git a/externals/nnapi_test_generator/tests/P_vts_operands/stdout.txt.expect b/externals/nnapi_test_generator/tests/P_vts_operands/stdout.txt.expect
deleted file mode 100644
index 2e74d1fc0..000000000
--- a/externals/nnapi_test_generator/tests/P_vts_operands/stdout.txt.expect
+++ /dev/null
@@ -1,103 +0,0 @@
-// Generated code. Do not edit
-// Create the model
-Model createTestModel() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {3,4},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {3,4},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {3,4},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 3},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 6},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 6, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {3,4},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {3,4},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::ADD,
- .inputs = {0, 1},
- .outputs = {6},
- },
- {
- .type = OperationType::ADD,
- .inputs = {2, 6},
- .outputs = {5},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1, 2};
- const std::vector<uint32_t> outputIndexes = {5};
- std::vector<uint8_t> operandValues = {
- 1, 2, 3, 4, 5, 6, 0, 0, 40, 66
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
diff --git a/externals/nnapi_test_generator/tests/P_weird/stderr.txt.expect b/externals/nnapi_test_generator/tests/P_weird/stderr.txt.expect
deleted file mode 100644
index c5a6e36b9..000000000
--- a/externals/nnapi_test_generator/tests/P_weird/stderr.txt.expect
+++ /dev/null
@@ -1,2 +0,0 @@
-Output CTS model: -
-Output example:-
diff --git a/externals/nnapi_test_generator/tests/P_weird/stdout.txt.expect b/externals/nnapi_test_generator/tests/P_weird/stdout.txt.expect
deleted file mode 100644
index fa67d68ac..000000000
--- a/externals/nnapi_test_generator/tests/P_weird/stdout.txt.expect
+++ /dev/null
@@ -1,51 +0,0 @@
-// Generated file (from: weird_add.mod.py). Do not edit
-void CreateModel(Model *model) {
- OperandType type1(Type::INT32, {});
- OperandType type0(Type::TENSOR_FLOAT32, {2});
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type0);
- auto b0 = model->addOperand(&type1);
- auto tmp = model->addOperand(&type0);
- auto tmp2 = model->addOperand(&type0);
- auto op3 = model->addOperand(&type0);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static int32_t b0_init[] = {0};
- model->setOperandValue(b0, b0_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_ADD, {op1, op2, b0}, {tmp});
- model->addOperation(ANEURALNETWORKS_ADD, {tmp, op2, b0}, {tmp2});
- model->addOperation(ANEURALNETWORKS_ADD, {tmp2, op4, b0}, {op3});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2, op4},
- {op3});
- assert(model->isValid());
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-// Generated file (from: weird_add.mod.py). Do not edit
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f}}, {1, {3.0f, 4.0f}}, {2, {5.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {9.0f, 12.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
diff --git a/externals/nnapi_test_generator/tests/P_weird/weird_add.mod.py b/externals/nnapi_test_generator/tests/P_weird/weird_add.mod.py
deleted file mode 100644
index a230267a4..000000000
--- a/externals/nnapi_test_generator/tests/P_weird/weird_add.mod.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# model
-model = Model()
-i1 = Input("op1", "TENSOR_FLOAT32", "{2}") # a vector of 2 float32s
-i2 = Input("op2", "TENSOR_FLOAT32", "{2}") # another vector of 2 float32s
-b0 = Int32Scalar("b0", 0) # an int32_t scalar bias
-tmp = Internal("tmp", "TENSOR_FLOAT32", "{2}")
-tmp2 = Internal("tmp2", "TENSOR_FLOAT32", "{2}")
-o3 = Output("op3", "TENSOR_FLOAT32", "{2}")
-i4 = Input("op4", "TENSOR_FLOAT32", "{2}") # another vector of 2 float32s
-model = model.Operation("ADD", i1, i2, b0).To(tmp)
-model = model.Operation("ADD", tmp, i2, b0).To(tmp2)
-model = model.Operation("ADD", tmp2, i4, b0).To(o3)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
- [1.0, 2.0],
- i2: # input 1
- [3.0, 4.0],
- i4: # input 4
- [5.0, 6.0]}
-
-output0 = {o3: # output 0
- [9.0, 12.0]}
-
-# Instantiate an example
-Example((input0, output0))
-
-
-
diff --git a/externals/nnapi_test_generator/tests/test.py b/externals/nnapi_test_generator/tests/test.py
deleted file mode 100755
index c987cf680..000000000
--- a/externals/nnapi_test_generator/tests/test.py
+++ /dev/null
@@ -1,328 +0,0 @@
-#!/usr/bin/python3
-
-# Copyright 2017, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""NN Model Test Compiler Test.
-
-Runs subdirectories of tests for the test generator/compiler.
-"""
-
-import filecmp
-import glob
-import os
-import re
-import shutil
-import subprocess
-import sys
-import unittest
-
-
-__author__ = 'Android'
-
-
-DOTTED_LINE = '................'
-
-class OrigFile:
- OrigDir = None
-
-class TestGeneratorTests(unittest.TestCase):
- """Class to contain all the unittest test cases.
-
- Tests will be dynamically added to this class as methods.
- No static tests, so this class is initially empty.
-
- """
- pass
-
-
-def GenerateTests(dir_name):
- """Creates a test method that can be added as method to GenerateTests."""
- cwd = os.getcwd()
- def Test(self):
- os.chdir(cwd)
- ExecTest(dir_name, self)
- return Test
-
-
-def AddUnitTests(test_dirs):
- """Adds a test to Tests for each directory in test_dirs."""
-
- for t in test_dirs:
- # Must start with 'test_' according to unittest
- test_name = 'test_%s' % t
- test = GenerateTests(t)
- # Add test as method to TestGeneratorTests with test_name as method name
- setattr(TestGeneratorTests, test_name, test)
-
-
-class Options(object):
- verbose = 0
- cleanup = 1
- update_cts = 0
- zero_return = 0
-
-
-def CompareFiles(actual, expect):
- """Compares actual and expect for equality."""
- if not os.path.isfile(actual):
- if Options.verbose:
- print ('Could not find %s' % actual)
- return False
- if not os.path.isfile(expect):
- if Options.verbose:
- print ('Could not find %s' % expect)
- return False
-
- return filecmp.cmp(actual, expect, False)
-
-
-def CopyIfDifferent(src, dst):
- """Updates dst if it is different from src."""
- if not CompareFiles(src, dst):
- if Options.verbose:
- print ('Copying from %s to %s' % (src, dst))
- shutil.copyfile(src, dst)
-
-
-def GetCommandLineArgs(filename):
- """Extracts command line arguments from first comment line in a file."""
- f = open(filename, 'r')
- line = f.readline()
- f.close()
- if line[0] == '/' and line[1] == '/':
- return line[2:].strip()
- else:
- return ''
-
-
-def ReadFileToStr(filename):
- """Returns contents of file as a str."""
- with open(filename, 'r') as f:
- return f.read()
-
-
-def ReportIfDifferFromExpected(tests, name, file1, file2):
- """Fails tests if file1 and file2 differ."""
- if not CompareFiles(file1, file2):
- if Options.verbose:
- err_message = ('%s is different:\n'
- 'expected:\n%s\n%s%s\n\n'
- 'actual:\n%s\n%s%s\n') % (
- name,
- DOTTED_LINE, ReadFileToStr(file1), DOTTED_LINE,
- DOTTED_LINE, ReadFileToStr(file2), DOTTED_LINE)
- else:
- err_message = '%s is different' % name
- tests.fail(err_message)
-
-
-def GetRSFiles():
- """Returns a list of files in cwd with extension '.rs' or '.fs'."""
- rs_files = glob.glob('*.mod.py')
- rs_files.sort()
- return rs_files
-
-
-def GetOutDir():
- return os.path.abspath(os.path.join(OrigFile.OrigDir, "../"))
-
-
-# Cache slot for GetOutDir results. Currently unused: GetOutDir() above derives
-# the path from OrigFile.OrigDir and does not need to invoke any subprocess.
-GetOutDir.cache = None
-
-
-def CreateCmd(run_vts):
- """Creates the test command to run for the current test."""
- cmd_string = ('%s/%s_generator.py'
- ) % (GetOutDir(), "test" if not run_vts else "vts")
- base_args = cmd_string.split()
- rs_files = GetRSFiles()
-
-  # Extra command line arguments can be placed as a leading // comment at the
-  # start of any .mod.py spec file. All of these extra args are bundled up and
-  # passed to the generator invocation.
- extra_args_str = ''
- for rs_file in rs_files:
- extra_args_str += GetCommandLineArgs(rs_file)
- extra_args = extra_args_str.split()
-
- args = base_args + extra_args + rs_files
- return args
-
-def Cleanup():
- """Cleans up the cwd of any tmp files created in current test."""
- try:
- os.remove('stdout.txt')
- os.remove('stderr.txt')
- shutil.rmtree('tmp/')
- except OSError:
- pass
-
-
-def CheckTestResult(dir_name, subprocess_ret, tests, args):
- """Checks the result of the subprocess command to see if it passed/failed.
-
- If dir_name starts with 'F_', then subprocess is expected to fail.
- If it instead succeeded, then this test is failed.
- Vice versa with a dir_name starting with 'P_'.
-
- Args:
- dir_name: name of current directory/test name
- subprocess_ret: return code of subprocess
- tests: unittest, call tests.fail(reason) when failure
- args: the arguments for the command that was run
- """
- if dir_name[0:2] == 'F_':
- if subprocess_ret == 0:
- if Options.verbose:
- err_message = ('Command (%s) passed on invalid input\n'
- 'stdout:\n%s\n%s%s\n') % (
- ' '.join(args),
- DOTTED_LINE, ReadFileToStr('stdout.txt'), DOTTED_LINE
- )
- else:
- err_message = 'Command passed on invalid input'
- tests.fail(err_message)
- elif dir_name[0:2] == 'P_':
- if subprocess_ret != 0:
- if Options.verbose:
- err_message = ('Command (%s) failed on valid input\n'
- 'stderr:\n%s\n%s%s\n') % (
- ' '.join(args),
- DOTTED_LINE, ReadFileToStr('stderr.txt'), DOTTED_LINE
- )
- else:
- err_message = 'Command failed on valid input'
- tests.fail(err_message)
- else:
- tests.fail('Invalid test name: ' + dir_name +
- ', should start with F_ or P_')
-
-
-
-def ExecTest(dir_name, tests):
- """Executes an test generator test from dir_name."""
-
- os.chdir(dir_name)
- stdout_file = open('stdout.txt', 'w+')
- stderr_file = open('stderr.txt', 'w+')
- run_vts = (dir_name[2:5] == 'vts')
- args = CreateCmd(run_vts)
-
- if Options.verbose > 1:
- print ('Executing:', ' '.join(args))
-
- # Execute the command and check the resulting shell return value.
- # All tests that are expected to FAIL have directory names that
- # start with 'F_'. Other tests that are expected to PASS have
- # directory names that start with 'P_'.
- ret = 0
- try:
- ret = subprocess.call(args, stdout=stdout_file, stderr=stderr_file)
- except OSError:
- tests.fail('subprocess.call failed: ' + ' '.join(args))
-
- stdout_file.close()
- stderr_file.close()
-
- CheckTestResult(dir_name, ret, tests, args)
-
- ReportIfDifferFromExpected(tests, 'stdout', 'stdout.txt.expect', 'stdout.txt')
- ReportIfDifferFromExpected(tests, 'stderr', 'stderr.txt.expect', 'stderr.txt')
-
- if Options.cleanup:
- Cleanup()
-
-
-def Usage():
- """Print out usage information."""
-  print (('Usage: %s [OPTION]... [TESTNAME]...\n'
-          'NNAPI test generator test harness\n'
-          'Runs TESTNAMEs (all tests by default)\n'
-          'Available Options:\n'
-          '  -h, --help          Help message\n'
-          '  -n, --no-cleanup    Don\'t clean up after running tests\n'
-          '  -v, --verbose       Verbose output. Enter multiple -v to get more verbose.\n'
-          '  -z, --zero-return   Return 0 as exit code no matter if tests fail. Required for TreeHugger.\n'
-         ) % (sys.argv[0]))
- return
-
-
-def main():
- """Runs the unittest suite.
-
- Parses command line arguments, adds test directories as tests.
-
- Returns:
- 0 if '-z' flag is set.
- Else unittest.main() returns with its own error code.
- """
-
- OrigFile.OrigDir = os.path.dirname(os.path.abspath(__file__))
- # Chdir to the directory this file is in since tests are in this directory
- os.chdir(OrigFile.OrigDir)
- files = []
- for arg in sys.argv[1:]:
- if arg in ('-h', '--help'):
- Usage()
- return 0
- elif arg in ('-n', '--no-cleanup'):
- Options.cleanup = 0
- elif arg in ('-u', '--update-cts'):
- Options.update_cts = 1
- elif arg in ('-v', '--verbose'):
- Options.verbose += 1
- elif arg in ('-z', '--zero-return'):
- Options.zero_return = 1
- else:
- # Test list to run
- if os.path.isdir(arg):
- files.append(arg)
- else:
-        print('Invalid test or option: %s' % arg, file=sys.stderr)
- return 1
-
- if not files:
- file_names = os.listdir('.')
- # Test names must start with 'F_' or 'P_'
- # 'F_' tests are expected to fail
- # 'P_' tests are expected to pass
- for f in file_names:
- if os.path.isdir(f) and (f[0:2] == 'F_' or f[0:2] == 'P_'):
- files.append(f)
- files.sort()
-
- AddUnitTests(files)
-
- # verbosity=2 is necessary for PythonUnitTestRunner to parse the results
- # Otherwise verbosity does not matter
- # If Options.zero_return is set, do not let unittest.main() exit
- # This is necessary in TreeHugger to distinguish between failing tests and
- # failing to execute the python script
- # If Options.zero_return is not set, let unittest.main() exit
- # In this case it will return a non-zero code if any tests fail
- unittest_exit = Options.zero_return == 0
- unittest.main(verbosity=2,
- argv=[sys.argv[0]] + ['TestGeneratorTests'],
- exit=unittest_exit)
-
- return 0
-
-
-if __name__ == '__main__':
- sys.exit(main())
-
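The part of this harness worth calling out is the dynamic registration: `AddUnitTests()` fabricates one test method per `P_*`/`F_*` directory and attaches it to `TestGeneratorTests` via `setattr`, so `unittest.main()` picks the directories up as ordinary test cases. Here is a stripped-down, self-contained sketch of that idiom; the class and directory names are illustrative, not part of the harness.

```
# Stand-alone sketch of the dynamic test-registration idiom used above.
import unittest

class DirectoryTests(unittest.TestCase):
  """Empty on purpose; test methods are attached below at import time."""
  pass

def make_test(dir_name):
  def test(self):
    # The real harness chdirs into dir_name, runs the generator, and diffs
    # stdout/stderr against the checked-in *.expect files.
    self.assertTrue(dir_name.startswith(("P_", "F_")))
  return test

for d in ["P_float", "P_lstm", "P_weird"]:  # illustrative directory names
  setattr(DirectoryTests, "test_%s" % d, make_test(d))

if __name__ == "__main__":
  unittest.main(verbosity=2)
```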
diff --git a/externals/nnapi_test_generator/vts_generator.py b/externals/nnapi_test_generator/vts_generator.py
deleted file mode 100755
index ab34e2bda..000000000
--- a/externals/nnapi_test_generator/vts_generator.py
+++ /dev/null
@@ -1,247 +0,0 @@
-#!/usr/bin/python3
-
-# Copyright 2017, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""VTS testcase generator
-
-Implements VTS test backend. Shares most logic with the CTS test
-generator. Invoked by ml/nn/runtime/test/specs/generate_vts_tests.sh;
-See that script for details on how this script is used.
-
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-import argparse
-from functools import reduce
-import math
-import os
-import struct
-import sys
-import contextlib
-import test_generator
-import pprint
-# Stuff from test generator
-from test_generator import Configuration
-from test_generator import Example
-from test_generator import Float32Scalar
-from test_generator import IgnoredOutput
-from test_generator import Input
-from test_generator import Int32Scalar
-from test_generator import Internal
-from test_generator import Model
-from test_generator import Operand
-from test_generator import Output
-from test_generator import Parameter
-from test_generator import smart_open
-
-# Take a model from command line
-def import_source():
- parser = argparse.ArgumentParser()
- parser.add_argument("spec", help="the spec file")
- parser.add_argument(
- "-m", "--model", help="the output model file", default="-")
- parser.add_argument(
- "-e", "--example", help="the output example file", default="-")
- args = parser.parse_args()
-
- if os.path.exists(args.spec):
- test_generator.FileNames.SpecFile = os.path.basename(args.spec)
- exec (open(args.spec).read())
-
- return (args.model, args.example)
-
-# Generate operands in VTS format
-def generate_vts_operands():
- # Dump operand definitions
- op_def = """\
- {{
- .type = OperandType::{operand_type},
- .dimensions = {shape},
- .numberOfConsumers = {no_consumers},
- .scale = {scale},
- .zeroPoint = {zero_point},
- .lifetime = OperandLifeTime::{lifetime},
- .location = {{.poolIndex = 0, .offset = {offset}, .length = {length}}},
- }}"""
- offset = 0
- op_definitions = []
- for o in Operand.operands.objects():
- ty = o.type
- no_consumers = len(o.outs) if o.traversable() else 0
- lifetime = o.lifetime()
- length = ty.get_size() if o.is_weight() else 0
- real_shape, scale, zero_point = ty.get_parsed_shape()
- scale = float(scale)
- zero_point = int(zero_point)
- op = {
- "operand_type": ty.get_element_type(),
- "shape": "{%s}" % real_shape,
- "no_consumers": no_consumers,
- "scale": test_generator.pretty_print_as_float(scale),
- "zero_point": str(int(zero_point)),
- "lifetime": lifetime,
- "offset": offset if o.is_weight() else 0,
- "length": length
- }
- offset += length
- op_definitions.append(op_def.format(**op))
-
- op_vec = """\
- const std::vector<Operand> operands = {{
-{0}
- }};""".format(",\n".join(op_definitions))
- return op_vec
-
-# Generate VTS operand values
-def generate_vts_operand_values():
- weights = [o for o in Operand.operands.objects() if o.is_weight()]
- binit = []
- for w in weights:
- ty = w.type.get_element_type()
- if ty == "TENSOR_QUANT8_ASYMM":
- binit += w.initializer
- elif ty in {"TENSOR_FLOAT32", "FLOAT32", "TENSOR_INT32", "INT32"}:
- fmt = "f" if (ty == "TENSOR_FLOAT32" or ty == "FLOAT32") else "i"
- for f in w.initializer:
- binit += [int(x) for x in struct.pack(fmt, f)]
- else:
-      assert False, "Unsupported VTS operand type"
-
- init_defs = ", ".join([str(x) for x in binit])
- if (init_defs != ""):
- init_defs = "\n %s\n " % init_defs
- byte_vec_fmt = """{%s}""" % init_defs
- return byte_vec_fmt
-
-# Generate VTS operations
-class VTSOps(object):
- vts_ops = []
- def generate_vts_operation(op):
- try:
-      opcode = op.optype
- except AttributeError: # not an op, but things like weights
- return
- op_fmt = """\
- {{
- .type = OperationType::{op_code},
- .inputs = {{{ins}}},
- .outputs = {{{outs}}},
- }}"""
- op_content = {
- 'op_code': op.optype,
- 'op_type': op.type.get_element_type(),
- 'ins': ", ".join([str(x.ID()) for x in op.ins]),
- 'outs': ", ".join([str(x.ID()) for x in op.outs]),
- }
- VTSOps.vts_ops.append(op_fmt.format(**op_content))
- return True
-
-def generate_vts_operations(model_file):
- test_generator.TopologicalSort(lambda x: VTSOps.generate_vts_operation(x))
- return ",\n".join(VTSOps.vts_ops)
-
-
-def generate_vts_model(model_file):
- operand_values_fmt = ""
- if Configuration.useSHM():
- # Boilerplate code for passing weights in shared memory
- operand_values_fmt = """\
- std::vector<uint8_t> operandValues = {{}};
- const uint8_t data[] = {operand_values};
-
- // Allocate segment of android shared memory, wrapped in hidl_memory.
- // This object will be automatically freed when sharedMemory is destroyed.
- hidl_memory sharedMemory = allocateSharedMemory(sizeof(data));
-
- // Mmap ashmem into usable address and hold it within the mappedMemory object.
- // MappedMemory will automatically munmap the memory when it is destroyed.
- sp<IMemory> mappedMemory = mapMemory(sharedMemory);
-
- if (mappedMemory != nullptr) {{
- // Retrieve the mmapped pointer.
- uint8_t* mappedPointer =
- static_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
-
- if (mappedPointer != nullptr) {{
- // Acquire the write lock for the shared memory segment, upload the data,
- // and release the lock.
- mappedMemory->update();
- std::copy(data, data + sizeof(data), mappedPointer);
- mappedMemory->commit();
- }}
- }}
-
- const std::vector<hidl_memory> pools = {{sharedMemory}};
-"""
- else:
- # Passing weights via operandValues
- operand_values_fmt = """\
- std::vector<uint8_t> operandValues = {operand_values};
- const std::vector<hidl_memory> pools = {{}};
-"""
-
- operand_values_val = {
- 'operand_values': generate_vts_operand_values()
- }
- operand_values = operand_values_fmt.format(**operand_values_val)
- # operand_values = operand_values_fmt
- model_fmt = """\
-// Generated code. Do not edit
-// Create the model
-Model createTestModel() {{
-{operand_decls}
-
- const std::vector<Operation> operations = {{
-{operations}
- }};
-
- const std::vector<uint32_t> inputIndexes = {{{input_indices}}};
- const std::vector<uint32_t> outputIndexes = {{{output_indices}}};
-{operand_values}
- return {{
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,{relaxed_field}
- }};
-}}
-"""
- model = {
- "operations": generate_vts_operations(sys.stdout),
- "operand_decls": generate_vts_operands(),
- "operand_values": operand_values,
- "output_indices": ", ".join([str(i.ID()) for i in Output.get_outputs()]),
- "input_indices": ", ".join([str(i.ID()) for i in Input.get_inputs(True)]),
- "relaxed_field":
- "\n .relaxComputationFloat32toFloat16 = true," if (Model.isRelaxed()) else ""
- }
- print(model_fmt.format(**model), file = model_file)
-
-def generate_vts(model_file):
- generate_vts_model(model_file)
- print (IgnoredOutput.gen_ignored(), file=model_file)
-
-if __name__ == "__main__":
- (model, example) = import_source()
- print("Output VTS model: %s" % model, file=sys.stderr)
- print("Output example:" + example, file=sys.stderr)
-
- with smart_open(model) as model_file:
- generate_vts(model_file)
- with smart_open(example) as example_file:
- Example.dump(example_file)
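A quick way to sanity-check `generate_vts_operand_values()` is to reproduce the `operandValues` vector from `P_vts_full/stdout.txt.expect` by hand: the only weights in that model are `b0` (an INT32 constant 0) and `p0` (the four floats 5.0 through 8.0), packed in declaration order with the same `struct` formats the generator uses. A small sketch, assuming a little-endian host:

```
# Reproduce the operandValues bytes from P_vts_full/stdout.txt.expect.
import struct

binit = []
for value in [0]:                      # b0: Int32Scalar("b0", 0)
  binit += [int(x) for x in struct.pack("i", value)]
for value in [5.0, 6.0, 7.0, 8.0]:     # p0: Parameter(..., [5.0, 6.0, 7.0, 8.0])
  binit += [int(x) for x in struct.pack("f", value)]

print(binit)
# On a little-endian host this prints:
# [0, 0, 0, 0, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65]
```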