Diffstat (limited to 'tools/tflite_benchmark_model')
-rw-r--r--  tools/tflite_benchmark_model/.FORMATDENY                  0
-rw-r--r--  tools/tflite_benchmark_model/CMakeLists.txt                6
-rw-r--r--  tools/tflite_benchmark_model/README.md                   209
-rw-r--r--  tools/tflite_benchmark_model/benchmark_main.cc            53
-rw-r--r--  tools/tflite_benchmark_model/benchmark_model.cc          175
-rw-r--r--  tools/tflite_benchmark_model/benchmark_model.h           177
-rw-r--r--  tools/tflite_benchmark_model/benchmark_params.cc          73
-rw-r--r--  tools/tflite_benchmark_model/benchmark_params.h          118
-rw-r--r--  tools/tflite_benchmark_model/benchmark_tflite_model.cc   360
-rw-r--r--  tools/tflite_benchmark_model/benchmark_tflite_model.h     95
-rw-r--r--  tools/tflite_benchmark_model/command_line_flags.cc       214
-rw-r--r--  tools/tflite_benchmark_model/command_line_flags.h        141
-rw-r--r--  tools/tflite_benchmark_model/logging.h                    92
-rw-r--r--  tools/tflite_benchmark_model/profile_summarizer.cc       164
-rw-r--r--  tools/tflite_benchmark_model/profile_summarizer.h         55
15 files changed, 0 insertions, 1932 deletions
diff --git a/tools/tflite_benchmark_model/.FORMATDENY b/tools/tflite_benchmark_model/.FORMATDENY
deleted file mode 100644
index e69de29bb..000000000
--- a/tools/tflite_benchmark_model/.FORMATDENY
+++ /dev/null
diff --git a/tools/tflite_benchmark_model/CMakeLists.txt b/tools/tflite_benchmark_model/CMakeLists.txt
deleted file mode 100644
index d52690460..000000000
--- a/tools/tflite_benchmark_model/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-file(GLOB_RECURSE SOURCES "*.cc")
-
-add_executable(tflite_benchmark_model ${SOURCES})
-target_compile_definitions(tflite_benchmark_model PUBLIC "TFLITE_PROFILING_ENABLED")
-target_link_libraries(tflite_benchmark_model tensorflow-lite ${LIB_PTHREAD} dl nnfw_util nnfw_support_tflite)
-install(TARGETS tflite_benchmark_model DESTINATION bin)
diff --git a/tools/tflite_benchmark_model/README.md b/tools/tflite_benchmark_model/README.md
deleted file mode 100644
index 93769305b..000000000
--- a/tools/tflite_benchmark_model/README.md
+++ /dev/null
@@ -1,209 +0,0 @@
-# TFLite Model Benchmark Tool
-
-## Description
-
-A simple C++ binary to benchmark a TFLite model and its individual operators,
-both on desktop machines and on Android. The binary takes a TFLite model,
-generates random inputs, and then repeatedly runs the model for a specified
-number of runs. Aggregate latency statistics are reported after running the
-benchmark.
-
-The instructions below are for running the binary on desktop and Android;
-for iOS, please use the
-[iOS benchmark app](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark/ios).
-
-## Parameters
-
-The binary takes the following required parameters:
-
-* `graph`: `string` \
- The path to the TFLite model file.
-* `input_layer`: `string` \
-  The name of the input layer; this is typically the first layer of the model.
-* `input_layer_shape`: `string` \
-  The shape of the input layer. This is a comma-separated string describing
-  the shape of the input tensor, e.g. `1,224,224,3`.
-
-and the following optional parameters:
-
-* `num_threads`: `int` (default=1) \
-  The number of threads to use for running the TFLite interpreter.
-* `warmup_runs`: `int` (default=1) \
- The number of warmup runs to do before starting the benchmark.
-* `run_delay`: `float` (default=-1.0) \
-  The delay in seconds between subsequent benchmark runs. A non-positive
-  value means no delay is used.
-* `use_nnapi`: `bool` (default=false) \
-  Whether to use [Android NNAPI](https://developer.android.com/ndk/guides/neuralnetworks/).
- This API is available on recent Android devices.
-
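-For example, a desktop invocation that combines the required and optional
-parameters might look like this (binary and model paths are illustrative):
-
-```
-benchmark_model \
-  --graph=mobilenet_quant_v1_224.tflite \
-  --input_layer="input" \
-  --input_layer_shape="1,224,224,3" \
-  --num_threads=2 \
-  --warmup_runs=2 \
-  --run_delay=0.5
-```
-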
-## To build/install/run
-
-### On Android:
-
-(0) Refer to https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android to edit the `WORKSPACE` and configure the Android NDK/SDK.
-
-(1) Build for your specific platform, e.g.:
-
-```
-bazel build -c opt \
- --config=android_arm \
- --cxxopt='--std=c++11' \
- tensorflow/contrib/lite/tools/benchmark:benchmark_model
-```
-
-(2) Connect your phone and push the binary to it with adb push
-    (make the directory first if required):
-
-```
-adb push bazel-bin/tensorflow/contrib/lite/tools/benchmark/benchmark_model /data/local/tmp
-```
-
-(3) Make the binary executable.
-
-```
-adb shell chmod +x /data/local/tmp/benchmark_model
-```
-
-(4) Push the compute graph that you need to test. For example:
-
-```
-adb push mobilenet_quant_v1_224.tflite /data/local/tmp
-```
-
-(5) Run the benchmark. For example:
-
-```
-adb shell /data/local/tmp/benchmark_model \
- --graph=/data/local/tmp/mobilenet_quant_v1_224.tflite \
- --input_layer="input" \
- --input_layer_shape="1,224,224,3" \
- --num_threads=4
-```
-
-### On desktop:
-(1) Build the binary:
-
-```
-bazel build -c opt tensorflow/contrib/lite/tools/benchmark:benchmark_model
-```
-
-(2) Run on your compute graph, similar to the Android case but without the
-need for adb shell. For example:
-
-```
-bazel-bin/tensorflow/contrib/lite/tools/benchmark/benchmark_model \
- --graph=mobilenet_quant_v1_224.tflite \
- --input_layer="Placeholder" \
- --input_layer_shape="1,224,224,3" \
- --num_threads=4
-```
-
-The MobileNet graph used as an example here may be downloaded from
-https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_v1_224_android_quant_2017_11_08.zip
-
-
-## Reducing variance between runs on Android
-
-Most modern Android phones use the [ARM big.LITTLE](https://en.wikipedia.org/wiki/ARM_big.LITTLE)
-architecture, in which some cores are faster but more power-hungry than the
-others. When running benchmarks on these phones, there can be significant
-variance between different runs of the benchmark. One way to reduce this
-variance is to set the [CPU affinity](https://en.wikipedia.org/wiki/Processor_affinity)
-before running the benchmark. On Android this can be done using the `taskset`
-command.
-For example, to run the benchmark on the big cores of a Pixel 2 with a single
-thread, one can use the following command:
-
-```
-adb shell taskset f0 /data/local/tmp/benchmark_model \
- --graph=/data/local/tmp/mobilenet_quant_v1_224.tflite \
- --input_layer="input" \
- --input_layer_shape="1,224,224,3" \
- --num_threads=1
-```
-
-where `f0` is the affinity mask for the big cores on a Pixel 2.
-Note: the affinity mask varies with the device.
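-
-One way to identify the big cores (illustrative; the sysfs layout may vary by
-device and kernel) is to compare the maximum frequency reported for each core:
-
-```
-adb shell "cat /sys/devices/system/cpu/cpu*/cpufreq/cpuinfo_max_freq"
-```
-
-Cores reporting the higher maximum frequency are typically the big cores, and
-their indices determine which bits to set in the affinity mask.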
-
-## Profiling model operators
-The benchmark binary also allows you to profile operators and report the
-execution time of each operator. To do this, compile the binary with profiling
-support by passing **--copt=-DTFLITE_PROFILING_ENABLED**.
-For example, to compile with profiling support on Android, add this flag to the previous command:
-
-```
-bazel build -c opt \
- --config=android_arm \
- --cxxopt='--std=c++11' \
- --copt=-DTFLITE_PROFILING_ENABLED \
- tensorflow/contrib/lite/tools/benchmark:benchmark_model
-```
-This compiles TFLite with profiling enabled. You can now run the benchmark binary as before; it will produce detailed statistics for each operation, similar to those shown below:
-
-```
-
-============================== Run Order ==============================
- [node type] [start] [first] [avg ms] [%] [cdf%] [mem KB] [times called] [Name]
- CONV_2D 0.000 4.269 4.269 0.107% 0.107% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_0/Relu6]
- DEPTHWISE_CONV_2D 4.270 2.150 2.150 0.054% 0.161% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_1_depthwise/Relu6]
- CONV_2D 6.421 6.107 6.107 0.153% 0.314% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_1_pointwise/Relu6]
- DEPTHWISE_CONV_2D 12.528 1.366 1.366 0.034% 0.348% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_2_depthwise/Relu6]
- CONV_2D 13.895 4.195 4.195 0.105% 0.454% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_2_pointwise/Relu6]
- DEPTHWISE_CONV_2D 18.091 1.260 1.260 0.032% 0.485% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_3_depthwise/Relu6]
- CONV_2D 19.352 6.652 6.652 0.167% 0.652% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_3_pointwise/Relu6]
- DEPTHWISE_CONV_2D 26.005 0.698 0.698 0.018% 0.670% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_4_depthwise/Relu6]
- CONV_2D 26.703 3.344 3.344 0.084% 0.754% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_4_pointwise/Relu6]
- DEPTHWISE_CONV_2D 30.047 0.646 0.646 0.016% 0.770% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_5_depthwise/Relu6]
- CONV_2D 30.694 5.800 5.800 0.145% 0.915% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_5_pointwise/Relu6]
- DEPTHWISE_CONV_2D 36.495 0.331 0.331 0.008% 0.924% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_6_depthwise/Relu6]
- CONV_2D 36.826 2.838 2.838 0.071% 0.995% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_6_pointwise/Relu6]
- DEPTHWISE_CONV_2D 39.665 0.439 0.439 0.011% 1.006% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_7_depthwise/Relu6]
- CONV_2D 40.105 5.293 5.293 0.133% 1.139% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_7_pointwise/Relu6]
- DEPTHWISE_CONV_2D 45.399 0.352 0.352 0.009% 1.147% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_8_depthwise/Relu6]
- CONV_2D 45.752 5.322 5.322 0.133% 1.281% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_8_pointwise/Relu6]
- DEPTHWISE_CONV_2D 51.075 0.357 0.357 0.009% 1.290% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_9_depthwise/Relu6]
- CONV_2D 51.432 5.693 5.693 0.143% 1.433% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_9_pointwise/Relu6]
- DEPTHWISE_CONV_2D 57.126 0.366 0.366 0.009% 1.442% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_10_depthwise/Relu6]
- CONV_2D 57.493 5.472 5.472 0.137% 1.579% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_10_pointwise/Relu6]
- DEPTHWISE_CONV_2D 62.966 0.364 0.364 0.009% 1.588% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_11_depthwise/Relu6]
- CONV_2D 63.330 5.404 5.404 0.136% 1.724% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_11_pointwise/Relu6]
- DEPTHWISE_CONV_2D 68.735 0.155 0.155 0.004% 1.728% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_12_depthwise/Relu6]
- CONV_2D 68.891 2.970 2.970 0.074% 1.802% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_12_pointwise/Relu6]
- DEPTHWISE_CONV_2D 71.862 0.206 0.206 0.005% 1.807% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_13_depthwise/Relu6]
- CONV_2D 72.069 5.888 5.888 0.148% 1.955% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_13_pointwise/Relu6]
- AVERAGE_POOL_2D 77.958 0.036 0.036 0.001% 1.956% 0.000 0 [MobilenetV1/Logits/AvgPool_1a/AvgPool]
- CONV_2D 77.994 1.445 1.445 0.036% 1.992% 0.000 0 [MobilenetV1/Logits/Conv2d_1c_1x1/BiasAdd]
- RESHAPE 79.440 0.002 0.002 0.000% 1.992% 0.000 0 [MobilenetV1/Predictions/Reshape]
- SOFTMAX 79.443 0.029 0.029 0.001% 1.993% 0.000 0 [MobilenetV1/Predictions/Softmax]
-
-============================== Top by Computation Time ==============================
- [node type] [start] [first] [avg ms] [%] [cdf%] [mem KB] [times called] [Name]
- CONV_2D 19.352 6.652 6.652 0.167% 0.167% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_3_pointwise/Relu6]
- CONV_2D 6.421 6.107 6.107 0.153% 0.320% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_1_pointwise/Relu6]
- CONV_2D 72.069 5.888 5.888 0.148% 0.468% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_13_pointwise/Relu6]
- CONV_2D 30.694 5.800 5.800 0.145% 0.613% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_5_pointwise/Relu6]
- CONV_2D 51.432 5.693 5.693 0.143% 0.756% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_9_pointwise/Relu6]
- CONV_2D 57.493 5.472 5.472 0.137% 0.893% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_10_pointwise/Relu6]
- CONV_2D 63.330 5.404 5.404 0.136% 1.029% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_11_pointwise/Relu6]
- CONV_2D 45.752 5.322 5.322 0.133% 1.162% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_8_pointwise/Relu6]
- CONV_2D 40.105 5.293 5.293 0.133% 1.295% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_7_pointwise/Relu6]
- CONV_2D 0.000 4.269 4.269 0.107% 1.402% 0.000 0 [MobilenetV1/MobilenetV1/Conv2d_0/Relu6]
-
-Number of nodes executed: 31
-============================== Summary by node type ==============================
- [Node type] [count] [avg ms] [avg %] [cdf %] [mem KB] [times called]
- CONV_2D 15 1.406 89.270% 89.270% 0.000 0
- DEPTHWISE_CONV_2D 13 0.169 10.730% 100.000% 0.000 0
- SOFTMAX 1 0.000 0.000% 100.000% 0.000 0
- RESHAPE 1 0.000 0.000% 100.000% 0.000 0
- AVERAGE_POOL_2D 1 0.000 0.000% 100.000% 0.000 0
-
-Timings (microseconds): count=50 first=79449 curr=81350 min=77385 max=88213 avg=79732 std=1929
-Memory (bytes): count=0
-31 nodes observed
-
-
-Average inference timings in us: Warmup: 83235, Init: 38467, no stats: 79760.9
-```
-
-
diff --git a/tools/tflite_benchmark_model/benchmark_main.cc b/tools/tflite_benchmark_model/benchmark_main.cc
deleted file mode 100644
index 7e4231c48..000000000
--- a/tools/tflite_benchmark_model/benchmark_main.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "benchmark_tflite_model.h"
-#include "logging.h"
-
-namespace nnfw {
-namespace benchmark {
-
-int Main(int argc, char** argv) {
-#ifdef TFLITE_CUSTOM_OPS_HEADER
- TFLITE_LOG(INFO) << "STARTING with custom ops!";
-#else
- TFLITE_LOG(INFO) << "STARTING!";
-#endif
- BenchmarkTfLiteModel benchmark;
- BenchmarkLoggingListener listener;
- benchmark.AddListener(&listener);
- benchmark.Run(argc, argv);
- return 0;
-}
-} // namespace benchmark
-} // namespace nnfw
-
-int main(int argc, char** argv) { return nnfw::benchmark::Main(argc, argv); }
diff --git a/tools/tflite_benchmark_model/benchmark_model.cc b/tools/tflite_benchmark_model/benchmark_model.cc
deleted file mode 100644
index 7869180bf..000000000
--- a/tools/tflite_benchmark_model/benchmark_model.cc
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "benchmark_model.h"
-
-#include <time.h>
-
-#include <iostream>
-#include <sstream>
-
-#include "tensorflow/contrib/lite/profiling/time.h"
-#include "logging.h"
-
-namespace {
-void SleepForSeconds(double sleep_seconds) {
- if (sleep_seconds <= 0.0) {
- return;
- }
- // Convert sleep_seconds into a timespec.
- timespec req;
- req.tv_sec = static_cast<time_t>(sleep_seconds);
- req.tv_nsec = (sleep_seconds - req.tv_sec) * 1000000000;
- // If requested, sleep between runs for an arbitrary amount of time.
- // This can be helpful to determine the effect of mobile processor
- // scaling and thermal throttling.
-#ifdef PLATFORM_WINDOWS
- Sleep(sleep_seconds * 1000);
-#else
- nanosleep(&req, nullptr);
-#endif
-}
-
-} // namespace
-
-namespace nnfw {
-namespace benchmark {
-using tensorflow::Stat;
-
-BenchmarkParams BenchmarkModel::DefaultParams() {
- BenchmarkParams params;
- params.AddParam("num_runs", BenchmarkParam::Create<int32_t>(50));
- params.AddParam("run_delay", BenchmarkParam::Create<float>(-1.0f));
- params.AddParam("num_threads", BenchmarkParam::Create<int32_t>(1));
- params.AddParam("benchmark_name", BenchmarkParam::Create<std::string>(""));
- params.AddParam("output_prefix", BenchmarkParam::Create<std::string>(""));
- params.AddParam("warmup_runs", BenchmarkParam::Create<int32_t>(1));
- return params;
-}
-
-BenchmarkModel::BenchmarkModel() : params_(DefaultParams()) {}
-
-void BenchmarkLoggingListener::OnBenchmarkEnd(const BenchmarkResults &results) {
- auto inference_us = results.inference_time_us();
- auto init_us = results.startup_latency_us();
- auto warmup_us = results.warmup_time_us();
- TFLITE_LOG(INFO) << "Average inference timings in us: "
- << "Warmup: " << warmup_us.avg() << ", "
- << "Init: " << init_us << ", "
- << "no stats: " << inference_us.avg();
-}
-
-std::vector<Flag> BenchmarkModel::GetFlags() {
- return {
- CreateFlag<int32_t>("num_runs", &params_, "number of runs"),
- CreateFlag<float>("run_delay", &params_, "delay between runs in seconds"),
- CreateFlag<int32_t>("num_threads", &params_, "number of threads"),
- CreateFlag<std::string>("benchmark_name", &params_, "benchmark name"),
- CreateFlag<std::string>("output_prefix", &params_,
- "benchmark output prefix"),
- CreateFlag<int32_t>("warmup_runs", &params_,
- "how many runs to initialize model"),
- };
-}
-
-void BenchmarkModel::LogFlags() {
- TFLITE_LOG(INFO) << "Num runs: [" << params_.Get<int32_t>("num_runs") << "]";
- TFLITE_LOG(INFO) << "Inter-run delay (seconds): ["
- << params_.Get<float>("run_delay") << "]";
- TFLITE_LOG(INFO) << "Num threads: [" << params_.Get<int32_t>("num_threads")
- << "]";
- TFLITE_LOG(INFO) << "Benchmark name: ["
- << params_.Get<std::string>("benchmark_name") << "]";
- TFLITE_LOG(INFO) << "Output prefix: ["
- << params_.Get<std::string>("output_prefix") << "]";
- TFLITE_LOG(INFO) << "Warmup runs: [" << params_.Get<int32_t>("warmup_runs")
- << "]";
-}
-
-Stat<int64_t> BenchmarkModel::Run(int num_times, RunType run_type) {
- Stat<int64_t> run_stats;
- TFLITE_LOG(INFO) << "Running benchmark for " << num_times << " iterations ";
- for (int run = 0; run < num_times; run++) {
- listeners_.OnSingleRunStart(run_type);
- int64_t start_us = tflite::profiling::time::NowMicros();
- RunImpl();
- int64_t end_us = tflite::profiling::time::NowMicros();
- listeners_.OnSingleRunEnd();
-
- run_stats.UpdateStat(end_us - start_us);
- SleepForSeconds(params_.Get<float>("run_delay"));
- }
-
- std::stringstream stream;
- run_stats.OutputToStream(&stream);
- TFLITE_LOG(INFO) << stream.str() << std::endl;
-
- return run_stats;
-}
-
-void BenchmarkModel::Run(int argc, char **argv) {
- if (!ParseFlags(argc, argv)) {
- return;
- }
-
- LogFlags();
-
- listeners_.OnBenchmarkStart(params_);
- int64_t initialization_start_us = tflite::profiling::time::NowMicros();
- Init();
- int64_t initialization_end_us = tflite::profiling::time::NowMicros();
- int64_t startup_latency_us = initialization_end_us - initialization_start_us;
- TFLITE_LOG(INFO) << "Initialized session in " << startup_latency_us / 1e3
- << "ms";
-
- uint64_t input_bytes = ComputeInputBytes();
- Stat<int64_t> warmup_time_us =
- Run(params_.Get<int32_t>("warmup_runs"), WARMUP);
- Stat<int64_t> inference_time_us =
- Run(params_.Get<int32_t>("num_runs"), REGULAR);
- listeners_.OnBenchmarkEnd(
- {startup_latency_us, input_bytes, warmup_time_us, inference_time_us});
-}
-
-bool BenchmarkModel::ParseFlags(int argc, char **argv) {
- auto flag_list = GetFlags();
- const bool parse_result =
- Flags::Parse(&argc, const_cast<const char **>(argv), flag_list);
- if (!parse_result) {
- std::string usage = Flags::Usage(argv[0], flag_list);
- TFLITE_LOG(ERROR) << usage;
- return false;
- }
- return ValidateFlags();
-}
-
-} // namespace benchmark
-} // namespace nnfw
diff --git a/tools/tflite_benchmark_model/benchmark_model.h b/tools/tflite_benchmark_model/benchmark_model.h
deleted file mode 100644
index 5645e2910..000000000
--- a/tools/tflite_benchmark_model/benchmark_model.h
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef __TFLITE_BENCHMARK_MODEL_BENCHMARK_MODEL_H__
-#define __TFLITE_BENCHMARK_MODEL_BENCHMARK_MODEL_H__
-
-#include <cmath>
-#include <limits>
-#include <ostream>
-#include <string>
-#include <unordered_set>
-#include <vector>
-
-#include "benchmark_params.h"
-#include "command_line_flags.h"
-#include "tensorflow/core/util/stats_calculator.h"
-
-namespace nnfw {
-namespace benchmark {
-
-enum RunType {
- WARMUP,
- REGULAR,
-};
-
-class BenchmarkResults {
- public:
- BenchmarkResults(int64_t startup_latency_us, uint64_t input_bytes,
- tensorflow::Stat<int64_t> warmup_time_us,
- tensorflow::Stat<int64_t> inference_time_us)
- : startup_latency_us_(startup_latency_us),
- input_bytes_(input_bytes),
- warmup_time_us_(warmup_time_us),
- inference_time_us_(inference_time_us) {}
-
- tensorflow::Stat<int64_t> inference_time_us() const {
- return inference_time_us_;
- }
- tensorflow::Stat<int64_t> warmup_time_us() const { return warmup_time_us_; }
- int64_t startup_latency_us() const { return startup_latency_us_; }
- uint64_t input_bytes() const { return input_bytes_; }
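- // Derived metric: bytes of input consumed per second over the timed runs,
- // i.e. input_bytes * number of runs / total inference time, in MiB/s.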
- double throughput_MB_per_second() const {
- double bytes_per_sec = (input_bytes_ * inference_time_us_.count() * 1e6) /
- inference_time_us_.sum();
- return bytes_per_sec / (1024.0 * 1024.0);
- }
-
- private:
- int64_t startup_latency_us_;
- uint64_t input_bytes_;
- tensorflow::Stat<int64_t> warmup_time_us_;
- tensorflow::Stat<int64_t> inference_time_us_;
-};
-
-class BenchmarkListener {
- public:
- virtual void OnBenchmarkStart(const BenchmarkParams& params) {}
- virtual void OnSingleRunStart(RunType runType) {}
- virtual void OnSingleRunEnd() {}
- virtual void OnBenchmarkEnd(const BenchmarkResults& results) {}
- virtual ~BenchmarkListener() {}
-};
-
-// A listener that forwards its method calls to a collection of listeners.
-class BenchmarkListeners : public BenchmarkListener {
- public:
- // Adds a listener to the listener collection.
- // |listener| is not owned by the instance of |BenchmarkListeners|.
- // |listener| should not be null and should outlast the instance of
- // |BenchmarkListeners|.
- void AddListener(BenchmarkListener* listener) {
- listeners_.push_back(listener);
- }
-
- void OnBenchmarkStart(const BenchmarkParams& params) override {
- for (auto listener : listeners_) {
- listener->OnBenchmarkStart(params);
- }
- }
-
- void OnSingleRunStart(RunType runType) override {
- for (auto listener : listeners_) {
- listener->OnSingleRunStart(runType);
- }
- }
-
- void OnSingleRunEnd() override {
- for (auto listener : listeners_) {
- listener->OnSingleRunEnd();
- }
- }
-
- void OnBenchmarkEnd(const BenchmarkResults& results) override {
- for (auto listener : listeners_) {
- listener->OnBenchmarkEnd(results);
- }
- }
-
- ~BenchmarkListeners() {}
-
- private:
- // Use vector so listeners are invoked in the order they are added.
- std::vector<BenchmarkListener*> listeners_;
-};
-
-// Benchmark listener that just logs the results of a benchmark run.
-class BenchmarkLoggingListener : public BenchmarkListener {
- void OnBenchmarkEnd(const BenchmarkResults& results) override;
-};
-
-template <typename T>
-Flag CreateFlag(const char* name, BenchmarkParams* params,
- const std::string& usage) {
- return Flag(name, [params, name](const T& val) { params->Set<T>(name, val); },
- params->Get<T>(name), usage);
-}
-
-// Benchmarks a model.
-//
-// Subclasses need to implement initialization and running of the model.
-// The results can be collected by adding BenchmarkListener(s).
-class BenchmarkModel {
- public:
- static BenchmarkParams DefaultParams();
- BenchmarkModel();
- BenchmarkModel(BenchmarkParams params) : params_(std::move(params)) {}
- virtual ~BenchmarkModel() {}
- bool ParseFlags(int argc, char** argv);
- virtual void Init() = 0;
- void Run(int argc, char** argv);
- void AddListener(BenchmarkListener* listener) {
- listeners_.AddListener(listener);
- }
-
- protected:
- virtual void LogFlags();
- virtual bool ValidateFlags() { return true; }
- virtual std::vector<Flag> GetFlags();
- virtual uint64_t ComputeInputBytes() = 0;
- virtual tensorflow::Stat<int64_t> Run(int num_times, RunType run_type);
- virtual void RunImpl() = 0;
- BenchmarkParams params_;
- BenchmarkListeners listeners_;
-};
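-
-// A minimal subclass sketch (illustrative only, not part of this tool),
-// mirroring the usage in benchmark_main.cc:
-//
-//   class MyBenchmark : public BenchmarkModel {
-//    public:
-//     void Init() override { /* load and prepare the model */ }
-//
-//    protected:
-//     uint64_t ComputeInputBytes() override { return 0; }
-//     void RunImpl() override { /* run a single inference */ }
-//   };
-//
-//   int main(int argc, char** argv) {
-//     MyBenchmark benchmark;
-//     BenchmarkLoggingListener listener;
-//     benchmark.AddListener(&listener);
-//     benchmark.Run(argc, argv);  // parses flags, runs warmup + timed runs
-//   }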
-
-} // namespace benchmark
-} // namespace nnfw
-
-#endif //__TFLITE_BENCHMARK_MODEL_BENCHMARK_MODEL_H__
diff --git a/tools/tflite_benchmark_model/benchmark_params.cc b/tools/tflite_benchmark_model/benchmark_params.cc
deleted file mode 100644
index 7b667a442..000000000
--- a/tools/tflite_benchmark_model/benchmark_params.cc
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "benchmark_params.h"
-
-#include <string>
-#include <unordered_map>
-#include <vector>
-
-#include "logging.h"
-
-namespace nnfw {
-namespace benchmark {
-
-void BenchmarkParam::AssertHasSameType(BenchmarkParam::ParamType a,
- BenchmarkParam::ParamType b) {
- TFLITE_BENCHMARK_CHECK(a == b) << "Type mismatch while accessing parameter.";
-}
-
-template <>
-BenchmarkParam::ParamType BenchmarkParam::GetValueType<int32_t>() {
- return BenchmarkParam::ParamType::TYPE_INT32;
-}
-
-template <>
-BenchmarkParam::ParamType BenchmarkParam::GetValueType<bool>() {
- return BenchmarkParam::ParamType::TYPE_BOOL;
-}
-
-template <>
-BenchmarkParam::ParamType BenchmarkParam::GetValueType<float>() {
- return BenchmarkParam::ParamType::TYPE_FLOAT;
-}
-
-template <>
-BenchmarkParam::ParamType BenchmarkParam::GetValueType<std::string>() {
- return BenchmarkParam::ParamType::TYPE_STRING;
-}
-
-void BenchmarkParams::AssertParamExists(const std::string& name) const {
- TFLITE_BENCHMARK_CHECK(HasParam(name)) << name << " was not found.";
-}
-
-} // namespace benchmark
-} // namespace nnfw
diff --git a/tools/tflite_benchmark_model/benchmark_params.h b/tools/tflite_benchmark_model/benchmark_params.h
deleted file mode 100644
index 1ac3f4af6..000000000
--- a/tools/tflite_benchmark_model/benchmark_params.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef __TFLITE_BENCHMARK_MODEL_BENCHMARK_PARAMS_H__
-#define __TFLITE_BENCHMARK_MODEL_BENCHMARK_PARAMS_H__
-#include <memory>
-#include <string>
-#include <unordered_map>
-#include <vector>
-
-#include "logging.h"
-
-namespace nnfw {
-namespace benchmark {
-
-template <typename T>
-class TypedBenchmarkParam;
-
-class BenchmarkParam {
- protected:
- enum class ParamType { TYPE_INT32, TYPE_FLOAT, TYPE_BOOL, TYPE_STRING };
-
- public:
- template <typename T>
- static std::unique_ptr<BenchmarkParam> Create(const T& default_value) {
- return std::unique_ptr<BenchmarkParam>(
- new TypedBenchmarkParam<T>(default_value));
- }
-
- template <typename T>
- TypedBenchmarkParam<T>* AsTyped() {
- AssertHasSameType(GetValueType<T>(), type_);
- return static_cast<TypedBenchmarkParam<T>*>(this);
- }
- virtual ~BenchmarkParam() {}
- BenchmarkParam(ParamType type) : type_(type) {}
-
- private:
- static void AssertHasSameType(ParamType a, ParamType b);
- protected:
- template <typename T>
- static ParamType GetValueType();
-
- const ParamType type_;
-};
-
-template <typename T>
-class TypedBenchmarkParam : public BenchmarkParam {
- public:
- TypedBenchmarkParam(const T& value)
- : BenchmarkParam(GetValueType<T>()), value_(value) {}
- void Set(const T& value) { value_ = value; }
-
- T Get() { return value_; }
-
- private:
- T value_;
-};
-
-class BenchmarkParams {
- public:
- void AddParam(const std::string& name,
- std::unique_ptr<BenchmarkParam> value) {
- params_[name] = std::move(value);
- }
-
- bool HasParam(const std::string& name) const {
- return params_.find(name) != params_.end();
- }
-
- template <typename T>
- void Set(const std::string& name, const T& value) {
- AssertParamExists(name);
- params_.at(name)->AsTyped<T>()->Set(value);
- }
-
- template <typename T>
- T Get(const std::string& name) const {
- AssertParamExists(name);
- return params_.at(name)->AsTyped<T>()->Get();
- }
-
- private:
- void AssertParamExists(const std::string& name) const;
- std::unordered_map<std::string, std::unique_ptr<BenchmarkParam>> params_;
-};
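-
-// Usage sketch (illustrative): parameters are declared once with a default,
-// then read and written through the typed accessors.
-//
-//   BenchmarkParams params;
-//   params.AddParam("num_runs", BenchmarkParam::Create<int32_t>(50));
-//   params.Set<int32_t>("num_runs", 100);
-//   int32_t num_runs = params.Get<int32_t>("num_runs");  // 100
-//   // Accessing a param with the wrong type fails a TFLITE_BENCHMARK_CHECK.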
-
-} // namespace benchmark
-} // namespace nnfw
-#endif // __TFLITE_BENCHMARK_MODEL_BENCHMARK_PARAMS_H__
diff --git a/tools/tflite_benchmark_model/benchmark_tflite_model.cc b/tools/tflite_benchmark_model/benchmark_tflite_model.cc
deleted file mode 100644
index d277795a3..000000000
--- a/tools/tflite_benchmark_model/benchmark_tflite_model.cc
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "benchmark_tflite_model.h"
-
-#include <cstdarg>
-#include <cstdlib>
-#include <iostream>
-#include <memory>
-#include <string>
-#include <unordered_set>
-#include <vector>
-
-#include "support/tflite/kernels/register.h"
-#include "tensorflow/contrib/lite/model.h"
-#include "tensorflow/contrib/lite/op_resolver.h"
-#include "tensorflow/contrib/lite/string_util.h"
-#include "logging.h"
-#include "util/profiling/profiling.h"
-#include "support/tflite/nnapi_delegate.h"
-
-#ifdef TFLITE_CUSTOM_OPS_HEADER
-void RegisterSelectedOps(::tflite::MutableOpResolver* resolver);
-#endif
-
-namespace nnfw {
-namespace benchmark {
-
-void ProfilingListener::SetInterpreter(tflite::Interpreter* interpreter) {
- TFLITE_BENCHMARK_CHECK(interpreter);
- interpreter_ = interpreter;
- interpreter_->SetProfiler(&profiler_);
-}
-
-void ProfilingListener::OnSingleRunStart(RunType run_type) {
- if (run_type == REGULAR) {
- profiler_.Reset();
- profiler_.StartProfiling();
- }
-}
-
-void ProfilingListener::OnBenchmarkEnd(const BenchmarkResults& results) {
- if (has_profiles_) {
- TFLITE_LOG(INFO) << summarizer_.GetOutputString();
- }
-}
-
-void ProfilingListener::OnSingleRunEnd() {
- profiler_.StopProfiling();
- auto profile_events = profiler_.GetProfileEvents();
- has_profiles_ = !profile_events.empty();
- summarizer_.ProcessProfiles(profile_events, *interpreter_);
-}
-
-namespace {
-
-std::vector<std::string> Split(const std::string& str, const char delim) {
- std::istringstream input(str);
- std::vector<std::string> results;
- std::string item;
- while (std::getline(input, item, delim)) {
- results.push_back(item);
- }
- return results;
-}
-
-template <typename T>
-bool SplitAndParse(const std::string& str, char delim, std::vector<T>* values) {
- std::istringstream input(str);
- bool first = true;
- while (!input.eof()) {
- if (!first) {
- char c;
- input >> c;
- if (c != delim) {
- return false;
- }
- } else {
- first = false;
- }
- T val;
- input >> val;
- if (!input.eof() && !input.good()) {
- return false;
- }
- values->push_back(val);
- }
- return true;
-}
-
-template <typename T>
-void FillRandomValue(T* ptr, const std::vector<int>& sizes,
- const std::function<T()>& random_func) {
- int num_elements = 1;
- for (int dim : sizes) {
- num_elements *= dim;
- }
- for (int i = 0; i < num_elements; ++i) {
- *ptr++ = random_func();
- }
-}
-
-void FillRandomString(tflite::DynamicBuffer* buffer,
- const std::vector<int>& sizes,
- const std::function<std::string()>& random_func) {
- int num_elements = 1;
- for (int dim : sizes) {
- num_elements *= dim;
- }
- for (int i = 0; i < num_elements; ++i) {
- auto str = random_func();
- buffer->AddString(str.data(), str.length());
- }
-}
-
-bool PopulateInputLayerInfo(
- const std::string& names_string, const std::string& shapes_string,
- std::vector<BenchmarkTfLiteModel::InputLayerInfo>* info) {
- std::vector<std::string> names = Split(names_string, ',');
- std::vector<std::string> shapes = Split(shapes_string, ':');
-
- if (names.size() != shapes.size()) {
- TFLITE_LOG(ERROR) << "The number of items in"
- << " --input_layer_shape (" << shapes_string << ", with "
- << shapes.size() << " items)"
- << " must match the number of items in"
- << " --input_layer (" << names_string << ", with "
- << names.size() << " items)."
- << " For example --input_layer=input1,input2"
- << " --input_layer_shape=1,224,224,4:1,20";
- return false;
- }
-
- for (int i = 0; i < names.size(); ++i) {
- info->push_back(BenchmarkTfLiteModel::InputLayerInfo());
- BenchmarkTfLiteModel::InputLayerInfo& input = info->back();
-
- input.name = names[i];
-
- TFLITE_BENCHMARK_CHECK(SplitAndParse(shapes[i], ',', &input.shape))
- << "Incorrect size string specified: " << shapes[i];
- for (int dim : input.shape) {
- if (dim == -1) {
- TFLITE_LOG(ERROR)
- << "Any unknown sizes in the shapes (-1's) must be replaced"
- << " with the size you want to benchmark with.";
- return false;
- }
- }
- }
-
- return true;
-}
-
-BenchmarkParams GetDefaultParams() {
- BenchmarkParams default_params = BenchmarkModel::DefaultParams();
- default_params.AddParam("graph", BenchmarkParam::Create<std::string>(""));
- default_params.AddParam("input_layer",
- BenchmarkParam::Create<std::string>(""));
- default_params.AddParam("input_layer_shape",
- BenchmarkParam::Create<std::string>(""));
- default_params.AddParam("use_nnapi", BenchmarkParam::Create<bool>(false));
- return default_params;
-}
-
-} // namespace
-
-BenchmarkTfLiteModel::BenchmarkTfLiteModel()
- : BenchmarkModel(GetDefaultParams()) {
- AddListener(&profiling_listener_);
-}
-
-BenchmarkTfLiteModel::BenchmarkTfLiteModel(BenchmarkParams params)
- : BenchmarkModel(std::move(params)) {
- AddListener(&profiling_listener_);
-}
-
-std::vector<Flag> BenchmarkTfLiteModel::GetFlags() {
- std::vector<Flag> flags = BenchmarkTfLiteModel::BenchmarkModel::GetFlags();
- std::vector<Flag> specific_flags = {
- CreateFlag<std::string>("graph", &params_, "graph file name"),
- CreateFlag<std::string>("input_layer", &params_, "input layer names"),
- CreateFlag<std::string>("input_layer_shape", &params_,
- "input layer shape"),
- CreateFlag<bool>("use_nnapi", &params_, "use NNAPI")};
-
- flags.insert(flags.end(), specific_flags.begin(), specific_flags.end());
- return flags;
-}
-
-void BenchmarkTfLiteModel::LogFlags() {
- BenchmarkModel::LogFlags();
- TFLITE_LOG(INFO) << "Graph: [" << params_.Get<std::string>("graph") << "]";
- TFLITE_LOG(INFO) << "Input layers: ["
- << params_.Get<std::string>("input_layer") << "]";
- TFLITE_LOG(INFO) << "Input shapes: ["
- << params_.Get<std::string>("input_layer_shape") << "]";
- TFLITE_LOG(INFO) << "Use nnapi : [" << params_.Get<bool>("use_nnapi") << "]";
-}
-
-bool BenchmarkTfLiteModel::ValidateFlags() {
- if (params_.Get<std::string>("graph").empty()) {
- TFLITE_LOG(ERROR)
- << "Please specify the name of your TF Lite input file with --graph";
- return false;
- }
- return PopulateInputLayerInfo(params_.Get<std::string>("input_layer"),
- params_.Get<std::string>("input_layer_shape"),
- &inputs);
-}
-
-uint64_t BenchmarkTfLiteModel::ComputeInputBytes() {
- TFLITE_BENCHMARK_CHECK(interpreter);
- uint64_t total_input_bytes = 0;
- for (int input : interpreter->inputs()) {
- auto* t = interpreter->tensor(input);
- total_input_bytes += t->bytes;
- }
- return total_input_bytes;
-}
-
-void BenchmarkTfLiteModel::Init() {
- std::string graph = params_.Get<std::string>("graph");
- model = tflite::FlatBufferModel::BuildFromFile(graph.c_str());
- if (!model) {
- TFLITE_LOG(FATAL) << "Failed to mmap model " << graph;
- }
- TFLITE_LOG(INFO) << "Loaded model " << graph;
- model->error_reporter();
- TFLITE_LOG(INFO) << "resolved reporter";
-
-#ifdef TFLITE_CUSTOM_OPS_HEADER
- tflite::MutableOpResolver resolver;
- RegisterSelectedOps(&resolver);
-#else
- tflite::ops::builtin::BuiltinOpResolver resolver;
-#endif
-
- tflite::InterpreterBuilder(*model, resolver)(&interpreter);
- if (!interpreter) {
- TFLITE_LOG(FATAL) << "Failed to construct interpreter";
- }
- profiling_listener_.SetInterpreter(interpreter.get());
- profiling::Context::get().setProfiler(interpreter->GetProfiler());
-
- const int32_t num_threads = params_.Get<int32_t>("num_threads");
-
- if (num_threads != -1) {
- interpreter->SetNumThreads(num_threads);
- }
-
- bool use_nnapi = params_.Get<bool>("use_nnapi");
-
- interpreter->UseNNAPI(use_nnapi);
- auto interpreter_inputs = interpreter->inputs();
-
- if (!inputs.empty()) {
- TFLITE_BENCHMARK_CHECK_EQ(inputs.size(), interpreter_inputs.size())
- << "Inputs mismatch: Model inputs #:" << interpreter_inputs.size()
- << " expected: " << inputs.size();
- }
-
- // TFLITE_BENCHMARK_CHECK that all names and types match
- for (int j = 0; j < inputs.size(); ++j) {
- const InputLayerInfo& input = inputs[j];
- int i = interpreter_inputs[j];
- TfLiteTensor* t = interpreter->tensor(i);
- TFLITE_BENCHMARK_CHECK_EQ(t->name, input.name)
- << "Tensor # " << i << " is named " << t->name << " but flags call it "
- << input.name;
- }
-
- // Resize all non-string tensors.
- for (int j = 0; j < inputs.size(); ++j) {
- const InputLayerInfo& input = inputs[j];
- int i = interpreter_inputs[j];
- TfLiteTensor* t = interpreter->tensor(i);
- if (t->type != kTfLiteString) {
- interpreter->ResizeInputTensor(i, input.shape);
- }
- }
-
- if (interpreter->AllocateTensors() != kTfLiteOk) {
- TFLITE_LOG(FATAL) << "Failed to allocate tensors!";
- }
-
- // Set the values of the input tensors.
- for (int j = 0; j < inputs.size(); ++j) {
- const InputLayerInfo& input = inputs[j];
- int i = interpreter_inputs[j];
- TfLiteTensor* t = interpreter->tensor(i);
- std::vector<int> sizes = input.shape;
-
- // TODO(ahentz): below we ignore the 0-th dimension (number of batches).
- if (t->type == kTfLiteFloat32) {
- FillRandomValue<float>(
- interpreter->typed_tensor<float>(i),
- std::vector<int>(sizes.begin() + 1, sizes.end()),
- []() { return static_cast<float>(rand()) / RAND_MAX - 0.5f; });
- } else if (t->type == kTfLiteUInt8) {
- FillRandomValue<uint8_t>(
- interpreter->typed_tensor<uint8_t>(i),
- std::vector<int>(sizes.begin() + 1, sizes.end()),
- []() { return static_cast<uint8_t>(rand()) % 255; });
- } else if (t->type == kTfLiteString) {
- tflite::DynamicBuffer buffer;
- FillRandomString(&buffer, sizes, []() {
- return "we're have some friends over saturday to hang out in the yard";
- });
- buffer.WriteToTensor(interpreter->tensor(i));
- } else {
- TFLITE_LOG(FATAL) << "Don't know how to populate tensor " << t->name
- << " of type " << t->type;
- }
- }
-}
-
-void BenchmarkTfLiteModel::RunImpl() {
- bool use_nnapi = params_.Get<bool>("use_nnapi");
- if (use_nnapi) {
- if (nnfw::NNAPIDelegate().Invoke(interpreter.get()) != kTfLiteOk) {
- TFLITE_LOG(FATAL) << "Failed to invoke!";
- }
- } else {
- if (interpreter->Invoke() != kTfLiteOk) {
- TFLITE_LOG(FATAL) << "Failed to invoke!";
- }
- }
-}
-
-} // namespace benchmark
-} // namespace nnfw
diff --git a/tools/tflite_benchmark_model/benchmark_tflite_model.h b/tools/tflite_benchmark_model/benchmark_tflite_model.h
deleted file mode 100644
index 7892de1f7..000000000
--- a/tools/tflite_benchmark_model/benchmark_tflite_model.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef __TFLITE_BENCHMARK_MODEL_BENCHMARK_TFLITE_MODEL_H__
-#define __TFLITE_BENCHMARK_MODEL_BENCHMARK_TFLITE_MODEL_H__
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "tensorflow/contrib/lite/model.h"
-#include "tensorflow/contrib/lite/profiling/profile_summarizer.h"
-#include "benchmark_model.h"
-
-namespace nnfw {
-namespace benchmark {
-
-// Dumps profiling events if profiling is enabled
-class ProfilingListener : public BenchmarkListener {
- public:
- explicit ProfilingListener() : interpreter_(nullptr), has_profiles_(false) {}
-
- void SetInterpreter(tflite::Interpreter* interpreter);
-
- void OnSingleRunStart(RunType run_type) override;
-
- void OnSingleRunEnd() override;
-
- void OnBenchmarkEnd(const BenchmarkResults& results) override;
-
- private:
- tflite::Interpreter* interpreter_;
- tflite::profiling::Profiler profiler_;
- tflite::profiling::ProfileSummarizer summarizer_;
- bool has_profiles_;
-};
-
-// Benchmarks a TFLite model by running tflite interpreter.
-class BenchmarkTfLiteModel : public BenchmarkModel {
- public:
- BenchmarkTfLiteModel();
- BenchmarkTfLiteModel(BenchmarkParams params);
-
- std::vector<Flag> GetFlags() override;
- void LogFlags() override;
- bool ValidateFlags() override;
- uint64_t ComputeInputBytes() override;
- void Init() override;
- void RunImpl() override;
- virtual ~BenchmarkTfLiteModel() {}
-
- struct InputLayerInfo {
- std::string name;
- std::vector<int> shape;
- };
-
- private:
- std::unique_ptr<tflite::FlatBufferModel> model;
- std::unique_ptr<tflite::Interpreter> interpreter;
- std::vector<InputLayerInfo> inputs;
- ProfilingListener profiling_listener_;
-};
-
-} // namespace benchmark
-} // namespace nnfw
-
-#endif //__TFLITE_BENCHMARK_MODEL_BENCHMARK_TFLITE_MODEL_H__
diff --git a/tools/tflite_benchmark_model/command_line_flags.cc b/tools/tflite_benchmark_model/command_line_flags.cc
deleted file mode 100644
index eacca9f73..000000000
--- a/tools/tflite_benchmark_model/command_line_flags.cc
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "command_line_flags.h"
-
-#include <cstring>
-#include <sstream>
-#include <string>
-#include <utility>
-#include <vector>
-
-namespace nnfw {
-namespace {
-
-template <typename T>
-std::string ToString(T val) {
- std::ostringstream stream;
- stream << val;
- return stream.str();
-}
-
-bool ParseFlag(const std::string& arg, const std::string& flag,
- const std::function<bool(const std::string&)>& parse_func,
- bool* value_parsing_ok) {
- *value_parsing_ok = true;
- std::string flag_prefix = "--" + flag + "=";
- if (arg.find(flag_prefix) != 0) {
- return false;
- }
- bool has_value = arg.size() >= flag_prefix.size();
- *value_parsing_ok = has_value;
- if (has_value) {
- *value_parsing_ok = parse_func(arg.substr(flag_prefix.size()));
- }
- return true;
-}
-
-template <typename T>
-bool ParseFlag(const std::string& flag_value,
- const std::function<void(const T&)>& hook) {
- std::istringstream stream(flag_value);
- T read_value;
- stream >> read_value;
- if (!stream.eof() && !stream.good()) {
- return false;
- }
- hook(read_value);
- return true;
-}
-
-bool ParseBoolFlag(const std::string& flag_value,
- const std::function<void(const bool&)>& hook) {
- if (flag_value != "true" && flag_value != "false") {
- return false;
- }
-
- hook(flag_value == "true");
- return true;
-}
-} // namespace
-
-Flag::Flag(const char* name, const std::function<void(const int32_t&)>& hook,
- int32_t default_value, const std::string& usage_text)
- : name_(name),
- type_(TYPE_INT32),
- value_hook_([hook](const std::string& flag_value) {
- return ParseFlag<int32_t>(flag_value, hook);
- }),
- default_for_display_(ToString(default_value)),
- usage_text_(usage_text) {}
-
-Flag::Flag(const char* name, const std::function<void(const int64_t&)>& hook,
- int64_t default_value, const std::string& usage_text)
- : name_(name),
- type_(TYPE_INT64),
- value_hook_([hook](const std::string& flag_value) {
- return ParseFlag<int64_t>(flag_value, hook);
- }),
- default_for_display_(ToString(default_value)),
- usage_text_(usage_text) {}
-
-Flag::Flag(const char* name, const std::function<void(const float&)>& hook,
- float default_value, const std::string& usage_text)
- : name_(name),
- type_(TYPE_FLOAT),
- value_hook_([hook](const std::string& flag_value) {
- return ParseFlag<float>(flag_value, hook);
- }),
- default_for_display_(ToString(default_value)),
- usage_text_(usage_text) {}
-
-Flag::Flag(const char* name, const std::function<void(const bool&)>& hook,
- bool default_value, const std::string& usage_text)
- : name_(name),
- type_(TYPE_BOOL),
- value_hook_([hook](const std::string& flag_value) {
- return ParseBoolFlag(flag_value, hook);
- }),
- default_for_display_(default_value ? "true" : "false"),
- usage_text_(usage_text) {}
-
-Flag::Flag(const char* name,
- const std::function<void(const std::string&)>& hook,
- const std::string& default_value, const std::string& usage_text)
- : name_(name),
- type_(TYPE_STRING),
- value_hook_([hook](const std::string& flag_value) {
- hook(flag_value);
- return true;
- }),
- default_for_display_(default_value),
- usage_text_(usage_text) {}
-
-bool Flag::Parse(const std::string& arg, bool* value_parsing_ok) const {
- return ParseFlag(arg, name_, value_hook_, value_parsing_ok);
-}
-
-std::string Flag::GetTypeName() const {
- switch (type_) {
- case TYPE_INT32:
- return "int32";
- case TYPE_INT64:
- return "int64";
- case TYPE_FLOAT:
- return "float";
- case TYPE_BOOL:
- return "bool";
- case TYPE_STRING:
- return "string";
- }
-
- return "unknown";
-}
-
-/*static*/ bool Flags::Parse(int* argc, const char** argv,
- const std::vector<Flag>& flag_list) {
- bool result = true;
- std::vector<const char*> unknown_flags;
- for (int i = 1; i < *argc; ++i) {
- if (std::string(argv[i]) == "--") {
- while (i < *argc) {
- unknown_flags.push_back(argv[i]);
- ++i;
- }
- break;
- }
-
- bool was_found = false;
- for (const Flag& flag : flag_list) {
- bool value_parsing_ok;
- was_found = flag.Parse(argv[i], &value_parsing_ok);
- if (!value_parsing_ok) {
- result = false;
- }
- if (was_found) {
- break;
- }
- }
- if (!was_found) {
- unknown_flags.push_back(argv[i]);
- }
- }
- int dst = 1; // Skip argv[0]
- for (auto f : unknown_flags) {
- argv[dst++] = f;
- }
- argv[dst++] = nullptr;
- *argc = unknown_flags.size() + 1;
- return result && (*argc < 2 || std::strcmp(argv[1], "--help") != 0);
-}
-
-/*static*/ std::string Flags::Usage(const std::string& cmdline,
- const std::vector<Flag>& flag_list) {
- std::ostringstream usage_text;
- usage_text << "usage: " << cmdline << "\n";
- if (!flag_list.empty()) {
- usage_text << "Flags:\n";
- }
-
- for (const Flag& flag : flag_list) {
- auto type_name = flag.GetTypeName();
- usage_text << "\t";
- usage_text << "--" << flag.name_ << "=" << flag.default_for_display_;
- usage_text << "\t" << type_name << "\t" << flag.usage_text_ << "\n";
- }
- return usage_text.str();
-}
-
-} // namespace nnfw
diff --git a/tools/tflite_benchmark_model/command_line_flags.h b/tools/tflite_benchmark_model/command_line_flags.h
deleted file mode 100644
index 766417d87..000000000
--- a/tools/tflite_benchmark_model/command_line_flags.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef __TFLITE_BENCHMARK_MODEL_COMMAND_LINE_FLAGS_H__
-#define __TFLITE_BENCHMARK_MODEL_COMMAND_LINE_FLAGS_H__
-
-#include <functional>
-#include <string>
-#include <vector>
-
-namespace nnfw {
-// A simple command-line argument parsing module.
-// Dependency free simplified port of core/util/command_line_flags.
-// This class is written for benchmarks and uses inefficient string
-// concatenation. This was written to avoid dependency on tensorflow/core/util
-// which transitively brings in a lot of other dependencies that are not
-// necessary for tflite benchmarking code.
-// The recommended way of using it is with local variables and an initializer
-// list of Flag objects, for example:
-//
-// int some_int = 10;
-// bool some_switch = false;
-// std::string some_name = "something";
-//
-// std::vector<nnfw::Flag> flag_list = {
-// Flag::CreateFlag("some_int", &some_int, "an integer that affects X"),
-// Flag::CreateFlag("some_switch", &some_switch, "a bool that affects Y"),
-// Flag::CreateFlag("some_name", &some_name, "a string that affects Z")
-// };
-// // Get usage message before Flags::Parse() to capture default values.
-// std::string usage = Flags::Usage(argv[0], flag_list);
-// bool parsed_values_ok = Flags::Parse(&argc, argv, flag_list);
-//
-// if (argc != 1 || !parsed_values_ok) {
-// ...output usage and error message...
-// }
-//
-// The argc and argv values are adjusted by the Parse function so that all
-// that remains is the program name (at argv[0]), with any unknown arguments
-// filling the rest of the array. This means you can check for arguments that
-// weren't understood by seeing if *argc is greater than 1.
-// The result indicates if there were any errors parsing the values that were
-// passed to the command-line switches. For example, --some_int=foo would return
-// false because the argument is expected to be an integer.
-//
-// NOTE: Unlike gflags-style libraries, this library is intended to be
-// used in the `main()` function of your binary. It does not handle
-// flag definitions that are scattered around the source code.
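-//
-// Example of reporting leftover arguments after parsing (an illustrative
-// sketch continuing the example above, not part of the original comment):
-//
-//   if (!Flags::Parse(&argc, argv, flag_list)) {
-//     std::cerr << Flags::Usage(argv[0], flag_list);
-//     return 1;
-//   }
-//   for (int i = 1; i < argc; ++i) {
-//     std::cerr << "Warning: unknown argument " << argv[i] << "\n";
-//   }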
-
-// A description of a single command line flag, holding its name, type, usage
-// text, and a pointer to the corresponding variable.
-class Flag {
- public:
- template <typename T>
- static Flag CreateFlag(const char* name, T* val, const char* usage) {
- return Flag(name, [val](const T& v) { *val = v; }, *val, usage);
- }
-
- Flag(const char* name, const std::function<void(const int32_t&)>& hook,
- int32_t default_value, const std::string& usage_text);
- Flag(const char* name, const std::function<void(const int64_t&)>& hook,
- int64_t default_value, const std::string& usage_text);
- Flag(const char* name, const std::function<void(const float&)>& hook,
- float default_value, const std::string& usage_text);
- Flag(const char* name, const std::function<void(const bool&)>& hook,
- bool default_value, const std::string& usage_text);
- Flag(const char* name, const std::function<void(const std::string&)>& hook,
- const std::string& default_value, const std::string& usage_text);
-
- private:
- friend class Flags;
-
- bool Parse(const std::string& arg, bool* value_parsing_ok) const;
-
- std::string name_;
- enum {
- TYPE_INT32,
- TYPE_INT64,
- TYPE_BOOL,
- TYPE_STRING,
- TYPE_FLOAT,
- } type_;
-
- std::string GetTypeName() const;
-
- std::function<bool(const std::string&)> value_hook_;
- std::string default_for_display_;
-
- std::string usage_text_;
-};
-
-class Flags {
- public:
- // Parse the command line represented by argv[0, ..., (*argc)-1] to find flag
- // instances matching flags in flaglist[]. Update the variables associated
- // with matching flags, and remove the matching arguments from (*argc, argv).
- // Return true iff all recognized flag values were parsed correctly, and the
- // first remaining argument is not "--help".
- static bool Parse(int* argc, const char** argv,
- const std::vector<Flag>& flag_list);
-
- // Return a usage message with command line cmdline, and the
- // usage_text strings in flag_list[].
- static std::string Usage(const std::string& cmdline,
- const std::vector<Flag>& flag_list);
-};
-
-} // namespace nnfw
-
-#endif // __TFLITE_BENCHMARK_MODEL_COMMAND_LINE_FLAGS_H__
-
-
diff --git a/tools/tflite_benchmark_model/logging.h b/tools/tflite_benchmark_model/logging.h
deleted file mode 100644
index e694a0926..000000000
--- a/tools/tflite_benchmark_model/logging.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef __TFLITE_BENCHMARK_MODEL_LOGGING_H_
-#define __TFLITE_BENCHMARK_MODEL_LOGGING_H_
-
-// LOG and CHECK macros for benchmarks.
-
-#include <cstdlib>
-#include <iostream>
-#include <sstream>
-
-namespace nnfw {
-namespace logging {
-// A wrapper that logs to stderr.
-//
-// Used for TFLITE_LOG and TFLITE_BENCHMARK_CHECK macros.
-class LoggingWrapper {
- public:
- enum class LogSeverity : int {
- INFO = 0,
- WARN = 1,
- ERROR = 2,
- FATAL = 3,
- };
- LoggingWrapper(LogSeverity severity)
- : severity_(severity), should_log_(true) {}
- LoggingWrapper(LogSeverity severity, bool log)
- : severity_(severity), should_log_(log) {}
- std::stringstream& Stream() { return stream_; }
- ~LoggingWrapper() {
- if (should_log_) {
- std::cerr << stream_.str() << std::endl;
- if (severity_ == LogSeverity::FATAL) {
- std::flush(std::cerr);
- std::abort();
- }
- }
- }
-
- private:
- std::stringstream stream_;
- LogSeverity severity_;
- bool should_log_;
-};
-
-} // namespace logging
-
-} // namespace nnfw
-
-#define TFLITE_LOG(severity) \
- nnfw::logging::LoggingWrapper( \
- nnfw::logging::LoggingWrapper::LogSeverity::severity) \
- .Stream()
-
-#define TFLITE_BENCHMARK_CHECK(condition) \
- nnfw::logging::LoggingWrapper( \
- nnfw::logging::LoggingWrapper::LogSeverity::FATAL, \
- !(condition)) \
- .Stream()
-
-#define TFLITE_BENCHMARK_CHECK_EQ(a, b) TFLITE_BENCHMARK_CHECK((a) == (b))
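-
-// Usage sketch (illustrative only; `graph_path`, `interpreter`, and `status`
-// are hypothetical variables, not part of this header):
-//
-//   TFLITE_LOG(INFO) << "loaded model " << graph_path;
-//   TFLITE_BENCHMARK_CHECK(interpreter) << "failed to construct interpreter";
-//   TFLITE_BENCHMARK_CHECK_EQ(status, kTfLiteOk);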
-
-#endif // __TFLITE_BENCHMARK_MODEL_LOGGING_H_
diff --git a/tools/tflite_benchmark_model/profile_summarizer.cc b/tools/tflite_benchmark_model/profile_summarizer.cc
deleted file mode 100644
index 4d12b50af..000000000
--- a/tools/tflite_benchmark_model/profile_summarizer.cc
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/contrib/lite/profiling/profile_summarizer.h"
-
-#include <algorithm>
-#include <map>
-#include <sstream>
-
-#include "tensorflow/contrib/lite/schema/schema_generated.h"
-
-namespace tflite {
-namespace profiling {
-namespace {
-
-using Detail = tensorflow::StatsCalculator::Detail;
-
-struct OperatorDetails {
- std::string name;
- std::vector<std::string> inputs;
- std::vector<std::string> outputs;
-};
-
-std::string GetTensorName(const tflite::Interpreter& interpreter,
- int tensor_index) {
- const auto tensor = interpreter.tensor(tensor_index);
- if (tensor == nullptr || tensor->name == nullptr) {
- return "Unknown";
- }
- return tensor->name;
-}
-
-std::vector<std::string> GetTensorNames(const tflite::Interpreter& interpreter,
- const TfLiteIntArray* tensor_indices) {
- std::vector<std::string> tensors;
- tensors.reserve(tensor_indices->size);
- for (int i = 0; i < tensor_indices->size; i++) {
- tensors.push_back(GetTensorName(interpreter, tensor_indices->data[i]));
- }
- return tensors;
-}
-
-std::string ToString(const std::vector<std::string>& str_vector) {
- std::stringstream stream;
- stream << "[";
- bool first = true;
- for (const auto& s : str_vector) {
- if (!first) {
- stream << ", ";
- } else {
- first = false;
- }
- stream << s;
- }
- stream << "]";
- return stream.str();
-}
-
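-// Returns the operator's display name (the builtin enum name, or the custom
-// op name for CUSTOM ops) together with its input and output tensor names.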
-OperatorDetails GetOperatorDetails(const tflite::Interpreter& interpreter,
- int node_index) {
- auto node_reg = interpreter.node_and_registration(node_index);
- auto inputs = node_reg->first.inputs;
- auto outputs = node_reg->first.outputs;
- int code = node_reg->second.builtin_code;
- const char* op_name = nullptr;
- if (code == tflite::BuiltinOperator_CUSTOM) {
- const char* custom_name = node_reg->second.custom_name;
- op_name = custom_name ? custom_name : "UnknownCustomOp";
- } else {
- op_name = tflite::EnumNamesBuiltinOperator()[code];
- }
- OperatorDetails details;
- details.name = op_name;
- details.inputs = GetTensorNames(interpreter, inputs);
- details.outputs = GetTensorNames(interpreter, outputs);
- return details;
-}
-
-} // namespace
-
-ProfileSummarizer::ProfileSummarizer()
- : stats_calculator_(new ::tensorflow::StatsCalculator(
- tensorflow::StatSummarizerOptions())) {}
-
-void ProfileSummarizer::ProcessProfiles(
- const std::vector<const ProfileEvent*>& profile_stats,
- const tflite::Interpreter& interpreter) {
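- // Keep only operator-invoke events with non-negative durations; other
- // event types are ignored.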
- std::vector<const ProfileEvent*> events;
- std::copy_if(profile_stats.begin(), profile_stats.end(),
- std::back_inserter(events), [](const ProfileEvent* e) {
- return e->event_type ==
- ProfileEvent::EventType::OPERATOR_INVOKE_EVENT &&
- e->end_timestamp_us >= e->begin_timestamp_us;
- });
- // Sort the events by begin timestamp.
- std::sort(events.begin(), events.end(),
- [](const ProfileEvent* const& a, const ProfileEvent* const& b) {
- return a->begin_timestamp_us < b->begin_timestamp_us;
- });
- if (events.empty()) {
- return;
- }
-
- int64_t base_start_us = events[0]->begin_timestamp_us;
- int node_num = 0;
- int64_t curr_total_us = 0;
- std::map<std::string, Detail> details;
- int prev_op_idx = -1;
- int seq_no = 1;
- for (auto event : events) {
- auto op_details = GetOperatorDetails(interpreter, event->event_metadata);
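- // Consecutive events that share a node index are continuations of the
- // same invocation: each repetition gets its own sequence number (and thus
- // its own detail entry), while times_called is only bumped when a fresh,
- // non-continued invocation starts.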
- bool is_continued = (prev_op_idx == event->event_metadata);
- seq_no = is_continued ? seq_no + 1 : 1;
- auto node_name = ToString(op_details.outputs) + "#" + std::to_string(seq_no);
- auto result = details.emplace(node_name, Detail());
- Detail* detail = &(result.first->second);
- detail->start_us.UpdateStat(event->begin_timestamp_us - base_start_us);
- int64_t node_exec_time =
- event->end_timestamp_us - event->begin_timestamp_us;
- detail->rel_end_us.UpdateStat(node_exec_time);
- curr_total_us += node_exec_time;
- ++node_num;
-
- if (result.second) {
- detail->name = node_name;
- detail->type = op_details.name;
- detail->run_order = node_num;
- detail->times_called = 0;
- }
- if (!is_continued) {
- ++detail->times_called;
- }
- prev_op_idx = event->event_metadata;
- }
- stats_calculator_->UpdateDetails(details);
- stats_calculator_->UpdateRunTotalUs(curr_total_us);
-}
-} // namespace profiling
-} // namespace tflite
diff --git a/tools/tflite_benchmark_model/profile_summarizer.h b/tools/tflite_benchmark_model/profile_summarizer.h
deleted file mode 100644
index a529ff874..000000000
--- a/tools/tflite_benchmark_model/profile_summarizer.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_CONTRIB_LITE_PROFILING_PROFILE_SUMMARIZER_H_
-#define TENSORFLOW_CONTRIB_LITE_PROFILING_PROFILE_SUMMARIZER_H_
-
-#include <vector>
-
-#include "tensorflow/contrib/lite/interpreter.h"
-#include "tensorflow/contrib/lite/profiling/profiler.h"
-#include "tensorflow/core/util/stats_calculator.h"
-
-namespace tflite {
-namespace profiling {
-
-// Creates a summary of operator invocations in the interpreter.
-class ProfileSummarizer {
- public:
- ProfileSummarizer();
- virtual ~ProfileSummarizer() {}
-
- // Process profile events to update statistics for operator invocations.
- void ProcessProfiles(const std::vector<const ProfileEvent*>& profile_stats,
- const tflite::Interpreter& interpreter);
-
- // Returns a string detailing the accumulated runtime stats in a tab-separated
- // format which can be pasted into a spreadsheet for further analysis.
- std::string GetOutputString() const {
- return stats_calculator_->GetOutputString();
- }
-
- std::string GetShortSummary() const {
- return stats_calculator_->GetShortSummary();
- }
-
- private:
- std::unique_ptr<tensorflow::StatsCalculator> stats_calculator_;
-};
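-
-// Usage sketch (illustrative only; assumes a profiling::Profiler attached to
-// the interpreter, as in the benchmark tool):
-//
-//   profiling::Profiler profiler;
-//   interpreter.SetProfiler(&profiler);
-//   profiler.StartProfiling();
-//   interpreter.Invoke();
-//   profiler.StopProfiling();
-//   ProfileSummarizer summarizer;
-//   summarizer.ProcessProfiles(profiler.GetProfileEvents(), interpreter);
-//   std::cout << summarizer.GetOutputString();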
-
-} // namespace profiling
-} // namespace tflite
-
-#endif // TENSORFLOW_CONTRIB_LITE_PROFILING_PROFILE_SUMMARIZER_H_