path: root/inference-engine/samples/validation_app/Processor.hpp
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <iostream>
#include <iomanip>  // std::setprecision, used by OUTPUT_FLOATING below
#include <limits>
#include <string>
#include <memory>

#include <samples/common.hpp>

#include "inference_engine.hpp"

#include "csv_dumper.hpp"
#include "image_decoder.hpp"
#include "console_progress.hpp"

// Streams a floating-point value in fixed notation with two decimal places.
#define OUTPUT_FLOATING(val) std::fixed << std::setprecision(2) << (val)

// Base class for the validation pipeline: loads a network on a target device,
// runs inference over an image dataset, and collects per-run timing metrics.
class Processor {
public:
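    // Aggregated timing statistics, in milliseconds, across all inference runs.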
    struct InferenceMetrics {
        int nRuns = 0;
        double minDuration = std::numeric_limits<double>::max();
        double maxDuration = 0;
        double totalTime = 0;

        virtual ~InferenceMetrics() { }  // Type has to be polymorphic
    };

protected:
    std::string modelFileName;
    std::string targetDevice;
    std::string imagesPath;
    int batch;
    InferenceEngine::InferRequest inferRequest;
    InferenceEngine::InputsDataMap inputInfo;
    InferenceEngine::OutputsDataMap outInfo;
    InferenceEngine::CNNNetReader networkReader;
    InferenceEngine::SizeVector inputDims;
    InferenceEngine::SizeVector outputDims;
    double loadDuration;
    PreprocessingOptions preprocessingOptions;

    CsvDumper& dumper;
    InferenceEngine::InferencePlugin plugin;

    std::string approach;

    // Runs inference over the dataset, updating the progress bar and
    // accumulating timing statistics into im.
    double Infer(ConsoleProgress& progress, int filesWatched, InferenceMetrics& im);

public:
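    // The flags_* parameters carry the sample's command-line options:
    // model path (-m), target device (-d), images path (-i), and batch size (-b).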
    Processor(const std::string& flags_m, const std::string& flags_d, const std::string& flags_i, int flags_b,
            InferenceEngine::InferencePlugin plugin, CsvDumper& dumper, const std::string& approach, PreprocessingOptions preprocessingOptions);

    // Runs the full validation pass; implemented by concrete processors.
    virtual std::shared_ptr<InferenceMetrics> Process() = 0;
    // Prints a summary of the loaded model, the dataset, and measured timings.
    virtual void Report(const InferenceMetrics& im) {
        slog::info << "Inference report:\n";
        slog::info << "\tNetwork load time: " << loadDuration << "ms" << "\n";
        slog::info << "\tModel: " << modelFileName << "\n";
        slog::info << "\tModel Precision: " << networkReader.getNetwork().getPrecision().name() << "\n";
        slog::info << "\tBatch size: " << batch << "\n";
        slog::info << "\tValidation dataset: " << imagesPath << "\n";
        slog::info << "\tValidation approach: " << approach;
        slog::info << slog::endl;

        if (im.nRuns > 0) {
            // Average is computed inside this branch so nRuns == 0 never divides by zero.
            double averageTime = im.totalTime / im.nRuns;
            slog::info << "Average infer time (ms): " << averageTime << " (" << OUTPUT_FLOATING(1000.0 / (averageTime / batch))
                    << " images per second with batch size = " << batch << ")" << slog::endl;
        } else {
            slog::warn << "No images processed" << slog::endl;
        }
    }

    virtual ~Processor() {}
};
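
// ---------------------------------------------------------------------------
// Minimal sketch of a concrete processor built on this interface. This is a
// hypothetical illustration, not part of the validation_app sources:
// "SketchProcessor" is an invented name, and the ConsoleProgress constructor
// signature is assumed, so the block is guarded out of compilation.
// ---------------------------------------------------------------------------
#if 0
class SketchProcessor : public Processor {
public:
    using Processor::Processor;  // reuse the base-class constructor

    std::shared_ptr<InferenceMetrics> Process() override {
        auto im = std::make_shared<InferenceMetrics>();
        ConsoleProgress progress(1);   // assumed: constructed from a total count
        Infer(progress, 1, *im);       // one pass; timing accumulates into *im
        return im;
    }
};
#endif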