path: root/runtimes/neurun/core/src/exec/DataflowExecutor.h
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __NEURUN_EXEC_DATAFLOW_EXECUTOR_H__
#define __NEURUN_EXEC_DATAFLOW_EXECUTOR_H__

#include <list>
#include <map>
#include <unordered_map>

#include "FunctionSequence.h"
#include "Job.h"
#include "model/OperandIndexSequence.h"
#include "model/Index.h"
#include "model/Model.h"
#include "cpp14/memory.h"
#include "exec/ExecutorBase.h"

namespace neurun
{
namespace exec
{

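/**
 * @brief Executor that runs a compiled model in dataflow style
 *
 * Each subgraph's compiled code (see #CodeMap) is wrapped in a Job. A job stays in
 * #_waiting_jobs until all of its inputs are available, is then moved to #_ready_jobs
 * (ordered by rank) to be scheduled, and after execution is placed in #_finished_jobs.
 * Finishing a job notifies the jobs that consume its outputs.
 */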
class DataflowExecutor : public ExecutorBase
{
public:
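  /// @brief Mapping from a subgraph index to the compiled code (function sequence) that runs it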
  using CodeMap = std::unordered_map<model::SubgraphIndex, std::unique_ptr<FunctionSequence>>;

protected:
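  /// @brief Notify that the job with @c finished_job_id has finished so that jobs waiting on it may become ready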
  virtual void notify(uint32_t finished_job_id);
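  /// @brief Check whether no job is left in #_waiting_jobs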
  bool noWaitingJobs();

public:
  /**
   * @brief Constructs a DataflowExecutor object
   *
   * @param model Model object
   * @param subgraphs Subgraphs of the model, executed as jobs
   * @param operand_context OperandContext (only for input/output operand data access)
   * @param lower_info LowerInfo object (only to know input/output operands layout)
   * @param tensor_mgrs Set of tensor managers
   * @param code_map Compiled code map (one function sequence per subgraph)
   */
  DataflowExecutor(const std::shared_ptr<const model::Model> &model,
                   std::unique_ptr<model::Subgraphs> subgraphs,
                   const std::shared_ptr<compiler::OperandContext> &operand_context,
                   std::unique_ptr<graph::LowerInfoMap> lower_info,
                   std::unique_ptr<backend::TensorManagerSet> tensor_mgrs, CodeMap &&code_map);

  void executeImpl() override;

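  /// @brief Report that the job with @c job_index has ended (e.g. to registered observers)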
  void notifyJobEnd(uint32_t job_index);
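  /// @brief Report that the job with @c job_index has begun (e.g. to registered observers)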
  void notifyJobBegin(uint32_t job_index);

  void addObserver(std::unique_ptr<IExecutionObserver> ref)
  {
    _observers.emplace_back(std::move(ref));
  }
  void removeObserver(std::unique_ptr<IExecutionObserver> ref) { _observers.remove(ref); }

protected:
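  /// @brief Calculate a job's scheduling rank from the ranks of the operations it contains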
  int64_t calculateRank(const std::vector<model::Element> &operations);
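  /// @brief Move the waiting job with the given @c id into #_ready_jobs, keyed by its rank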
  void emplaceToReadyJobs(const uint32_t &id);

protected:
  CodeMap _code_map;
  /**
   * @brief A vector of finished jobs for the current execution
   *        After a run it holds all the jobs of this execution, ready to be reused for the next run
   */
  std::vector<std::unique_ptr<Job>> _finished_jobs;
  /**
   * @brief A vector of jobs waiting to be executed in the current execution
   *        All the jobs are moved here from #_finished_jobs when a run starts
   */
  std::vector<std::unique_ptr<Job>> _waiting_jobs;
  /**
   * @brief Jobs' output info
   *        For each job, the indices of the jobs that consume its outputs;
   *        used for notifying them after finishing a job
   */
  std::vector<std::list<uint32_t>> _output_info;
  /// @brief Initial number of unresolved inputs of each job, used to reset #_input_info for every run
  std::vector<uint32_t> _initial_input_info;
  /// @brief Remaining number of unresolved inputs of each job; a job becomes ready when it reaches zero
  std::vector<uint32_t> _input_info;
  /**
   * @brief Jobs that are ready to be scheduled for execution
   *        Ordered by priority (highest first) taken from `_indexed_ranks`
   */
  std::multimap<int64_t, std::unique_ptr<Job>, std::greater<int64_t>> _ready_jobs;

  /// @brief Maps each job index to the subgraph it runs (whose compiled function is in #_code_map)
  std::unordered_map<uint32_t, model::SubgraphIndex> _job_to_subgraph;
};
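
/*
 * Note: the block below is a minimal, self-contained sketch of the dataflow
 * scheduling pattern that DataflowExecutor's members support (it is NOT the
 * actual implementation; the function and parameter names are made up for
 * illustration). Each job knows which jobs consume its outputs (output_info)
 * and how many of its own inputs are still pending (input_info); a finished
 * job "notifies" its consumers, and jobs whose pending-input count reaches
 * zero are pushed into a ready queue ordered by rank.
 *
 *   #include <cstdint>
 *   #include <functional>
 *   #include <list>
 *   #include <map>
 *   #include <vector>
 *
 *   void runDataflow(const std::vector<std::function<void()>> &jobs,
 *                    const std::vector<std::list<uint32_t>> &output_info,
 *                    std::vector<uint32_t> input_info, // copied: consumed during the run
 *                    const std::vector<int64_t> &ranks)
 *   {
 *     // Ready queue keyed by rank, highest rank scheduled first
 *     std::multimap<int64_t, uint32_t, std::greater<int64_t>> ready;
 *     for (uint32_t id = 0; id < jobs.size(); ++id)
 *       if (input_info[id] == 0)
 *         ready.emplace(ranks[id], id); // jobs with no pending inputs start out ready
 *     while (!ready.empty())
 *     {
 *       auto it = ready.begin(); // highest-rank ready job
 *       uint32_t id = it->second;
 *       ready.erase(it);
 *       jobs[id](); // run the compiled function of this job
 *       for (uint32_t consumer : output_info[id])
 *         if (--input_info[consumer] == 0) // "notify": consumer has all its inputs now
 *           ready.emplace(ranks[consumer], consumer);
 *     }
 *   }
 */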

} // namespace exec
} // namespace neurun

#endif // __NEURUN_EXEC_DATAFLOW_EXECUTOR_H__