path: root/runtimes
author    Dilshodzhon Poshshoev / AI Tools Lab / SRR / Engineer / Samsung Electronics <d.poshshoev@samsung.com>  2019-09-11 10:08:36 +0300
committer Chunseok Lee / On-Device Lab (SR) / Staff Engineer / Samsung Electronics <chunseok.lee@samsung.com>  2019-09-11 16:08:36 +0900
commit  bf47aa9a8997fcba172d6ff25ff0e71daabd4bca (patch)
tree    34a3d55dbc5968b5d8018af2571ec1fbad4e10cd /runtimes
parent  405f36a4b0f4bac9b5efaf87fc360f4ed225a25c (diff)
[neurun] Rename Scheduler to HEScheduler (#7340)
Since this is a scheduler for heterogeneous execution, it should be named accordingly.

Signed-off-by: Poshshoev Dilshodzhon <d.poshshoev@samsung.com>
Diffstat (limited to 'runtimes')
-rw-r--r--  runtimes/neurun/core/src/compiler/Compiler.cc                                                                   6
-rw-r--r--  runtimes/neurun/core/src/compiler/HEScheduler.cc (renamed from runtimes/neurun/core/src/compiler/Scheduler.cc)  82
-rw-r--r--  runtimes/neurun/core/src/compiler/HEScheduler.h (renamed from runtimes/neurun/core/src/compiler/Scheduler.h)    22
-rw-r--r--  runtimes/neurun/test/core/compiler/Scheduler.cc                                                                 16
4 files changed, 64 insertions(+), 62 deletions(-)
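For context, a minimal sketch of the renamed call site, mirroring the Compiler.cc hunk below (the _graph member, BackendManager call, and return types are taken from this patch; the snippet is illustrative only, not an additional change):

    // Build the heterogeneous-execution scheduler from the graph's operands,
    // the registered backends, and the custom kernel registry, then schedule.
    auto scheduler =
        compiler::HEScheduler(_graph->operands(), backend::BackendManager::instance().getAll(),
                              _graph->getKernelRegistry());
    const auto br = scheduler.schedule(*_graph);       // std::unique_ptr<compiler::BackendResolver>
    auto indexed_ranks = scheduler.getIndexedRanks();  // per-operation ranks computed by makeRank()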
diff --git a/runtimes/neurun/core/src/compiler/Compiler.cc b/runtimes/neurun/core/src/compiler/Compiler.cc
index 8b07dd1ea..6a378faa9 100644
--- a/runtimes/neurun/core/src/compiler/Compiler.cc
+++ b/runtimes/neurun/core/src/compiler/Compiler.cc
@@ -22,7 +22,7 @@
#include "compiler/IScheduler.h"
#include "compiler/ManualScheduler.h"
-#include "compiler/Scheduler.h"
+#include "compiler/HEScheduler.h"
#include "backend/ExecTime.h"
#include "graph/operation/LowerInfo.h"
#include "dumper/dot/DotDumper.h"
@@ -56,8 +56,8 @@ void Compiler::compile(void)
if (util::getConfigBool(util::config::USE_SCHEDULER))
{
auto scheduler =
- compiler::Scheduler(_graph->operands(), backend::BackendManager::instance().getAll(),
- _graph->getKernelRegistry());
+ compiler::HEScheduler(_graph->operands(), backend::BackendManager::instance().getAll(),
+ _graph->getKernelRegistry());
br = scheduler.schedule(*_graph);
indexed_ranks = scheduler.getIndexedRanks();
}
diff --git a/runtimes/neurun/core/src/compiler/Scheduler.cc b/runtimes/neurun/core/src/compiler/HEScheduler.cc
index 96c1d4207..a3d1a5990 100644
--- a/runtimes/neurun/core/src/compiler/Scheduler.cc
+++ b/runtimes/neurun/core/src/compiler/HEScheduler.cc
@@ -15,7 +15,7 @@
*/
#include "model/Operand.h"
-#include "compiler/Scheduler.h"
+#include "compiler/HEScheduler.h"
#include "graph/Graph.h"
#include "util/ConfigSource.h"
#include "compiler/IExecutionBuilder.h"
@@ -125,15 +125,15 @@ static bool isMergable(const graph::Graph &graph, const model::Operation &node)
return true;
}
-void Scheduler::scheduleShufflingBackends()
+void HEScheduler::scheduleShufflingBackends()
{
- VERBOSE(Scheduler::scheduleNode)
+ VERBOSE(HEScheduler::scheduleNode)
<< "Started task scheduling: uses all backends to get more metrics for data transfer"
<< std::endl;
size_t backend_ind = 0;
for (const auto &rank : _rank_to_op)
{
- VERBOSE(Scheduler::scheduleNode) << "scheduling (" << rank.second.value() << ")" << std::endl;
+ VERBOSE(HEScheduler::scheduleNode) << "scheduling (" << rank.second.value() << ")" << std::endl;
const auto &node = _graph->operations().at(rank.second);
const bool quant = isQuant(*_graph, node);
const auto size = getOperationsFlattenedIOSize(*_graph, node);
@@ -164,15 +164,15 @@ void Scheduler::scheduleShufflingBackends()
continue;
}
_backend_resolver->setBackend(rank.second, _all_backends[backend_ind]);
- VERBOSE(Scheduler::schedule) << "backend for " << node.getName() << " is "
- << _all_backends[backend_ind]->config()->id() << std::endl;
+ VERBOSE(HEScheduler::schedule) << "backend for " << node.getName() << " is "
+ << _all_backends[backend_ind]->config()->id() << std::endl;
++backend_ind;
break;
}
}
}
-bool Scheduler::isNodeProfiled(const model::Operation &node)
+bool HEScheduler::isNodeProfiled(const model::Operation &node)
{
const bool quant = isQuant(*_graph, node);
const auto size = getOperationsFlattenedIOSize(*_graph, node);
@@ -185,10 +185,10 @@ bool Scheduler::isNodeProfiled(const model::Operation &node)
return true;
}
-std::unique_ptr<compiler::BackendResolver> Scheduler::schedule(const graph::Graph &graph)
+std::unique_ptr<compiler::BackendResolver> HEScheduler::schedule(const graph::Graph &graph)
{
_graph = &graph;
- VERBOSE(Scheduler::schedule) << "task scheduling started" << std::endl;
+ VERBOSE(HEScheduler::schedule) << "task scheduling started" << std::endl;
// Make ranks and save in descending order
makeRank();
@@ -212,7 +212,7 @@ std::unique_ptr<compiler::BackendResolver> Scheduler::schedule(const graph::Grap
if (all_nodes_are_profiled)
{
scheduleShufflingBackends();
- VERBOSE(Scheduler::schedule) << "task scheduling finished" << std::endl;
+ VERBOSE(HEScheduler::schedule) << "task scheduling finished" << std::endl;
return std::move(_backend_resolver);
}
}
@@ -222,12 +222,12 @@ std::unique_ptr<compiler::BackendResolver> Scheduler::schedule(const graph::Grap
{
scheduleNode(rank.second);
}
- VERBOSE(Scheduler::schedule) << "task scheduling finished" << std::endl;
+ VERBOSE(HEScheduler::schedule) << "task scheduling finished" << std::endl;
return std::move(_backend_resolver);
}
-int64_t Scheduler::getOpTime(const backend::Backend *backend, const std::string &operation,
- bool quant, uint32_t size)
+int64_t HEScheduler::getOpTime(const backend::Backend *backend, const std::string &operation,
+ bool quant, uint32_t size)
{
const auto time = _exec_time->getOperationExecTime(backend, operation, quant, size);
if (time != _exec_time->NOT_FOUND)
@@ -236,8 +236,8 @@ int64_t Scheduler::getOpTime(const backend::Backend *backend, const std::string
return _is_supported.at(backend).at(operation) ? 1 : _exec_time->getMax();
}
-int64_t Scheduler::getPermuteTime(const backend::Backend *src_backend,
- const backend::Backend *dst_backend, bool quant, uint32_t size)
+int64_t HEScheduler::getPermuteTime(const backend::Backend *src_backend,
+ const backend::Backend *dst_backend, bool quant, uint32_t size)
{
const auto time = _exec_time->getPermuteTime(src_backend, dst_backend, quant, size);
if (time != _exec_time->NOT_FOUND)
@@ -247,7 +247,7 @@ int64_t Scheduler::getPermuteTime(const backend::Backend *src_backend,
return size / 200;
}
-int64_t Scheduler::tryBackend(const model::Operation &node, const backend::Backend *backend)
+int64_t HEScheduler::tryBackend(const model::Operation &node, const backend::Backend *backend)
{
auto iter = _is_supported.find(backend);
if (iter != _is_supported.end())
@@ -276,9 +276,9 @@ int64_t Scheduler::tryBackend(const model::Operation &node, const backend::Backe
return _is_supported[backend][node.getName()] ? 1 : _exec_time->getMax();
}
-void Scheduler::makeRank()
+void HEScheduler::makeRank()
{
- VERBOSE(Scheduler::makeRank) << "task prioritizing" << std::endl;
+ VERBOSE(HEScheduler::makeRank) << "task prioritizing" << std::endl;
_graph->operations().iterate(
[&](const model::OperationIndex &index, const model::Operation &) { DFSMaxRank(index); });
@@ -288,10 +288,10 @@ void Scheduler::makeRank()
UNUSED_RELEASE(index);
assert(_op_to_rank->find(index) != _op_to_rank->end());
});
- VERBOSE(Scheduler::makeRank) << "task prioritizing finished" << std::endl;
+ VERBOSE(HEScheduler::makeRank) << "task prioritizing finished" << std::endl;
}
-int64_t Scheduler::DFSMaxRank(const model::OperationIndex &index)
+int64_t HEScheduler::DFSMaxRank(const model::OperationIndex &index)
{
auto op_to_rank_it = _op_to_rank->find(index);
if (op_to_rank_it != _op_to_rank->end())
@@ -347,13 +347,13 @@ int64_t Scheduler::DFSMaxRank(const model::OperationIndex &index)
assert(rank >= 0);
_rank_to_op.emplace(rank, index);
_op_to_rank->emplace(index, rank);
- VERBOSE(Scheduler::DFSMaxRank) << "rank of operation (" << index.value() << ")" << node.getName()
- << " is " << rank << std::endl;
+ VERBOSE(HEScheduler::DFSMaxRank) << "rank of operation (" << index.value() << ")"
+ << node.getName() << " is " << rank << std::endl;
return rank;
}
-int64_t Scheduler::DFSChildrenMaxRank(const model::OperationIndex &index)
+int64_t HEScheduler::DFSChildrenMaxRank(const model::OperationIndex &index)
{
const auto &node = _graph->operations().at(index);
int64_t max_child_rank = 0;
@@ -391,8 +391,8 @@ int64_t Scheduler::DFSChildrenMaxRank(const model::OperationIndex &index)
return max_child_rank;
}
-int64_t Scheduler::backendAvailableTime(const backend::Backend *backend,
- const int64_t &starting_time, const int64_t &time_amount)
+int64_t HEScheduler::backendAvailableTime(const backend::Backend *backend,
+ const int64_t &starting_time, const int64_t &time_amount)
{
const auto backend_times = _backends_avail_time.at(backend);
// finishing and starting times of an op, that will come after current op
@@ -408,9 +408,9 @@ int64_t Scheduler::backendAvailableTime(const backend::Backend *backend,
return prev_op_ft;
}
-void Scheduler::scheduleNode(const model::OperationIndex &index)
+void HEScheduler::scheduleNode(const model::OperationIndex &index)
{
- VERBOSE(Scheduler::scheduleNode) << "scheduling (" << index.value() << ")" << std::endl;
+ VERBOSE(HEScheduler::scheduleNode) << "scheduling (" << index.value() << ")" << std::endl;
int64_t eft = std::numeric_limits<int64_t>::max(), selected_exec_time = 0;
const auto &node = _graph->operations().at(index);
@@ -446,14 +446,14 @@ void Scheduler::scheduleNode(const model::OperationIndex &index)
_backends_avail_time[chosen_backend].emplace(eft, eft - selected_exec_time);
_backend_resolver->setBackend(index, chosen_backend);
- VERBOSE(Scheduler::scheduleNode) << "backend for " << node.getName() << " is "
- << chosen_backend->config()->id() << ". Its eft: " << eft
- << std::endl;
+ VERBOSE(HEScheduler::scheduleNode) << "backend for " << node.getName() << " is "
+ << chosen_backend->config()->id() << ". Its eft: " << eft
+ << std::endl;
}
std::pair<int64_t, int64_t>
-Scheduler::ESTAndExecTime(const backend::Backend *backend, const model::OperationIndex &index,
- std::multimap<int64_t, int64_t> &transfer_st_exec_time)
+HEScheduler::ESTAndExecTime(const backend::Backend *backend, const model::OperationIndex &index,
+ std::multimap<int64_t, int64_t> &transfer_st_exec_time)
{
const bool is_linear_exec = "Linear" == util::getConfigString(util::config::EXECUTOR);
const bool is_parallel_exec = "Parallel" == util::getConfigString(util::config::EXECUTOR);
@@ -528,25 +528,25 @@ Scheduler::ESTAndExecTime(const backend::Backend *backend, const model::Operatio
* data transfer.*/
if (!is_parallel_exec)
{
- VERBOSE(Scheduler::ESTAndExecTime)
+ VERBOSE(HEScheduler::ESTAndExecTime)
<< "exec_time of (" << index.value() << ") " << node.getName() << " quant==" << quant
<< " on " << backend->config()->id() << " is " << exec_time
<< " microseconds. Data transfer cost: " << total_transfer_cost << std::endl;
return {total_transfer_cost, exec_time};
}
- VERBOSE(Scheduler::ESTAndExecTime) << "exec_time of (" << index.value() << ") " << node.getName()
- << " quant==" << quant << " on " << backend->config()->id()
- << ": " << exec_time
- << " microseconds. Backend available time: " << prev_op_ft
- << " Parent's max eft: " << max_pred_eft - total_transfer_cost
- << " data transfer cost: " << total_transfer_cost << std::endl;
+ VERBOSE(HEScheduler::ESTAndExecTime)
+ << "exec_time of (" << index.value() << ") " << node.getName() << " quant==" << quant
+ << " on " << backend->config()->id() << ": " << exec_time
+ << " microseconds. Backend available time: " << prev_op_ft
+ << " Parent's max eft: " << max_pred_eft - total_transfer_cost
+ << " data transfer cost: " << total_transfer_cost << std::endl;
return {prev_op_ft, exec_time};
}
-int64_t Scheduler::predMaxEFT(const backend::Backend *backend, const model::Operation &node,
- std::multimap<int64_t, int64_t> &transfer_st_exec_time)
+int64_t HEScheduler::predMaxEFT(const backend::Backend *backend, const model::Operation &node,
+ std::multimap<int64_t, int64_t> &transfer_st_exec_time)
{
int64_t max_pred_eft = 0;
for (const auto &input_operand_idx : node.getInputs())
diff --git a/runtimes/neurun/core/src/compiler/Scheduler.h b/runtimes/neurun/core/src/compiler/HEScheduler.h
index 130c81d0e..2b818f248 100644
--- a/runtimes/neurun/core/src/compiler/Scheduler.h
+++ b/runtimes/neurun/core/src/compiler/HEScheduler.h
@@ -15,12 +15,13 @@
*/
/**
- * @file Scheduler.h
- * @brief This file contains Scheduler class to define and run task scheduler
+ * @file HEScheduler.h
+ * @brief This file contains HEScheduler class to define and run task Heterogeneous Execution
+ * Scheduler
*/
-#ifndef __NEURUN_COMPILER_SCHEDULER_H_
-#define __NEURUN_COMPILER_SCHEDULER_H_
+#ifndef __NEURUN_COMPILER_H_E_SCHEDULER_H_
+#define __NEURUN_COMPILER_H_E_SCHEDULER_H_
#include "compiler/IScheduler.h"
#include "graph/Graph.h"
@@ -39,16 +40,17 @@ namespace compiler
/**
* @brief Class to schedule tasks
*/
-class Scheduler : IScheduler
+class HEScheduler : IScheduler
{
public:
/**
- * @brief Construct a new Scheduler object
+ * @brief Construct a new Heterogeneous Execution Scheduler object
* @param[in] model Graph model
* @param[in] backend_resolver backend resolver
*/
- Scheduler(const neurun::model::Operands &operands, std::vector<const backend::Backend *> backends,
- const std::shared_ptr<backend::custom::KernelRegistry> &registry)
+ HEScheduler(const neurun::model::Operands &operands,
+ std::vector<const backend::Backend *> backends,
+ const std::shared_ptr<backend::custom::KernelRegistry> &registry)
: _is_supported{}, _backends_avail_time{}, _ops_eft{},
_op_to_rank{std::make_shared<model::OperationIndexMap<int64_t>>()},
_all_backends(std::move(backends))
@@ -62,7 +64,7 @@ public:
_all_backends.begin(), _all_backends.end(),
[](const backend::Backend *backend) { return backend->config()->id() == "cpu"; });
if (cpu_backend_it == _all_backends.end())
- throw std::runtime_error("Scheduler could be used only if 'cpu' backend is available");
+ throw std::runtime_error("HEScheduler could be used only if 'cpu' backend is available");
_cpu_backend = *cpu_backend_it;
}
@@ -159,4 +161,4 @@ private:
} // namespace neurun
-#endif // __NEURUN_COMPILER_SCHEDULER_H_
+#endif // __NEURUN_COMPILER_H_E_SCHEDULER_H_
diff --git a/runtimes/neurun/test/core/compiler/Scheduler.cc b/runtimes/neurun/test/core/compiler/Scheduler.cc
index 8b7af0719..8c5424a9d 100644
--- a/runtimes/neurun/test/core/compiler/Scheduler.cc
+++ b/runtimes/neurun/test/core/compiler/Scheduler.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include <compiler/Scheduler.h>
+#include <compiler/HEScheduler.h>
#include <backend/ExecTime.h>
#include <backend/IShapeFixer.h>
@@ -348,7 +348,7 @@ class SchedulerTestWithExecutorParam : public SchedulerTest,
};
//
-// Scheduler tests
+// HEScheduler tests
//
// Test scheduler behavior for straight graph with known execution time of all nodes and permutes.
@@ -376,7 +376,7 @@ TEST_P(SchedulerTestWithExecutorParam, straight_graph_known_exec_time)
et.uploadOperationsExecTime();
// Test scheduler
- auto scheduler = compiler::Scheduler(graph->operands(), _mock_backends, nullptr);
+ auto scheduler = compiler::HEScheduler(graph->operands(), _mock_backends, nullptr);
const auto br = scheduler.schedule(*graph);
ASSERT_EQ(br->getBackend(add_op_idx)->config()->id(), "cpu");
ASSERT_EQ(br->getBackend(sub_op_idx)->config()->id(), "gpu");
@@ -390,7 +390,7 @@ TEST_P(SchedulerTestWithExecutorParam, straight_graph_known_exec_time)
setPermutationsExecutionTime(_mock_backends, OPERAND_SIZE, 1e5);
// Test scheduler
- auto scheduler = compiler::Scheduler(graph->operands(), _mock_backends, nullptr);
+ auto scheduler = compiler::HEScheduler(graph->operands(), _mock_backends, nullptr);
const auto br = scheduler.schedule(*graph);
ASSERT_EQ(br->getBackend(add_op_idx)->config()->id(), "cpu");
ASSERT_EQ(br->getBackend(sub_op_idx)->config()->id(), "cpu");
@@ -429,7 +429,7 @@ TEST_P(SchedulerTestWithExecutorParam, branched_graph_known_exec_time)
et.uploadOperationsExecTime();
// Test scheduler
- auto scheduler = compiler::Scheduler(graph->operands(), _mock_backends, nullptr);
+ auto scheduler = compiler::HEScheduler(graph->operands(), _mock_backends, nullptr);
const auto br = scheduler.schedule(*graph);
std::string branch1_expected_backend("npu"), branch2_expected_backend("npu");
@@ -464,7 +464,7 @@ TEST_P(SchedulerTestWithExecutorParam, branched_graph_known_exec_time)
et.uploadOperationsExecTime();
// Test scheduler
- auto scheduler = compiler::Scheduler(graph->operands(), _mock_backends, nullptr);
+ auto scheduler = compiler::HEScheduler(graph->operands(), _mock_backends, nullptr);
const auto br = scheduler.schedule(*graph);
ASSERT_EQ(br->getBackend(add_op_idx)->config()->id(), "npu");
ASSERT_EQ(br->getBackend(mul1_op_idx)->config()->id(), "npu");
@@ -511,7 +511,7 @@ TEST_F(SchedulerTest, branched_graph_profiling_mode)
et.uploadOperationsExecTime();
// Test scheduler
- auto scheduler = compiler::Scheduler(graph->operands(), _mock_backends, nullptr);
+ auto scheduler = compiler::HEScheduler(graph->operands(), _mock_backends, nullptr);
const auto br = scheduler.schedule(*graph);
ASSERT_EQ(br->getBackend(mul1_op_idx)->config()->id(), "npu");
ASSERT_EQ(br->getBackend(mul2_op_idx)->config()->id(), "npu");
@@ -532,7 +532,7 @@ TEST_F(SchedulerTest, branched_graph_profiling_mode)
et.uploadOperationsExecTime();
// Test scheduler
- auto scheduler = compiler::Scheduler(graph->operands(), _mock_backends, nullptr);
+ auto scheduler = compiler::HEScheduler(graph->operands(), _mock_backends, nullptr);
const auto br = scheduler.schedule(*graph);
ASSERT_NE(br->getBackend(add_op_idx)->config()->id(),
br->getBackend(mul1_op_idx)->config()->id());