/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "OperationValidator.h"

#include <cassert>

#include "model/operand/Set.h"
#include "graph/operation/LowerInfo.h"
#include "util/logging.h"

namespace neurun
{
namespace compiler
{

// OperationValidator walks operation nodes and assert-checks the shape/rank
// assumptions the current backends rely on. Checks are debug-only (assert),
// so release builds perform no validation here.

void OperationValidator::visit(const model::operation::Conv2DNode &)
{
  // DO NOTHING
}

void OperationValidator::visit(const model::operation::MaxPool2DNode &)
{
  // DO NOTHING
}

void OperationValidator::visit(const model::operation::AvgPool2DNode &)
{
  // DO NOTHING
}

void OperationValidator::visit(const model::operation::ConcatNode &node)
{
  (void)node; // NOTE To prevent from unused variable warning in NDEBUG builds

  // NOTE This implementation assumes concat over feature depth
  // TODO Remove this assumption
  assert(_ctx.at(::neurun::model::operand::Index{node.param().axis_index}).asScalar<int32_t>() == 3);
}

void OperationValidator::visit(const model::operation::FullyConnectedNode &)
{
  // DO NOTHING
}

void OperationValidator::visit(const model::operation::ReshapeNode &node)
{
  (void)node; // NOTE To prevent from unused variable warning in NDEBUG builds

  const auto output_index{node.getOutputs().at(0)};
  const auto input_index{node.getInputs().at(0)};

  // NOTE The content of a tensor specified by shape_index should be aligned with
  //      output tensor shape
  // TODO Check consistency of output shape

  // Only 'Feature Map' (4D) to 'Vector' (2D, batch 1) reshape is supported
  assert(_ctx.at(input_index).shape().rank() == 4);
  assert(_ctx.at(output_index).shape().rank() == 2);
  assert(_ctx.at(output_index).shape().dim(0) == 1);

  // NOTE Vector element ordering issue arises when H or W is not 1
  assert(_ctx.at(input_index).shape().dim(1) == 1); // H
  assert(_ctx.at(input_index).shape().dim(2) == 1); // W

  // input(4D)'s C * H * W == output(2D)'s W
  assert((_ctx.at(input_index).shape().dim(3) * _ctx.at(input_index).shape().dim(1) *
          _ctx.at(input_index).shape().dim(2)) == _ctx.at(output_index).shape().dim(1));
}

void OperationValidator::visit(const model::operation::SoftmaxNode &node)
{
  (void)node; // NOTE To prevent from unused variable warning in NDEBUG builds

  VERBOSE(Softmax) << "Configure SOFTMAX operation" << std::endl;

  const auto output_index{node.getOutputs().at(0)};
  const auto input_index{node.getInputs().at(0)};

  // Input and output must have the same rank
  assert(_ctx.at(output_index).shape().rank() == _ctx.at(input_index).shape().rank());

  // TODO Support 'feature map' input
  // Only 2D input with batch size 1 is supported; output shape must match input
  assert(_ctx.at(input_index).shape().rank() == 2);
  assert(_ctx.at(input_index).shape().dim(0) == 1);
  assert(_ctx.at(input_index).shape().dim(0) == _ctx.at(output_index).shape().dim(0));
  assert(_ctx.at(input_index).shape().dim(1) == _ctx.at(output_index).shape().dim(1));
}

void OperationValidator::visit(const model::operation::PermuteNode &node)
{
  (void)node; // NOTE To prevent from unused variable warning in NDEBUG builds

  VERBOSE(Permute) << "Configure Permute operation" << std::endl;

  const auto output_index{node.getOutputs().at(0)};
  const auto input_index{node.getInputs().at(0)};

  // Permute only reorders dimensions, so ranks must agree
  assert(_ctx.at(output_index).shape().rank() == _ctx.at(input_index).shape().rank());
}

void OperationValidator::visit(const model::operation::AddNode &)
{
  // DO NOTHING
}

} // namespace compiler
} // namespace neurun