提交 1519b881 编写于 作者: M mindspore-ci-bot 提交者: Gitee

!5772 [MS][LITE][Develop]optimize mode, strip c++

Merge pull request !5772 from chenjianping/lite_dev2
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_
#define MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_
#include <vector>
#include <string>
#include "schema/model_generated.h"
namespace mindspore::lite {
// Non-owning pointers to tensors defined in the flatbuffer schema; the
// pointed-to tensors live in the serialized model buffer.
using TensorPtrVector = std::vector<mindspore::schema::Tensor *>;
// Tensor-index lists (graph/node input and output indices).
using Uint32Vector = std::vector<uint32_t>;
// String type used throughout the lite API.
using String = std::string;
// Node type as declared by the flatbuffer schema.
using NodeType = schema::NodeType;
}  // namespace mindspore::lite
#endif // MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_
......@@ -13,32 +13,30 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INCLUDE_MODEL_H
#define MINDSPORE_LITE_INCLUDE_MODEL_H
#include <string>
#include <vector>
#include <memory>
#include "schema/model_generated.h"
namespace mindspore {
#define MS_API __attribute__((visibility("default")))
namespace lite {
/// \brief ModelImpl defined the implement class of Model in MindSpore Lite.
///
/// \note List public class and interface for reference.
class ModelImpl;
#include "include/lite_utils.h"
/// \brief Primitive defined as prototype of operator.
///
/// \note List public class and interface for reference.
namespace mindspore::lite {
class PrimitiveC;
struct Model {
struct Node {
String name_;
NodeType node_type_;
PrimitiveC *primitive_;
Uint32Vector input_indices_;
Uint32Vector output_indices_;
};
using NodePtrVector = std::vector<Node *>;
String name_;
String version_;
TensorPtrVector all_tensors_;
Uint32Vector input_indices_;
Uint32Vector output_indices_;
NodePtrVector nodes_;
char *buf;
/// \brief Model defined model in MindSpore Lite for managing graph.
class MS_API Model {
public:
/// \brief Static method to create a Model pointer.
///
/// \param[in] model_buf Define the buffer read from a model file.
......@@ -47,65 +45,9 @@ class MS_API Model {
/// \return Pointer of MindSpore Lite Model.
static Model *Import(const char *model_buf, size_t size);
/// \brief Constructor of MindSpore Lite Model using default value for parameters.
///
/// \return Instance of MindSpore Lite Model.
Model() = default;
/// \brief Destructor of MindSpore Lite Model.
virtual ~Model();
/// \brief Get MindSpore Lite Primitive by name.
///
/// \param[in] name Define name of primitive to be returned.
///
/// \return the pointer of MindSpore Lite Primitive.
PrimitiveC *GetOp(const std::string &name) const;
/// \brief Get graph defined in flatbuffers.
///
/// \return the pointer of graph defined in flatbuffers.
const schema::MetaGraph *GetMetaGraph() const;
/// \brief Free MetaGraph in MindSpore Lite Model.
void FreeMetaGraph();
ModelImpl *model_impl() {return model_impl_;}
protected:
ModelImpl *model_impl_ = nullptr;
};
/// \brief ModelBuilder defined by MindSpore Lite.
class MS_API ModelBuilder {
public:
/// \brief OutEdge defined by MindSpore Lite.
struct OutEdge {
std::string nodeId; /**< ID of a node linked by this edge */
size_t outEdgeIndex; /**< Index of this edge */
};
/// \brief Constructor of MindSpore Lite Model using default value for parameters.
///
/// \return Instance of MindSpore Lite ModelBuilder.
ModelBuilder() = default;
/// \brief Destructor of MindSpore Lite ModelBuilder.
virtual ~ModelBuilder() = default;
/// \brief Add primitive into model builder for model building.
///
/// \param[in] op Define the primitive to be added.
/// \param[in] inputs Define input edge of primitive to be added.
///
/// \return ID of the added primitive.
virtual std::string AddOp(const PrimitiveC &op, const std::vector<OutEdge> &inputs) = 0;
/// \brief Finish constructing the model.
///
/// \return the pointer of MindSpore Lite Model.
virtual Model *Construct();
/// \brief Free all the temporary buffer
void Free();
};
} // namespace lite
} // namespace mindspore
} // namespace mindspore::lite
#endif // MINDSPORE_LITE_INCLUDE_MODEL_H
......@@ -24,15 +24,15 @@
namespace mindspore {
namespace lite {
std::vector<size_t> GetGraphInputNodes(const schema::MetaGraph *meta_graph) {
MS_ASSERT(nullptr != meta_graph);
std::vector<size_t> GetGraphInputNodes(const lite::Model *model) {
MS_ASSERT(model != nullptr);
std::vector<size_t> ret;
for (auto graph_in_index : *(meta_graph->inputIndex())) {
for (size_t j = 0; j < meta_graph->nodes()->size(); j++) {
auto *cNode = meta_graph->nodes()->GetAs<schema::CNode>(j);
MS_ASSERT(nullptr != cNode);
MS_ASSERT(nullptr != cNode->inputIndex());
if (std::any_of(cNode->inputIndex()->begin(), cNode->inputIndex()->end(),
for (auto graph_in_index : model->input_indices_) {
auto node_size = model->nodes_.size();
for (size_t j = 0; j < node_size; ++j) {
auto node = model->nodes_[j];
MS_ASSERT(node != nullptr);
if (std::any_of(node->input_indices_.begin(), node->input_indices_.end(),
[&](const uint32_t &node_in_index) { return node_in_index == graph_in_index; })) {
if (!IsContain<size_t>(ret, j)) {
ret.emplace_back(j);
......@@ -43,15 +43,15 @@ std::vector<size_t> GetGraphInputNodes(const schema::MetaGraph *meta_graph) {
return ret;
}
std::vector<size_t> GetGraphOutputNodes(const schema::MetaGraph *meta_graph) {
MS_ASSERT(nullptr != meta_graph);
std::vector<size_t> GetGraphOutputNodes(const lite::Model *model) {
MS_ASSERT(model != nullptr);
std::vector<size_t> ret;
for (auto graph_out_index : *(meta_graph->outputIndex())) {
for (size_t j = 0; j < meta_graph->nodes()->size(); j++) {
auto *cNode = meta_graph->nodes()->GetAs<schema::CNode>(j);
MS_ASSERT(nullptr != cNode);
MS_ASSERT(nullptr != cNode->outputIndex());
if (std::any_of(cNode->outputIndex()->begin(), cNode->outputIndex()->end(),
for (auto graph_out_index : model->output_indices_) {
auto node_size = model->nodes_.size();
for (size_t j = 0; j < node_size; ++j) {
auto node = model->nodes_[j];
MS_ASSERT(node != nullptr);
if (std::any_of(node->output_indices_.begin(), node->output_indices_.end(),
[&](const uint32_t &node_out_index) { return node_out_index == graph_out_index; })) {
if (!IsContain<size_t>(ret, j)) {
ret.emplace_back(j);
......@@ -62,15 +62,17 @@ std::vector<size_t> GetGraphOutputNodes(const schema::MetaGraph *meta_graph) {
return ret;
}
std::vector<size_t> GetLinkedPostNodeIdx(const schema::MetaGraph &graph, const size_t &tensor_idx) {
std::vector<size_t> GetLinkedPostNodeIdx(const lite::Model *model, const size_t tensor_idx) {
MS_ASSERT(model != nullptr);
std::vector<size_t> post_node_idxes;
for (size_t i = 0; i < graph.nodes()->size(); i++) {
auto node = graph.nodes()->GetAs<schema::CNode>(i);
auto nodes_size = model->nodes_.size();
for (size_t i = 0; i < nodes_size; ++i) {
auto node = model->nodes_[i];
if (node == nullptr) {
continue;
}
auto node_input_idxes = node->inputIndex();
auto is_contain = std::any_of(node_input_idxes->begin(), node_input_idxes->end(),
auto is_contain = std::any_of(node->input_indices_.begin(), node->input_indices_.end(),
[&](const uint32_t &node_input_idx) { return node_input_idx == tensor_idx; });
if (is_contain) {
post_node_idxes.emplace_back(i);
......
......@@ -25,16 +25,17 @@
#include "schema/model_generated.h"
#include "utils//log_adapter.h"
#include "include/errorcode.h"
#include "include/model.h"
namespace mindspore {
namespace lite {
using NODE_ID = std::string;
std::vector<size_t> GetGraphInputNodes(const schema::MetaGraph *meta_graph);
std::vector<size_t> GetGraphInputNodes(const lite::Model *model);
std::vector<size_t> GetGraphOutputNodes(const schema::MetaGraph *meta_graph);
std::vector<size_t> GetGraphOutputNodes(const lite::Model *model);
std::vector<size_t> GetLinkedPostNodeIdx(const schema::MetaGraph &graph, const size_t &tensor_idx);
std::vector<size_t> GetLinkedPostNodeIdx(const lite::Model *model, const size_t tensor_idx);
} // namespace lite
} // namespace mindspore
......
......@@ -32,6 +32,7 @@
namespace mindspore {
namespace lite {
static std::vector<schema::PrimitiveType> packed_op = {
schema::PrimitiveType_Conv2D, schema::PrimitiveType_DeConv2D,
schema::PrimitiveType_DepthwiseConv2D, schema::PrimitiveType_DeDepthwiseConv2D,
......@@ -39,27 +40,23 @@ static std::vector<schema::PrimitiveType> packed_op = {
// this method will not check whether tensor_idx is a weight tensor index, caller should ensure this.
static bool WeightTensorNeedCopy(const lite::Model *model, const uint32_t tensor_idx) {
MS_ASSERT(nullptr != model);
auto meta_graph = model->GetMetaGraph();
MS_ASSERT(nullptr != meta_graph);
auto post_node_idxes = GetLinkedPostNodeIdx(*meta_graph, tensor_idx);
MS_ASSERT(model != nullptr);
auto post_node_idxes = GetLinkedPostNodeIdx(model, tensor_idx);
return std::none_of(post_node_idxes.begin(), post_node_idxes.end(), [&](const size_t &post_node_idx) {
auto cNode = meta_graph->nodes()->GetAs<schema::CNode>(post_node_idx);
MS_ASSERT(cNode != nullptr);
return IsContain(packed_op, cNode->primitive()->value_type());
auto node = model->nodes_[post_node_idx];
MS_ASSERT(node != nullptr);
return IsContain(packed_op, static_cast<schema::PrimitiveType>(node->primitive_->Type()));
});
}
int LiteSession::ConvertTensors(const lite::Model *model) {
MS_ASSERT(nullptr != model);
auto meta_graph = model->GetMetaGraph();
MS_ASSERT(nullptr != meta_graph);
MS_ASSERT(model != nullptr);
copyed_tensor_idxes_.clear();
uint32_t tensorCount = meta_graph->allTensors()->size();
for (uint32_t i = 0; i < tensorCount; i++) {
auto *srcTensor = meta_graph->allTensors()->GetAs<schema::Tensor>(i);
uint32_t tensor_count = model->all_tensors_.size();
for (uint32_t i = 0; i < tensor_count; ++i) {
auto *srcTensor = model->all_tensors_[i];
if (srcTensor == nullptr) {
MS_LOG(ERROR) << i << "th tensor in meta_graph is nullptr";
MS_LOG(ERROR) << i << "th tensor in model is nullptr";
return RET_NULL_PTR;
}
std::vector<int> shape;
......@@ -115,11 +112,9 @@ int LiteSession::ConvertTensors(const lite::Model *model) {
void LiteSession::InitGraphInputTensors(const lite::Model *model) {
MS_ASSERT(model != nullptr);
auto meta_graph = model->GetMetaGraph();
MS_ASSERT(this->inputs_.empty());
MS_ASSERT(meta_graph != nullptr);
for (size_t i = 0; i < meta_graph->inputIndex()->size(); i++) {
auto in_tensor_idx = size_t(meta_graph->inputIndex()->GetAs<uint32_t>(i));
auto graph_in_size = model->input_indices_.size();
for (size_t i = 0; i < graph_in_size; ++i) {
auto in_tensor_idx = model->input_indices_[i];
MS_ASSERT(in_tensor_idx < this->tensors_.size());
auto *in_tensor = this->tensors_.at(in_tensor_idx);
MS_ASSERT(in_tensor != nullptr);
......@@ -137,11 +132,11 @@ void LiteSession::InitGraphInputMSTensors() {
void LiteSession::InitGraphOutputTensors(const lite::Model *model) {
MS_ASSERT(model != nullptr);
auto meta_graph = model->GetMetaGraph();
MS_ASSERT(this->outputs_.empty());
MS_ASSERT(meta_graph != nullptr);
for (size_t i = 0; i < meta_graph->outputIndex()->size(); i++) {
auto out_tensor_idx = size_t(meta_graph->outputIndex()->GetAs<uint32_t>(i));
auto graph_out_size = model->output_indices_.size();
for (size_t i = 0; i < graph_out_size; ++i) {
auto out_tensor_idx = model->output_indices_[i];
MS_ASSERT(out_tensor_idx < this->tensors_.size());
auto *out_tensor = this->tensors_.at(out_tensor_idx);
MS_ASSERT(out_tensor != nullptr);
......@@ -151,19 +146,19 @@ void LiteSession::InitGraphOutputTensors(const lite::Model *model) {
void LiteSession::InitGraphInputMap(const lite::Model *model) {
MS_ASSERT(model != nullptr);
auto meta_graph = model->GetMetaGraph();
MS_ASSERT(this->input_map_.empty());
MS_ASSERT(meta_graph != nullptr);
auto graph_input_node_indexes = GetGraphInputNodes(meta_graph);
auto graph_input_node_indexes = GetGraphInputNodes(model);
auto graph_in_size = model->input_indices_.size();
for (auto in_node_index : graph_input_node_indexes) {
auto *in_node = meta_graph->nodes()->GetAs<schema::CNode>(in_node_index);
MS_ASSERT(nullptr != in_node);
auto in_node = model->nodes_[in_node_index];
MS_ASSERT(in_node != nullptr);
MS_ASSERT(this->input_map_.find(in_node->name()->str()) == this->input_map_.end());
for (size_t i = 0; i < in_node->inputIndex()->size(); i++) {
auto in_tensor_index = size_t(in_node->inputIndex()->GetAs<uint32_t>(i));
auto in_size = in_node->input_indices_.size();
for (size_t i = 0; i < in_size; ++i) {
auto in_tensor_index = size_t(in_node->input_indices_[i]);
bool is_graph_input = false;
for (size_t j = 0; j < meta_graph->inputIndex()->size(); j++) {
if (in_tensor_index == size_t(meta_graph->inputIndex()->GetAs<uint32_t>(j))) {
for (size_t j = 0; j < graph_in_size; ++j) {
if (in_tensor_index == model->input_indices_[j]) {
is_graph_input = true;
break;
}
......@@ -174,28 +169,30 @@ void LiteSession::InitGraphInputMap(const lite::Model *model) {
MS_ASSERT(in_tensor_index < this->tensors_.size());
auto *in_tensor = this->tensors_.at(in_tensor_index);
MS_ASSERT(in_tensor != nullptr);
auto *ms_tensor = new tensor::LiteTensor(in_tensor);
MS_ASSERT(nullptr != ms_tensor);
this->input_map_[in_node->name()->str()].emplace_back(ms_tensor);
auto *ms_tensor = new (std::nothrow) tensor::LiteTensor(in_tensor);
if (ms_tensor == nullptr) {
MS_LOG(ERROR) << "new lite tensor fail!";
return;
}
this->input_map_[in_node->name_].emplace_back(ms_tensor);
}
}
}
void LiteSession::InitGraphOutputNodeMap(const lite::Model *model) {
MS_ASSERT(model != nullptr);
auto meta_graph = model->GetMetaGraph();
MS_ASSERT(this->output_node_map_.empty());
MS_ASSERT(meta_graph != nullptr);
auto graph_output_node_indexes = GetGraphOutputNodes(meta_graph);
auto graph_output_node_indexes = GetGraphOutputNodes(model);
auto graph_out_size = model->output_indices_.size();
for (auto out_node_index : graph_output_node_indexes) {
auto *out_node = meta_graph->nodes()->GetAs<schema::CNode>(out_node_index);
MS_ASSERT(nullptr != out_node);
auto out_node = model->nodes_[out_node_index];
MS_ASSERT(out_node != nullptr);
MS_ASSERT(this->output_map_.find(out_node->name()->str()) == this->output_map_.end());
for (size_t i = 0; i < out_node->outputIndex()->size(); i++) {
auto out_tensor_index = size_t(out_node->outputIndex()->GetAs<uint32_t>(i));
auto out_size = out_node->output_indices_.size();
for (size_t i = 0; i < out_size; ++i) {
auto out_tensor_index = out_node->output_indices_[i];
bool is_graph_output = false;
for (size_t j = 0; j < meta_graph->outputIndex()->size(); j++) {
if (out_tensor_index == size_t(meta_graph->outputIndex()->GetAs<uint32_t>(j))) {
for (size_t j = 0; j < graph_out_size; ++j) {
if (out_tensor_index == model->output_indices_[j]) {
is_graph_output = true;
break;
}
......@@ -206,34 +203,39 @@ void LiteSession::InitGraphOutputNodeMap(const lite::Model *model) {
MS_ASSERT(out_tensor_index < this->tensors_.size());
auto *out_tensor = this->tensors_.at(out_tensor_index);
MS_ASSERT(out_tensor != nullptr);
auto *ms_tensor = new tensor::LiteTensor(out_tensor);
MS_ASSERT(nullptr != ms_tensor);
this->output_node_map_[out_node->name()->str()].emplace_back(ms_tensor);
auto *ms_tensor = new (std::nothrow) tensor::LiteTensor(out_tensor);
if (ms_tensor == nullptr) {
MS_LOG(ERROR) << "new lite tensor fail!";
return;
}
this->output_node_map_[out_node->name_].emplace_back(ms_tensor);
}
}
}
void LiteSession::InitGraphOutputTensorNames(const lite::Model *model) {
MS_ASSERT(model != nullptr);
auto meta_graph = model->GetMetaGraph();
MS_ASSERT(this->output_tensor_names_.empty());
MS_ASSERT(meta_graph != nullptr);
for (auto output_index : *meta_graph->outputIndex()) {
this->output_tensor_names_.emplace_back(std::to_string(output_index));
auto out_size = model->output_indices_.size();
for (size_t i = 0; i < out_size; ++i) {
this->output_tensor_names_.emplace_back(std::to_string(model->output_indices_[i]));
}
}
void LiteSession::InitGraphOutputTensorMap(const lite::Model *model) {
MS_ASSERT(model != nullptr);
auto meta_graph = model->GetMetaGraph();
MS_ASSERT(this->output_tensor_map_.empty());
MS_ASSERT(meta_graph != nullptr);
for (auto graph_out_index : *(meta_graph->outputIndex())) {
auto graph_out_size = model->output_indices_.size();
for (size_t i = 0; i < graph_out_size; ++i) {
size_t graph_out_index = model->output_indices_[i];
MS_ASSERT(graph_out_index < this->tensors_.size());
auto *out_tensor = this->tensors_.at(graph_out_index);
MS_ASSERT(out_tensor != nullptr);
auto *ms_tensor = new tensor::LiteTensor(out_tensor);
MS_ASSERT(nullptr != ms_tensor);
auto *ms_tensor = new (std::nothrow) tensor::LiteTensor(out_tensor);
if (ms_tensor == nullptr) {
MS_LOG(ERROR) << "new lite tensor fail!";
return;
}
this->output_tensor_map_.insert(std::make_pair(std::to_string(graph_out_index), ms_tensor));
}
}
......@@ -272,7 +274,7 @@ int LiteSession::CompileGraph(Model *model) {
}
executor->Prepare(this->kernels_);
model->FreeMetaGraph();
model->Free();
return RET_OK;
}
......
......@@ -13,34 +13,59 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/ops/primitive_c.h"
#include "include/model.h"
#include "utils/log_adapter.h"
#include "include/errorcode.h"
#include "src/common/graph_util.h"
namespace mindspore::lite {
class ModelImpl {
public:
static ModelImpl *Import(const char *model_buf, size_t size);
ModelImpl() = default;
explicit ModelImpl(const char *model_buf, size_t size) : model_buf_(model_buf), buf_size_(size) {
meta_graph_ = schema::GetMetaGraph(model_buf);
namespace {
bool ConvertNodes(const schema::MetaGraph *meta_graph, Model *model) {
for (size_t i = 0; i < meta_graph->nodes()->size(); ++i) {
Model::Node *node = new (std::nothrow) Model::Node();
if (node == nullptr) {
MS_LOG(ERROR) << "new node fail!";
return false;
}
auto c_node = meta_graph->nodes()->GetAs<schema::CNode>(i);
auto src_prim = c_node->primitive();
node->primitive_ = PrimitiveC::UnPackFromSchemaPrimitive(const_cast<schema::Primitive *>(src_prim));
if (node->primitive_ == nullptr) {
MS_LOG(ERROR) << "unpack primitive == nullptr!";
return false;
}
node->primitive_->SetQuantType(c_node->quantType());
node->name_ = c_node->name()->c_str();
node->node_type_ = c_node->nodeType();
auto count = c_node->inputIndex()->size();
for (uint32_t j = 0; j < count; ++j) {
node->input_indices_.push_back(size_t(c_node->inputIndex()->GetAs<uint32_t>(j)));
}
count = c_node->outputIndex()->size();
for (uint32_t j = 0; j < count; ++j) {
node->output_indices_.push_back(size_t(c_node->outputIndex()->GetAs<uint32_t>(j)));
}
model->nodes_.push_back(node);
}
virtual ~ModelImpl();
PrimitiveC *GetOp(const std::string &name) const;
const schema::MetaGraph *meta_graph() const;
void FreeMetaGraph();
int BuildOps();
return true;
}
protected:
const char *model_buf_;
size_t buf_size_;
const schema::MetaGraph *meta_graph_ = nullptr;
std::map<std::string, PrimitiveC *> ops_;
};
// Copies the tensor pointers from the flatbuffer meta graph into
// model->all_tensors_. Ownership is NOT transferred: the pointers reference
// data inside the serialized buffer held by the model (presumably model->buf
// — confirm against the caller that parsed the flatbuffer).
//
// \param[in] meta_graph Parsed flatbuffer graph; caller ensures non-null.
// \param[out] model Destination model; caller ensures non-null.
// \return true on success, false if any tensor entry is nullptr.
bool ConvertTensors(const schema::MetaGraph *meta_graph, Model *model) {
  auto tensor_count = meta_graph->allTensors()->size();
  // Reserve once up front to avoid repeated reallocation while pushing.
  model->all_tensors_.reserve(tensor_count);
  for (uint32_t i = 0; i < tensor_count; ++i) {
    auto *tensor = meta_graph->allTensors()->GetAs<schema::Tensor>(i);
    if (tensor == nullptr) {
      MS_LOG(ERROR) << i << "th tensor in model is nullptr";
      return false;
    }
    model->all_tensors_.push_back(const_cast<mindspore::schema::Tensor *>(tensor));
  }
  return true;
}
} // namespace
ModelImpl *ModelImpl::Import(const char *model_buf, size_t size) {
Model *Model::Import(const char *model_buf, size_t size) {
if (model_buf == nullptr) {
MS_LOG(ERROR) << "The model buf is nullptr";
return nullptr;
......@@ -50,95 +75,61 @@ ModelImpl *ModelImpl::Import(const char *model_buf, size_t size) {
MS_LOG(ERROR) << "The buffer is invalid and fail to create graph.";
return nullptr;
}
auto *inner_model_buf = new (std::nothrow) char[size];
if (inner_model_buf == nullptr) {
MS_LOG(ERROR) << "new model buf fail.";
return nullptr;
}
memcpy(inner_model_buf, model_buf, size);
auto model = new (std::nothrow) ModelImpl(inner_model_buf, size);
Model *model = new (std::nothrow) Model();
if (model == nullptr) {
MS_LOG(ERROR) << "Create modelImpl failed";
MS_LOG(ERROR) << "new model fail!";
return nullptr;
}
auto ret = model->BuildOps();
if (0 != ret) {
MS_LOG(ERROR) << "BuildOps failed";
model->buf = reinterpret_cast<char *>(malloc(size));
if (model->buf == nullptr) {
MS_LOG(ERROR) << "new inner model buf fail!";
return nullptr;
}
return model;
}
PrimitiveC *ModelImpl::GetOp(const std::string &name) const {
auto iter = ops_.find(name);
if (iter == ops_.end()) {
memcpy(model->buf, model_buf, size);
auto meta_graph = schema::GetMetaGraph(model->buf);
if (meta_graph == nullptr) {
MS_LOG(ERROR) << "meta_graph is nullptr!";
return nullptr;
} else {
return iter->second;
}
}
ModelImpl::~ModelImpl() {
delete[](this->model_buf_);
for (auto iter : ops_) {
delete (iter.second);
if (meta_graph->name() != nullptr) {
model->name_ = meta_graph->name()->c_str();
}
ops_.clear();
}
void ModelImpl::FreeMetaGraph() {
delete[](this->model_buf_);
model_buf_ = nullptr;
}
const schema::MetaGraph *ModelImpl::meta_graph() const { return this->meta_graph_; }
int ModelImpl::BuildOps() {
if (this->meta_graph_ == nullptr) {
MS_LOG(ERROR) << "mete_graph is nullptr";
return -1;
if (meta_graph->version() != nullptr) {
model->version_ = meta_graph->version()->c_str();
}
MS_ASSERT(nullptr != meta_graph_->nodes());
for (size_t i = 0; i < meta_graph_->nodes()->size(); i++) {
auto cNode = meta_graph_->nodes()->GetAs<schema::CNode>(i);
auto name = cNode->name()->str();
auto srcPrim = cNode->primitive();
auto prim = PrimitiveC::UnPackFromSchemaPrimitive(const_cast<schema::Primitive *>(srcPrim));
prim->SetQuantType(cNode->quantType());
this->ops_[name] = prim;
auto in_count = meta_graph->inputIndex()->size();
for (uint32_t i = 0; i < in_count; ++i) {
model->input_indices_.push_back(size_t(meta_graph->inputIndex()->GetAs<uint32_t>(i)));
}
return 0;
}
Model *Model::Import(const char *model_buf, size_t size) {
auto model = new Model();
model->model_impl_ = ModelImpl::Import(model_buf, size);
if (model_buf == nullptr) {
MS_LOG(ERROR) << "model buf is null";
auto out_count = meta_graph->outputIndex()->size();
for (uint32_t i = 0; i < out_count; ++i) {
model->output_indices_.push_back(size_t(meta_graph->outputIndex()->GetAs<uint32_t>(i)));
}
if (!ConvertNodes(meta_graph, model)) {
delete model;
return nullptr;
}
if (model->model_impl_ == nullptr) {
MS_LOG(ERROR) << "model impl is null";
if (!ConvertTensors(meta_graph, model)) {
delete model;
return nullptr;
}
return model;
}
Model::~Model() { delete (this->model_impl_); }
mindspore::lite::PrimitiveC *Model::GetOp(const std::string &name) const {
MS_ASSERT(nullptr != model_impl_);
return const_cast<PrimitiveC *>(model_impl_->GetOp(name));
}
void Model::FreeMetaGraph() {
MS_ASSERT(nullptr != model_impl_);
model_impl_->FreeMetaGraph();
}
const schema::MetaGraph *Model::GetMetaGraph() const {
MS_ASSERT(nullptr != model_impl_);
return model_impl_->meta_graph();
// Releases the resources owned directly by the model: the serialized model
// buffer allocated with malloc in Import(), and every Node in nodes_.
// Safe to call repeatedly — buf is reset to nullptr and nodes_ is cleared.
// NOTE(review): node->primitive_ is not deleted here; confirm its ownership
// with PrimitiveC's lifetime rules.
void Model::Free() {
  if (this->buf != nullptr) {
    free(this->buf);
    this->buf = nullptr;
  }
  for (auto *node : this->nodes_) {
    MS_ASSERT(node != nullptr);
    delete node;
  }
  this->nodes_.clear();
}
} // namespace mindspore::lite
......@@ -79,43 +79,42 @@ int Scheduler::ReSizeKernels(const std::vector<kernel::LiteKernel *> &kernels) {
}
int Scheduler::InferShape(const lite::Model *model, std::vector<tensor::Tensor *> *tensors) {
MS_ASSERT(nullptr != model);
MS_ASSERT(nullptr != tensors);
auto meta_graph = model->GetMetaGraph();
MS_ASSERT(nullptr != meta_graph);
MS_ASSERT(model != nullptr);
MS_ASSERT(tensors != nullptr);
bool infer_shape_interrupt = false;
uint32_t kernelCount = meta_graph->nodes()->size();
for (uint32_t i = 0; i < kernelCount; i++) {
auto cNode = meta_graph->nodes()->GetAs<schema::CNode>(i);
uint32_t kernelCount = model->nodes_.size();
for (uint32_t i = 0; i < kernelCount; ++i) {
auto node = model->nodes_[i];
MS_ASSERT(node != nullptr);
std::vector<tensor::Tensor *> inputs;
std::vector<tensor::Tensor *> outputs;
auto inIndexes = cNode->inputIndex();
for (size_t j = 0; j < inIndexes->size(); j++) {
inputs.emplace_back(tensors->at(size_t(inIndexes->GetAs<uint32_t>(j))));
auto in_size = node->input_indices_.size();
for (size_t j = 0; j < in_size; ++j) {
inputs.emplace_back(tensors->at(node->input_indices_[j]));
}
auto outIndexes = cNode->outputIndex();
if (outIndexes != nullptr) {
for (size_t j = 0; j < outIndexes->size(); j++) {
outputs.emplace_back(tensors->at(size_t(outIndexes->GetAs<uint32_t>(j))));
}
auto out_size = node->output_indices_.size();
for (size_t j = 0; j < out_size; ++j) {
outputs.emplace_back(tensors->at(node->output_indices_[j]));
}
auto *primitive = model->GetOp(cNode->name()->str());
auto *primitive = node->primitive_;
if (primitive == nullptr) {
MS_LOG(ERROR) << "Op " << cNode->name()->str() << " should exist in model, type: "
<< schema::EnumNamePrimitiveType(cNode->primitive()->value_type());
MS_LOG(ERROR) << "Op " << node->name_ << " should exist in model!";
return RET_ERROR;
}
primitive->SetInferFlag(!infer_shape_interrupt);
auto ret = primitive->InferShape(inputs, outputs);
if (ret == RET_INFER_INVALID) {
MS_LOG(INFO) << "InferShape shouldn't be done before runtime, name: " << cNode->name()->str()
<< ", type: " << schema::EnumNamePrimitiveType(cNode->primitive()->value_type())
MS_LOG(INFO) << "InferShape shouldn't be done before runtime, name: " << node->name_
<< ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type()))
<< "flag set to false.";
primitive->SetInferFlag(false);
infer_shape_interrupt = true;
} else if (ret != RET_OK) {
MS_LOG(ERROR) << "InferShape failed, name: " << cNode->name()->str()
<< ", type: " << schema::EnumNamePrimitiveType(cNode->primitive()->value_type());
MS_LOG(ERROR) << "InferShape failed, name: " << node->name_
<< ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type()));
return RET_INFER_ERR;
}
}
......@@ -125,33 +124,34 @@ int Scheduler::InferShape(const lite::Model *model, std::vector<tensor::Tensor *
int Scheduler::InitOp2Kernel(const lite::Model *model, std::vector<tensor::Tensor *> *tensors,
std::vector<kernel::LiteKernel *> *kernels) {
MS_ASSERT(nullptr != model);
MS_ASSERT(nullptr != tensors);
auto meta_graph = model->GetMetaGraph();
MS_ASSERT(nullptr != meta_graph);
uint32_t kernelCount = meta_graph->nodes()->size();
auto graph_output_node_indexes = GetGraphOutputNodes(meta_graph);
for (uint32_t i = 0; i < kernelCount; i++) {
auto cNode = meta_graph->nodes()->GetAs<schema::CNode>(i);
MS_ASSERT(model != nullptr);
MS_ASSERT(tensors != nullptr);
uint32_t kernelCount = model->nodes_.size();
auto graph_output_node_indexes = GetGraphOutputNodes(model);
for (uint32_t i = 0; i < kernelCount; ++i) {
auto node = model->nodes_[i];
MS_ASSERT(node != nullptr);
std::vector<tensor::Tensor *> inputs;
std::vector<tensor::Tensor *> outputs;
auto inIndexes = cNode->inputIndex();
for (size_t j = 0; j < inIndexes->size(); j++) {
inputs.emplace_back(tensors->at(size_t(inIndexes->GetAs<uint32_t>(j))));
auto in_size = node->input_indices_.size();
for (size_t j = 0; j < in_size; ++j) {
inputs.emplace_back(tensors->at(node->input_indices_[j]));
}
auto outIndexes = cNode->outputIndex();
for (size_t j = 0; j < outIndexes->size(); j++) {
outputs.emplace_back(tensors->at(size_t(outIndexes->GetAs<uint32_t>(j))));
auto out_size = node->output_indices_.size();
for (size_t j = 0; j < out_size; ++j) {
outputs.emplace_back(tensors->at(node->output_indices_[j]));
}
auto *primitive = model->GetOp(cNode->name()->str());
auto *kernel = this->ScheduleNode(inputs, outputs, primitive, cNode);
if (nullptr == kernel) {
MS_LOG(ERROR) << "ScheduleNode return nullptr, name: " << cNode->name()->str()
<< ", type: " << schema::EnumNamePrimitiveType(cNode->primitive()->value_type());
auto *primitive = node->primitive_;
MS_ASSERT(primitive != nullptr);
auto *kernel = this->ScheduleNode(inputs, outputs, primitive, node);
if (kernel == nullptr) {
MS_LOG(ERROR) << "ScheduleNode return nullptr, name: " << node->name_
<< ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type()));
return RET_ERROR;
}
SetKernelTensorDataType(kernel);
kernel->set_name(cNode->name()->str());
kernel->set_name(node->name_);
kernel->set_is_model_output(IsContain(graph_output_node_indexes, size_t(i)));
kernels->emplace_back(kernel);
}
......@@ -231,20 +231,20 @@ kernel::LiteKernel *Scheduler::CreateSubKernel(const std::vector<kernel::LiteKer
kernel::LiteKernel *Scheduler::ScheduleNode(const std::vector<tensor::Tensor *> &in_tensors,
const std::vector<tensor::Tensor *> &out_tensors,
const mindspore::lite::PrimitiveC *primitive, const schema::CNode *cnode) {
MS_ASSERT(nullptr != primitive);
const mindspore::lite::PrimitiveC *primitive, const Model::Node *node) {
MS_ASSERT(primitive != nullptr);
TypeId data_type = GetFirstFp32Fp16OrInt8Type(in_tensors);
kernel::KernelKey desc{kernel::KERNEL_ARCH::kCPU, data_type, static_cast<schema::PrimitiveType>(primitive->Type())};
if (context_->device_ctx_.type == DT_GPU) {
desc.arch = kernel::KERNEL_ARCH::kGPU;
auto *kernel = KernelRegistry::GetInstance()->GetKernel(in_tensors, out_tensors, primitive, context_, desc);
if (nullptr != kernel) {
if (kernel != nullptr) {
kernel->set_desc(desc);
return kernel;
} else {
MS_LOG(ERROR) << "Not supported GPU Op "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type())) << " "
<< (cnode->name()->str());
<< node->name_;
}
}
......
......@@ -36,7 +36,7 @@ class Scheduler {
kernel::LiteKernel *ScheduleNode(const std::vector<tensor::Tensor *> &in_tensors,
const std::vector<tensor::Tensor *> &out_tensors,
const mindspore::lite::PrimitiveC *primitive,
const schema::CNode *cnode);
const Model::Node *cnode);
private:
int InitOp2Kernel(const lite::Model *model, std::vector<tensor::Tensor *> *tensors,
......
......@@ -494,10 +494,11 @@ while read line; do
continue
fi
echo ${model_name} >> "${run_benchmark_log_file}"
echo 'convert mode name: '${model_name}' begin.'
echo './converter_lite --fmk=TFLITE --modelFile='${models_path}'/'${model_name}' --outputFile='${ms_models_path}'/'${model_name}_posttraining' --quantType=PostTraining --config_file='${models_path}'/'${model_name}'_posttraining.config' >> "${run_benchmark_log_file}"
./converter_lite --fmk=TFLITE --modelFile=$models_path/${model_name} --outputFile=${ms_models_path}/${model_name}_posttraining --quantType=PostTraining --config_file=${models_path}/${model_name}_posttraining.config || Convert_status=$?
done < ${models_tflite_posttraining_config}
# Convert TFLite AwareTraining models:
while read line; do
model_name=${line}
......
......@@ -353,7 +353,7 @@ int Benchmark::RunBenchmark(const std::string &deviceType) {
return RET_ERROR;
}
auto model = lite::Model::Import(graphBuf, size);
auto model_version = model->GetMetaGraph()->version()->str();
auto model_version = model->version_;
if (model_version != Version()) {
MS_LOG(WARNING) << "model version is "<< model_version << ", inference version is " << Version() << " not equal";
}
......
......@@ -324,7 +324,6 @@ int TimeProfile::RunTimeProfile() {
MS_LOG(ERROR) << "Import model file failed while running " << modelName.c_str();
std::cerr << "Import model file failed while running " << modelName.c_str() << std::endl;
delete session_;
delete model;
return RET_ERROR;
}
auto ret = session_->CompileGraph(model);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册