Commit 9934d066 authored by Y yoni

tod fix compilation issues

Parent eface185
...
@@ -23,7 +23,7 @@
 namespace mindspore {
 namespace lite {
-class Model;
+struct Model;
 }
 namespace lite::tensor {
 class Tensor;
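
Note on the hunk above: the class-to-struct change fixes a forward declaration whose class-key disagreed with the type's definition. A minimal sketch of the failure mode, assuming lite::Model is defined as a struct elsewhere in the project: Clang and recent GCC diagnose the mismatch under -Wmismatched-tags (a hard error with -Werror), and MSVC even encodes the tag in its name mangling, so the mismatch can break builds.

    // Hypothetical repro: forward declaration tag disagrees with the definition.
    class Model;    // wrong class-key in the forward declaration

    struct Model {  // the actual definition
      int version;
    };
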
...
@@ -66,7 +66,7 @@ int ApplyMomentumCPUKernel::Init() {
   // Only for test with uninitialized Data
   size_t elem_num = in_tensors_[0]->ElementsNum();
   auto accumulate = reinterpret_cast<float *>(in_tensors_[1]->Data());
-  for (int i =0; i < elem_num; i++) accumulate[i] = 0.0;
+  for (size_t i =0; i < elem_num; i++) accumulate[i] = 0.0;
   workspace = new float[elem_num];
   return 0;
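
Note: switching the counter from int to size_t removes a signed/unsigned comparison. elem_num is a size_t, so `i < elem_num` converts the signed i to unsigned, which -Wsign-compare reports and -Werror promotes to a build failure. A minimal sketch with hypothetical names (not the kernel code):

    // With -Werror=sign-compare, the int counter fails to compile.
    #include <cstddef>

    void zero_fill(float *buf, size_t elem_num) {
      // for (int i = 0; i < elem_num; i++)    // warns: comparison of integers
      //                                       // of different signedness
      for (size_t i = 0; i < elem_num; i++) {  // counter type matches the bound
        buf[i] = 0.0f;
      }
    }
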
...
@@ -27,10 +27,6 @@ using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
 namespace mindspore::kernel {
-namespace {
-constexpr int kArithGradOpInputNum = 3;
-constexpr int kArithGradOpOutputNum = 2;
-}  // namespace
 int ArithmeticGradCPUKernel::Init() {
   auto dx1 = out_tensors_[0];
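
Note: the deleted block drops two constants that were no longer referenced. A constexpr in an anonymous namespace has internal linkage, so when nothing uses it the compiler can prove it dead and, with -Wunused-const-variable enabled (plus -Werror), the build breaks. A tiny repro under that assumption:

    // Unused internal-linkage constant: -Wunused-const-variable territory.
    namespace {
    constexpr int kArithGradOpInputNum = 3;  // never referenced after refactor
    }  // namespace

    int main() { return 0; }
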
...
@@ -37,9 +37,10 @@ int ConvolutionGradFilterCPUKernel::Init() {
   MS_ASSERT(x_tensor != nullptr);
   auto *dy_tensor = in_tensors_.at(0);
   MS_ASSERT(dy_tensor != nullptr);
+#if 0
   auto *weight_tensor = out_tensors_.at(0);
   MS_ASSERT(weight_tensor != nullptr);
+#endif
   auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter_);
   conv_param->output_batch_ = dy_tensor->shape().at(kNHWC_N);
   conv_param->input_batch_ = x_tensor->shape().at(kNHWC_N);
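
Note: wrapping weight_tensor in #if 0 most likely sidesteps an unused-variable diagnostic: when asserts compile to nothing in release builds (the usual behavior of macros like MS_ASSERT), the variable's only use disappears and -Werror=unused-variable stops the build. A sketch of the same situation with the standard assert, plus two common alternatives to #if 0:

    #include <cassert>

    void init_weights(float *out) {
      // With NDEBUG defined, assert() vanishes and 'weight' would be unused.
      [[maybe_unused]] float *weight = out;  // C++17 attribute silences the warning
      assert(weight != nullptr);
      // (void)weight;                       // pre-C++17 equivalent
    }
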
...
@@ -138,7 +138,7 @@ int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Init() {
   sm_params_.n_dim_ = 2;
   sm_params_.element_size_ = data_size;
   sm_params_.axis_ = 1;
-  for (int i = 0; i < dims.size(); i++) sm_params_.input_shape_[i] = dims[i];
+  for (size_t i = 0; i < dims.size(); i++) sm_params_.input_shape_[i] = dims[i];
   return RET_OK;
 }
...
@@ -92,7 +92,6 @@ int Scheduler::InferShape(const lite::Model *model, std::vector<tensor::Tensor *
     for (size_t j = 0; j < in_size; ++j) {
       inputs.emplace_back(tensors->at(node->input_indices_[j]));
     }
-
     auto out_size = node->output_indices_.size();
     for (size_t j = 0; j < out_size; ++j) {
       outputs.emplace_back(tensors->at(node->output_indices_[j]));
...
@@ -16,6 +16,7 @@
 #include <iostream>
 #include <memory>
 #include <vector>
+#include <algorithm>
 #include "utils/log_adapter.h"
 #include "common/common_test.h"
...
@@ -295,7 +296,8 @@ TEST_F(TestActGradFp32, hswishGradFp32) {
   printf("single thread running time : %f ms\n", time_avg / 1000.0f);
   printf("==================output data=================\n");
-  for (int i = 0; i < std::min(output_data_size, 20UL); i++) {
+  size_t min = (output_data_size < 20UL) ? output_data_size : 20UL;
+  for (size_t i = 0; i < min; i++) {
     std::cout << output_data[i] << " ,";
   }
   std::cout << std::endl;
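
Note: replacing std::min(output_data_size, 20UL) with a ternary avoids a template deduction failure. std::min deduces a single type T from both arguments, and on targets where size_t is not unsigned long (32-bit ARM, 64-bit Windows) the two deductions conflict and the call does not compile. A sketch of the failure and two possible fixes:

    #include <algorithm>
    #include <cstddef>

    size_t clamp_to_20(size_t n) {
      // return std::min(n, 20UL);         // breaks where size_t != unsigned long
      return std::min<size_t>(n, 20);      // explicit T sidesteps deduction
      // or: return (n < 20UL) ? n : 20UL; // the ternary used in the hunk above
    }
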
...
@@ -181,7 +181,6 @@ TEST_F(NetworkTest, tuning_layer) {
   }
   meta_graph->inputIndex = {6, 0};  // XXX TODO why is it reverse?
   meta_graph->outputIndex = {5, 14};
-  const int NUM_OF_OUTPUTS = 2;
   auto input0 = std::make_unique<schema::TensorT>();
   input0->nodeType = schema::NodeType::NodeType_ValueNode;
...
@@ -452,7 +451,7 @@ int32_t fileIterator(mindspore::session::TrainSession *session, const std::strin
   int32_t res = 0;
   if (auto dir = opendir(path.c_str())) {
     while (auto f = readdir(dir)) {
-      if (!f->d_name || f->d_name[0] == '.') continue;
+      if (f->d_name[0] == '.') continue;
       if (f->d_type == DT_DIR) fileIterator(session, path + f->d_name + "/", cb);
       if (f->d_type == DT_REG)
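
Note: dropping `!f->d_name` fixes a tautological check: dirent::d_name is a char array embedded in the struct, so its decayed pointer can never be null, and GCC (-Waddress) and Clang (-Wpointer-bool-conversion) reject the test under -Werror. A small self-contained sketch:

    #include <dirent.h>
    #include <cstdio>

    void list_visible(const char *path) {
      if (DIR *dir = opendir(path)) {
        while (dirent *f = readdir(dir)) {
          // if (!f->d_name) ...              // always false: d_name is an array
          if (f->d_name[0] == '.') continue;  // the meaningful part of the check
          std::printf("%s\n", f->d_name);
        }
        closedir(dir);
      }
    }
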
...
@@ -462,11 +461,10 @@ int32_t fileIterator(mindspore::session::TrainSession *session, const std::strin
   }
   return res;
 }
-#if 0
 void replaceExt(const std::string &src, std::string *dst) {
-  dst = &std::move(src.substr(0, src.find_last_of('.')) + ".emb");
+  *dst = src.substr(0, src.find_last_of('.')) + ".emb";
 }
-#endif
 int32_t runEffNet(mindspore::session::TrainSession *session, const std::string &in, const std::string &out) {
   // setup input
   auto inputs = session->GetInputs();
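
Note: the replaceExt hunk both re-enables the helper (removing the #if 0 guard, on the reading that the now-fixed function is called below) and repairs its body. The old line tried to take the address of an rvalue (the moved temporary), which is ill-formed, and reassigning the parameter dst would never reach the caller anyway; writing through the pointer does. A sketch with the matching call style:

    #include <string>

    // Out-parameter version: the result lands in the caller's string.
    void replaceExt(const std::string &src, std::string *dst) {
      // dst = &std::move(src.substr(...) + ".emb");  // error: address of rvalue
      *dst = src.substr(0, src.find_last_of('.')) + ".emb";
    }

    // Usage, mirroring the call-site fix in the last hunk:
    // std::string out;
    // replaceExt("sample.bin", &out);  // out == "sample.emb"
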
...
@@ -494,7 +492,6 @@ int32_t runEffNet(mindspore::session::TrainSession *session, const std::string &
 }
 TEST_F(NetworkTest, efficient_net) {
-  const int NUM_OF_INPUTS = 1;
   char *buf = nullptr;
   size_t net_size = 0;
   std::string net = "./test_data/nets/efficientnet_b0_f.ms";
...
@@ -520,7 +517,7 @@ TEST_F(NetworkTest, efficient_net) {
   int32_t res = 0;
   if (in.find(".bin") != std::string::npos) {
     std::string out;
-    replaceExt(in, out);
+    replaceExt(in, &out);
     res = runEffNet(session, in, out);
     std::cout << "input file: " << in << (res ? " Fail" : " Pass") << std::endl;
   }
...