Unverified commit 2a792de7, authored by 石晓伟 and committed by GitHub

change the function in op_teller, test=release/1.7 (#22834)

* change the function in op_teller, test=develop

* correct the commit-id, test=develop
Parent 7a92e759
@@ -44,7 +44,7 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
     ${LITE_PROJECT}
     ${EXTERNAL_PROJECT_LOG_ARGS}
     GIT_REPOSITORY "https://github.com/PaddlePaddle/Paddle-Lite.git"
-    GIT_TAG 922ace19a45f30075618f71428523e7a2d5898d6
+    GIT_TAG 0f875ef367bd2dbfa2e557eb2a2fc841bacdf6cf
     PREFIX ${LITE_SOURCES_DIR}
     UPDATE_COMMAND ""
     BUILD_COMMAND ${LITE_BUILD_COMMAND}
@@ -28,7 +28,7 @@ namespace lite {
 struct SimpleOpTeller : public Teller {
   SimpleOpTeller() {
     const std::map<std::string, std::string>& op2path =
-        OpKernelInfoCollector::Global().GetOp2PathDict();
+        paddle::lite::GetOp2PathDict();
     auto is_non_inst = [](const std::string& op) -> bool {
       const std::vector<std::string> ops = {"feed", "fetch", "while"};
       return std::find(ops.begin(), ops.end(), op) != ops.end();
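For reference, the hunk above only changes where the op-to-kernel-path dictionary comes from (the free function paddle::lite::GetOp2PathDict() instead of the OpKernelInfoCollector singleton); the supported/unsupported decision itself is unchanged. Below is a minimal standalone sketch of that decision. The class name and the sample dictionary entries are hypothetical and not Paddle-Lite's actual API:

```cpp
// Simplified "op teller": an op is supported when it appears in the
// op -> kernel-path dictionary and is not a framework-only op such as
// feed/fetch/while.
#include <algorithm>
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

class SimpleOpTellerSketch {
 public:
  explicit SimpleOpTellerSketch(std::map<std::string, std::string> op2path)
      : op2path_(std::move(op2path)) {}

  bool operator()(const std::string& op) const {
    const std::vector<std::string> non_inst = {"feed", "fetch", "while"};
    if (std::find(non_inst.begin(), non_inst.end(), op) != non_inst.end()) {
      return false;  // framework-only ops are never handed to the engine
    }
    return op2path_.count(op) > 0;  // supported iff a kernel path is registered
  }

 private:
  std::map<std::string, std::string> op2path_;
};

int main() {
  // In the real code the dictionary comes from paddle::lite::GetOp2PathDict();
  // the entries below are made up for illustration.
  SimpleOpTellerSketch teller({{"conv2d", "kernels/conv_compute.cc"},
                               {"relu", "kernels/activation_compute.cc"}});
  std::cout << std::boolalpha << teller("conv2d") << "\n";  // true
  std::cout << teller("while") << "\n";                     // false
  std::cout << teller("lookup_table") << "\n";              // false
  return 0;
}
```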
@@ -157,7 +157,10 @@ void TensorCopyAsync(paddle::lite::Tensor* dst, const framework::LoDTensor& src,
   dst->Resize(framework::vectorize(src.dims()));
   const void* src_data = src.data<void>();
   void* dst_data = dst->mutable_data(bytes);
+  VLOG(3) << "[CopyAsync fluid -> lite] Bytes = " << bytes << ", src = " << &src
+          << ", dst = " << dst << ", src_type = " << src.type();
   MemoryCopyAsync(dst_place, dst_data, src_place, src_data, bytes, ctx);
+  VLOG(3) << "[Lite memory size] Bytes = " << dst->memory_size();
 }
 template <>
@@ -172,7 +175,10 @@ void TensorCopyAsync(framework::LoDTensor* dst, const paddle::lite::Tensor& src,
   const void* src_data = src.raw_data();
   // When Lite is ready, the source type needs to be modified here.
   void* dst_data = dst->mutable_data(dst_place, dst->type());
+  VLOG(3) << "[CopyAsync lite -> fluid] Bytes = " << bytes << ", src = " << &src
+          << ", dst = " << dst << ", src_type = " << dst->type();
   MemoryCopyAsync(dst_place, dst_data, src_place, src_data, bytes, ctx);
+  VLOG(3) << "[Lite memory size] Bytes = " << src.memory_size();
 }
 }  // namespace utils
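The two hunks above only add VLOG(3) tracing around the existing MemoryCopyAsync call: the byte count and the src/dst endpoints before the copy, and the buffer size after it. A stripped-down sketch of that pattern follows; the tensor and device-context types are not modeled, VLOG is replaced by a plain stream, and std::memcpy stands in for the asynchronous copy:

```cpp
// Log the size and endpoints of a copy before it runs, and the
// destination size afterwards, mirroring the added trace lines.
#include <cstddef>
#include <cstring>
#include <iostream>
#include <vector>

void CopyWithTrace(void* dst, const void* src, std::size_t bytes) {
  std::cout << "[CopyAsync fluid -> lite] Bytes = " << bytes
            << ", src = " << src << ", dst = " << dst << "\n";
  std::memcpy(dst, src, bytes);  // stand-in for MemoryCopyAsync(...)
  std::cout << "[Lite memory size] Bytes = " << bytes << "\n";
}

int main() {
  std::vector<float> src(4, 1.5f), dst(4, 0.f);
  CopyWithTrace(dst.data(), src.data(), src.size() * sizeof(float));
  std::cout << dst[0] << "\n";  // 1.5
  return 0;
}
```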
@@ -77,7 +77,8 @@ class LiteEngineOp : public framework::OperatorBase {
           inference::analysis::GetFromScope<framework::LoDTensor>(scope,
                                                                   in_names_[i]);
       paddle::lite::Tensor *dst_t = engine_->GetInput(i);
-      VLOG(3) << "fluid -> lite: " << in_names_[i];
+      VLOG(3) << "[Copy] fluid -> lite (" << in_names_[i] << " -> "
+              << engine_->GetInputNames()[i] << ")";
       inference::lite::utils::TensorCopyAsync(dst_t, src_t, *ctx);
     }
 #ifdef PADDLE_WITH_CUDA
@@ -94,7 +95,8 @@ class LiteEngineOp : public framework::OperatorBase {
       framework::LoDTensor *dst_t =
           &inference::analysis::GetFromScope<framework::LoDTensor>(
               scope, out_names_[i]);
-      VLOG(3) << "lite -> fluid: " << out_names_[i];
+      VLOG(3) << "[Copy] lite -> fluid (" << out_names_[i] << " -> "
+              << engine_->GetOutputNames()[i] << ")";
       inference::lite::utils::TensorCopyAsync(dst_t, src_t, *ctx);
     }
 #ifdef PADDLE_WITH_CUDA
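The last two hunks change the per-tensor log line so that it shows both the fluid variable name and the engine tensor it is copied to. A tiny sketch of that message format; the name vectors below are made up, whereas in the operator they come from in_names_/out_names_ and engine_->GetInputNames()/GetOutputNames():

```cpp
// Print "[Copy] fluid -> lite (fluid_name -> lite_name)" for each
// paired input, as the new log message does.
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> fluid_inputs = {"image", "im_info"};
  std::vector<std::string> lite_inputs = {"input_0", "input_1"};

  for (std::size_t i = 0; i < fluid_inputs.size(); ++i) {
    std::cout << "[Copy] fluid -> lite (" << fluid_inputs[i] << " -> "
              << lite_inputs[i] << ")\n";
    // ... the actual TensorCopyAsync(dst_t, src_t, *ctx) would run here ...
  }
  return 0;
}
```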