Commit 4ef6c915 authored by Dmitry Kurtaev

Fix multiple inputs for models from Intel's Model Optimizer

Parent 070ec313
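The patch below changes Net::readFromModelOptimizer() so that every input of a loaded IR network is connected to the Inference Engine backend layer, not only the first one. As a quick orientation, this is the user-facing pattern the fix enables; it is a minimal sketch only, and the file names, input names, and shapes are placeholders taken from the new test further down:

```cpp
#include <opencv2/dnn.hpp>

using namespace cv;
using namespace cv::dnn;

int main()
{
    // Load an IR model produced by Intel's Model Optimizer (placeholder file names).
    Net net = readNet("net_two_inputs.xml", "net_two_inputs.bin");

    // Two blobs matching the model's 1x2x3 input shapes.
    int sz[] = {1, 2, 3};
    Mat first(3, sz, CV_32F, Scalar(1));
    Mat second(3, sz, CV_32F, Scalar(2));

    // Before this commit only the first named input was wired to the backend layer;
    // with the fix both inputs reach the network.
    net.setInput(first, "data");
    net.setInput(second, "second_input");

    Mat out = net.forward();  // for the Eltwise SUM test network, out == first + second
    return 0;
}
```

The new Test_DLDT.two_inputs test at the bottom of the diff exercises exactly this path with random inputs.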
@@ -1944,7 +1944,8 @@ Net Net::readFromModelOptimizer(const String& xml, const String& bin)
         ld.layerInstance = Ptr<Layer>(new InfEngineBackendLayer(it.second));
         ld.backendNodes[DNN_BACKEND_INFERENCE_ENGINE] = backendNode;
-        cvNet.connect(0, 0, lid, 0);
+        for (int i = 0; i < inputsNames.size(); ++i)
+            cvNet.connect(0, i, lid, i);
     }
     cvNet.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
@@ -866,6 +866,44 @@ TEST(Layer_Test_Convolution_DLDT, Accuracy)
     normAssert(outDefault, out);
 }
+
+// 1. Create a .prototxt file with the following network:
+// layer {
+//   type: "Input" name: "data" top: "data"
+//   input_param { shape { dim: 1 dim: 2 dim: 3 } }
+// }
+// layer {
+//   type: "Input" name: "second_input" top: "second_input"
+//   input_param { shape { dim: 1 dim: 2 dim: 3 } }
+// }
+// layer {
+//   type: "Eltwise" name: "output" top: "output"
+//   bottom: "data" bottom: "second_input"
+//   eltwise_param { operation: SUM }
+// }
+//
+// 2. Create a .caffemodel file using Caffe:
+//
+// import caffe
+// net = caffe.Net('/path/to/prototxt', caffe.TEST)
+// net.save('/path/to/caffemodel')
+//
+// 3. Convert using ModelOptimizer.
+TEST(Test_DLDT, two_inputs)
+{
+    Net net = readNet(_tf("net_two_inputs.xml"), _tf("net_two_inputs.bin"));
+    int inpSize[] = {1, 2, 3};
+    Mat firstInp(3, &inpSize[0], CV_32F);
+    Mat secondInp(3, &inpSize[0], CV_32F);
+    randu(firstInp, -1, 1);
+    randu(secondInp, -1, 1);
+    net.setInput(firstInp, "data");
+    net.setInput(secondInp, "second_input");
+    Mat out = net.forward();
+    normAssert(out, firstInp + secondInp);
+}
 #endif // HAVE_INF_ENGINE
 }} // namespace
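Step 3 of the test's comment block ("Convert using ModelOptimizer.") gives no command line. With the Python-based Model Optimizer distributed with OpenVINO, the conversion corresponds roughly to the invocation below; the mo.py location and available flags differ between releases, so treat this as an illustrative sketch rather than the exact command used to generate net_two_inputs.xml/.bin:

```sh
# Hypothetical paths; mo.py ships with Intel's Model Optimizer.
python mo.py \
    --input_model /path/to/caffemodel \
    --input_proto /path/to/prototxt \
    --model_name net_two_inputs
```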