Commit d9bf522b authored by Alexander Alekhin

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

@@ -77,9 +77,11 @@ endif(MSVC)
 add_library(${PNG_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs} ${lib_hdrs})
 target_link_libraries(${PNG_LIBRARY} ${ZLIB_LIBRARIES})
-ocv_warnings_disable(CMAKE_C_FLAGS -Wundef -Wcast-align -Wimplicit-fallthrough -Wunused-parameter -Wsign-compare)
-ocv_warnings_disable(CMAKE_C_FLAGS -Wnull-pointer-subtraction) # clang15
-ocv_warnings_disable(CMAKE_C_FLAGS -Wunused-but-set-variable) # clang15
+ocv_warnings_disable(CMAKE_C_FLAGS -Wundef -Wcast-align -Wimplicit-fallthrough -Wunused-parameter -Wsign-compare
+    -Wmaybe-uninitialized
+    -Wnull-pointer-subtraction # clang15
+    -Wunused-but-set-variable # clang15
+)
 set_target_properties(${PNG_LIBRARY}
     PROPERTIES OUTPUT_NAME ${PNG_LIBRARY}
@@ -452,9 +452,10 @@ ocv_warnings_disable(CMAKE_C_FLAGS -Wno-unused-but-set-variable -Wmissing-protot
     -Wcast-align -Wshadow -Wno-maybe-uninitialized -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast
     -Wmisleading-indentation
     -Wimplicit-fallthrough
+    -Wunused-parameter # clang
+    -Warray-parameter
+    -Wstrict-prototypes # clang15
 )
-ocv_warnings_disable(CMAKE_C_FLAGS -Wunused-parameter) # clang
-ocv_warnings_disable(CMAKE_C_FLAGS -Wstrict-prototypes) # clang15
 ocv_warnings_disable(CMAKE_CXX_FLAGS -Wmissing-declarations -Wunused-parameter -Wmissing-prototypes
     -Wundef # tiffiop.h: #if __clang_major__ >= 4
 )
@@ -252,7 +252,7 @@ bool CvCascadeClassifier::train( const string _cascadeDirName,
             fs << "}";
         }
         // save current stage
-        char buf[10];
+        char buf[32];
         sprintf(buf, "%s%d", "stage", i );
         string stageFilename = dirName + buf + ".xml";
         FileStorage fs( stageFilename, FileStorage::WRITE );
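The widened buffer closes a latent sprintf overflow: "stage" is five characters, so with the terminating NUL a stage index of 10000 or more no longer fits in 10 bytes. A minimal sketch of the bounds math, using snprintf as the defensive variant (the helper below is illustrative, not part of the commit):

```cpp
#include <cstdio>
#include <string>

// "stage" (5 chars) + decimal digits of i + '\0'.
// i = 9999 needs exactly 10 bytes; i = 10000 overflows char buf[10].
// 32 bytes comfortably covers any int, sign included.
std::string stageName(int i)
{
    char buf[32];
    std::snprintf(buf, sizeof(buf), "stage%d", i);  // bounded write, unlike sprintf
    return std::string(buf);
}
```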
@@ -197,8 +197,18 @@ private:
     void parseOperatorSet();

     const std::string str_domain_ai_onnx = "ai.onnx";
+
+    bool useLegacyNames;
+    bool getParamUseLegacyNames()
+    {
+        bool param = utils::getConfigurationParameterBool("OPENCV_DNN_ONNX_USE_LEGACY_NAMES", false);
+        return param;
+    }
+
+    const std::string extractNodeName(const opencv_onnx::NodeProto& node_proto);
+
 };

 class ONNXLayerHandler : public detail::LayerHandler
 {
 public:
@@ -233,6 +243,7 @@ ONNXImporter::ONNXImporter(Net& net, const char *onnxFile)
     : layerHandler(DNN_DIAGNOSTICS_RUN ? new ONNXLayerHandler(this) : nullptr)
     , dstNet(net)
     , onnx_opset(0)
+    , useLegacyNames(getParamUseLegacyNames())
 {
     hasDynamicShapes = false;
     CV_Assert(onnxFile);
@@ -256,6 +267,7 @@ ONNXImporter::ONNXImporter(Net& net, const char* buffer, size_t sizeBuffer)
     : layerHandler(DNN_DIAGNOSTICS_RUN ? new ONNXLayerHandler(this) : nullptr)
     , dstNet(net)
     , onnx_opset(0)
+    , useLegacyNames(getParamUseLegacyNames())
 {
     hasDynamicShapes = false;
     CV_LOG_DEBUG(NULL, "DNN/ONNX: processing in-memory ONNX model (" << sizeBuffer << " bytes)");
@@ -278,6 +290,7 @@ ONNXImporter::ONNXImporter(Net& net, const char* buffer, size_t sizeBuffer)
     populateNet();
 }
+
 inline void replaceLayerParam(LayerParams& layerParams, const String& oldKey, const String& newKey)
 {
     if (layerParams.has(oldKey)) {
@@ -909,11 +922,14 @@ const ONNXImporter::DispatchMap& ONNXImporter::getDispatchMap(const opencv_onnx:
     return it->second;
 }

-const std::string& extractNodeName(const opencv_onnx::NodeProto& node_proto)
+const std::string ONNXImporter::extractNodeName(const opencv_onnx::NodeProto& node_proto)
 {
     // We need to rework DNN outputs API, this is a workaround for #21698
     if (node_proto.has_name() && !node_proto.name().empty())
     {
-        return node_proto.name();
+        if (useLegacyNames)
+            return node_proto.name();
+        return cv::format("onnx_node!%s", node_proto.name().c_str());
     }
     for (int i = 0; i < node_proto.output_size(); ++i)
     {
@@ -923,7 +939,9 @@ const std::string& extractNodeName(const opencv_onnx::NodeProto& node_proto)
         // the second method is to use an empty string in place of an input or output name.
         if (!name.empty())
         {
-            return name;
+            if (useLegacyNames)
+                return name.c_str();
+            return cv::format("onnx_node_output_%d!%s", i, name.c_str());
         }
     }
     CV_Error(Error::StsAssert, "Couldn't deduce Node name.");
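With legacy naming off (the new default), the importer prefixes generated layer names with `onnx_node!` or `onnx_node_output_<i>!`, so ONNX node and output names map onto DNN layer names uniformly; setting the `OPENCV_DNN_ONNX_USE_LEGACY_NAMES` configuration parameter restores the old bare names. A minimal sketch of what a caller observes (the model file and node names are hypothetical):

```cpp
#include <opencv2/dnn.hpp>
#include <iostream>

int main()
{
    // Hypothetical model whose graph contains a node output named "63".
    cv::dnn::Net net = cv::dnn::readNetFromONNX("model.onnx");
    for (const cv::String& name : net.getLayerNames())
        std::cout << name << "\n";  // e.g. "onnx_node_output_0!63" instead of "63"
    return 0;
}
```

Running with the environment variable `OPENCV_DNN_ONNX_USE_LEGACY_NAMES=1` set before the model is loaded would print the pre-change names.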
@@ -265,14 +265,32 @@ TEST_P(Test_Int8_layers, Mish)
     testLayer("mish", "ONNX", 0.0015, 0.0025);
 }

-TEST_P(Test_Int8_layers, Softmax)
+TEST_P(Test_Int8_layers, Softmax_Caffe)
 {
     testLayer("layer_softmax", "Caffe", 0.0011, 0.0036);
+}
+TEST_P(Test_Int8_layers, Softmax_keras_TF)
+{
     testLayer("keras_softmax", "TensorFlow", 0.00093, 0.0027);
+}
+TEST_P(Test_Int8_layers, Softmax_slim_TF)
+{
     testLayer("slim_softmax", "TensorFlow", 0.0016, 0.0034);
+}
+TEST_P(Test_Int8_layers, Softmax_slim_v2_TF)
+{
     testLayer("slim_softmax_v2", "TensorFlow", 0.0029, 0.017);
+}
+TEST_P(Test_Int8_layers, Softmax_ONNX)
+{
     testLayer("softmax", "ONNX", 0.0016, 0.0028);
+}
+TEST_P(Test_Int8_layers, Softmax_log_ONNX)
+{
     testLayer("log_softmax", "ONNX", 0.014, 0.025);
+}
+TEST_P(Test_Int8_layers, DISABLED_Softmax_unfused_ONNX) // FIXIT Support 'Identity' layer for outputs (#22022)
+{
     testLayer("softmax_unfused", "ONNX", 0.0009, 0.0021);
 }
@@ -389,7 +407,7 @@ TEST_P(Test_Int8_layers, Slice_strided_tf)
     testLayer("strided_slice", "TensorFlow", 0.008, 0.0142);
 }

-TEST_P(Test_Int8_layers, Slice_onnx)
+TEST_P(Test_Int8_layers, DISABLED_Slice_onnx) // FIXIT Support 'Identity' layer for outputs (#22022)
 {
     testLayer("slice", "ONNX", 0.0046, 0.0077);
 }
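Splitting the monolithic Softmax test into one TEST_P per model keeps an early failure from masking the remaining cases, and lets individual cases be switched off with GoogleTest's DISABLED_ prefix, as done for softmax_unfused and Slice_onnx above. A generic sketch of the convention (suite and test names are illustrative, not from the suite):

```cpp
#include <gtest/gtest.h>

// One test per input isolates failures; a DISABLED_ prefix skips the test
// unless the binary is run with --gtest_also_run_disabled_tests.
TEST(SoftmaxSplitDemo, CaffeModel)         { EXPECT_TRUE(true); }
TEST(SoftmaxSplitDemo, DISABLED_OnnxModel) { EXPECT_TRUE(true); }
```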
@@ -1855,6 +1855,11 @@ TEST_P(Test_ONNX_layers, Quantized_Constant)
     testONNXModels("quantized_constant", npy, 0.002, 0.008);
 }

+TEST_P(Test_ONNX_layers, OutputRegistration)
+{
+    testONNXModels("output_registration", npy, 0, 0, false, true, 2);
+}
+
 INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets());

 class Test_ONNX_nets : public Test_ONNX_layers
@@ -8718,7 +8718,7 @@ static void StackLowerThanAddress(const void* ptr, bool* result) {
 // Make sure AddressSanitizer does not tamper with the stack here.
 GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
 static bool StackGrowsDown() {
-  int dummy;
+  int dummy = 0;
   bool result;
   StackLowerThanAddress(&dummy, &result);
   return result;
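Initializing `dummy` silences maybe-uninitialized diagnostics: only the variable's address is used, never its value, but compilers and sanitizers cannot always prove that across the non-inlined call. A standalone reproduction of the pattern (names differ from gtest's internals):

```cpp
#include <cstdint>

// Compares the address of a callee-local against a caller-local: if the
// callee's sits lower, the stack grows downward. Casting to uintptr_t
// avoids undefined relational comparison of unrelated pointers.
static void probeLowerThan(const void* ptr, bool* result)
{
    int probe = 0;  // initialized so -Wmaybe-uninitialized/MSan stay quiet
    *result = reinterpret_cast<std::uintptr_t>(&probe) <
              reinterpret_cast<std::uintptr_t>(ptr);
}

static bool stackGrowsDown()
{
    int dummy = 0;
    bool result = false;
    probeLowerThan(&dummy, &result);
    return result;
}
```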
@@ -160,7 +160,7 @@ void TrackerDaSiamRPNImpl::trackerInit(Mat img)
     dnn::blobFromImage(zCrop, blob, 1.0, Size(trackState.exemplarSize, trackState.exemplarSize), Scalar(), trackState.swapRB, false, CV_32F);
     siamRPN.setInput(blob);
     Mat out1;
-    siamRPN.forward(out1, "63");
+    siamRPN.forward(out1, "onnx_node_output_0!63");
     siamKernelCL1.setInput(out1);
     siamKernelR1.setInput(out1);
@@ -169,8 +169,8 @@ void TrackerDaSiamRPNImpl::trackerInit(Mat img)
     Mat r1 = siamKernelR1.forward();
     std::vector<int> r1_shape = { 20, 256, 4, 4 }, cls1_shape = { 10, 256, 4, 4 };

-    siamRPN.setParam(siamRPN.getLayerId("65"), 0, r1.reshape(0, r1_shape));
-    siamRPN.setParam(siamRPN.getLayerId("68"), 0, cls1.reshape(0, cls1_shape));
+    siamRPN.setParam(siamRPN.getLayerId("onnx_node_output_0!65"), 0, r1.reshape(0, r1_shape));
+    siamRPN.setParam(siamRPN.getLayerId("onnx_node_output_0!68"), 0, cls1.reshape(0, cls1_shape));
 }

 bool TrackerDaSiamRPNImpl::update(InputArray image, Rect& boundingBox)
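Downstream code that addressed importer-generated layers by bare ONNX names, as the DaSiamRPN tracker did with "63", "65", and "68", must adopt the `onnx_node_output_<i>!<name>` form or opt back into legacy names. A hedged sketch of a version-tolerant lookup (the helper name and fallback policy are mine, not from the commit):

```cpp
#include <opencv2/dnn.hpp>
#include <string>

// Prefer the uniform name introduced by this change; fall back to the
// legacy bare name so the same code also works on older OpenCV builds or
// when OPENCV_DNN_ONNX_USE_LEGACY_NAMES=1 is set.
static std::string resolveLayerName(cv::dnn::Net& net, const std::string& legacy)
{
    const std::string uniform = "onnx_node_output_0!" + legacy;
    return net.getLayerId(uniform) != -1 ? uniform : legacy;  // -1 means no such layer
}
```

Used as `siamRPN.forward(out1, resolveLayerName(siamRPN, "63"));` in place of the hard-coded string.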