From c0d5b7eceb04fbb043de846df184c52009f9f61e Mon Sep 17 00:00:00 2001
From: wenbin
Date: Thu, 2 Dec 2021 10:26:23 +0800
Subject: [PATCH] simplify_with_basic_ops_pass UT (#37704)

* first commit

* more uts

* file name duplicated

* timeout

* Update CMakeLists.txt

change TIMEOUT from 120 to 240

* Update CMakeLists.txt

more time

* Update CMakeLists.txt

timeout

* Update CMakeLists.txt

60s
---
 .../ir/simplify_with_basic_ops_pass.cc        |   5 +
 .../unittests/ir/inference/CMakeLists.txt     |   1 +
 ...t_simplify_with_basic_ops_pass_autoscan.py | 154 ++++++++++++++++++
 3 files changed, 160 insertions(+)
 create mode 100644 python/paddle/fluid/tests/unittests/ir/inference/test_simplify_with_basic_ops_pass_autoscan.py

diff --git a/paddle/fluid/framework/ir/simplify_with_basic_ops_pass.cc b/paddle/fluid/framework/ir/simplify_with_basic_ops_pass.cc
index b2b1a7515f0..2d60129165a 100644
--- a/paddle/fluid/framework/ir/simplify_with_basic_ops_pass.cc
+++ b/paddle/fluid/framework/ir/simplify_with_basic_ops_pass.cc
@@ -17,6 +17,7 @@ limitations under the License. */
 #include "glog/logging.h"
 #include "paddle/fluid/framework/ir/graph_pattern_detector.h"
 #include "paddle/fluid/framework/ir/node.h"
+#include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/fluid/platform/enforce.h"
 namespace paddle {
 namespace framework {
@@ -231,3 +232,7 @@ void SimplifyWithBasicOpsPass::ReplaceOutputVar(Node* op, Node* old_var,
 
 REGISTER_PASS(simplify_with_basic_ops_pass,
               paddle::framework::ir::SimplifyWithBasicOpsPass);
+REGISTER_PASS_CAPABILITY(simplify_with_basic_ops_pass)
+    .AddCombination(
+        paddle::framework::compatible::OpVersionComparatorCombination().EQ(
+            "scale", 0));
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt b/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt
index 4126e604cc1..f59f686e78a 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt
@@ -71,6 +71,7 @@ set_tests_properties(test_trt_matmul_quant_dequant PROPERTIES TIMEOUT 100)
 set_tests_properties(test_trt_conv3d_op PROPERTIES TIMEOUT 60)
 set_tests_properties(test_trt_conv3d_transpose_op PROPERTIES TIMEOUT 60)
 set_tests_properties(test_trt_nearest_interp_v2_op PROPERTIES TIMEOUT 30)
+set_tests_properties(test_simplify_with_basic_ops_pass_autoscan PROPERTIES TIMEOUT 60)
 
 if (WITH_MKLDNN AND TENSORRT_FOUND AND WITH_GPU)
     set_tests_properties(test_emb_eltwise_layernorm_fuse_pass PROPERTIES TIMEOUT 120)
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_simplify_with_basic_ops_pass_autoscan.py b/python/paddle/fluid/tests/unittests/ir/inference/test_simplify_with_basic_ops_pass_autoscan.py
new file mode 100644
index 00000000000..03e9feb418a
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_simplify_with_basic_ops_pass_autoscan.py
@@ -0,0 +1,154 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from auto_scan_test import PassAutoScanTest, SkipReasons
+from program_config import TensorConfig, ProgramConfig, OpConfig
+import numpy as np
+import paddle.inference as paddle_infer
+from functools import partial
+from typing import Optional, List, Callable, Dict, Any, Set
+import unittest
+
+import hypothesis
+from hypothesis import given, settings, seed, example, assume
+import hypothesis.strategies as st
+
+
+class TestSimplifyWithBasicOpsPassUpscale(PassAutoScanTest):
+    def is_program_valid(self, program_config: ProgramConfig) -> bool:
+        return True
+
+    def sample_program_config(self, draw):
+        #scale = draw(st.floats(min_value=0.01, max_value=1.0))
+        #bias = draw(st.floats(min_value=0.01, max_value=2.0))
+        #bias_after_scale = draw(st.booleans())
+        fix_seed = draw(st.booleans())
+        dropout_implementation = "upscale_in_train"
+        dropout_prob = draw(st.floats(min_value=0.0, max_value=1.0))
+        seed = draw(st.integers(min_value=0, max_value=512))
+        x_shape = draw(
+            st.lists(
+                st.integers(
+                    min_value=1, max_value=4), min_size=2, max_size=4))
+        is_test = True
+
+        dropout_op = OpConfig(
+            "dropout",
+            inputs={"X": ["input_data"]},
+            outputs={"Out": ["dropout_output"]},
+            fix_seed=fix_seed,
+            dropout_implementation=dropout_implementation,
+            dropout_prob=dropout_prob,
+            seed=seed,
+            is_test=is_test)
+        relu_op = OpConfig(
+            "relu",
+            inputs={"X": ["dropout_output"]},
+            outputs={"Out": ["relu_out"]})
+        ops = [dropout_op, relu_op]
+
+        program_config = ProgramConfig(
+            ops=ops,
+            weights={},
+            inputs={"input_data": TensorConfig(shape=x_shape), },
+            outputs=["relu_out"])
+
+        return program_config
+
+    def sample_predictor_configs(self, program_config):
+        config = self.create_inference_config(use_gpu=True)
+        yield config, ['relu'], (1e-5, 1e-5)
+        config = self.create_inference_config(use_gpu=False)
+        yield config, ['relu'], (1e-5, 1e-5)
+        config = self.create_trt_inference_config()
+        config.enable_tensorrt_engine(
+            max_batch_size=4,
+            workspace_size=102400,
+            min_subgraph_size=0,
+            precision_mode=paddle_infer.PrecisionType.Float32,
+            use_static=False,
+            use_calib_mode=False)
+        yield config, ['relu'], (1e-5, 1e-5)
+
+    def test(self):
+        self.run_and_statis(
+            quant=False,
+            max_examples=30,
+            passes=["simplify_with_basic_ops_pass"],
+            min_success_num=30)
+
+
+class TestSimplifyWithBasicOpsPassDowngrade(PassAutoScanTest):
+    def is_program_valid(self, program_config: ProgramConfig) -> bool:
+        return True
+
+    def sample_program_config(self, draw):
+        fix_seed = draw(st.booleans())
+        dropout_implementation = "downgrade_in_infer"
+        dropout_prob = draw(st.floats(min_value=0.0, max_value=1.0))
+        seed = draw(st.integers(min_value=0, max_value=512))
+        x_shape = draw(
+            st.lists(
+                st.integers(
+                    min_value=1, max_value=4), min_size=2, max_size=4))
+        is_test = True
+
+        dropout_op = OpConfig(
+            "dropout",
+            inputs={"X": ["input_data"]},
+            outputs={"Out": ["dropout_output"]},
+            fix_seed=fix_seed,
+            dropout_implementation=dropout_implementation,
+            dropout_prob=dropout_prob,
+            seed=seed,
+            is_test=is_test)
+        relu_op = OpConfig(
+            "relu",
+            inputs={"X": ["dropout_output"]},
+            outputs={"Out": ["relu_out"]})
+        ops = [dropout_op, relu_op]
+
+        program_config = ProgramConfig(
+            ops=ops,
+            weights={},
+            inputs={"input_data": TensorConfig(shape=x_shape), },
+            outputs=["relu_out"])
+
+        return program_config
+
+    def sample_predictor_configs(self, program_config):
+        config = self.create_inference_config(use_gpu=True)
+        yield config, ['scale', 'relu'], (1e-5, 1e-5)
+        config = self.create_inference_config(use_gpu=False)
+        yield config, ['scale', 'relu'], (1e-5, 1e-5)
+        config = self.create_trt_inference_config()
+        config.enable_tensorrt_engine(
+            max_batch_size=4,
+            workspace_size=102400,
+            min_subgraph_size=0,
+            precision_mode=paddle_infer.PrecisionType.Float32,
+            use_static=False,
+            use_calib_mode=False)
+        yield config, ['scale', 'relu'], (1e-5, 1e-5)
+
+    def test(self):
+        self.run_and_statis(
+            quant=False,
+            max_examples=30,
+            passes=["simplify_with_basic_ops_pass"],
+            min_success_num=30)
+
+
+if __name__ == "__main__":
+    unittest.main()
--
GitLab
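
For context, the expected op lists in the two tests follow from the inference-time semantics of dropout that the pass exploits: with dropout_implementation="upscale_in_train" dropout is an identity at inference time, so the pass removes it entirely (only 'relu' remains); with "downgrade_in_infer" the inference output is the input scaled by (1 - dropout_prob), so the pass rewrites dropout into a scale op ('scale', 'relu'). A minimal NumPy sketch of that equivalence (illustrative only, not part of the patch):

    import numpy as np

    x = np.random.rand(2, 3).astype("float32")
    dropout_prob = 0.3

    # upscale_in_train: dropout is a no-op at inference time,
    # so simplify_with_basic_ops_pass can drop the op.
    upscale_infer_out = x

    # downgrade_in_infer: inference output is x * (1 - dropout_prob),
    # so the pass can replace dropout with an equivalent scale op.
    downgrade_infer_out = x * (1 - dropout_prob)

Assuming a build configured with GPU and TensorRT support, the new test registered in CMakeLists.txt can be run on its own from the build directory with, e.g., ctest -R test_simplify_with_basic_ops_pass_autoscan.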