Unverified commit 7b1bb874, authored by Sing_chan, committed by GitHub

test inference_api_test when run in windows-inference ci (#37710)

* test inference_api_test when run in windows-inference ci

* test whether the code runs correctly when a test fails

* put the failed test back
Parent 1432e3d2
@@ -27,9 +27,9 @@ from paddle.fluid.core import AnalysisConfig
 class TensorRTPoolTest(InferencePassTest):
     def setUp(self):
         self.bs = 1
-        self.channel = 3
-        self.height = 8
-        self.width = 8
+        self.channel = 2
+        self.height = 2
+        self.width = 2
         self.pool_size = 2
         self.pool_type = 'max'
         self.pool_stride = 1
......
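Note: the hunk above shrinks the pool test input from 3x8x8 to 2x2x2, presumably to keep the TensorRT pool unit test lightweight on the Windows inference CI. As a minimal illustration (not part of the diff; pool_out_dim is a hypothetical helper), the standard pooling output-size rule shows that the smaller input still yields a valid 1x1 feature map with pool_size=2 and stride=1:

# Illustration only: standard pooling output-size rule, assuming zero padding.
def pool_out_dim(in_dim, pool_size, stride, padding=0):
    # floor((in + 2*pad - k) / stride) + 1
    return (in_dim + 2 * padding - pool_size) // stride + 1

if __name__ == "__main__":
    for h in (8, 2):  # old vs. new height/width used by TensorRTPoolTest
        print(h, "->", pool_out_dim(h, pool_size=2, stride=1))  # 8 -> 7, 2 -> 1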
@@ -53,10 +53,6 @@ if [ -f "$PADDLE_ROOT/added_ut" ];then
         echo "========================================"
         exit 8;
     fi
-    if nvcc --version | grep 11.2; then
-        echo "Only test added_ut temporarily when running in CI-Windows-inference of CUDA 11.2."
-        exit 0;
-    fi
 fi
 set -e
@@ -107,7 +103,6 @@ disable_win_trt_test="^test_trt_convert_conv2d$|\
 ^test_trt_convert_emb_eltwise_layernorm$|\
 ^test_trt_convert_pool2d$|\
 ^test_trt_conv3d_op$|\
-^test_trt_matmul_quant_dequant$|\
 ^test_trt_subgraph_pass$|\
 ^test_trt_convert_dropout$|\
 ^test_trt_convert_hard_sigmoid$|\
@@ -121,6 +116,16 @@ disable_win_trt_test="^test_trt_convert_conv2d$|\
 ^test_trt_convert_matmul$|\
 ^test_trt_convert_scale$"
+# /*==================Fixed Disabled Windows GPU inference_api_test unittests==============================*/
+disable_win_inference_api_test="^test_analyzer_capi_exp_pd_config$|\
+^trt_quant_int8_yolov3_r50_test$|\
+^test_trt_dynamic_shape_ernie$|\
+^test_trt_dynamic_shape_ernie_fp16_ser_deser$|\
+^lite_resnet50_test$|\
+^test_trt_dynamic_shape_transformer_prune$|\
+^lite_mul_model_test$|\
+^paddle_infer_api_copy_tensor_tester$"
+# /*============================================================================*/
 # /*==================Fixed Disabled Windows CPU OPENBLAS unittests==============================*/
@@ -180,6 +185,7 @@ long_time_test="^test_gru_op$|\
 ^test_transformer$|\
 ^test_imperative_auto_mixed_precision$|\
 ^test_imperative_optimizer_v2$|\
+^test_trt_matmul_quant_dequant$|\
 ^test_strided_slice_op$"
 if [ ${WITH_GPU:-OFF} == "ON" ];then
@@ -331,6 +337,24 @@ function show_ut_retry_result() {
 set +e
 export FLAGS_call_stack_level=2
+if nvcc --version | grep 11.2; then
+    echo "Only test added_ut and inference_api_test temporarily when running in CI-Windows-inference of CUDA 11.2."
+    export CUDA_VISIBLE_DEVICES=0
+    tmpfile=$tmp_dir/$RANDOM
+    inference_api_test=^$(ls "paddle/fluid/inference/tests/api" | sed -n 's/\.exe$//pg' | awk BEGIN{RS=EOF}'{gsub(/\n/,"$|^");print}' | sed 's/|\^$//g')
+    (ctest -R "$inference_api_test" -E "$disable_win_inference_api_test" --output-on-failure -C Release -j 2 | tee $tmpfile ) &
+    wait;
+    collect_failed_tests
+    set -e
+    rm -f $tmp_dir/*
+    if [[ "$failed_test_lists" != "" ]]; then
+        unittests_retry
+        show_ut_retry_result
+    fi
+    exit 0;
+fi
 if [ "${WITH_GPU:-OFF}" == "ON" ];then
     run_unittest_gpu $cpu_parallel_job 10
     run_unittest_gpu $tetrad_parallel_job 4
......
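Note: the added CUDA 11.2 branch builds the ctest -R filter from the executables in paddle/fluid/inference/tests/api and excludes the fixed disable list via -E. A rough Python equivalent of that ls | sed | awk pipeline (illustration only; build_ctest_regex is a hypothetical helper, and the path is assumed to be the build-tree test directory) is:

# Illustration only: mimic the shell pipeline that turns the built
# inference_api_test executables into an anchored ctest regex such as
# "^lite_mul_model_test$|^paddle_infer_api_copy_tensor_tester$".
import os

def build_ctest_regex(test_dir):
    # keep only the *.exe test binaries and strip the suffix, as the sed step does
    names = [f[:-len(".exe")] for f in sorted(os.listdir(test_dir)) if f.endswith(".exe")]
    return "|".join("^{}$".format(n) for n in names)

if __name__ == "__main__":
    # the script then runs: ctest -R "<this pattern>" -E "$disable_win_inference_api_test" ...
    print(build_ctest_regex("paddle/fluid/inference/tests/api"))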