Unverified commit 2a24a6bb, authored by HongyuJia, committed by GitHub

[CustomOP Unittest] Polish unit test, unify check_output (#52737)

* [CustomOP Unittest] Polish unit test, unify check_output

* fix test_static_save_and_run_inference_predictor

Parent 8e9bfa7f
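This commit deletes the per-test-class copies of `check_output` / `check_output_allclose` and moves a single shared implementation into the test suite's `utils` module (shown in the last hunk of this diff), so every custom-op test compares the custom kernel against the native Paddle API through one code path. As a rough sketch of the call pattern after this change (names match the diff; it assumes the test directory's `utils` module is importable):

    import numpy as np
    from utils import check_output, check_output_allclose  # shared helpers added by this PR

    # Stand-in arrays for a custom-op result and the reference Paddle result.
    custom_out = np.ones([2, 3], dtype="float32")
    pd_out = np.ones([2, 3], dtype="float32")

    check_output(custom_out, pd_out, "out")  # exact element-wise equality
    check_output_allclose(custom_out, pd_out, "out", rtol=5e-5, atol=1e-2)  # tolerant comparison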
@@ -16,7 +16,7 @@ import os
 import unittest
 import numpy as np
-from utils import extra_cc_args, extra_nvcc_args, paddle_includes
+from utils import check_output, extra_cc_args, extra_nvcc_args, paddle_includes
 import paddle
 from paddle import static
@@ -100,23 +100,18 @@ class TestCustomConjJit(unittest.TestCase):
         self.dtypes = ['float32', 'float64']
         self.shape = [2, 20, 2, 3]

-    def check_output(self, out, pd_out, name):
-        np.testing.assert_array_equal(
-            out,
-            pd_out,
-            err_msg='custom op {}: {},\n paddle api {}: {}'.format(
-                name, out, name, pd_out
-            ),
-        )
-
-    def run_dynamic(self, dtype, np_input):
+    def test_dynamic(self):
+        for dtype in self.dtypes:
+            np_input = np.random.random(self.shape).astype(dtype)
             out, x_grad = conj_dynamic(custom_ops.custom_conj, dtype, np_input)
             pd_out, pd_x_grad = conj_dynamic(paddle.conj, dtype, np_input)

-        self.check_output(out, pd_out, "out")
-        self.check_output(x_grad, pd_x_grad, "x's grad")
+            check_output(out, pd_out, "out")
+            check_output(x_grad, pd_x_grad, "x's grad")

-    def run_static(self, dtype, np_input):
+    def test_static(self):
+        for dtype in self.dtypes:
+            np_input = np.random.random(self.shape).astype(dtype)
             out, x_grad = conj_static(
                 custom_ops.custom_conj, self.shape, dtype, np_input
             )
@@ -124,18 +119,8 @@ class TestCustomConjJit(unittest.TestCase):
                 paddle.conj, self.shape, dtype, np_input
             )

-        self.check_output(out, pd_out, "out")
-        self.check_output(x_grad, pd_x_grad, "x's grad")
-
-    def test_dynamic(self):
-        for dtype in self.dtypes:
-            np_input = np.random.random(self.shape).astype(dtype)
-            self.run_dynamic(dtype, np_input)
-
-    def test_static(self):
-        for dtype in self.dtypes:
-            np_input = np.random.random(self.shape).astype(dtype)
-            self.run_static(dtype, np_input)
+            check_output(out, pd_out, "out")
+            check_output(x_grad, pd_x_grad, "x's grad")

     # complex only used in dynamic mode now
     def test_complex_dynamic(self):
@@ -143,7 +128,16 @@ class TestCustomConjJit(unittest.TestCase):
             np_input = np.random.random(self.shape).astype(
                 dtype
             ) + 1j * np.random.random(self.shape).astype(dtype)
-            self.run_dynamic(to_complex(dtype), np_input)
+
+            out, x_grad = conj_dynamic(
+                custom_ops.custom_conj, to_complex(dtype), np_input
+            )
+            pd_out, pd_x_grad = conj_dynamic(
+                paddle.conj, to_complex(dtype), np_input
+            )
+
+            check_output(out, pd_out, "out")
+            check_output(x_grad, pd_x_grad, "x's grad")

 if __name__ == "__main__":
...
@@ -16,7 +16,13 @@ import os
 import unittest
 import numpy as np
-from utils import extra_cc_args, extra_nvcc_args, paddle_includes
+from utils import (
+    check_output,
+    check_output_allclose,
+    extra_cc_args,
+    extra_nvcc_args,
+    paddle_includes,
+)
 import paddle
 from paddle import static
@@ -342,26 +348,6 @@ class TestCustomInplaceJit(unittest.TestCase):
             np.random.random((3, 2)).astype("float32"),
         ]

-    def check_output(self, out, pd_out, name):
-        np.testing.assert_array_equal(
-            out,
-            pd_out,
-            err_msg='custom op {}: {},\n paddle api {}: {}'.format(
-                name, out, name, pd_out
-            ),
-        )
-
-    def check_output_allclose(self, out, pd_out, name):
-        np.testing.assert_allclose(
-            out,
-            pd_out,
-            rtol=5e-5,
-            atol=1e-2,
-            err_msg='custom op {}: {},\n paddle api {}: {}'.format(
-                name, out, name, pd_out
-            ),
-        )
-
     def test_static_add(self):
         for device in self.devices:
             for dtype in self.dtypes:
@@ -391,15 +377,15 @@ class TestCustomInplaceJit(unittest.TestCase):
                     self.np_x,
                     self.np_y,
                 )
-                self.check_output(custom_x, custom_out, "inplace_custom_x")
-                self.check_output(
-                    custom_x_grad, custom_out_grad, "inplace_custom_x_grad"
-                )
+                check_output(custom_x, custom_out, "inplace_custom_x")
+                check_output(
+                    custom_x_grad, custom_out_grad, "inplace_custom_x_grad"
+                )

-                self.check_output(custom_out, pd_out, "out")
-                self.check_output(custom_x_grad, pd_x_grad, "x_grad")
-                self.check_output(custom_y_grad, pd_y_grad, "y_grad")
-                self.check_output(custom_out_grad, pd_out_grad, "out_grad")
+                check_output(custom_out, pd_out, "out")
+                check_output(custom_x_grad, pd_x_grad, "x_grad")
+                check_output(custom_y_grad, pd_y_grad, "y_grad")
+                check_output(custom_out_grad, pd_out_grad, "out_grad")

     def test_dynamic_add(self):
         for device in self.devices:
@@ -431,14 +417,14 @@ class TestCustomInplaceJit(unittest.TestCase):
                     self.np_y,
                 )
-                self.check_output(custom_x, custom_out, "inplace_custom_x")
-                self.check_output(pd_x, pd_out, "inplace_pd_x")
+                check_output(custom_x, custom_out, "inplace_custom_x")
+                check_output(pd_x, pd_out, "inplace_pd_x")

-                self.check_output(custom_x, pd_x, "x")
-                self.check_output(custom_y, pd_y, "y")
-                self.check_output(custom_out, pd_out, "out")
-                self.check_output(custom_x_grad, pd_x_grad, "x_grad")
-                self.check_output(custom_y_grad, pd_y_grad, "y_grad")
+                check_output(custom_x, pd_x, "x")
+                check_output(custom_y, pd_y, "y")
+                check_output(custom_out, pd_out, "out")
+                check_output(custom_x_grad, pd_x_grad, "x_grad")
+                check_output(custom_y_grad, pd_y_grad, "y_grad")

     def test_static_add_vector(self):
         for device in self.devices:
@@ -468,10 +454,10 @@ class TestCustomInplaceJit(unittest.TestCase):
                     self.np_y,
                 )
-                self.check_output(custom_out, pd_out, "out")
-                self.check_output(custom_x_grad, pd_x_grad, "x_grad")
-                self.check_output(custom_y_grad, pd_y_grad, "y_grad")
-                self.check_output(custom_out_grad, pd_out_grad, "out_grad")
+                check_output(custom_out, pd_out, "out")
+                check_output(custom_x_grad, pd_x_grad, "x_grad")
+                check_output(custom_y_grad, pd_y_grad, "y_grad")
+                check_output(custom_out_grad, pd_out_grad, "out_grad")

     def test_dynamic_add_vector(self):
         for device in self.devices:
@@ -503,14 +489,14 @@ class TestCustomInplaceJit(unittest.TestCase):
                     self.np_y,
                 )
-                self.check_output(custom_x, custom_out, "inplace_custom_x")
-                self.check_output(pd_x, pd_out, "inplace_pd_x")
+                check_output(custom_x, custom_out, "inplace_custom_x")
+                check_output(pd_x, pd_out, "inplace_pd_x")

-                self.check_output(custom_x, pd_x, "x")
-                self.check_output(custom_y, pd_y, "y")
-                self.check_output(custom_out, pd_out, "out")
-                self.check_output(custom_x_grad, pd_x_grad, "x_grad")
-                self.check_output(custom_y_grad, pd_y_grad, "y_grad")
+                check_output(custom_x, pd_x, "x")
+                check_output(custom_y, pd_y, "y")
+                check_output(custom_out, pd_out, "out")
+                check_output(custom_x_grad, pd_x_grad, "x_grad")
+                check_output(custom_y_grad, pd_y_grad, "y_grad")

     def test_static_relu_net(self):
         for device in self.devices:
@@ -543,11 +529,11 @@ class TestCustomInplaceJit(unittest.TestCase):
                     self.np_y,
                     self.np_z,
                 )
-                self.check_output_allclose(custom_x, pd_x, "x")
-                self.check_output_allclose(custom_y, pd_y, "y")
-                self.check_output_allclose(custom_out, pd_out, "out")
-                self.check_output_allclose(custom_x_grad, pd_x_grad, "x_grad")
-                self.check_output_allclose(custom_y_grad, pd_y_grad, "y_grad")
+                check_output_allclose(custom_x, pd_x, "x")
+                check_output_allclose(custom_y, pd_y, "y")
+                check_output_allclose(custom_out, pd_out, "out")
+                check_output_allclose(custom_x_grad, pd_x_grad, "x_grad")
+                check_output_allclose(custom_y_grad, pd_y_grad, "y_grad")

     def test_dynamic_relu_net(self):
         for device in self.devices:
@@ -581,11 +567,11 @@ class TestCustomInplaceJit(unittest.TestCase):
                     self.np_z,
                 )
-                self.check_output(custom_x, pd_x, "x")
-                self.check_output(custom_y, pd_y, "y")
-                self.check_output(custom_out, pd_out, "out")
-                self.check_output(custom_x_grad, pd_x_grad, "x_grad")
-                self.check_output(custom_y_grad, pd_y_grad, "y_grad")
+                check_output(custom_x, pd_x, "x")
+                check_output(custom_y, pd_y, "y")
+                check_output(custom_out, pd_out, "out")
+                check_output(custom_x_grad, pd_x_grad, "x_grad")
+                check_output(custom_y_grad, pd_y_grad, "y_grad")

     def test_static_multi_inplace(self):
         for device in self.devices:
@@ -630,27 +616,23 @@ class TestCustomInplaceJit(unittest.TestCase):
                     self.np_a,
                     self.np_b,
                 )
-                self.check_output(custom_x, pd_out_xy, "inplace_custom_x")
-                self.check_output(
-                    custom_x_grad, custom_out_xy_grad, "inplace_custom_x_grad"
-                )
+                check_output(custom_x, pd_out_xy, "inplace_custom_x")
+                check_output(
+                    custom_x_grad, custom_out_xy_grad, "inplace_custom_x_grad"
+                )

-                self.check_output(custom_a, pd_out_ab, "inplace_custom_a")
-                self.check_output(
-                    custom_a_grad, custom_out_ab_grad, "inplace_custom_a_grad"
-                )
+                check_output(custom_a, pd_out_ab, "inplace_custom_a")
+                check_output(
+                    custom_a_grad, custom_out_ab_grad, "inplace_custom_a_grad"
+                )

-                self.check_output(custom_out_xy, pd_out_xy, "outxy")
-                self.check_output(custom_x_grad, pd_x_grad, "x_grad")
-                self.check_output(custom_y_grad, pd_y_grad, "y_grad")
-                self.check_output(
-                    custom_out_xy_grad, pd_out_xy_grad, "outxy_grad"
-                )
-                self.check_output(custom_out_ab, pd_out_ab, "outab")
-                self.check_output(custom_a_grad, pd_a_grad, "a_grad")
-                self.check_output(custom_b_grad, pd_b_grad, "b_grad")
-                self.check_output(
-                    custom_out_ab_grad, pd_out_ab_grad, "outab_grad"
-                )
+                check_output(custom_out_xy, pd_out_xy, "outxy")
+                check_output(custom_x_grad, pd_x_grad, "x_grad")
+                check_output(custom_y_grad, pd_y_grad, "y_grad")
+                check_output(custom_out_xy_grad, pd_out_xy_grad, "outxy_grad")
+                check_output(custom_out_ab, pd_out_ab, "outab")
+                check_output(custom_a_grad, pd_a_grad, "a_grad")
+                check_output(custom_b_grad, pd_b_grad, "b_grad")
+                check_output(custom_out_ab_grad, pd_out_ab_grad, "outab_grad")

     def test_dynamic_multi_inplace(self):
         for device in self.devices:
@@ -696,21 +678,21 @@ class TestCustomInplaceJit(unittest.TestCase):
                     self.np_b,
                 )
-                self.check_output(custom_x, custom_out_xy, "inplace_custom_x")
-                self.check_output(pd_x, pd_out_xy, "inplace_pd_x")
-                self.check_output(custom_a, custom_out_ab, "inplace_custom_a")
-                self.check_output(pd_a, pd_out_ab, "inplace_pd_a")
+                check_output(custom_x, custom_out_xy, "inplace_custom_x")
+                check_output(pd_x, pd_out_xy, "inplace_pd_x")
+                check_output(custom_a, custom_out_ab, "inplace_custom_a")
+                check_output(pd_a, pd_out_ab, "inplace_pd_a")

-                self.check_output(custom_x, pd_x, "x")
-                self.check_output(custom_y, pd_y, "y")
-                self.check_output(custom_out_xy, pd_out_xy, "outxy")
-                self.check_output(custom_x_grad, pd_x_grad, "x_grad")
-                self.check_output(custom_y_grad, pd_y_grad, "y_grad")
-                self.check_output(custom_a, pd_a, "a")
-                self.check_output(custom_b, pd_b, "b")
-                self.check_output(custom_out_ab, pd_out_ab, "outab")
-                self.check_output(custom_a_grad, pd_a_grad, "a_grad")
-                self.check_output(custom_b_grad, pd_b_grad, "b_grad")
+                check_output(custom_x, pd_x, "x")
+                check_output(custom_y, pd_y, "y")
+                check_output(custom_out_xy, pd_out_xy, "outxy")
+                check_output(custom_x_grad, pd_x_grad, "x_grad")
+                check_output(custom_y_grad, pd_y_grad, "y_grad")
+                check_output(custom_a, pd_a, "a")
+                check_output(custom_b, pd_b, "b")
+                check_output(custom_out_ab, pd_out_ab, "outab")
+                check_output(custom_a_grad, pd_a_grad, "a_grad")
+                check_output(custom_b_grad, pd_b_grad, "b_grad")

 if __name__ == "__main__":
...
@@ -16,7 +16,7 @@ import os
 import unittest
 import numpy as np
-from utils import extra_cc_args, extra_nvcc_args, paddle_includes
+from utils import check_output, extra_cc_args, extra_nvcc_args, paddle_includes
 import paddle
 import paddle.nn.functional as F
@@ -99,15 +99,6 @@ class TestCustomLinearJit(unittest.TestCase):
         self.np_weight = np.full([2, 4], fill_value=0.5, dtype="float32")
         self.np_bias = np.ones([4], dtype="float32")

-    def check_output(self, out, pd_out, name):
-        np.testing.assert_array_equal(
-            out,
-            pd_out,
-            err_msg='custom op {}: {},\n paddle api {}: {}'.format(
-                name, out, name, pd_out
-            ),
-        )
-
     def test_static(self):
         for device in self.devices:
             for dtype in self.dtypes:
@@ -132,12 +123,10 @@ class TestCustomLinearJit(unittest.TestCase):
                     self.np_weight,
                     self.np_bias,
                 )
-                self.check_output(custom_out, pd_out, "out")
-                self.check_output(custom_x_grad, pd_x_grad, "x_grad")
-                self.check_output(
-                    custom_weight_grad, pd_weight_grad, "weight_grad"
-                )
-                self.check_output(custom_bias_grad, pd_bias_grad, "bias_grad")
+                check_output(custom_out, pd_out, "out")
+                check_output(custom_x_grad, pd_x_grad, "x_grad")
+                check_output(custom_weight_grad, pd_weight_grad, "weight_grad")
+                check_output(custom_bias_grad, pd_bias_grad, "bias_grad")

     def test_dynamic(self):
         for device in self.devices:
@@ -168,12 +157,10 @@ class TestCustomLinearJit(unittest.TestCase):
                     self.np_weight,
                     self.np_bias,
                 )
-                self.check_output(custom_out, pd_out, "custom_out")
-                self.check_output(custom_x_grad, pd_x_grad, "x_grad")
-                self.check_output(
-                    custom_weight_grad, pd_weight_grad, "weight_grad"
-                )
-                self.check_output(custom_bias_grad, pd_bias_grad, "bias_grad")
+                check_output(custom_out, pd_out, "custom_out")
+                check_output(custom_x_grad, pd_x_grad, "x_grad")
+                check_output(custom_weight_grad, pd_weight_grad, "weight_grad")
+                check_output(custom_bias_grad, pd_bias_grad, "bias_grad")

 if __name__ == "__main__":
...
@@ -16,7 +16,7 @@ import os
 import unittest
 import numpy as np
-from utils import extra_cc_args, extra_nvcc_args, paddle_includes
+from utils import check_output, extra_cc_args, extra_nvcc_args, paddle_includes
 import paddle
 from paddle import static
@@ -465,44 +465,6 @@ class TestCustomOptionalJit(unittest.TestCase):
             np.random.random((3, 2)).astype("float32"),
         ]

-    def check_output(self, out, pd_out, name):
-        if out is None and pd_out is None:
-            return
-        assert out is not None, "out value of " + name + " is None"
-        assert pd_out is not None, "pd_out value of " + name + " is None"
-        if isinstance(out, list) and isinstance(pd_out, list):
-            for idx in range(len(out)):
-                np.testing.assert_array_equal(
-                    out[idx],
-                    pd_out[idx],
-                    err_msg='custom op {}: {},\n paddle api {}: {}'.format(
-                        name, out[idx], name, pd_out[idx]
-                    ),
-                )
-        else:
-            np.testing.assert_array_equal(
-                out,
-                pd_out,
-                err_msg='custom op {}: {},\n paddle api {}: {}'.format(
-                    name, out, name, pd_out
-                ),
-            )
-
-    def check_output_allclose(self, out, pd_out, name):
-        if out is None and pd_out is None:
-            return
-        assert out is not None, "out value of " + name + " is None"
-        assert pd_out is not None, "pd_out value of " + name + " is None"
-        np.testing.assert_allclose(
-            out,
-            pd_out,
-            rtol=5e-5,
-            atol=1e-2,
-            err_msg='custom op {}: {},\n paddle api {}: {}'.format(
-                name, out, name, pd_out
-            ),
-        )
-
     def test_optional_static_add(self):
         for device in self.devices:
             for dtype in self.dtypes:
@@ -526,9 +488,9 @@ class TestCustomOptionalJit(unittest.TestCase):
                     np_y,
                 )
-                self.check_output(custom_x, pd_x, "x")
-                self.check_output(custom_out, pd_out, "out")
-                self.check_output(custom_x_grad, pd_x_grad, "x_grad")
+                check_output(custom_x, pd_x, "x")
+                check_output(custom_out, pd_out, "out")
+                check_output(custom_x_grad, pd_x_grad, "x_grad")

     def test_optional_dynamic_add(self):
         for device in self.devices:
@@ -553,9 +515,9 @@ class TestCustomOptionalJit(unittest.TestCase):
                     np_y,
                 )
-                self.check_output(custom_x, pd_x, "x")
-                self.check_output(custom_out, pd_out, "out")
-                self.check_output(custom_x_grad, pd_x_grad, "x_grad")
+                check_output(custom_x, pd_x, "x")
+                check_output(custom_out, pd_out, "out")
+                check_output(custom_x_grad, pd_x_grad, "x_grad")

     def test_optional_inplace_static_add(self):
         for device in self.devices:
@@ -576,13 +538,11 @@ class TestCustomOptionalJit(unittest.TestCase):
                     np_y,
                 )
-                self.check_output(custom_tuple[0], pd_tuple[0], "x")
-                self.check_output(custom_tuple[1], pd_tuple[1], "out")
-                self.check_output(custom_tuple[2], pd_tuple[2], "x_grad")
+                check_output(custom_tuple[0], pd_tuple[0], "x")
+                check_output(custom_tuple[1], pd_tuple[1], "out")
+                check_output(custom_tuple[2], pd_tuple[2], "x_grad")
                 if len(custom_tuple) > 3:
-                    self.check_output(
-                        custom_tuple[3], pd_tuple[3], "y_grad"
-                    )
+                    check_output(custom_tuple[3], pd_tuple[3], "y_grad")

     def test_optional_inplace_dynamic_add(self):
         for device in self.devices:
@@ -619,16 +579,16 @@ class TestCustomOptionalJit(unittest.TestCase):
                     np_y,
                 )
-                self.check_output(pd_y, pd_outy, "inplace_pd_y")
-                self.check_output(custom_y, custom_outy, "inplace_custom_y")
+                check_output(pd_y, pd_outy, "inplace_pd_y")
+                check_output(custom_y, custom_outy, "inplace_custom_y")

-                self.check_output(custom_x, pd_x, "x")
-                self.check_output(custom_outx, pd_outx, "outx")
-                self.check_output(custom_y, pd_y, "y")
-                self.check_output(custom_outy, pd_outy, "outy")
-                self.check_output(custom_out, pd_out, "out")
-                self.check_output(custom_x_grad, pd_x_grad, "x_grad")
-                self.check_output(custom_y_grad, pd_y_grad, "y_grad")
+                check_output(custom_x, pd_x, "x")
+                check_output(custom_outx, pd_outx, "outx")
+                check_output(custom_y, pd_y, "y")
+                check_output(custom_outy, pd_outy, "outy")
+                check_output(custom_out, pd_out, "out")
+                check_output(custom_x_grad, pd_x_grad, "x_grad")
+                check_output(custom_y_grad, pd_y_grad, "y_grad")

     def test_optional_vector_static_add(self):
         for device in self.devices:
@@ -653,9 +613,9 @@ class TestCustomOptionalJit(unittest.TestCase):
                     np_y,
                 )
-                self.check_output(custom_x, pd_x, "x")
-                self.check_output(custom_out, pd_out, "out")
-                self.check_output(custom_x_grad, pd_x_grad, "x_grad")
+                check_output(custom_x, pd_x, "x")
+                check_output(custom_out, pd_out, "out")
+                check_output(custom_x_grad, pd_x_grad, "x_grad")

     def test_optional_vector_dynamic_add(self):
         for device in self.devices:
@@ -680,9 +640,9 @@ class TestCustomOptionalJit(unittest.TestCase):
                     np_y,
                 )
-                self.check_output(custom_x, pd_x, "x")
-                self.check_output(custom_out, pd_out, "out")
-                self.check_output(custom_x_grad, pd_x_grad, "x_grad")
+                check_output(custom_x, pd_x, "x")
+                check_output(custom_out, pd_out, "out")
+                check_output(custom_x_grad, pd_x_grad, "x_grad")

     def test_optional_inplace_vector_static_add(self):
         for device in self.devices:
@@ -703,16 +663,12 @@ class TestCustomOptionalJit(unittest.TestCase):
                     np_y,
                 )
-                self.check_output(custom_tuple[0], pd_tuple[0], "x")
-                self.check_output(custom_tuple[1], pd_tuple[1], "out")
-                self.check_output(custom_tuple[2], pd_tuple[2], "x_grad")
+                check_output(custom_tuple[0], pd_tuple[0], "x")
+                check_output(custom_tuple[1], pd_tuple[1], "out")
+                check_output(custom_tuple[2], pd_tuple[2], "x_grad")
                 if len(custom_tuple) > 3:
-                    self.check_output(
-                        custom_tuple[3], pd_tuple[3], "y1_grad"
-                    )
-                    self.check_output(
-                        custom_tuple[4], pd_tuple[4], "y2_grad"
-                    )
+                    check_output(custom_tuple[3], pd_tuple[3], "y1_grad")
+                    check_output(custom_tuple[4], pd_tuple[4], "y2_grad")

     def test_optional_inplace_vector_dynamic_add(self):
         for device in self.devices:
@@ -749,16 +705,16 @@ class TestCustomOptionalJit(unittest.TestCase):
                     np_y,
                 )
-                self.check_output(pd_y, pd_outy, "inplace_pd_y")
-                self.check_output(custom_y, custom_outy, "inplace_custom_y")
+                check_output(pd_y, pd_outy, "inplace_pd_y")
+                check_output(custom_y, custom_outy, "inplace_custom_y")

-                self.check_output(custom_x, pd_x, "x")
-                self.check_output(custom_outx, pd_outx, "outx")
-                self.check_output(custom_y, pd_y, "y")
-                self.check_output(custom_outy, pd_outy, "outy")
-                self.check_output(custom_out, pd_out, "out")
-                self.check_output(custom_x_grad, pd_x_grad, "x_grad")
-                self.check_output(custom_y_grad, pd_y_grad, "y_grad")
+                check_output(custom_x, pd_x, "x")
+                check_output(custom_outx, pd_outx, "outx")
+                check_output(custom_y, pd_y, "y")
+                check_output(custom_outy, pd_outy, "outy")
+                check_output(custom_out, pd_out, "out")
+                check_output(custom_x_grad, pd_x_grad, "x_grad")
+                check_output(custom_y_grad, pd_y_grad, "y_grad")

 if __name__ == "__main__":
...
@@ -18,6 +18,7 @@ import sys
 import unittest
 import numpy as np
+from utils import check_output, check_output_allclose
 import paddle
 from paddle import static
@@ -205,13 +206,7 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
                 pd_out = custom_relu_static(
                     custom_op, device, dtype, x, False
                 )
-                np.testing.assert_array_equal(
-                    out,
-                    pd_out,
-                    err_msg='custom op out: {},\n paddle api out: {}'.format(
-                        out, pd_out
-                    ),
-                )
+                check_output(out, pd_out, "out")

     def test_dynamic(self):
         for device in self.devices:
@@ -226,20 +221,8 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
                 pd_out, pd_x_grad = custom_relu_dynamic(
                     custom_op, device, dtype, x, False
                 )
-                np.testing.assert_array_equal(
-                    out,
-                    pd_out,
-                    err_msg='custom op out: {},\n paddle api out: {}'.format(
-                        out, pd_out
-                    ),
-                )
-                np.testing.assert_array_equal(
-                    x_grad,
-                    pd_x_grad,
-                    err_msg='custom op x grad: {},\n paddle api x grad: {}'.format(
-                        x_grad, pd_x_grad
-                    ),
-                )
+                check_output(out, pd_out, "out")
+                check_output(x_grad, pd_x_grad, "x_grad")

     def test_static_save_and_load_inference_model(self):
         paddle.enable_static()
@@ -263,13 +246,7 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
                 feed={feed_target_names[0]: np_data},
                 fetch_list=fetch_targets,
             )
-            np.testing.assert_array_equal(
-                predict,
-                predict_infer,
-                err_msg='custom op predict: {},\n custom op infer predict: {}'.format(
-                    predict, predict_infer
-                ),
-            )
+            check_output(predict, predict_infer, "predict")
         paddle.disable_static()

     def test_static_save_and_run_inference_predictor(self):
@@ -298,12 +275,9 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
                 predictor.get_output_names()[0]
             )
             predict_infer = output_tensor.copy_to_cpu()
-            self.assertTrue(
-                np.isclose(predict, predict_infer, rtol=5e-5).any(),
-                "custom op predict: {},\n custom op infer predict: {}".format(
-                    predict, predict_infer
-                ),
-            )
+            predict = np.array(predict).flatten()
+            predict_infer = np.array(predict_infer).flatten()
+            check_output_allclose(predict, predict_infer, "predict")
         paddle.disable_static()

     def test_double_grad_dynamic(self):
@@ -318,20 +292,8 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
                 pd_out, pd_dx_grad = custom_relu_double_grad_dynamic(
                     self.custom_ops[0], device, dtype, x, False
                 )
-                np.testing.assert_array_equal(
-                    out,
-                    pd_out,
-                    err_msg='custom op out: {},\n paddle api out: {}'.format(
-                        out, pd_out
-                    ),
-                )
-                np.testing.assert_array_equal(
-                    dx_grad,
-                    pd_dx_grad,
-                    err_msg='custom op dx grad: {},\n paddle api dx grad: {}'.format(
-                        dx_grad, pd_dx_grad
-                    ),
-                )
+                check_output(out, pd_out, "out")
+                check_output(dx_grad, pd_dx_grad, "dx_grad")

     def test_with_dataloader(self):
         for device in self.devices:
@@ -355,13 +317,7 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
                     image = paddle.to_tensor(image)
                     out = self.custom_ops[0](image)
                     pd_out = paddle.nn.functional.relu(image)
-                    np.testing.assert_array_equal(
-                        out,
-                        pd_out,
-                        err_msg='custom op out: {},\n paddle api out: {}'.format(
-                            out, pd_out
-                        ),
-                    )
+                    check_output(out, pd_out, "out")
                     if batch_id == 5:
                         break
...
@@ -18,6 +18,7 @@ import sys
 import unittest
 import numpy as np
+from utils import check_output, check_output_allclose
 import paddle
 from paddle import static
@@ -183,13 +184,7 @@ class TestNewCustomOpXpuSetUpInstall(unittest.TestCase):
             pd_out = custom_relu_static(
                 self.custom_op, self.device, dtype, x, False
             )
-            np.testing.assert_array_equal(
-                out,
-                pd_out,
-                err_msg='custom op out: {},\n paddle api out: {}'.format(
-                    out, pd_out
-                ),
-            )
+            check_output(out, pd_out, "out")

     def test_dynamic(self):
         for dtype in self.dtypes:
@@ -200,20 +195,8 @@ class TestNewCustomOpXpuSetUpInstall(unittest.TestCase):
             pd_out, pd_x_grad = custom_relu_dynamic(
                 self.custom_op, self.device, dtype, x, False
             )
-            np.testing.assert_array_equal(
-                out,
-                pd_out,
-                err_msg='custom op out: {},\n paddle api out: {}'.format(
-                    out, pd_out
-                ),
-            )
-            np.testing.assert_array_equal(
-                x_grad,
-                pd_x_grad,
-                err_msg='custom op x grad: {},\n paddle api x grad: {}'.format(
-                    x_grad, pd_x_grad
-                ),
-            )
+            check_output(out, pd_out, "out")
+            check_output(x_grad, pd_x_grad, "x_grad")

     def test_static_save_and_load_inference_model(self):
         paddle.enable_static()
@@ -237,14 +220,7 @@ class TestNewCustomOpXpuSetUpInstall(unittest.TestCase):
                 feed={feed_target_names[0]: np_data},
                 fetch_list=fetch_targets,
             )
-            np.testing.assert_allclose(
-                predict,
-                predict_infer,
-                atol=1e-2,
-                err_msg='custom op predict: {},\n custom op infer predict: {}'.format(
-                    predict, predict_infer
-                ),
-            )
+            check_output(predict, predict_infer, "predict")
         paddle.disable_static()

     def test_static_save_and_run_inference_predictor(self):
@@ -272,15 +248,7 @@ class TestNewCustomOpXpuSetUpInstall(unittest.TestCase):
             predict_infer = output_tensor.copy_to_cpu()
             predict = np.array(predict).flatten()
             predict_infer = np.array(predict_infer).flatten()
-            np.testing.assert_allclose(
-                predict,
-                predict_infer,
-                rtol=5e-5,
-                atol=1e-2,
-                err_msg="custom op predict: {},\n custom op infer predict: {}".format(
-                    predict, predict_infer
-                ),
-            )
+            check_output_allclose(predict, predict_infer, "predict")
         paddle.disable_static()

     def test_func_double_grad_dynamic(self):
@@ -292,20 +260,8 @@ class TestNewCustomOpXpuSetUpInstall(unittest.TestCase):
             pd_out, pd_dx_grad = custom_relu_double_grad_dynamic(
                 self.custom_op, self.device, dtype, x, False
             )
-            np.testing.assert_array_equal(
-                out,
-                pd_out,
-                err_msg='custom op out: {},\n paddle api out: {}'.format(
-                    out, pd_out
-                ),
-            )
-            np.testing.assert_array_equal(
-                dx_grad,
-                pd_dx_grad,
-                err_msg='custom op dx grad: {},\n paddle api dx grad: {}'.format(
-                    dx_grad, pd_dx_grad
-                ),
-            )
+            check_output(out, pd_out, "out")
+            check_output(dx_grad, pd_dx_grad, "dx_grad")

     def test_with_dataloader(self):
         paddle.disable_static()
@@ -328,14 +284,7 @@ class TestNewCustomOpXpuSetUpInstall(unittest.TestCase):
             for batch_id, (image, _) in enumerate(train_loader()):
                 out = self.custom_op(image)
                 pd_out = paddle.nn.functional.relu(image)
-                np.testing.assert_allclose(
-                    out,
-                    pd_out,
-                    atol=1e-2,
-                    err_msg='custom op out: {},\n paddle api out: {}'.format(
-                        out, pd_out
-                    ),
-                )
+                check_output_allclose(out, pd_out, "out", atol=1e-2)
                 if batch_id == 5:
                     break
...
@@ -16,7 +16,7 @@ import os
 import unittest
 import numpy as np
-from utils import extra_cc_args, extra_nvcc_args, paddle_includes
+from utils import check_output, extra_cc_args, extra_nvcc_args, paddle_includes
 import paddle
 from paddle.utils.cpp_extension import get_build_directory, load
@@ -47,13 +47,7 @@ class TestCustomSimpleSliceJit(unittest.TestCase):
         x = paddle.to_tensor(np_x)
         custom_op_out = custom_ops.custom_simple_slice(x, 2, 3)
         np_out = np_x[2:3]
-        np.testing.assert_array_equal(
-            custom_op_out,
-            np_out,
-            err_msg='custom op: {},\n numpy: {}'.format(
-                np_out, custom_op_out.numpy()
-            ),
-        )
+        check_output(custom_op_out, np_out, "out")

 if __name__ == "__main__":
...
@@ -16,7 +16,12 @@ import os
 import unittest
 import numpy as np
-from utils import extra_cc_args, extra_nvcc_args, paddle_includes
+from utils import (
+    check_output_allclose,
+    extra_cc_args,
+    extra_nvcc_args,
+    paddle_includes,
+)
 import paddle
 from paddle.utils.cpp_extension import get_build_directory, load
@@ -77,30 +82,9 @@ class TestCustomTanhDoubleGradJit(unittest.TestCase):
             pd_out, pd_dx_grad, pd_dout = custom_tanh_double_grad_dynamic(
                 paddle.tanh, device, dtype, x
             )
-            np.testing.assert_allclose(
-                out,
-                pd_out,
-                rtol=1e-05,
-                err_msg='custom op out: {},\n paddle api out: {}'.format(
-                    out, pd_out
-                ),
-            )
-            np.testing.assert_allclose(
-                dx_grad,
-                pd_dx_grad,
-                rtol=1e-05,
-                err_msg='custom op dx grad: {},\n paddle api dx grad: {}'.format(
-                    dx_grad, pd_dx_grad
-                ),
-            )
-            np.testing.assert_allclose(
-                dout,
-                pd_dout,
-                rtol=1e-05,
-                err_msg='custom op out grad: {},\n paddle api out grad: {}'.format(
-                    dout, pd_dout
-                ),
-            )
+            check_output_allclose(out, pd_out, "out", rtol=1e-05)
+            check_output_allclose(dx_grad, pd_dx_grad, "out", rtol=1e-05)
+            check_output_allclose(dout, pd_dout, "dout", rtol=1e-05)

 if __name__ == "__main__":
...
@@ -16,7 +16,12 @@ import os
 import unittest
 import numpy as np
-from utils import extra_cc_args, paddle_includes
+from utils import (
+    check_output,
+    check_output_allclose,
+    extra_cc_args,
+    paddle_includes,
+)
 import paddle
 from paddle import static
@@ -260,7 +265,7 @@ class TestJITLoad(unittest.TestCase):
                 pd_out = test_custom_add_static(
                     self.add, device, dtype, x, False
                 )
-                np.testing.assert_allclose(out, pd_out, rtol=1e-5, atol=1e-8)
+                check_output_allclose(out, pd_out, "out", rtol=1e-5, atol=1e-8)

                 out = test_custom_subtract_static(
                     self.subtract, device, dtype, x
@@ -268,7 +273,7 @@
                 pd_out = test_custom_subtract_static(
                     self.subtract, device, dtype, x, False
                 )
-                np.testing.assert_allclose(out, pd_out, rtol=1e-5, atol=1e-8)
+                check_output_allclose(out, pd_out, "out", rtol=1e-5, atol=1e-8)

                 out = test_custom_multiply_static(
                     self.multiply, device, dtype, x
@@ -276,13 +281,13 @@
                 pd_out = test_custom_multiply_static(
                     self.multiply, device, dtype, x, False
                 )
-                np.testing.assert_allclose(out, pd_out, rtol=1e-5, atol=1e-8)
+                check_output_allclose(out, pd_out, "out", rtol=1e-5, atol=1e-8)

                 out = test_custom_divide_static(self.divide, device, dtype, x)
                 pd_out = test_custom_divide_static(
                     self.divide, device, dtype, x, False
                 )
-                np.testing.assert_allclose(out, pd_out, rtol=1e-5, atol=1e-8)
+                check_output_allclose(out, pd_out, "out", rtol=1e-5, atol=1e-8)

     def _test_dynamic(self):
         for device in self.devices:
@@ -297,9 +302,9 @@ class TestJITLoad(unittest.TestCase):
                 pd_out, pd_x_grad = test_custom_add_dynamic(
                     self.add, device, dtype, x, False
                 )
-                np.testing.assert_allclose(out, pd_out, rtol=1e-5, atol=1e-8)
-                np.testing.assert_allclose(
-                    x_grad, pd_x_grad, rtol=1e-5, atol=1e-8
-                )
+                check_output_allclose(out, pd_out, "out", rtol=1e-5, atol=1e-8)
+                check_output_allclose(
+                    x_grad, pd_x_grad, "x_grad", rtol=1e-5, atol=1e-8
+                )

                 out, x_grad = test_custom_subtract_dynamic(
@@ -308,9 +313,9 @@ class TestJITLoad(unittest.TestCase):
                 pd_out, pd_x_grad = test_custom_subtract_dynamic(
                     self.subtract, device, dtype, x, False
                 )
-                np.testing.assert_allclose(out, pd_out, rtol=1e-5, atol=1e-8)
-                np.testing.assert_allclose(
-                    x_grad, pd_x_grad, rtol=1e-5, atol=1e-8
-                )
+                check_output_allclose(out, pd_out, "out", rtol=1e-5, atol=1e-8)
+                check_output_allclose(
+                    x_grad, pd_x_grad, "x_grad", rtol=1e-5, atol=1e-8
+                )

                 out, x_grad = test_custom_multiply_dynamic(
@@ -319,9 +324,9 @@ class TestJITLoad(unittest.TestCase):
                 pd_out, pd_x_grad = test_custom_multiply_dynamic(
                     self.multiply, device, dtype, x, False
                 )
-                np.testing.assert_allclose(out, pd_out, rtol=1e-5, atol=1e-8)
-                np.testing.assert_allclose(
-                    x_grad, pd_x_grad, rtol=1e-5, atol=1e-8
-                )
+                check_output_allclose(out, pd_out, "out", rtol=1e-5, atol=1e-8)
+                check_output_allclose(
+                    x_grad, pd_x_grad, "x_grad", rtol=1e-5, atol=1e-8
+                )

                 out, x_grad = test_custom_divide_dynamic(
@@ -330,7 +335,7 @@ class TestJITLoad(unittest.TestCase):
                 pd_out, pd_x_grad = test_custom_divide_dynamic(
                     self.divide, device, dtype, x, False
                 )
-                np.testing.assert_allclose(out, pd_out, rtol=1e-5, atol=1e-8)
+                check_output_allclose(out, pd_out, "out", rtol=1e-5, atol=1e-8)

     def _test_logical_operants(self):
         for device in self.devices:
@@ -342,19 +347,19 @@ class TestJITLoad(unittest.TestCase):
                 out = self.custom_module.custom_logical_and(x, y)
                 pd_out = paddle.bitwise_and(x, y)
-                np.testing.assert_equal(out.numpy(), pd_out.numpy())
+                check_output(out.numpy(), pd_out.numpy(), "out")

                 out = self.custom_module.custom_logical_or(x, y)
                 pd_out = paddle.bitwise_or(x, y)
-                np.testing.assert_equal(out.numpy(), pd_out.numpy())
+                check_output(out.numpy(), pd_out.numpy(), "out")

                 out = self.custom_module.custom_logical_xor(x, y)
                 pd_out = paddle.bitwise_xor(x, y)
-                np.testing.assert_equal(out.numpy(), pd_out.numpy())
+                check_output(out.numpy(), pd_out.numpy(), "out")

                 out = self.custom_module.custom_logical_not(x)
                 pd_out = paddle.bitwise_not(x)
-                np.testing.assert_equal(out.numpy(), pd_out.numpy())
+                check_output(out.numpy(), pd_out.numpy(), "out")

     def _test_compare_operants(self):
         for device in self.devices:
@@ -366,27 +371,27 @@ class TestJITLoad(unittest.TestCase):
                 out = self.custom_module.custom_less_than(x, y)
                 pd_out = paddle.less_than(x, y)
-                np.testing.assert_equal(out.numpy(), pd_out.numpy())
+                check_output(out.numpy(), pd_out.numpy(), "out")

                 out = self.custom_module.custom_less_equal(x, y)
                 pd_out = paddle.less_equal(x, y)
-                np.testing.assert_equal(out.numpy(), pd_out.numpy())
+                check_output(out.numpy(), pd_out.numpy(), "out")

                 out = self.custom_module.custom_equal(x, y)
                 pd_out = paddle.equal(x, y)
-                np.testing.assert_equal(out.numpy(), pd_out.numpy())
+                check_output(out.numpy(), pd_out.numpy(), "out")

                 out = self.custom_module.custom_not_equal(x, y)
                 pd_out = paddle.not_equal(x, y)
-                np.testing.assert_equal(out.numpy(), pd_out.numpy())
+                check_output(out.numpy(), pd_out.numpy(), "out")

                 out = self.custom_module.custom_greater_than(x, y)
                 pd_out = paddle.greater_than(x, y)
-                np.testing.assert_equal(out.numpy(), pd_out.numpy())
+                check_output(out.numpy(), pd_out.numpy(), "out")

                 out = self.custom_module.custom_greater_equal(x, y)
                 pd_out = paddle.greater_equal(x, y)
-                np.testing.assert_equal(out.numpy(), pd_out.numpy())
+                check_output(out.numpy(), pd_out.numpy(), "out")

 if __name__ == '__main__':
...
@@ -16,7 +16,7 @@ import os
 import unittest
 import numpy as np
-from utils import extra_cc_args, paddle_includes
+from utils import check_output, extra_cc_args, paddle_includes
 import paddle
 from paddle import static
@@ -105,15 +105,6 @@ class TestMultiOutputDtypes(unittest.TestCase):
         self.np_y = np.random.uniform(-1, 1, [4, 8]).astype("float32")
         self.np_z = np.random.uniform(-1, 1, [4, 8]).astype("float32")

-    def check_output(self, out, pd_out, name):
-        np.testing.assert_array_equal(
-            out,
-            pd_out,
-            err_msg='custom op {}: {},\n paddle api {}: {}'.format(
-                name, out, name, pd_out
-            ),
-        )
-
     def run_static(self, device, dtype):
         paddle.set_device(device)
         x_data = np.random.uniform(-1, 1, [4, 8]).astype(dtype)
@@ -140,14 +131,12 @@ class TestMultiOutputDtypes(unittest.TestCase):
         one_int32 = one_int32.numpy()
         # Fake_float64
         self.assertTrue('float64' in str(zero_float64.dtype))
-        np.testing.assert_array_equal(
-            zero_float64, np.zeros([4, 8]).astype('float64')
-        )
+        check_output(
+            zero_float64, np.zeros([4, 8]).astype('float64'), "zero_float64"
+        )
         # ZFake_int32
         self.assertTrue('int32' in str(one_int32.dtype))
-        np.testing.assert_array_equal(
-            one_int32, np.ones([4, 8]).astype('int32')
-        )
+        check_output(one_int32, np.ones([4, 8]).astype('int32'), "one_int32")

     def test_multi_out_static(self):
         paddle.enable_static()
@@ -193,10 +182,10 @@ class TestMultiOutputDtypes(unittest.TestCase):
                     self.np_y,
                     self.np_z,
                 )
-                self.check_output(custom_out, pd_out, "out")
+                check_output(custom_out, pd_out, "out")
                 # NOTE: In static mode, the output gradient of custom operator has been optimized to shape=[1]. However, native paddle op's output shape = [4, 8], hence we need to fetch pd_w_grad[0][0] (By the way, something wrong with native paddle's gradient, the outputs with other indexes instead of pd_w_grad[0][0] is undefined in this unittest.)
-                self.check_output(custom_w_grad, pd_w_grad[0][0], "w_grad")
-                self.check_output(custom_y_grad, pd_y_grad[0][0], "y_grad")
+                check_output(custom_w_grad, pd_w_grad[0][0], "w_grad")
+                check_output(custom_y_grad, pd_y_grad[0][0], "y_grad")

     def test_discrete_out_dynamic(self):
         for device in self.devices:
@@ -223,9 +212,9 @@ class TestMultiOutputDtypes(unittest.TestCase):
                     self.np_y,
                     self.np_z,
                 )
-                self.check_output(custom_out, pd_out, "out")
-                self.check_output(custom_w_grad, pd_w_grad, "w_grad")
-                self.check_output(custom_y_grad, pd_y_grad, "y_grad")
+                check_output(custom_out, pd_out, "out")
+                check_output(custom_w_grad, pd_w_grad, "w_grad")
+                check_output(custom_y_grad, pd_y_grad, "y_grad")

 if __name__ == '__main__':
...
@@ -16,6 +16,8 @@ import os
 import sys
 from site import getsitepackages

+import numpy as np
+
 from paddle.utils.cpp_extension.extension_utils import IS_WINDOWS

 IS_MAC = sys.platform.startswith('darwin')
@@ -39,3 +41,43 @@ for site_packages_path in getsitepackages():
 extra_cc_args = ['-w', '-g'] if not IS_WINDOWS else ['/w']
 extra_nvcc_args = ['-O3']
 extra_compile_args = {'cc': extra_cc_args, 'nvcc': extra_nvcc_args}
+
+
+def check_output(out, pd_out, name):
+    if out is None and pd_out is None:
+        return
+    assert out is not None, "out value of " + name + " is None"
+    assert pd_out is not None, "pd_out value of " + name + " is None"
+    if isinstance(out, list) and isinstance(pd_out, list):
+        for idx in range(len(out)):
+            np.testing.assert_array_equal(
+                out[idx],
+                pd_out[idx],
+                err_msg='custom op {}: {},\n paddle api {}: {}'.format(
+                    name, out[idx], name, pd_out[idx]
+                ),
+            )
+    else:
+        np.testing.assert_array_equal(
+            out,
+            pd_out,
+            err_msg='custom op {}: {},\n paddle api {}: {}'.format(
+                name, out, name, pd_out
+            ),
+        )
+
+
+def check_output_allclose(out, pd_out, name, rtol=5e-5, atol=1e-2):
+    if out is None and pd_out is None:
+        return
+    assert out is not None, "out value of " + name + " is None"
+    assert pd_out is not None, "pd_out value of " + name + " is None"
+    np.testing.assert_allclose(
+        out,
+        pd_out,
+        rtol,
+        atol,
+        err_msg='custom op {}: {},\n paddle api {}: {}'.format(
+            name, out, name, pd_out
+        ),
+    )
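For reference, a brief sketch of how the shared helpers behave, based only on the definitions above: if both arguments are None there is nothing to compare and the helper returns silently; a single None fails an assertion naming the offending output; lists are compared element-wise, pairwise by index; and the allclose variant defaults to rtol=5e-5, atol=1e-2 unless the caller overrides them. The values below are illustrative only:

    import numpy as np
    from utils import check_output, check_output_allclose  # as defined in the hunk above

    check_output(None, None, "empty")                       # both None: returns silently
    check_output([np.zeros(3)], [np.zeros(3)], "list_out")  # lists compared pairwise by index
    check_output_allclose(
        np.float32(1.00002), np.float32(1.0), "scalar", rtol=1e-3, atol=1e-3
    )                                                       # passes within the given tolerances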