Unverified commit 97227e6d, authored by cnn, committed by GitHub

2.0rc api add all any (#28199)

* reduce trt warning message (#28011)

add paddle.enable_static() on sample code

alias reduce_all-->all, reduce_any-->any

add import reduce_all and reduce_any in python/paddle/tensor/math.py

import all and any in python/paddle/tensor/__init__.py

remove all and any OP in python/paddle/tensor/logic.py, add all and any OP in python/paddle/tensor/math.py

fix import error

remove TestAllAPI temporary

* fix doc of reduce_all and reduce_any, test=document_fix

* fix typo

* fix unittest for all and any API
Co-authored-by: Pei Yang <peiyang@baidu.com>
Parent 4ccc1716
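In short: after this patch the boolean reductions are exposed at the top level as paddle.all and paddle.any. A minimal usage sketch of the new API (assuming a 2.0rc build with this commit applied; paddle.to_tensor is assumed to be available in that build):

import numpy as np
import paddle

paddle.disable_static()  # the new APIs work in dygraph mode as well as static mode
x = paddle.to_tensor(np.array([[1, 0], [1, 1]]).astype('bool'))
print(paddle.all(x))           # prints a bool Tensor holding False: not every element is True
print(paddle.any(x, axis=0))   # prints [True, True]: column-wise logical or
paddle.enable_static()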
......@@ -103,8 +103,6 @@ from .tensor.logic import logical_not #DEFINE_ALIAS
from .tensor.logic import logical_or #DEFINE_ALIAS
from .tensor.logic import logical_xor #DEFINE_ALIAS
from .tensor.logic import not_equal #DEFINE_ALIAS
# from .tensor.logic import reduce_all #DEFINE_ALIAS
# from .tensor.logic import reduce_any #DEFINE_ALIAS
from .tensor.logic import allclose #DEFINE_ALIAS
from .tensor.logic import equal_all #DEFINE_ALIAS
# from .tensor.logic import isnan #DEFINE_ALIAS
......@@ -161,6 +159,8 @@ from .tensor.math import reciprocal #DEFINE_ALIAS
# from .tensor.math import reduce_min #DEFINE_ALIAS
# from .tensor.math import reduce_prod #DEFINE_ALIAS
# from .tensor.math import reduce_sum #DEFINE_ALIAS
from .tensor.math import all #DEFINE_ALIAS
from .tensor.math import any #DEFINE_ALIAS
from .tensor.math import round #DEFINE_ALIAS
from .tensor.math import rsqrt #DEFINE_ALIAS
from .tensor.math import scale #DEFINE_ALIAS
......
......@@ -315,6 +315,8 @@ def fc(input,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
# when input is single tensor
data = fluid.data(name="data", shape=[-1, 32], dtype="float32")
fc = fluid.layers.fc(input=data, size=1000, act="tanh")
......@@ -468,6 +470,9 @@ def embedding(input,
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.enable_static()
data = fluid.data(name='x', shape=[None, 1], dtype='int64')
# example 1
......@@ -731,6 +736,8 @@ def linear_chain_crf(input, label, param_attr=None, length=None):
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.enable_static()
#define net structure, using LodTensor
train_program = fluid.Program()
......@@ -855,6 +862,8 @@ def crf_decoding(input, param_attr, label=None, length=None):
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
# LoDTensor-based example
num_labels = 10
......@@ -1458,6 +1467,9 @@ def conv2d(input,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
"""
......@@ -1728,6 +1740,8 @@ def conv3d(input,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
data = fluid.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
conv3d = fluid.layers.conv3d(input=data, num_filters=2, filter_size=3, act="relu")
"""
......@@ -2377,6 +2391,7 @@ def adaptive_pool2d(input,
# output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
#
import paddle
paddle.enable_static()
data = paddle.rand(shape=[1,3,32,32])
pool_out = paddle.fluid.layers.adaptive_pool2d(
input=data,
......@@ -2531,6 +2546,7 @@ def adaptive_pool3d(input,
#
import paddle
paddle.enable_static()
data = paddle.rand(shape=[1,3,32,32,32])
pool_out = paddle.fluid.layers.adaptive_pool3d(
input=data,
......@@ -2726,6 +2742,8 @@ def batch_norm(input,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
hidden2 = fluid.layers.batch_norm(input=hidden1)
......@@ -2735,6 +2753,8 @@ def batch_norm(input,
# batch_norm with momentum as Variable
import paddle.fluid as fluid
import paddle.fluid.layers.learning_rate_scheduler as lr_scheduler
import paddle
paddle.enable_static()
def get_decay_momentum(momentum_init, decay_steps, decay_rate):
global_step = lr_scheduler._decay_step_counter()
......@@ -3134,6 +3154,8 @@ def instance_norm(input,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
hidden2 = fluid.layers.instance_norm(input=hidden1)
......@@ -3269,6 +3291,7 @@ def data_norm(input,
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.randn(shape=[32,100])
hidden2 = paddle.static.nn.data_norm(input=x)
......@@ -3451,6 +3474,8 @@ def layer_norm(input,
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.enable_static()
x = fluid.data(name='x', shape=[-1, 32, 32], dtype='float32')
hidden1 = fluid.layers.layer_norm(input=x, begin_norm_axis=1)
place = fluid.CPUPlace()
......@@ -3566,6 +3591,9 @@ def group_norm(input,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
data = fluid.data(name='data', shape=[None, 8, 32, 32], dtype='float32')
x = fluid.layers.group_norm(input=data, groups=4)
"""
......@@ -3887,6 +3915,8 @@ def conv2d_transpose(input,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
conv2d_transpose = fluid.layers.conv2d_transpose(input=data, num_filters=2, filter_size=3)
"""
......@@ -4177,6 +4207,8 @@ def conv3d_transpose(input,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
data = fluid.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
conv3d_transpose = fluid.layers.conv3d_transpose(input=data, num_filters=2, filter_size=3)
"""
......@@ -4659,7 +4691,7 @@ def reduce_all(input, dim=None, keep_dim=False, name=None):
This OP computes the ``logical and`` of tensor elements over the given dimension, and outputs the result.
Args:
input (Variable): The input variable which is a Tensor or LoDTensor, the input data type should be `bool`.
input (Tensor): the input tensor, its data type should be `bool`.
dim (list|int, optional): The dimension along which the logical and is computed.
If :attr:`None`, compute the logical and over all elements of
:attr:`input` and return a Tensor variable with a single element,
......@@ -4672,11 +4704,12 @@ def reduce_all(input, dim=None, keep_dim=False, name=None):
will be named automatically. The default value is None.
Returns:
Variable, the output data type is bool. : The reduced tensor variable with ``logical and`` in given dims.
Tensor, the output data type is bool: the reduced tensor with ``logical and`` in the given dims.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
......@@ -4684,15 +4717,15 @@ def reduce_all(input, dim=None, keep_dim=False, name=None):
# x is a bool Tensor variable with following elements:
# [[True, False]
# [True, True]]
x = layers.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
x = layers.cast(x, 'bool')
x = paddle.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
x = paddle.cast(x, 'bool')
out = layers.reduce_all(x) # False
out = layers.reduce_all(x, dim=0) # [True, False]
out = layers.reduce_all(x, dim=-1) # [False, True]
out = paddle.reduce_all(x) # False
out = paddle.reduce_all(x, dim=0) # [True, False]
out = paddle.reduce_all(x, dim=-1) # [False, True]
# keep_dim=False, x.shape=(2,2), out.shape=(2,)
out = layers.reduce_all(x, dim=1, keep_dim=True) # [[False], [True]]
out = paddle.reduce_all(x, dim=1, keep_dim=True) # [[False], [True]]
# keep_dim=True, x.shape=(2,2), out.shape=(2,1)
"""
......@@ -4719,7 +4752,7 @@ def reduce_any(input, dim=None, keep_dim=False, name=None):
This OP computes the ``logical or`` of tensor elements over the given dimension, and outputs the result.
Args:
input (Variable): The input variable which is a Tensor or LoDTensor, the input data type should be `bool`.
input (Tensor): the input tensor, its data type should be `bool`.
dim (list|int, optional): The dimension along which the logical or is computed.
If :attr:`None`, compute the logical or over all elements of
:attr:`input` and return a Tensor variable with a single element,
......@@ -4728,14 +4761,15 @@ def reduce_any(input, dim=None, keep_dim=False, name=None):
keep_dim (bool): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
name(str|None): A name for this layer(optional). If set None, the layer
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable, the output data type is bool. : The reduced tensor variable with ``logical or`` in given dims.
Tensor, the output data type is bool: the reduced tensor with ``logical or`` in the given dims.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
......@@ -4743,15 +4777,15 @@ def reduce_any(input, dim=None, keep_dim=False, name=None):
# x is a bool Tensor variable with following elements:
# [[True, False]
# [False, False]]
x = layers.assign(np.array([[1, 0], [0, 0]], dtype='int32'))
x = layers.cast(x, 'bool')
x = paddle.assign(np.array([[1, 0], [0, 0]], dtype='int32'))
x = paddle.cast(x, 'bool')
out = layers.reduce_any(x) # True
out = layers.reduce_any(x, dim=0) # [True, False]
out = layers.reduce_any(x, dim=-1) # [True, False]
out = paddle.reduce_any(x) # True
out = paddle.reduce_any(x, dim=0) # [True, False]
out = paddle.reduce_any(x, dim=-1) # [True, False]
# keep_dim=False, x.shape=(2,2), out.shape=(2,)
out = layers.reduce_any(x, dim=1,
out = paddle.reduce_any(x, dim=1,
keep_dim=True) # [[True], [False]]
# keep_dim=True, x.shape=(2,2), out.shape=(2,1)
......@@ -5613,6 +5647,8 @@ def im2sequence(input,
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
data = fluid.data(name='data', shape=[None, 3, 32, 32],
dtype='float32')
output = fluid.layers.im2sequence(
......@@ -5669,6 +5705,8 @@ def row_conv(input, future_context_size, param_attr=None, act=None):
Examples:
>>> # for LodTensor inputs
>>> import paddle.fluid as fluid
>>> import paddle
>>> paddle.enable_static()
>>> x = fluid.data(name='x', shape=[9, 16],
>>> dtype='float32', lod_level=1)
>>> out = fluid.layers.row_conv(input=x, future_context_size=2)
......@@ -5982,6 +6020,8 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1):
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
global_step = fluid.layers.autoincreased_step_counter(
counter_name='@LR_DECAY_COUNTER@', begin=0, step=1)
"""
......@@ -9730,6 +9770,8 @@ def prelu(x, mode, param_attr=None, name=None):
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
from paddle.fluid.param_attr import ParamAttr
x = fluid.data(name="x", shape=[None,5,10,10], dtype="float32")
mode = 'channel'
......@@ -14307,6 +14349,9 @@ def deformable_conv(input,
#deformable conv v2:
import paddle.fluid as fluid
import paddle
paddle.enable_static()
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
......
......@@ -767,5 +767,117 @@ class API_TestSumOp(unittest.TestCase):
self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all())
class TestAllAPI(unittest.TestCase):
def setUp(self):
np.random.seed(123)
paddle.enable_static()
self.places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
self.places.append(fluid.CUDAPlace(0))
def check_static_result(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input = fluid.data(name="input", shape=[4, 4], dtype="bool")
result = paddle.all(x=input)
input_np = np.random.randint(0, 2, [4, 4]).astype("bool")
exe = fluid.Executor(place)
fetches = exe.run(fluid.default_main_program(),
feed={"input": input_np},
fetch_list=[result])
self.assertTrue(np.allclose(fetches[0], np.all(input_np)))
def test_static(self):
for place in self.places:
self.check_static_result(place=place)
def test_dygraph(self):
paddle.disable_static()
for place in self.places:
with fluid.dygraph.guard(place):
np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool)
x = fluid.layers.assign(np_x)
x = fluid.layers.cast(x, 'bool')
out1 = paddle.all(x)
np_out1 = out1.numpy()
expect_res1 = np.all(np_x)
self.assertTrue((np_out1 == expect_res1).all())
out2 = paddle.all(x, axis=0)
np_out2 = out2.numpy()
expect_res2 = np.all(np_x, axis=0)
self.assertTrue((np_out2 == expect_res2).all())
out3 = paddle.all(x, axis=-1)
np_out3 = out3.numpy()
expect_res3 = np.all(np_x, axis=-1)
self.assertTrue((np_out3 == expect_res3).all())
out4 = paddle.all(x, axis=1, keepdim=True)
np_out4 = out4.numpy()
expect_res4 = np.all(np_x, axis=1, keepdims=True)
self.assertTrue((np_out4 == expect_res4).all())
paddle.enable_static()
class TestAnyAPI(unittest.TestCase):
def setUp(self):
np.random.seed(123)
paddle.enable_static()
self.places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
self.places.append(fluid.CUDAPlace(0))
def check_static_result(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input = fluid.data(name="input", shape=[4, 4], dtype="bool")
result = paddle.any(x=input)
input_np = np.random.randint(0, 2, [4, 4]).astype("bool")
exe = fluid.Executor(place)
fetches = exe.run(fluid.default_main_program(),
feed={"input": input_np},
fetch_list=[result])
self.assertTrue(np.allclose(fetches[0], np.any(input_np)))
def test_static(self):
for place in self.places:
self.check_static_result(place=place)
def test_dygraph(self):
paddle.disable_static()
for place in self.places:
with fluid.dygraph.guard(place):
np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool)
x = fluid.layers.assign(np_x)
x = fluid.layers.cast(x, 'bool')
out1 = paddle.any(x)
np_out1 = out1.numpy()
expect_res1 = np.any(np_x)
self.assertTrue((np_out1 == expect_res1).all())
out2 = paddle.any(x, axis=0)
np_out2 = out2.numpy()
expect_res2 = np.any(np_x, axis=0)
self.assertTrue((np_out2 == expect_res2).all())
out3 = paddle.any(x, axis=-1)
np_out3 = out3.numpy()
expect_res3 = np.any(np_x, axis=-1)
self.assertTrue((np_out3 == expect_res3).all())
out4 = paddle.any(x, axis=1, keepdim=True)
np_out4 = out4.numpy()
expect_res4 = np.any(np_x, axis=1, keepdims=True)
self.assertTrue((np_out4 == expect_res4).all())
paddle.enable_static()
if __name__ == '__main__':
import paddle
paddle.enable_static()
unittest.main()
......@@ -66,8 +66,6 @@ from .logic import logical_not #DEFINE_ALIAS
from .logic import logical_or #DEFINE_ALIAS
from .logic import logical_xor #DEFINE_ALIAS
from .logic import not_equal #DEFINE_ALIAS
# from .logic import reduce_all #DEFINE_ALIAS
# from .logic import reduce_any #DEFINE_ALIAS
from .logic import allclose #DEFINE_ALIAS
from .logic import equal_all #DEFINE_ALIAS
# from .logic import isnan #DEFINE_ALIAS
......@@ -163,6 +161,8 @@ from .math import isfinite #DEFINE_ALIAS
from .math import isinf #DEFINE_ALIAS
from .math import isnan #DEFINE_ALIAS
from .math import prod #DEFINE_ALIAS
from .math import all #DEFINE_ALIAS
from .math import any #DEFINE_ALIAS
from .random import multinomial #DEFINE_ALIAS
from .random import standard_normal
from .random import normal
......
......@@ -27,6 +27,8 @@ from ..fluid.layers import logical_and #DEFINE_ALIAS
from ..fluid.layers import logical_not #DEFINE_ALIAS
from ..fluid.layers import logical_or #DEFINE_ALIAS
from ..fluid.layers import logical_xor #DEFINE_ALIAS
from ..fluid.layers import reduce_all #DEFINE_ALIAS
from ..fluid.layers import reduce_any #DEFINE_ALIAS
__all__ = [
'equal',
......
......@@ -21,7 +21,7 @@ from paddle.common_ops_import import *
from paddle.tensor import cast
import paddle
from ..fluid import layers
from ..fluid.framework import core, _varbase_creator, in_dygraph_mode, Variable
from ..fluid.framework import core, _varbase_creator, in_dygraph_mode, Variable, convert_np_dtype_to_dtype_
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
from ..fluid.layers.layer_function_generator import _generate_doc_string_, generate_activation_fn, generate_layer_fn
......@@ -46,6 +46,8 @@ from ..fluid.layers import exp #DEFINE_ALIAS
from ..fluid.layers import floor #DEFINE_ALIAS
from ..fluid.layers import log #DEFINE_ALIAS
from ..fluid.layers import reciprocal #DEFINE_ALIAS
from ..fluid.layers import reduce_all #DEFINE_ALIAS
from ..fluid.layers import reduce_any #DEFINE_ALIAS
# from ..fluid.layers import reduce_max #DEFINE_ALIAS
# from ..fluid.layers import reduce_min #DEFINE_ALIAS
# from ..fluid.layers import reduce_prod #DEFINE_ALIAS
......@@ -1933,3 +1935,201 @@ def increment(x, value=1.0, name=None):
outputs={'Out': [x]},
attrs={'step': float(value)})
return x
def all(x, axis=None, keepdim=False, name=None):
"""
Computes the ``logical and`` of tensor elements over the given dimension.
Args:
x (Tensor): An N-D Tensor, the input data type should be `bool`.
axis (int|list|tuple, optional): The dimensions along which the ``logical and`` is computed. If
:attr:`None`, the ``logical and`` is computed over all elements of :attr:`x` and a
Tensor with a single element is returned, otherwise it must be in the
range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
the dimension to reduce is :math:`rank + axis[i]`.
keepdim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result Tensor will have one fewer dimension
than :attr:`x` unless :attr:`keepdim` is true. The default
value is False.
name (str, optional): The default value is None. Normally there is no need for
the user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: Result of the ``logical and`` on the specified axis of input Tensor `x`; its data type is bool.
Raises:
ValueError: If the data type of `x` is not bool.
TypeError: The type of :attr:`axis` must be int, list or tuple.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
# switch to dygraph mode
paddle.disable_static()
# x is a bool Tensor variable with following elements:
# [[True, False]
# [True, True]]
x = layers.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
print(x)
x = layers.cast(x, 'bool')
# out1 should be [False]
out1 = paddle.all(x) # [False]
print(out1)
# out2 should be [True, False]
out2 = paddle.all(x, axis=0) # [True, False]
print(out2)
# keepdim=False, out3 should be [False, True], out3.shape should be (2,)
out3 = paddle.all(x, axis=-1) # [False, True]
print(out3)
# keepdim=True, out4 should be [[False], [True]], out4.shape should be (2,1)
out4 = paddle.all(x, axis=1, keepdim=True)
out4 = layers.cast(out4, 'int32') # [[0], [1]]
print(out4)
"""
if axis is not None and not isinstance(axis, (list, tuple)):
axis = [axis]
if not axis:
reduce_all_flag = True
else:
if len(axis) == len(x.shape):
reduce_all_flag = True
else:
reduce_all_flag = False
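# [0] is only a placeholder 'dim' when axis is empty; with 'reduce_all' set
# to True the op reduces over all elements regardless of 'dim'.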
attrs = {
'dim': axis if axis is not None and axis != [] and axis != () else [0],
'keep_dim': keepdim,
'reduce_all': reduce_all_flag
}
if in_dygraph_mode():
axis = axis if axis is not None and axis != [] else [0]
return core.ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag)
check_variable_and_dtype(x, 'x', ['bool'], 'all')
check_type(axis, 'axis', (int, list, tuple, type(None)), 'all')
helper = LayerHelper('all', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='reduce_all',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
return out
def any(x, axis=None, keepdim=False, name=None):
"""
Computes the ``logical or`` of tensor elements over the given dimension.
Args:
x (Tensor): An N-D Tensor, the input data type should be `bool`.
axis (int|list|tuple, optional): The dimensions along which the ``logical or`` is computed. If
:attr:`None`, the ``logical or`` is computed over all elements of :attr:`x` and a
Tensor with a single element is returned, otherwise it must be in the
range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
the dimension to reduce is :math:`rank + axis[i]`.
keepdim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result Tensor will have one fewer dimension
than :attr:`x` unless :attr:`keepdim` is true. The default
value is False.
name (str, optional): The default value is None. Normally there is no need for
the user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: Result of the ``logical or`` on the specified axis of input Tensor `x`; its data type is bool.
Raises:
ValueError: If the data type of `x` is not bool.
TypeError: The type of :attr:`axis` must be int, list or tuple.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
# switch to dygraph mode
paddle.disable_static()
# x is a bool Tensor variable with following elements:
# [[True, False]
# [False, False]]
x = layers.assign(np.array([[1, 0], [0, 0]], dtype='int32'))
print(x)
x = layers.cast(x, 'bool')
# out1 should be [True]
out1 = paddle.any(x) # [True]
print(out1)
# out2 should be [True, False]
out2 = paddle.any(x, axis=0) # [True, False]
print(out2)
# keepdim=False, out3 should be [True, False], out3.shape should be (2,)
out3 = paddle.any(x, axis=-1) # [True, False]
print(out3)
# keepdim=True, out4 should be [[True], [False]], out4.shape should be (2,1)
out4 = paddle.any(x, axis=1, keepdim=True)
out4 = layers.cast(out4, 'int32') # [[1], [0]]
print(out4)
"""
if axis is not None and not isinstance(axis, (list, tuple)):
axis = [axis]
if not axis:
reduce_all_flag = True
else:
if len(axis) == len(x.shape):
reduce_all_flag = True
else:
reduce_all_flag = False
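# As in all(): [0] is a placeholder 'dim' when axis is empty; the
# 'reduce_all' attribute controls the full reduction.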
attrs = {
'dim': axis if axis is not None and axis != [] and axis != () else [0],
'keep_dim': keepdim,
'reduce_all': reduce_all_flag
}
if in_dygraph_mode():
axis = axis if axis is not None and axis != [] else [0]
return core.ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag)
check_variable_and_dtype(x, 'x', ['bool'], 'any')
check_type(axis, 'axis', (int, list, tuple, type(None)), 'any')
helper = LayerHelper('any', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='reduce_any',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
return out