Commit 868256f0 authored by Rohan Jain, committed by TensorFlower Gardener


tf.parallel_stack is implemented as a graph rewrite and therefore is not supported in eager mode. Added documentation to parallel_stack and made it raise an error in eager mode to reflect that.

PiperOrigin-RevId: 340016749
Change-Id: I7c695dfb98549bbc56105592b1b1c6b1ea06b231
Parent 377434ff
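In user-visible terms, the change below makes eager calls fail fast. A minimal sketch of that behavior (assuming TF 2.x, where eager execution is on by default; the import style and version are assumptions, the error message is taken from the diff):

import tensorflow as tf

x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])

try:
  # tf.parallel_stack is implemented as a graph rewrite, so calling it
  # eagerly now raises instead of silently misbehaving.
  tf.parallel_stack([x, y, z])
except RuntimeError as e:
  print(e)  # "tf.parallel_stack() is not compatible with eager execution."

Graph-mode usage, where the op keeps working, is sketched after the final parallel_stack hunk below.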
@@ -2309,23 +2309,6 @@ cuda_py_test(
     ],
 )
 
-cuda_py_test(
-    name = "stack_op_test",
-    size = "small",
-    srcs = ["stack_op_test.py"],
-    tags = [
-        "no_cuda_asan",  # times out
-    ],
-    deps = [
-        "//tensorflow/python:array_ops",
-        "//tensorflow/python:client_testlib",
-        "//tensorflow/python:errors",
-        "//tensorflow/python:framework_for_generated_wrappers",
-        "//tensorflow/python:variables",
-        "//third_party/py/numpy",
-    ],
-)
-
 cuda_py_test(
     name = "map_fn_test",
     size = "small",
...
@@ -82,3 +82,20 @@ cuda_py_test(
         "//third_party/py/numpy",
     ],
 )
+
+cuda_py_test(
+    name = "stack_op_test",
+    size = "small",
+    srcs = ["stack_op_test.py"],
+    xla_tags = [
+        "no_cuda_asan",  # times out
+    ],
+    deps = [
+        "//tensorflow/python:array_ops",
+        "//tensorflow/python:client_testlib",
+        "//tensorflow/python:errors",
+        "//tensorflow/python:framework_for_generated_wrappers",
+        "//tensorflow/python:variables",
+        "//third_party/py/numpy",
+    ],
+)
@@ -64,27 +64,28 @@ class StackOpTest(test.TestCase):
           c = array_ops.stack(xs, axis=axis)
           self.assertAllEqual(c, data)
 
-  @test_util.run_deprecated_v1
   def testSimpleParallelCPU(self):
-    np.random.seed(7)
-    with self.session(use_gpu=False):
-      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
-        with self.subTest(shape=shape):
-          data = self.randn(shape, np.float32)
-          xs = list(map(constant_op.constant, data))
-          c = array_ops.parallel_stack(xs)
-          self.assertAllEqual(c, data)
+    # tf.parallel_stack is only supported in graph mode.
+    with ops.Graph().as_default():
+      np.random.seed(7)
+      with test_util.device(use_gpu=False):
+        for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
+          with self.subTest(shape=shape):
+            data = self.randn(shape, np.float32)
+            xs = list(map(constant_op.constant, data))
+            c = array_ops.parallel_stack(xs)
+            self.assertAllEqual(c, data)
 
-  @test_util.run_deprecated_v1
   def testSimpleParallelGPU(self):
-    np.random.seed(7)
-    with self.session(use_gpu=True):
-      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
-        with self.subTest(shape=shape):
-          data = self.randn(shape, np.float32)
-          xs = list(map(constant_op.constant, data))
-          c = array_ops.parallel_stack(xs)
-          self.assertAllEqual(c, data)
+    # tf.parallel_stack is only supported in graph mode.
+    with ops.Graph().as_default():
+      with test_util.device(use_gpu=True):
+        for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
+          with self.subTest(shape=shape):
+            data = self.randn(shape, np.float32)
+            xs = list(map(constant_op.constant, data))
+            c = array_ops.parallel_stack(xs)
+            self.assertAllEqual(c, data)
 
   @test_util.run_deprecated_v1
   def testConst(self):
@@ -113,37 +114,39 @@ class StackOpTest(test.TestCase):
           self.assertEqual(cl.op.type, "Const")
           self.assertAllEqual(cl, data)
 
-  @test_util.run_deprecated_v1
   def testConstParallelCPU(self):
-    np.random.seed(7)
-    with self.session(use_gpu=False):
-      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
-        with self.subTest(shape=shape):
-          data = self.randn(shape, np.float32)
-          if len(shape) == 1:
-            data_list = list(data)
-            cl = array_ops.parallel_stack(data_list)
-            self.assertAllEqual(cl, data)
+    # tf.parallel_stack is only supported in graph mode.
+    with ops.Graph().as_default():
+      np.random.seed(7)
+      with test_util.device(use_gpu=False):
+        for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
+          with self.subTest(shape=shape):
+            data = self.randn(shape, np.float32)
+            if len(shape) == 1:
+              data_list = list(data)
+              cl = array_ops.parallel_stack(data_list)
+              self.assertAllEqual(cl, data)
 
-          data = self.randn(shape, np.float32)
-          c = array_ops.parallel_stack(data)
-          self.assertAllEqual(c, data)
+            data = self.randn(shape, np.float32)
+            c = array_ops.parallel_stack(data)
+            self.assertAllEqual(c, data)
 
-  @test_util.run_deprecated_v1
   def testConstParallelGPU(self):
-    np.random.seed(7)
-    with self.session(use_gpu=True):
-      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
-        with self.subTest(shape=shape):
-          data = self.randn(shape, np.float32)
-          if len(shape) == 1:
-            data_list = list(data)
-            cl = array_ops.parallel_stack(data_list)
-            self.assertAllEqual(cl, data)
+    # tf.parallel_stack is only supported in graph mode.
+    with ops.Graph().as_default():
+      np.random.seed(7)
+      with test_util.device(use_gpu=True):
+        for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
+          with self.subTest(shape=shape):
+            data = self.randn(shape, np.float32)
+            if len(shape) == 1:
+              data_list = list(data)
+              cl = array_ops.parallel_stack(data_list)
+              self.assertAllEqual(cl, data)
 
-          data = self.randn(shape, np.float32)
-          c = array_ops.parallel_stack(data)
-          self.assertAllEqual(c, data)
+            data = self.randn(shape, np.float32)
+            c = array_ops.parallel_stack(data)
+            self.assertAllEqual(c, data)
 
   @test_util.run_deprecated_v1
   def testGradientsAxis0(self):
@@ -176,53 +179,57 @@ class StackOpTest(test.TestCase):
                                                     out_shape)
       self.assertLess(err, 1e-6)
 
-  @test_util.run_deprecated_v1
   def testZeroSizeCPU(self):
-    # Verify that stack doesn't crash for zero size inputs
-    with self.session(use_gpu=False):
-      for shape in (0,), (3, 0), (0, 3):
-        with self.subTest(shape=shape):
-          x = np.zeros((2,) + shape).astype(np.int32)
-          p = array_ops.stack(list(x)).eval()
-          self.assertAllEqual(p, x)
+    # tf.parallel_stack is only supported in graph mode.
+    with ops.Graph().as_default():
+      # Verify that stack doesn't crash for zero size inputs
+      with test_util.device(use_gpu=False):
+        for shape in (0,), (3, 0), (0, 3):
+          with self.subTest(shape=shape):
+            x = np.zeros((2,) + shape).astype(np.int32)
+            p = self.evaluate(array_ops.stack(list(x)))
+            self.assertAllEqual(p, x)
 
-          p = array_ops.parallel_stack(list(x)).eval()
-          self.assertAllEqual(p, x)
+            p = self.evaluate(array_ops.parallel_stack(list(x)))
+            self.assertAllEqual(p, x)
 
-  @test_util.run_deprecated_v1
   def testZeroSizeGPU(self):
-    # Verify that stack doesn't crash for zero size inputs
-    with self.session(use_gpu=True):
-      for shape in (0,), (3, 0), (0, 3):
-        with self.subTest(shape=shape):
-          x = np.zeros((2,) + shape).astype(np.int32)
-          p = array_ops.stack(list(x)).eval()
-          self.assertAllEqual(p, x)
+    # tf.parallel_stack is only supported in graph mode.
+    with ops.Graph().as_default():
+      # Verify that stack doesn't crash for zero size inputs
+      with test_util.device(use_gpu=True):
+        for shape in (0,), (3, 0), (0, 3):
+          with self.subTest(shape=shape):
+            x = np.zeros((2,) + shape).astype(np.int32)
+            p = self.evaluate(array_ops.stack(list(x)))
+            self.assertAllEqual(p, x)
 
-          p = array_ops.parallel_stack(list(x)).eval()
-          self.assertAllEqual(p, x)
+            p = self.evaluate(array_ops.parallel_stack(list(x)))
+            self.assertAllEqual(p, x)
 
-  @test_util.run_deprecated_v1
   def testAxis0DefaultCPU(self):
-    with self.session(use_gpu=False):
-      t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
-      stacked = array_ops.stack(t).eval()
-      parallel_stacked = array_ops.parallel_stack(t).eval()
+    # tf.parallel_stack is only supported in graph mode.
+    with ops.Graph().as_default():
+      with test_util.device(use_gpu=False):
+        t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
+        stacked = self.evaluate(array_ops.stack(t))
+        parallel_stacked = self.evaluate(array_ops.parallel_stack(t))
 
-      expected = np.array([[1, 2, 3], [4, 5, 6]])
-      self.assertAllEqual(stacked, expected)
-      self.assertAllEqual(parallel_stacked, expected)
+        expected = np.array([[1, 2, 3], [4, 5, 6]])
+        self.assertAllEqual(stacked, expected)
+        self.assertAllEqual(parallel_stacked, expected)
 
-  @test_util.run_deprecated_v1
   def testAxis0DefaultGPU(self):
-    with self.session(use_gpu=True):
-      t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
-      stacked = array_ops.stack(t).eval()
-      parallel_stacked = array_ops.parallel_stack(t).eval()
+    # tf.parallel_stack is only supported in graph mode.
+    with ops.Graph().as_default():
+      with test_util.device(use_gpu=True):
+        t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
+        stacked = self.evaluate(array_ops.stack(t))
+        parallel_stacked = self.evaluate(array_ops.parallel_stack(t))
 
-      expected = np.array([[1, 2, 3], [4, 5, 6]])
-      self.assertAllEqual(stacked, expected)
-      self.assertAllEqual(parallel_stacked, expected)
+        expected = np.array([[1, 2, 3], [4, 5, 6]])
+        self.assertAllEqual(stacked, expected)
+        self.assertAllEqual(parallel_stacked, expected)
 
   def testAgainstNumpy(self):
     # For 1 to 5 dimensions.
...
@@ -1330,13 +1330,23 @@ def parallel_stack(values, name="parallel_stack"):
   tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])
 
+  @compatibility(eager)
+  parallel_stack is not compatible with eager execution.
+  @end_compatibility
+
   Args:
     values: A list of `Tensor` objects with the same shape and type.
     name: A name for this operation (optional).
 
   Returns:
     output: A stacked `Tensor` with the same type as `values`.
+
+  Raises:
+    RuntimeError: if executed in eager mode.
   """
+  if context.executing_eagerly():
+    raise RuntimeError("tf.parallel_stack() is not compatible with "
+                       "eager execution.")
   with ops.name_scope(name):
     value_t = ops.convert_to_tensor(values[0])
     value_shape = ops.convert_to_tensor(value_t).get_shape()
...
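For completeness, a hedged sketch of the graph-mode path that the docstring's tf.parallel_stack([x, y, z]) = np.asarray([x, y, z]) example refers to (TF 2.x names; the explicit Graph and Session setup is an assumption for illustration, not part of this commit):

import numpy as np
import tensorflow as tf

with tf.Graph().as_default():
  x = tf.constant([1, 4])
  y = tf.constant([2, 5])
  z = tf.constant([3, 6])
  p = tf.parallel_stack([x, y, z])  # graph-only per this change
  s = tf.stack([x, y, z])           # equivalent stacking along axis 0
  with tf.compat.v1.Session() as sess:
    p_val, s_val = sess.run([p, s])

# Both match np.asarray on the same inputs: [[1 4], [2 5], [3 6]].
assert np.array_equal(p_val, np.asarray([[1, 4], [2, 5], [3, 6]]))
assert np.array_equal(p_val, s_val)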