Commit 868256f0 authored by Rohan Jain, committed by TensorFlower Gardener

tf.parallel_stack is implemented as a graph rewrite and therefore has no support in eager mode.

tf.parallel_stack is implemented as a graph rewrite and therefore has no support in eager mode. Added documentation to parallel_stack, and it now raises an error in eager mode to reflect that.

PiperOrigin-RevId: 340016749
Change-Id: I7c695dfb98549bbc56105592b1b1c6b1ea06b231
Parent 377434ff
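
As a rough illustration of the new behavior (not part of this commit; it assumes a TF 2.x build that includes the change), tf.parallel_stack now raises a RuntimeError under eager execution, while graph-mode usage is unchanged:

    import tensorflow as tf

    # Eager execution (the TF 2.x default): the call is rejected up front.
    try:
        tf.parallel_stack([tf.constant([1, 4]), tf.constant([2, 5])])
    except RuntimeError as err:
        print(err)  # "tf.parallel_stack() is not compatible with eager execution."

    # Graph mode: the op is still rewritten and stacked as before.
    with tf.Graph().as_default():
        xs = [tf.constant([1, 4]), tf.constant([2, 5]), tf.constant([3, 6])]
        stacked = tf.parallel_stack(xs)  # shape (3, 2)
        with tf.compat.v1.Session() as sess:
            print(sess.run(stacked))  # [[1 4] [2 5] [3 6]]
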
@@ -2309,23 +2309,6 @@ cuda_py_test(
],
)
cuda_py_test(
name = "stack_op_test",
size = "small",
srcs = ["stack_op_test.py"],
tags = [
"no_cuda_asan", # times out
],
deps = [
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:errors",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:variables",
"//third_party/py/numpy",
],
)
cuda_py_test(
name = "map_fn_test",
size = "small",
......
@@ -82,3 +82,20 @@ cuda_py_test(
"//third_party/py/numpy",
],
)
cuda_py_test(
name = "stack_op_test",
size = "small",
srcs = ["stack_op_test.py"],
xla_tags = [
"no_cuda_asan", # times out
],
deps = [
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:errors",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:variables",
"//third_party/py/numpy",
],
)
@@ -64,27 +64,28 @@ class StackOpTest(test.TestCase):
c = array_ops.stack(xs, axis=axis)
self.assertAllEqual(c, data)
@test_util.run_deprecated_v1
def testSimpleParallelCPU(self):
np.random.seed(7)
with self.session(use_gpu=False):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c, data)
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
np.random.seed(7)
with test_util.device(use_gpu=False):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c, data)
@test_util.run_deprecated_v1
def testSimpleParallelGPU(self):
np.random.seed(7)
with self.session(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c, data)
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
with test_util.device(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c, data)
@test_util.run_deprecated_v1
def testConst(self):
@@ -113,37 +114,39 @@ class StackOpTest(test.TestCase):
self.assertEqual(cl.op.type, "Const")
self.assertAllEqual(cl, data)
@test_util.run_deprecated_v1
def testConstParallelCPU(self):
np.random.seed(7)
with self.session(use_gpu=False):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl, data)
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
np.random.seed(7)
with test_util.device(use_gpu=False):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl, data)
data = self.randn(shape, np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c, data)
data = self.randn(shape, np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c, data)
@test_util.run_deprecated_v1
def testConstParallelGPU(self):
np.random.seed(7)
with self.session(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl, data)
data = self.randn(shape, np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c, data)
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
np.random.seed(7)
with test_util.device(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl, data)
data = self.randn(shape, np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c, data)
@test_util.run_deprecated_v1
def testGradientsAxis0(self):
@@ -176,53 +179,57 @@ class StackOpTest(test.TestCase):
out_shape)
self.assertLess(err, 1e-6)
@test_util.run_deprecated_v1
def testZeroSizeCPU(self):
# Verify that stack doesn't crash for zero size inputs
with self.session(use_gpu=False):
for shape in (0,), (3, 0), (0, 3):
with self.subTest(shape=shape):
x = np.zeros((2,) + shape).astype(np.int32)
p = array_ops.stack(list(x)).eval()
self.assertAllEqual(p, x)
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
# Verify that stack doesn't crash for zero size inputs
with test_util.device(use_gpu=False):
for shape in (0,), (3, 0), (0, 3):
with self.subTest(shape=shape):
x = np.zeros((2,) + shape).astype(np.int32)
p = self.evaluate(array_ops.stack(list(x)))
self.assertAllEqual(p, x)
p = self.evaluate(array_ops.parallel_stack(list(x)))
self.assertAllEqual(p, x)
p = array_ops.parallel_stack(list(x)).eval()
self.assertAllEqual(p, x)
@test_util.run_deprecated_v1
def testZeroSizeGPU(self):
# Verify that stack doesn't crash for zero size inputs
with self.session(use_gpu=True):
for shape in (0,), (3, 0), (0, 3):
with self.subTest(shape=shape):
x = np.zeros((2,) + shape).astype(np.int32)
p = array_ops.stack(list(x)).eval()
self.assertAllEqual(p, x)
p = array_ops.parallel_stack(list(x)).eval()
self.assertAllEqual(p, x)
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
# Verify that stack doesn't crash for zero size inputs
with test_util.device(use_gpu=True):
for shape in (0,), (3, 0), (0, 3):
with self.subTest(shape=shape):
x = np.zeros((2,) + shape).astype(np.int32)
p = self.evaluate(array_ops.stack(list(x)))
self.assertAllEqual(p, x)
p = self.evaluate(array_ops.parallel_stack(list(x)))
self.assertAllEqual(p, x)
@test_util.run_deprecated_v1
def testAxis0DefaultCPU(self):
with self.session(use_gpu=False):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = array_ops.stack(t).eval()
parallel_stacked = array_ops.parallel_stack(t).eval()
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
with test_util.device(use_gpu=False):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = self.evaluate(array_ops.stack(t))
parallel_stacked = self.evaluate(array_ops.parallel_stack(t))
expected = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(stacked, expected)
self.assertAllEqual(parallel_stacked, expected)
expected = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(stacked, expected)
self.assertAllEqual(parallel_stacked, expected)
@test_util.run_deprecated_v1
def testAxis0DefaultGPU(self):
with self.session(use_gpu=True):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = array_ops.stack(t).eval()
parallel_stacked = array_ops.parallel_stack(t).eval()
expected = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(stacked, expected)
self.assertAllEqual(parallel_stacked, expected)
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
with test_util.device(use_gpu=True):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = self.evaluate(array_ops.stack(t))
parallel_stacked = self.evaluate(array_ops.parallel_stack(t))
expected = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(stacked, expected)
self.assertAllEqual(parallel_stacked, expected)
def testAgainstNumpy(self):
# For 1 to 5 dimensions.
......
......@@ -1330,13 +1330,23 @@ def parallel_stack(values, name="parallel_stack"):
tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])
@compatibility(eager)
parallel_stack is not compatible with eager execution.
@end_compatibility
Args:
values: A list of `Tensor` objects with the same shape and type.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
Raises:
RuntimeError: if executed in eager mode.
"""
if context.executing_eagerly():
raise RuntimeError("tf.parallel_stack() is not compatible with "
"eager execution.")
with ops.name_scope(name):
value_t = ops.convert_to_tensor(values[0])
value_shape = ops.convert_to_tensor(value_t).get_shape()
......
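
For reference, a minimal graph-mode sketch of the documented behaviour (not from the diff; it uses the same internal modules as stack_op_test above, plus a hypothetical pair of inputs), showing that parallel_stack matches np.asarray on the stacked values:

    import numpy as np
    from tensorflow.python.client import session
    from tensorflow.python.framework import constant_op
    from tensorflow.python.framework import ops
    from tensorflow.python.ops import array_ops

    x, y, z = np.array([1, 4]), np.array([2, 5]), np.array([3, 6])
    # parallel_stack must be built inside a graph; in eager mode it now raises.
    with ops.Graph().as_default():
        xs = [constant_op.constant(v) for v in (x, y, z)]
        stacked = array_ops.parallel_stack(xs)  # behaves like np.asarray([x, y, z])
        with session.Session() as sess:
            np.testing.assert_array_equal(sess.run(stacked), np.asarray([x, y, z]))
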