Unverified commit 33f512e6, authored by qq_22305325, committed by GitHub

Replace old fixed_placement API with scope.placement (#3234)

Parent 4bfec0d9
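For orientation, the change is mechanical: every placement scope opened through the old top-level `oneflow.fixed_placement` / `flow.fixed_placement` API now goes through `oneflow.scope.placement` / `flow.scope.placement`, with the argument list unchanged (a device tag such as `"cpu"` or `"gpu"`, plus a `"machine_id:device_ids"` string); the same substitution applies inside `func_config.default_placement_scope(...)`. A minimal before/after sketch, assuming a OneFlow build from this commit — the job name and tensor shape are illustrative, not taken from the diff:

```python
import oneflow as flow

func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
# Old API (removed by this commit):
#   func_config.default_placement_scope(flow.fixed_placement("cpu", "0:0"))
func_config.default_placement_scope(flow.scope.placement("cpu", "0:0"))

@flow.global_function(func_config)
def example_job(x=flow.FixedTensorDef((96, 96))):  # hypothetical job for illustration
    # Old API: with flow.fixed_placement("gpu", "0:0-3"):
    # New API: same (device_tag, "machine:device-ids") arguments.
    with flow.scope.placement("gpu", "0:0-3"):
        return flow.identity(x)
```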
@@ -104,7 +104,7 @@ def _MakeEagerLogicalBlob(op_attribute, obn, blob_register):
 def EagerInitVariableBlob(var_op_conf, var_blob):
-    with oneflow.fixed_placement("cpu", "0:0"):
+    with oneflow.scope.placement("cpu", "0:0"):
         _Assign(var_blob.blob_object, _ModelInit(var_op_conf))
@@ -161,7 +161,7 @@ class FixedTensorDef(ArgBlobDef):
         else:
             device_tag = "cpu"
             device_ids = "0:0"
-        with oneflow.fixed_placement(device_tag, device_ids):
+        with oneflow.scope.placement(device_tag, device_ids):
             return compile_context.CurJobAddConsistentOp(op_conf)

     def SetBatchAxisAndSplitAxis(
@@ -55,7 +55,7 @@ def lazy_system_assign(ref, value, validate_shape=None, use_locking=None, name=N
     device_tag, machine_device_ids = parallel_conf_util.GetDeviceTagAndMachineDeviceIds(
         ref.parallel_conf
     )
-    with oneflow.fixed_placement(device_tag, machine_device_ids):
+    with oneflow.scope.placement(device_tag, machine_device_ids):
         interpret_util.Forward(op_conf)
     return ref
@@ -63,7 +63,7 @@ def two_stage_reduce(x, axis=None, keepdims=False, op_type_name=None, name=None)
         device_ids,
     ) in current_placement_scope.machine_id2device_id_list.items():
         for device_id in device_ids:
-            with flow.fixed_placement(
+            with flow.scope.placement(
                 device_tag, str(machine_id) + ":" + str(device_id)
             ):
                 device_stage_out, device_stage_count = reduce_device_stage(
@@ -51,7 +51,7 @@ def LazyWatch(blob_watched, handler_or_prompt=None):
         tag_and_dev_ids = parallel_conf_util.GetDeviceTagAndMachineDeviceIds(
             blob_watched.parallel_conf
         )
-        with oneflow.fixed_placement(*tag_and_dev_ids):
+        with oneflow.scope.placement(*tag_and_dev_ids):
             compile_context.CurJobAddOp(op_conf)
         watcher_util.BindUuidAndHandler(handler_uuid, blob_watched, handler)
     elif isinstance(blob_watched, MirroredBlob):
@@ -9,7 +9,7 @@ func_config.default_data_type(flow.float)
 def my_test_source(name):
-    with flow.fixed_placement("cpu", "0:0"):
+    with flow.scope.placement("cpu", "0:0"):
         return (
             flow.user_op_builder(name)
             .Op("TestSource")
@@ -11,7 +11,7 @@ def test_2d_gpu_variable(test_case):
     @flow.global_function(function_config)
     def Foo():
-        with flow.fixed_placement("gpu", device_name):
+        with flow.scope.placement("gpu", device_name):
             w = flow.get_variable(
                 "w",
                 shape=(10,),
@@ -20,7 +20,7 @@ def test_test_dynamic_source(test_case):
     @flow.global_function(func_config)
     def TestSourceJob():
-        with flow.fixed_placement("cpu", "0:0"):
+        with flow.scope.placement("cpu", "0:0"):
             ret = my_test_source("my_cc_test_source_op")
         return ret
@@ -21,7 +21,7 @@ def test_testsource(test_case):
     @flow.global_function(func_config)
     def TestSourceJob():
-        with flow.fixed_placement("cpu", "0:0"):
+        with flow.scope.placement("cpu", "0:0"):
             ret = my_test_source("my_cc_test_source_op", 0)
         return ret
@@ -20,7 +20,7 @@ def test_testsource(test_case):
     @flow.global_function(func_config)
     def TestSourceJob():
-        with flow.fixed_placement("cpu", "0:0"):
+        with flow.scope.placement("cpu", "0:0"):
             ret = my_test_source("my_cc_test_source_op")
             # print("cons_test_source_batch_axis", ret.batch_axis)
             test_case.assertTrue(ret.batch_axis is not None and ret.batch_axis == 0)
@@ -21,7 +21,7 @@ def test_testsource_2_gpu(test_case):
     @flow.global_function(func_config)
     def TestSourceJob():
-        with flow.fixed_placement("cpu", "0:0-1"):
+        with flow.scope.placement("cpu", "0:0-1"):
             ret = my_test_source("my_cc_test_source_op", 10)
             # print("cons_test_source_batch_axis", ret.batch_axis)
             test_case.assertTrue(ret.batch_axis is not None and ret.batch_axis == 0)
@@ -24,7 +24,7 @@ def compare_with_tensorflow(device_type, in_shape, data_type):
             dtype=type_name_to_flow_type[data_type],
         )
     ):
-        with flow.fixed_placement(device_type, "0:0"):
+        with flow.scope.placement(device_type, "0:0"):
             return flow.math.argmax(input)

     input = (np.random.random(in_shape) * 100).astype(type_name_to_np_type[data_type])
@@ -24,7 +24,7 @@ def compare_with_tensorflow(device_type, in_shape, direction, data_type):
             dtype=type_name_to_flow_type[data_type],
         )
     ):
-        with flow.fixed_placement(device_type, "0:0"):
+        with flow.scope.placement(device_type, "0:0"):
             return flow.argsort(input, direction)

     input = (np.random.random(in_shape) * 100).astype(type_name_to_np_type[data_type])
@@ -27,7 +27,7 @@ def _of_assign_and_relu(value, dtype, device_type):
     flow.config.cpu_device_num(1)
     func_config = flow.FunctionConfig()
     func_config.default_data_type(dtype)
-    func_config.default_placement_scope(flow.fixed_placement(device_type, "0:0"))
+    func_config.default_placement_scope(flow.scope.placement(device_type, "0:0"))

     @flow.global_function(func_config)
     def assign_fn(value_def=flow.FixedTensorDef(value.shape, dtype=dtype)):
@@ -22,9 +22,9 @@ def _test_split_to_split(
     @flow.global_function(func_config)
     def split_to_split_job(x=flow.FixedTensorDef((96, 96))):
-        with flow.fixed_placement(src_device_type, "0:0-" + str(src_device_num - 1)):
+        with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
             src = flow.identity(x.with_distribute(flow.distribute.split(src_axis)))
-        with flow.fixed_placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
+        with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
             dst = flow.identity(src.with_distribute(flow.distribute.split(dst_axis)))
         return dst
@@ -61,9 +61,9 @@ def _test_split_to_broadcast(
     @flow.global_function(func_config)
     def split_to_broadcast_job(x=flow.FixedTensorDef((96, 96))):
-        with flow.fixed_placement(src_device_type, "0:0-" + str(src_device_num - 1)):
+        with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
             src = flow.identity(x.with_distribute(flow.distribute.split(src_axis)))
-        with flow.fixed_placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
+        with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
             dst = flow.identity(src.with_distribute(flow.distribute.broadcast()))
         return dst
@@ -99,9 +99,9 @@ def _test_broadcast_to_split(
     @flow.global_function(func_config)
     def broadcast_to_split_job(x=flow.FixedTensorDef((96, 96))):
-        with flow.fixed_placement(src_device_type, "0:0-" + str(src_device_num - 1)):
+        with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
             src = flow.identity(x.with_distribute(flow.distribute.broadcast()))
-        with flow.fixed_placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
+        with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
             dst = flow.identity(src.with_distribute(flow.distribute.split(dst_axis)))
         return dst
@@ -137,10 +137,10 @@ def _test_partial_sum_to_split(
     @flow.global_function(func_config)
     def partial_sum_to_split_job(x=flow.FixedTensorDef((96, 96, 96))):
-        with flow.fixed_placement(src_device_type, "0:0-" + str(src_device_num - 1)):
+        with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
             src = flow.identity(x.with_distribute(flow.distribute.split(0)))
             src = flow.math.reduce_sum(src, axis=0)
-        with flow.fixed_placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
+        with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
             dst = flow.identity(src.with_distribute(flow.distribute.split(dst_axis)))
         return dst
@@ -171,10 +171,10 @@ def _test_partial_sum_to_broadcast(
     @flow.global_function(func_config)
     def partial_sum_to_broadcast_job(x=flow.FixedTensorDef((96, 96, 96))):
-        with flow.fixed_placement(src_device_type, "0:0-" + str(src_device_num - 1)):
+        with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
             src = flow.identity(x.with_distribute(flow.distribute.split(0)))
             src = flow.math.reduce_sum(src, axis=0)
-        with flow.fixed_placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
+        with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
             dst = flow.identity(src.with_distribute(flow.distribute.broadcast()))
         return dst
@@ -204,9 +204,9 @@ def _test_broadcast_to_broadcast(
     @flow.global_function(func_config)
     def broadcast_to_broadcast_job(x=flow.FixedTensorDef((96, 96, 96))):
-        with flow.fixed_placement(src_device_type, "0:0-" + str(src_device_num - 1)):
+        with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
             src = flow.identity(x.with_distribute(flow.distribute.broadcast()))
-        with flow.fixed_placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
+        with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
             dst = flow.identity(src.with_distribute(flow.distribute.broadcast()))
         return dst
@@ -237,12 +237,12 @@ def _test_multi_lbi(
     @flow.global_function(func_config)
     def multi_lbi_job(x=flow.FixedTensorDef((96, 96, 96))):
-        with flow.fixed_placement(src_device_type, "0:0-" + str(src_device_num - 1)):
+        with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
             src_s0 = flow.identity(x.with_distribute(flow.distribute.split(0)))
             src_s1 = flow.identity(x.with_distribute(flow.distribute.split(1)))
             src_b = flow.identity(x.with_distribute(flow.distribute.split(1)))
             (t0_0, t0_1, t0_2) = flow.identity_n((src_s0, src_s1, src_b))
-        with flow.fixed_placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
+        with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
             t0_0 = t0_0.with_distribute(flow.distribute.split(1))
             t0_1 = t0_1.with_distribute(flow.distribute.broadcast())
             t0_2 = t0_2.with_distribute(flow.distribute.split(1))
@@ -21,7 +21,7 @@ def compare_broadcast_like_with_tf(
         x=flow.FixedTensorDef(shape=input_shape, dtype=data_type_util.kFloat),
         y=flow.FixedTensorDef(shape=like_shape, dtype=data_type_util.kFloat),
     ):
-        with flow.fixed_placement(device_type, "0:0"):
+        with flow.scope.placement(device_type, "0:0"):
             return flow.broadcast_like(x, y, broadcast_axes=broadcast_axes)

     x = np.random.rand(*input_shape).astype(np.float32)
@@ -15,7 +15,7 @@ def _run_test(test_case, a, b, dtype, device):
         a=flow.FixedTensorDef(a.shape, dtype=dtype),
        b=flow.FixedTensorDef(b.shape, dtype=dtype),
     ):
-        with flow.fixed_placement(device, "0:0"):
+        with flow.scope.placement(device, "0:0"):
             return flow.math.maximum(a, b)

     out = BroadcastMaximum(a, b).get()
@@ -15,7 +15,7 @@ def _run_test(test_case, a, b, dtype, device):
         a=flow.FixedTensorDef(a.shape, dtype=dtype),
         b=flow.FixedTensorDef(b.shape, dtype=dtype),
     ):
-        with flow.fixed_placement(device, "0:0"):
+        with flow.scope.placement(device, "0:0"):
             return flow.math.minimum(a, b)

     out = BroadcastMinimum(a, b).get()
@@ -20,7 +20,7 @@ def cast_forward_compare_with_tensorflow(test_cast, device_type, input_shape, dt
             shape=input_shape, dtype=type_name_to_flow_type[dtype]
         )
     ):
-        with flow.fixed_placement(device_type, "0:0"):
+        with flow.scope.placement(device_type, "0:0"):
             return flow.cast(input_def, dtype=type_name_to_flow_type[dtype])

     input = np.random.rand(*input_shape).astype(type_name_to_np_type[dtype])
@@ -23,11 +23,11 @@ def test_multi_node_comm_net(test_case):
     @flow.global_function(func_config)
     def ReluJob(x=flow.FixedTensorDef((10, 2))):
-        with flow.fixed_placement("gpu", "0:0"):
+        with flow.scope.placement("gpu", "0:0"):
             out0 = ccrelu(x, "my_op_0_0")
-        with flow.fixed_placement("gpu", "1:0"):
+        with flow.scope.placement("gpu", "1:0"):
             out1 = ccrelu(out0, "my_op_1_0")
-        with flow.fixed_placement("gpu", "0:0"):
+        with flow.scope.placement("gpu", "0:0"):
             out2 = ccrelu(out1, "my_op_print")
         return out2
@@ -52,18 +52,18 @@ def test_multi_node_comm_net(test_case):
 def test_multi_node_comm_net_dynamic(test_case):
     func_config = flow.FunctionConfig()
     func_config.default_distribute_strategy(flow.scope.mirrored_view())
-    func_config.default_placement_scope(flow.fixed_placement("gpu", "0:0"))
+    func_config.default_placement_scope(flow.scope.placement("gpu", "0:0"))
     func_config.default_data_type(flow.float)
     flow.config.machine_num(2)
     flow.config.gpu_device_num(1)

     @flow.global_function(func_config)
     def ReluJob(x=flow.MirroredTensorDef((10, 2))):
-        with flow.fixed_placement("gpu", "0:0"):
+        with flow.scope.placement("gpu", "0:0"):
             out0 = flow.keras.activations.relu(x)
-        with flow.fixed_placement("gpu", "1:0"):
+        with flow.scope.placement("gpu", "1:0"):
             out1 = flow.keras.activations.relu(out0)
-        with flow.fixed_placement("gpu", "0:0"):
+        with flow.scope.placement("gpu", "0:0"):
             out2 = flow.keras.activations.relu(out1)
         return out2
@@ -88,18 +88,18 @@ def test_multi_node_comm_net_dynamic(test_case):
 def test_multi_node_comm_net_dynamic_empty(test_case):
     func_config = flow.FunctionConfig()
     func_config.default_distribute_strategy(flow.scope.mirrored_view())
-    func_config.default_placement_scope(flow.fixed_placement("cpu", "0:0"))
+    func_config.default_placement_scope(flow.scope.placement("cpu", "0:0"))
     func_config.default_data_type(flow.float)
     flow.config.machine_num(2)
     flow.config.gpu_device_num(1)

     @flow.global_function(func_config)
     def ReluJob(x=flow.MirroredTensorDef((10, 2))):
-        with flow.fixed_placement("cpu", "0:0"):
+        with flow.scope.placement("cpu", "0:0"):
             out0 = flow.keras.activations.relu(x)
-        with flow.fixed_placement("cpu", "1:0"):
+        with flow.scope.placement("cpu", "1:0"):
             out1 = flow.keras.activations.relu(out0)
-        with flow.fixed_placement("cpu", "0:0"):
+        with flow.scope.placement("cpu", "0:0"):
             out2 = flow.keras.activations.relu(out1)
         return out2
@@ -6,7 +6,7 @@ import oneflow as flow
 def test_multi_node_dynamic_binary_split_concat_empty(test_case):
     func_config = flow.FunctionConfig()
     func_config.default_distribute_strategy(flow.scope.mirrored_view())
-    func_config.default_placement_scope(flow.fixed_placement("cpu", "0:0"))
+    func_config.default_placement_scope(flow.scope.placement("cpu", "0:0"))
     func_config.default_data_type(flow.float)
     flow.config.machine_num(2)
     flow.config.gpu_device_num(1)
@@ -14,7 +14,7 @@ def test_multi_node_dynamic_binary_split_concat_empty(test_case):
     @flow.global_function(func_config)
     def DynamicBinaryJob(x=flow.MirroredTensorDef((20,))):
         print("in_shape: ", x.shape)
-        with flow.fixed_placement("cpu", "0:0"):
+        with flow.scope.placement("cpu", "0:0"):
             out_list = flow.experimental.dynamic_binary_split(
                 x, base_shift=4, out_num=6
             )
@@ -22,10 +22,10 @@ def test_multi_node_dynamic_binary_split_concat_empty(test_case):
         for out_blob in out_list:
             print("out_shape: ", out_blob.shape)
             id_out_list.append(flow.identity(out_blob))
-        with flow.fixed_placement("cpu", "1:0"):
+        with flow.scope.placement("cpu", "1:0"):
             out1 = flow.experimental.dynamic_binary_concat(id_out_list, x)
             print("concat_shape: ", out1.shape)
-        with flow.fixed_placement("cpu", "0:0"):
+        with flow.scope.placement("cpu", "0:0"):
             out2 = flow.identity(out1)
             print("return_shape: ", out2.shape)
         return out2
@@ -23,7 +23,7 @@ def compare_with_tensorflow(device_type, x_shape, axis):
     @flow.global_function(func_config)
     def ExpandDimsJob():
-        with flow.fixed_placement(device_type, "0:0"):
+        with flow.scope.placement(device_type, "0:0"):
             x = flow.get_variable(
                 "var",
                 shape=x_shape,
@@ -3,7 +3,7 @@ import oneflow as flow
 def test_default_placement_scope(test_case):
     func_config = flow.FunctionConfig()
-    func_config.default_placement_scope(flow.fixed_placement("cpu", "0:0"))
+    func_config.default_placement_scope(flow.scope.placement("cpu", "0:0"))

     @flow.global_function(func_config)
     def Foo():
@@ -21,7 +21,7 @@ def test_config_setter_getter(test_case):
 def test_global_function_desc(test_case):
     func_config = flow.FunctionConfig()
-    func_config.default_placement_scope(flow.fixed_placement("cpu", "0:0"))
+    func_config.default_placement_scope(flow.scope.placement("cpu", "0:0"))

     @flow.global_function(func_config)
     def Foo():
@@ -31,7 +31,7 @@ def _test_gather_model_parallel_fw(
         params=flow.FixedTensorDef(params_shape, dtype=flow.float),
         indices=flow.FixedTensorDef(indices_shape, dtype=flow.int32),
     ):
-        with flow.fixed_placement(device_type, "0:0-3"):
+        with flow.scope.placement(device_type, "0:0-3"):
             params = params.with_distribute(flow.distribute.split(split_axis))
             indices = indices.with_distribute(flow.distribute.broadcast())
             return flow.gather(params=params, indices=indices, axis=axis)
@@ -29,7 +29,7 @@ def _run_test(test_case, indices, values, indices_dtype, values_dtype, device):
         indices=flow.FixedTensorDef(indices.shape, dtype=indices_dtype),
         values=flow.FixedTensorDef(values.shape, dtype=values_dtype),
     ):
-        with flow.fixed_placement(device, "0:0"):
+        with flow.scope.placement(device, "0:0"):
             return flow.experimental.indexed_slices_reduce_sum(indices, values)

     out_indices, out_values, num_unique = TestJob(indices, values).get()
@@ -9,7 +9,7 @@ func_config.default_distribute_strategy(flow.scope.consistent_view())
 def test_keep_header_only_cpu(test_case):
     @flow.global_function(func_config)
     def job(x=flow.FixedTensorDef((2, 3, 4), dtype=flow.float)):
-        with flow.fixed_placement("cpu", "0:0"):
+        with flow.scope.placement("cpu", "0:0"):
             x = flow.identity(x)
         return flow.math.reduced_shape_elem_cnt(x)
@@ -48,7 +48,7 @@ def _test_element_wise_mul_fw_bw(test_case, device, shape, type_name):
         x=flow.FixedTensorDef(shape, dtype=flow.float),
         y=flow.FixedTensorDef(shape, dtype=flow.float),
     ):
-        with flow.fixed_placement(device, "0:0"):
+        with flow.scope.placement(device, "0:0"):
             x += flow.get_variable(
                 name="vx",
                 shape=(1,),
@@ -26,7 +26,7 @@ def _run_test(
     def one_hot_job(
         x=flow.FixedTensorDef(x_shape, dtype=type_name_to_flow_type[dtype])
     ):
-        with flow.fixed_placement(device_type, "0:0"):
+        with flow.scope.placement(device_type, "0:0"):
             return flow.one_hot(
                 x,
                 depth=depth,
@@ -37,7 +37,7 @@ def _run_test(test_case, device_type, dtype, x_shape, shared_axes):
     @flow.global_function(func_config)
     def PreluJob(x=flow.FixedTensorDef(x_shape, dtype=type_name_to_flow_type[dtype])):
-        with flow.fixed_placement(device_type, "0:0"):
+        with flow.scope.placement(device_type, "0:0"):
             x += flow.get_variable(
                 name="v1",
                 shape=(1,),
@@ -27,7 +27,7 @@ def test_shuffle(_):
     def TestJob(
         x=flow.FixedTensorDef(x_shape, dtype=type_name_to_flow_type[data_type])
     ):
-        with flow.fixed_placement(device_type, "0:0"):
+        with flow.scope.placement(device_type, "0:0"):
             return flow.random.shuffle(x)

     x = np.random.randn(*x_shape).astype(type_name_to_np_type[data_type])
@@ -47,7 +47,7 @@ def test_shuffle(_):
     def TestJob1(
         x=flow.FixedTensorDef(x_shape, dtype=type_name_to_flow_type[data_type])
     ):
-        with flow.fixed_placement(device_type, "0:0"):
+        with flow.scope.placement(device_type, "0:0"):
             return flow.random.generate_random_batch_permutation_indices(x)

     x = np.random.randn(*x_shape).astype(type_name_to_np_type[data_type])
@@ -93,7 +93,7 @@ def test_smooth_l1_loss(_):
         )
         flow.watch_diff(v, assert_prediction_grad)
         prediction += v
-        with flow.fixed_placement(device_type, "0:0"):
+        with flow.scope.placement(device_type, "0:0"):
             loss = flow.smooth_l1_loss(prediction, label, beta)
             flow.losses.add_loss(loss)
         return loss
@@ -24,7 +24,7 @@ def compare_with_tensorflow(device_type, in_shape, direction, data_type):
             dtype=type_name_to_flow_type[data_type],
         )
     ):
-        with flow.fixed_placement(device_type, "0:0"):
+        with flow.scope.placement(device_type, "0:0"):
             return flow.sort(input, direction)

     input = (np.random.random(in_shape) * 100).astype(type_name_to_np_type[data_type])
@@ -13,7 +13,7 @@ def _check(test_case, x, y):
 def _run_test(test_case, x, dtype, device):
     @flow.global_function(func_config)
     def SquareSum(x=flow.FixedTensorDef(x.shape, dtype=dtype)):
-        with flow.fixed_placement(device, "0:0"):
+        with flow.scope.placement(device, "0:0"):
             return flow.experimental.square_sum(x)

     y = SquareSum(x).get()
@@ -23,7 +23,7 @@ def compare_with_tensorflow(device_type, x_shape, axis):
     @flow.global_function(func_config)
     def SqueezeJob():
-        with flow.fixed_placement(device_type, "0:0"):
+        with flow.scope.placement(device_type, "0:0"):
             x = flow.get_variable(
                 "var",
                 shape=x_shape,
@@ -27,7 +27,7 @@ def test_sync_dynamic_resize(_):
         x=flow.FixedTensorDef(x_shape, dtype=type_name_to_flow_type[data_type]),
         size=flow.FixedTensorDef((1,), dtype=type_name_to_flow_type[size_type]),
     ):
-        with flow.fixed_placement(device_type, "0:0"):
+        with flow.scope.placement(device_type, "0:0"):
             return flow.sync_dynamic_resize(x, size)

     size = np.random.randint(0, x_shape[0])
@@ -24,7 +24,7 @@ def compare_with_tensorflow(device_type, in_shape, k, data_type, sorted):
             dtype=type_name_to_flow_type[data_type],
         )
     ):
-        with flow.fixed_placement(device_type, "0:0"):
+        with flow.scope.placement(device_type, "0:0"):
             return flow.math.top_k(input, k, sorted)

     input = (np.random.random(in_shape) * 100).astype(type_name_to_np_type[data_type])
@@ -34,14 +34,14 @@ def _test_two_stage_reduce(
     @flow.global_function(func_config)
     def two_stage_reduce_job(x=flow.FixedTensorDef((4, 20, 20, 20))):
-        with flow.fixed_placement(device_type, "0:0"):
+        with flow.scope.placement(device_type, "0:0"):
             x += flow.get_variable(
                 name="v1",
                 shape=(1,),
                 dtype=flow.float,
                 initializer=flow.zeros_initializer(),
             )
-        with flow.fixed_placement(device_type, "0:0-3"):
+        with flow.scope.placement(device_type, "0:0-3"):
             loss = flow_func(
                 x.with_distribute(flow.distribute.split(split_axis)),
                 axis=axis,
@@ -65,7 +65,7 @@ def test_acos_consistent_1n2c(test_case):
 def test_acos_cpu(test_case):
     func_config = flow.FunctionConfig()
     func_config.default_data_type(flow.float)
-    func_config.default_placement_scope(flow.fixed_placement("cpu", "0:0"))
+    func_config.default_placement_scope(flow.scope.placement("cpu", "0:0"))
     func_config.default_distribute_strategy(flow.scope.consistent_view())

     @flow.global_function(func_config)
@@ -23,7 +23,7 @@ def _check_unique(test_case, x, y, idx, count, num_unique):
 def _run_test(test_case, x, dtype, device):
     @flow.global_function(func_config)
     def UniqueWithCountsJob(x=flow.FixedTensorDef(x.shape, dtype=dtype)):
-        with flow.fixed_placement(device, "0:0"):
+        with flow.scope.placement(device, "0:0"):
             return flow.experimental.unique_with_counts(x)

     y, idx, count, num_unique = UniqueWithCountsJob(x).get()
@@ -54,7 +54,7 @@ def _make_unsoted_segment_sum_fn(device, data, segment_ids, num_segments):
         data=flow.FixedTensorDef(data.shape, dtype=flow.float),
         segment_ids=flow.FixedTensorDef(segment_ids.shape, dtype=flow.int32),
     ):
-        with flow.fixed_placement(device, "0:0"):
+        with flow.scope.placement(device, "0:0"):
             x = flow.get_variable(
                 "data",
                 shape=data.shape,
@@ -51,7 +51,7 @@ def _run_test(test_case, device, out_shape, axis, segment_ids_shape):
         data=flow.FixedTensorDef(data.shape, dtype=flow.float),
         segment_ids=flow.FixedTensorDef(segment_ids.shape, dtype=flow.int32),
     ):
-        with flow.fixed_placement(device, "0:0"):
+        with flow.scope.placement(device, "0:0"):
             return flow.math.unsorted_segment_sum(
                 data=data,
                 segment_ids=segment_ids,
@@ -65,7 +65,7 @@ def _run_test(test_case, device, out_shape, axis, segment_ids_shape):
         segment_ids=flow.FixedTensorDef(segment_ids.shape, dtype=flow.int32),
         like=flow.FixedTensorDef(out_shape, dtype=flow.float32),
     ):
-        with flow.fixed_placement(device, "0:0"):
+        with flow.scope.placement(device, "0:0"):
             return flow.math.unsorted_segment_sum_like(
                 data=data, segment_ids=segment_ids, like=like, axis=axis
             )
@@ -53,7 +53,7 @@ def _test_unsorted_segment_sum_model_parallel_fw(
         segment_ids=flow.FixedTensorDef(segment_ids_arr.shape, dtype=flow.int32),
         like=flow.FixedTensorDef(out_arr.shape, dtype=flow.float),
     ):
-        with flow.fixed_placement(device_type, "0:0-3"):
+        with flow.scope.placement(device_type, "0:0-3"):
             if split_axis < axis:
                 data = data.with_distribute(flow.distribute.split(split_axis))
             elif split_axis == axis: