diff --git a/docs/inference_model_convertor/op_list.md b/docs/inference_model_convertor/op_list.md
index 3fc921ee50703485bd5d64fd1a661c9d5ad433fd..2520321f229a25e10976e39711e05b09342a0386 100755
--- a/docs/inference_model_convertor/op_list.md
+++ b/docs/inference_model_convertor/op_list.md
@@ -115,8 +115,7 @@ Aten:
 | 121 | aten::repeat\_interleave | 122 | aten::maxpool1d | 123 | aten::frobenius\_norm | 124 | aten::format |
 | 125 | aten::complex | 126 | aten::real | 127 | aten::imag | 128 | aten::fft\_rfftn |
 | 129 | aten::fft\_irfftn | 130 | aten::hardsigmoid | 131 | aten::hardswish | 132 | aten::linear |
-| 133 | aten::rsqrt | | | | | | |
-
+| 133 | aten::rsqrt | 134 | aten::replication\_pad1d | | | | |
 
 Prim:
 | No. | OP | No. | OP | No. | OP | No. | OP |
diff --git a/x2paddle/op_mapper/pytorch2paddle/aten.py b/x2paddle/op_mapper/pytorch2paddle/aten.py
index 11fd90e2ac117fc2be61574c3677ac3833ec4614..dce8581d7699c3dcdab78fa1b87fbe0e8bc19157 100755
--- a/x2paddle/op_mapper/pytorch2paddle/aten.py
+++ b/x2paddle/op_mapper/pytorch2paddle/aten.py
@@ -3263,27 +3263,25 @@ def aten_linear(mapper, graph, node):
     # transpose weight
     mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs,
                         scope_name)
-    layer_attrs_transpose = {}
-    layer_attrs_transpose["perm"] = [1, 0]
+    layer_inputs["y"] = inputs_name[1]
+    layer_attrs["transpose_y"] = True
     graph.add_layer(
-        "paddle.transpose",
-        inputs={"x": inputs_name[1]},
-        outputs=[inputs_name[1] + "_transpose"],
+        "paddle.matmul",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
         scope_name=scope_name,
-        **layer_attrs_transpose)
-    layer_inputs["weight"] = inputs_name[1] + "_transpose"
+        **layer_attrs)
     if len(inputs_name) == 3:
         mapper._check_input(graph, inputs_node[2], inputs_name[2],
                             current_outputs, scope_name)
-        layer_inputs["bias"] = inputs_name[2]
+        graph.add_layer(
+            "paddle.add",
+            inputs={"x": output_name,
+                    "y": inputs_name[2]},
+            outputs=layer_outputs,
+            scope_name=scope_name)
     current_inputs = list(layer_inputs.values())
-    graph.add_layer(
-        "paddle.nn.functional.linear",
-        inputs=layer_inputs,
-        outputs=layer_outputs,
-        scope_name=scope_name,
-        **layer_attrs)
 
     return current_inputs, current_outputs
 
@@ -4658,6 +4656,42 @@ def aten_repeat_interleave(mapper, graph, node):
     return current_inputs, current_outputs
 
 
+def aten_replication_pad1d(mapper, graph, node):
+    """
+    TorchScript Code:
+        %58 : Tensor = aten::replication_pad1d(%input.1, %152)
+    Parameter meaning:
+        %58 (Tensor): Output Tensor
+        %input.1 (Tensor): Input Tensor
+        %152 (list): Padding size
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    op_name = name_generator("pad", mapper.nn_name2id)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [op_name, output_name]
+    layer_inputs = {}
+    layer_attrs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # output list
+    current_outputs = [output_name]
+    # input list
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
+                        scope_name)
+    layer_inputs["input"] = inputs_name[0]
+    layer_attrs["padding"] = mapper.attrs[inputs_name[1]]
+    layer_attrs["mode"] = string("replicate")
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer(
+        "paddle.nn.Pad1D",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name,
+        **layer_attrs)
+
+    return current_inputs, current_outputs
+
+
 def aten_reshape(mapper, graph, node):
     """ Construct a PaddleLayer that reshapes the input tensor.
     TorchScript example:
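
Note on the `aten_linear` rewrite: the old lowering emitted a standalone `paddle.transpose` followed by `paddle.nn.functional.linear`, while the new one fuses the weight transpose into `paddle.matmul(..., transpose_y=True)` and adds the bias with a separate `paddle.add`. Since PyTorch stores linear weights as `[out_features, in_features]`, both forms compute `x @ W.T + b` and should agree numerically. Below is a minimal sketch (not part of the PR; shapes are illustrative and it assumes a local Paddle 2.x install) comparing the two lowerings:

```python
# Sanity check: the new matmul + add lowering of aten::linear should
# match the old transpose + functional.linear lowering.
import numpy as np
import paddle

x = paddle.to_tensor(np.random.rand(2, 4).astype("float32"))
# PyTorch linear weight layout: [out_features, in_features].
w = paddle.to_tensor(np.random.rand(3, 4).astype("float32"))
b = paddle.to_tensor(np.random.rand(3).astype("float32"))

# Old lowering: explicit transpose, then paddle.nn.functional.linear.
old = paddle.nn.functional.linear(x, paddle.transpose(w, perm=[1, 0]), b)
# New lowering: transpose fused into matmul, bias added separately.
new = paddle.add(paddle.matmul(x, w, transpose_y=True), b)

assert np.allclose(old.numpy(), new.numpy())
```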
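
On the new `aten::replication_pad1d` mapping: the op replicates the edge values along the last dimension of a 3-D NCL tensor, which matches what `paddle.nn.Pad1D` does with `mode="replicate"`. A small illustrative sketch of the layer the converter now emits (the shapes and padding values are made up; assumes Paddle 2.x):

```python
import paddle

# Pad 2 replicated values on the left and 3 on the right of the L axis.
pad = paddle.nn.Pad1D(padding=[2, 3], mode="replicate")
x = paddle.arange(10, dtype="float32").reshape([1, 2, 5])  # NCL layout
y = pad(x)
print(y.shape)  # [1, 2, 10]
```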