提交 5244790e 编写于 作者: W wjj19950828

Add activation ops

上级 703e2c98
...@@ -114,7 +114,7 @@ Aten: ...@@ -114,7 +114,7 @@ Aten:
| 117 | aten::bitwise\_not | 118 | aten::bitwise\_xor | 119 | aten::bitwise\_and | 120 | aten::silu | | 117 | aten::bitwise\_not | 118 | aten::bitwise\_xor | 119 | aten::bitwise\_and | 120 | aten::silu |
| 121 | aten::repeat\_interleave | 122 | aten::maxpool1d | 123 | aten::frobenius\_norm | 124 | aten::format | | 121 | aten::repeat\_interleave | 122 | aten::maxpool1d | 123 | aten::frobenius\_norm | 124 | aten::format |
| 125 | aten::complex | 126 | aten::real | 127 | aten::imag | 128 | aten::fft\_rfftn | | 125 | aten::complex | 126 | aten::real | 127 | aten::imag | 128 | aten::fft\_rfftn |
| 129 | aten::fft\_irfftn | | | | | | | | 129 | aten::fft\_irfftn | 130 | aten::hardsigmoid | 131 | aten::hardswish | | |
Prim: Prim:
......
...@@ -2743,6 +2743,68 @@ def aten_hardtanh(mapper, graph, node): ...@@ -2743,6 +2743,68 @@ def aten_hardtanh(mapper, graph, node):
return current_inputs, current_outputs return current_inputs, current_outputs
def aten_hardsigmoid(mapper, graph, node):
    """Map aten::hardsigmoid to a paddle.nn.Hardsigmoid layer.

    TorchScript Code:
        %55 : Tensor = aten::hardsigmoid(%54)
    Parameter meaning:
        %55 (Tensor): output
        %54 (Tensor): input tensor
    """
    scope_name = mapper.normalize_scope_name(node)
    # Unique sub-layer name for this nn op instance.
    layer_name = name_generator("hardsigmoid", mapper.nn_name2id)
    out_name = mapper._get_outputs_name(node)[0]
    in_names, in_nodes = mapper._get_inputs_name(node)
    # Outputs produced by this node.
    current_outputs = [out_name]
    # Register the single tensor input into the graph.
    mapper._check_input(graph, in_nodes[0], in_names[0], current_outputs,
                        scope_name)
    layer_inputs = {"x": in_names[0]}
    graph.add_layer(
        "paddle.nn.Hardsigmoid",
        inputs=layer_inputs,
        outputs=[layer_name, out_name],
        scope_name=scope_name)
    return list(layer_inputs.values()), current_outputs
def aten_hardswish(mapper, graph, node):
    """Map aten::hardswish to a paddle.nn.Hardswish layer.

    TorchScript Code:
        %55 : Tensor = aten::hardswish(%54)
    Parameter meaning:
        %55 (Tensor): output
        %54 (Tensor): input tensor
    """
    scope_name = mapper.normalize_scope_name(node)
    # Unique sub-layer name for this nn op instance.
    layer_name = name_generator("hardswish", mapper.nn_name2id)
    out_name = mapper._get_outputs_name(node)[0]
    in_names, in_nodes = mapper._get_inputs_name(node)
    # Outputs produced by this node.
    current_outputs = [out_name]
    # Register the single tensor input into the graph.
    mapper._check_input(graph, in_nodes[0], in_names[0], current_outputs,
                        scope_name)
    layer_inputs = {"x": in_names[0]}
    graph.add_layer(
        "paddle.nn.Hardswish",
        inputs=layer_inputs,
        outputs=[layer_name, out_name],
        scope_name=scope_name)
    return list(layer_inputs.values()), current_outputs
def aten_index(mapper, graph, node): def aten_index(mapper, graph, node):
""" 构造选择元素的PaddleLayer。 """ 构造选择元素的PaddleLayer。
TorchScript示例: TorchScript示例:
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册