Commit dafd68b7 authored by channingss

fix bug in pool op for tiny_yolov2

Parent ae31abd2
......@@ -181,6 +181,7 @@ class ONNXGraph(Graph):
layer,
layer_name=layer.name,
is_global_input=is_place_holder)
        # set data node's weight
for name, weight in self.graph_weights(self.model):
if name in self.node_map:
......@@ -405,8 +406,7 @@ class ONNXDecoder(object):
for item in graph.value_info:
item.name = self.make_variable_name(item.name)
for node in graph.node:
if node.name == '':
node.name = node.output[0]
node.name = node.output[0]
node.name = self.make_variable_name(node.name)
for i in range(len(node.input)):
node.input[i] = self.make_variable_name(node.input[i])
......
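For context, ONNX does not require nodes to carry a name, which is why the decoder above now always derives a node's name from its first output. A minimal stand-alone illustration using the `onnx` helper API (not code from this repository):
```
from onnx import helper

# ONNX allows a node's name to be empty; only its inputs/outputs must be named.
node = helper.make_node('Relu', inputs=['x'], outputs=['relu_0'])
print(repr(node.name))    # '' -- empty by default

# Keying the node by its first output gives every node a usable, unique name.
node.name = node.output[0]
print(node.name)          # 'relu_0'
```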
......@@ -45,7 +45,9 @@ default_op_mapping = {
'reduce_mean', ['X'], ['Out'],
dict(axes='dim', keepdims='keep_dim'),
dict(keep_dim=1)
]
],
'LeakyRelu': ['leaky_relu', ['X'], ['Out'],
dict(), dict(alpha=.01)]
}
default_ioa_constraint = {
......
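To make the new `LeakyRelu` entry easier to read: each entry lists the target fluid op, its input/output slots, attribute renames, and attribute defaults, so a LeakyRelu node without an explicit `alpha` falls back to 0.01. The consumer below is a simplified, hypothetical stand-in for the real dispatch code, which is not part of this diff:
```
# Hypothetical reader of a default_op_mapping entry; it only demonstrates the
# [fluid_op, inputs, outputs, attr_renames, attr_defaults] layout used above.
entry = ['leaky_relu', ['X'], ['Out'], dict(), dict(alpha=.01)]

def resolve(onnx_attrs, mapping_entry):
    fluid_op, _, _, renames, defaults = mapping_entry
    attrs = dict(defaults)                       # start from the defaults
    for name, value in onnx_attrs.items():
        attrs[renames.get(name, name)] = value   # apply renames where given
    return fluid_op, attrs

print(resolve({}, entry))               # ('leaky_relu', {'alpha': 0.01})
print(resolve({'alpha': 0.2}, entry))   # ('leaky_relu', {'alpha': 0.2})
```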
......@@ -36,6 +36,14 @@ def _const_weight_or_none(node):
return None
def get_same_padding(in_size, kernel_size, stride):
new_size = int(math.ceil(in_size * 1.0 / stride))
pad_size = (new_size - 1) * stride + kernel_size - in_size
pad0 = int(pad_size / 2)
pad1 = pad_size - pad0
return [pad0, pad1]
class ONNXOpMapper(OpMapper):
def __init__(self, decoder):
super(ONNXOpMapper, self).__init__()
......@@ -397,9 +405,8 @@ class ONNXOpMapper(OpMapper):
def AveragePool(self, node):
val_x = self.graph.get_node(node.layer.input[0], copy=True)
assert node.get_attr(
'auto_pad',
'NOTSET') == 'NOTSET', 'only auto_pad = NOTSET is supported'
auto_pad = node.get_attr('auto_pad', 'NOTSET')
kernel_shape = node.get_attr("kernel_shape")
poolnd = len(kernel_shape)
strides = node.get_attr("strides")
......@@ -408,8 +415,16 @@ class ONNXOpMapper(OpMapper):
pads = node.get_attr('pads', [0] * (poolnd * 2))
fluid_op = 'pool{}d'.format(poolnd)
assert 2 <= poolnd <= 3, 'only pool2d and pool3d is supported'
paddings, val_x = self._pad_if_asymmetric(node, pads, val_x)
input_shape = val_x.out_shapes
        if auto_pad == "SAME_UPPER" or auto_pad == "SAME_LOWER":
pad_h = get_same_padding(input_shape[2], kernel_shape[0],
strides[0])
pad_w = get_same_padding(input_shape[3], kernel_shape[1],
strides[1])
attr = {"paddings": pad_h + pad_w, "pad_value": 0.0}
attr = {
"pool_size": kernel_shape,
"pool_type": string('avg'),
......@@ -621,10 +636,8 @@ class ONNXOpMapper(OpMapper):
def MaxPool(self, node):
val_x = self.graph.get_node(node.layer.input[0], copy=True)
assert node.get_attr(
'auto_pad', 'NOTSET'
) == 'NOTSET', 'only auto_pad = NOTSET is supported' # optional
auto_pad = node.get_attr('auto_pad', 'NOTSET')
assert node.get_attr(
"dilations") is None, 'only dilations = 0 is supported' # optional
......@@ -637,6 +650,15 @@ class ONNXOpMapper(OpMapper):
fluid_op = 'pool{}d'.format(poolnd)
assert 2 <= poolnd <= 3, 'only pool2d and pool3d is supported'
paddings, val_x = self._pad_if_asymmetric(node, pads, val_x)
input_shape = val_x.out_shapes
        if auto_pad == "SAME_UPPER" or auto_pad == "SAME_LOWER":
pad_h = get_same_padding(input_shape[2], kernel_shape[0],
strides[0])
pad_w = get_same_padding(input_shape[3], kernel_shape[1],
strides[1])
attr = {"paddings": pad_h + pad_w, "pad_value": 0.0}
attr = {
"pool_size": kernel_shape,
"pool_type": string("max"),
......
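The two pool handlers above share the same SAME-padding logic, so a small worked example of `get_same_padding` may help. The shapes below are illustrative (a 13x13 map with a 2x2 window and stride 1, likely the kind of case the tiny_yolov2 fix targets) rather than values taken from the commit:
```
import math

def get_same_padding(in_size, kernel_size, stride):
    # output size under SAME padding: ceil(in_size / stride)
    new_size = int(math.ceil(in_size * 1.0 / stride))
    # total padding needed so the last window still fits
    pad_size = (new_size - 1) * stride + kernel_size - in_size
    pad0 = int(pad_size / 2)    # padding before (top or left)
    pad1 = pad_size - pad0      # padding after (bottom or right)
    return [pad0, pad1]

# Illustrative values: a 13x13 feature map, 2x2 window, stride 1.
pad_h = get_same_padding(13, 2, 1)    # [0, 1]
pad_w = get_same_padding(13, 2, 1)    # [0, 1]
print(pad_h + pad_w)                  # [0, 1, 0, 1] -> top, bottom, left, right
```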
......@@ -42,17 +42,21 @@
| Alexnet | [torchvision.models.alexnet](https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py) |9|
| Shufflenet | [onnx official](https://github.com/onnx/models/tree/master/vision/classification/shufflenet) |9|
| Inception_v2 | [onnx official](https://github.com/onnx/models/tree/master/vision/classification/inception_and_googlenet/inception_v2) |9|
| Mobilenet_v2 | [pytorch(personal practice)](https://github.com/tonylins/pytorch-mobilenet-v2) |9|
Currently onnx2paddle mainly supports ONNX operator version 9. On how to use torchvision models:
Currently onnx2paddle mainly supports ONNX operator version 9;
how to convert a torchvision model, or a PyTorch model written by an individual developer, into an ONNX model:
```
import torch
import torchvision
# adjust the input shape according to the model
dummy_input = torch.randn(1, 3, 224, 224)
# a pretrained PyTorch model
resnet18 = torchvision.models.resnet18(pretrained=True)
#"resnet18.onnx"为onnx model的存储路径
#"resnet18.onnx"为onnx model的存储路径,1.1
torch.onnx.export(resnet18, dummy_input, "resnet18.onnx", verbose=True)
```
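Since the converter mainly targets ONNX operator version 9, it may be worth checking the exported file with the `onnx` package before conversion; this step is not in the original README and is only a suggested sanity check:
```
import onnx

# Load and validate the file produced by torch.onnx.export above.
model = onnx.load("resnet18.onnx")
onnx.checker.check_model(model)

# Print the operator set versions the model declares (ideally 9 here).
print([opset.version for opset in model.opset_import])
```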