Commit b7baa928 authored by SunAhong1993

fix the command

Parent 3822ac80
......@@ -63,7 +63,6 @@ x2paddle --framework=onnx --model=onnx_model.onnx --save_dir=pd_model --paddle_t
|--model | When --framework is tensorflow/onnx, specifies the path of the TensorFlow pb model file or of the ONNX model |
|--caffe_proto | **[optional]** Path of the caffe_pb2.py file compiled from caffe.proto; used when custom Layers exist. Default: None |
|--define_input_shape | **[optional]** For TensorFlow: when this flag is set, the user is required to enter the shape of every Placeholder; see [FAQ Q2](./docs/user_guides/FAQ.md) |
|--params_merge | **[optional]** When this flag is set, all model parameters in the inference_model are merged and saved into a single file __params__ after conversion |
|--paddle_type | **[optional]** Specifies whether to convert to dynamic-graph code (dygraph) or static-graph code (static). Default: dygraph |
......
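For orientation, here is a minimal sketch of invoking the CLI with the flags from the table above after this change (i.e. without the removed --params_merge). The model path and output directory are placeholders, and it assumes the `x2paddle` command is installed and on PATH:

```python
# Minimal sketch: run the x2paddle CLI with the remaining flags.
# "onnx_model.onnx" and "pd_model" are placeholder paths.
import subprocess

subprocess.run(
    [
        "x2paddle",
        "--framework=onnx",
        "--model=onnx_model.onnx",
        "--save_dir=pd_model",
        "--paddle_type=dygraph",  # or "static" for static-graph output
    ],
    check=True,
)
```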
# X2Paddle Supported OP List
> X2Paddle currently supports 70+ TensorFlow OPs and 30+ Caffe Layers, covering most operations commonly used in CV classification models. The list below gives all OPs currently supported by X2Paddle.
> X2Paddle currently supports 80+ TensorFlow OPs, 30+ Caffe Layers, 60+ ONNX OPs, 110+ PyTorch Aten OPs and 10+ PyTorch Prim OPs, covering most operations commonly used in CV classification models. The list below gives all OPs currently supported by X2Paddle.
**Note:** Some OPs are not yet supported. If you run into an unsupported OP during conversion, you can add it yourself or report it to us. You are welcome to let us know via [ISSUE feedback](https://github.com/PaddlePaddle/X2Paddle/issues/new) (model name, code implementation or how to obtain the model), and we will follow up promptly :)
......@@ -28,7 +28,7 @@
| 73 | Greater | 74 | FloorMod | 75 | LogicalAdd | 76 | Prod |
| 77 | Equal | 78 | Conv3D | 79 | Ceil | 80 | AddN |
| 81 | DivNoNan | 82 | Where | 83 | MirrorPad | 84 | Size |
| 85 | TopKv2 | | | | | | |
| 85 | TopKv2 | 86 | SplitV | | | | |
## Caffe
......@@ -42,7 +42,7 @@
| 21 | Axpy | 22 | ROIPolling | 23 | Permute | 24 | DetectionOutput |
| 25 | Normalize | 26 | Select | 27 | ShuffleChannel | 28 | ConvolutionDepthwise |
| 29 | ReLU | 30 | AbsVal | 31 | Sigmoid | 32 | TanH |
| 33 | ReLU6 | 34 | Upsample |
| 33 | ReLU6 | 34 | Upsample | | | | |
## ONNX
......@@ -61,7 +61,11 @@
| 41 | MatMul | 42 | Sum | 43 | Transpose | 44 | BatchNormalization |
| 45 | Squeeze | 46 | Equal | 47 | Identity | 48 | GlobalAveragePool |
| 49 | MaxPool | 50 | Conv | 51 | Gemm | 52 | NonZero |
| 53 | Abs | 54 | Floor | 52 | ArgMax |
| 53 | Abs | 54 | Floor | 56 | ArgMax | 57 | Sign |
| 58 | Reciprocal | 59 | Size | 60 | OneHot | 61 | ReduceProd |
| 62 | LogSoftmax | 63 | LSTM | | | | |
## PyTorch
Aten:
......@@ -95,6 +99,7 @@ Aten:
| 101 | aten::upsample\_bilinear2d | 102 | aten::values |103|aten::view|104|aten::warn|
| 105 | aten::where | 106 | aten::zeros |107|aten::zeros\_like|108|aten::bmm|
| 109 | aten::sub\_ | 110 | aten:erf |111|aten::lstm|112|aten::gather|
| 113 | aten::upsample\_nearest2d || |||||
Prim:
| No. | OP | No. | OP | No. | OP | No. | OP |
......
......@@ -70,12 +70,6 @@ def arg_parser():
action="store_true",
default=False,
help="define input shape for tf model")
parser.add_argument(
"--params_merge",
"-pm",
action="store_true",
default=False,
help="define whether merge the params")
parser.add_argument(
"--paddle_type",
"-pt",
......@@ -83,22 +77,14 @@ def arg_parser():
default="dygraph",
help="define the paddle model type after converting(dygraph/static)"
)
parser.add_argument(
"--without_data_format_optimization",
"-wo",
type=_text_type,
default="True",
help="tf model conversion without data format optimization")
return parser
def tf2paddle(model_path,
save_dir,
without_data_format_optimization=False,
define_input_shape=False,
paddle_type="dygraph",
params_merge=False):
paddle_type="dygraph"):
# check tensorflow installation and version
try:
import os
......@@ -139,8 +125,7 @@ def tf2paddle(model_path,
def caffe2paddle(proto, weight, save_dir, caffe_proto,
paddle_type, params_merge=False):
def caffe2paddle(proto, weight, save_dir, caffe_proto, paddle_type):
from x2paddle.decoder.caffe_decoder import CaffeDecoder
if paddle_type == "dygraph":
from x2paddle.op_mapper.dygraph.caffe2paddle.caffe_op_mapper import CaffeOpMapper
......@@ -165,7 +150,7 @@ def caffe2paddle(proto, weight, save_dir, caffe_proto,
mapper.paddle_graph.gen_model(save_dir)
def onnx2paddle(model_path, save_dir, paddle_type, params_merge=False):
def onnx2paddle(model_path, save_dir, paddle_type):
# check onnx installation and version
try:
import onnx
......@@ -259,33 +244,19 @@ def main():
if args.framework == "tensorflow":
assert args.model is not None, "--model should be defined while translating tensorflow model"
assert args.without_data_format_optimization in [
"True", "False"
], "--the param without_data_format_optimization should be defined True or False"
define_input_shape = False
params_merge = False
without_data_format_optimization = True if args.without_data_format_optimization == "True" else False
if args.define_input_shape:
define_input_shape = True
if args.params_merge:
params_merge = True
tf2paddle(args.model, args.save_dir, without_data_format_optimization,
define_input_shape, args.paddle_type, params_merge)
tf2paddle(args.model, args.save_dir,
define_input_shape, args.paddle_type)
elif args.framework == "caffe":
assert args.prototxt is not None and args.weight is not None, "--prototxt and --weight should be defined while translating caffe model"
params_merge = False
if args.params_merge:
params_merge = True
caffe2paddle(args.prototxt, args.weight, args.save_dir,
args.caffe_proto, args.paddle_type, params_merge)
args.caffe_proto, args.paddle_type)
elif args.framework == "onnx":
assert args.model is not None, "--model should be defined while translating onnx model"
params_merge = False
if args.params_merge:
params_merge = True
onnx2paddle(args.model, args.save_dir, args.paddle_type, params_merge)
onnx2paddle(args.model, args.save_dir, args.paddle_type)
elif args.framework == "paddle2onnx":
print("Paddle to ONNX tool has been migrated to the new github: https://github.com/PaddlePaddle/paddle2onnx")
......
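To make the effect of the trimmed signatures concrete, here is a sketch of how the three converter entry points are called after this change (assuming they live in `x2paddle.convert` as in this file, and that the new signatures read as in the hunks above; all paths and values are illustrative placeholders):

```python
# Sketch of the simplified dispatch: params_merge and
# without_data_format_optimization are gone, so each converter only takes
# the model location, the output directory and paddle_type.
from x2paddle.convert import tf2paddle, caffe2paddle, onnx2paddle

save_dir = "pd_model"
paddle_type = "dygraph"  # or "static"

# TensorFlow: (model_path, save_dir, define_input_shape, paddle_type)
tf2paddle("frozen.pb", save_dir, define_input_shape=False,
          paddle_type=paddle_type)

# Caffe: (proto, weight, save_dir, caffe_proto, paddle_type)
caffe2paddle("deploy.prototxt", "model.caffemodel", save_dir,
             caffe_proto=None, paddle_type=paddle_type)

# ONNX: (model_path, save_dir, paddle_type)
onnx2paddle("onnx_model.onnx", save_dir, paddle_type)
```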
......@@ -15,7 +15,6 @@
from __future__ import print_function
from __future__ import division
import paddle.fluid as fluid
import paddle
from paddle.fluid.proto import framework_pb2
import collections
......@@ -258,22 +257,17 @@ class PaddleGraph(object):
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_program, startup_program):
inputs, outputs = x2paddle_model.x2paddle_net()
exe = fluid.Executor(fluid.CPUPlace())
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_program)
param_dir = osp.join(code_dir, 'weights')
for k, v in self.parameters.items():
if scope.find_var(k):
self.dump_parameter(k, v, param_dir)
def if_exist(var):
b = osp.exists(
osp.join(osp.join(param_dir, var.name)))
return b
fluid.io.load_vars(
exe, param_dir, main_program, predicate=if_exist)
fluid.io.save_inference_model(
dirname=infer_dir,
feeded_var_names=[i.name for i in inputs],
target_vars=outputs,
paddle.static.load(main_program, param_dir, exe)
paddle.static.save_inference_model(
path_prefix=osp.join(infer_dir, "model"),
feed_vars=[i for i in inputs],
fetch_vars=outputs,
executor=exe)
def gen_dygraph_model(self, save_dir, jit_type=None):
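The hunk above replaces the fluid.io export path with the Paddle 2.x static-graph API (paddle.static.Executor, paddle.static.load, paddle.static.save_inference_model). A standalone sketch of that export pattern, with an illustrative network and paths rather than the converter's real graph:

```python
# Sketch of the Paddle 2.x static-graph export pattern this hunk switches to.
import paddle

paddle.enable_static()

main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name="x", shape=[-1, 16], dtype="float32")
    out = paddle.static.nn.fc(x, size=4)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)

# Export: path_prefix + feed_vars/fetch_vars replace the old
# dirname + feeded_var_names/target_vars arguments.
paddle.static.save_inference_model(
    path_prefix="pd_model/model",
    feed_vars=[x],
    fetch_vars=[out],
    executor=exe)
```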
......@@ -562,8 +556,7 @@ class PaddleGraph(object):
remove_default_attrs(layer.kernel, layer.attrs)
if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel
) or layer.kernel == "paddle.to_tensor" or \
layer.kernel.startswith("custom_layer") or \
layer.kernel.startswith("paddle.fluid.dygraph"):
layer.kernel.startswith("custom_layer"):
line = "{}".format(
layer.outputs[0]
) if layer.kernel == "paddle.to_tensor" and not layer.attrs[
......@@ -660,7 +653,6 @@ class PaddleGraph(object):
paddle.save(self.parameters, save_path)
def dygraph2static(self, save_dir, input_shapes=[], input_types=[]):
from paddle.fluid.dygraph.jit import declarative
sepc_list = list()
for i, name in enumerate(self.inputs):
sepc_list.append(
......
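The last hunk drops the `paddle.fluid.dygraph.jit.declarative` import; in Paddle 2.x the equivalent dygraph-to-static export goes through paddle.jit. A sketch with a toy layer (shapes and paths are illustrative, not the converter's real network):

```python
# Sketch: convert a dygraph Layer to static graph and save it for inference,
# using paddle.jit.to_static / paddle.jit.save instead of the removed
# fluid declarative decorator.
import paddle
from paddle.static import InputSpec

class ToyNet(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.fc = paddle.nn.Linear(16, 4)

    def forward(self, x):
        return self.fc(x)

net = ToyNet()
net.eval()

spec_list = [InputSpec(shape=[-1, 16], dtype="float32", name="x")]
static_net = paddle.jit.to_static(net, input_spec=spec_list)

# Saves model structure + parameters for inference deployment.
paddle.jit.save(static_net, path="pd_model/model")
```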