Unverified commit f8910d0c, authored by jm_12138, committed by GitHub

fix the gpu_id bug

Parent 93cfcb6a
@@ -7,9 +7,9 @@ __all__ = ['Model']
 class Model():
     # Initialization function
-    def __init__(self, modelpath, use_gpu):
+    def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True):
         # Load the model predictor
-        self.predictor = self.load_model(modelpath, use_gpu)
+        self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined)
         # Get the model's inputs and outputs
         self.input_names = self.predictor.get_input_names()
@@ -18,31 +18,33 @@ class Model():
         self.output_tensor = self.predictor.get_output_tensor(self.output_names[0])
     # Model loading function
-    def load_model(self, modelpath, use_gpu):
+    def load_model(self, modelpath, use_gpu, use_mkldnn, combined):
         # Configure the running device
         if use_gpu:
             try:
-                places = os.environ["CUDA_VISIBLE_DEVICES"]
-                places = int(places[0])
-            except Exception as e:
-                print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e)
+                int(os.environ.get('CUDA_VISIBLE_DEVICES'))
+            except Exception:
+                print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.')
                 use_gpu = False
-        # Path to the pretrained model
-        model = os.path.join(modelpath, "__model__")
-        params = os.path.join(modelpath, "__params__")
-        # Load the model parameters
-        config = AnalysisConfig(model, params)
+        # Load the model parameters
+        if combined:
+            model = os.path.join(modelpath, "__model__")
+            params = os.path.join(modelpath, "__params__")
+            config = AnalysisConfig(model, params)
+        else:
+            config = AnalysisConfig(modelpath)
         # Set config options
         if use_gpu:
-            config.enable_use_gpu(100, places)
+            config.enable_use_gpu(100, 0)
         else:
             config.disable_gpu()
-        config.enable_mkldnn()
+        if use_mkldnn:
+            config.enable_mkldnn()
         config.disable_glog_info()
         config.switch_ir_optim(True)
         config.enable_memory_optim()
         config.switch_use_feed_fetch_ops(False)
         config.switch_specify_input_names(True)
@@ -58,7 +60,8 @@ class Model():
         # Iterate over the input data and predict
         for input_data in input_datas:
-            self.input_tensor.copy_from_cpu(input_data)
+            inputs = input_data.copy()
+            self.input_tensor.copy_from_cpu(inputs)
             self.predictor.zero_copy_run()
             output = self.output_tensor.copy_to_cpu()
             outputs.append(output)
......
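The gpu_id fix itself is in the middle hunk above. The old code read CUDA_VISIBLE_DEVICES, took int(places[0]) (only the first character of the string) and passed that number straight to enable_use_gpu, so any setting other than "0" asked Paddle for a device the masked process cannot see. The patched code only checks that the variable holds a single integer id and then always requests device 0, which is how Paddle numbers the one visible card. Below is a minimal standalone sketch of that configuration logic, assuming the Paddle 1.x inference API (paddle.fluid.core.AnalysisConfig) that these modules are written against; build_config is a hypothetical helper name, not part of the patch.

import os

# Assumption: Paddle 1.x inference API, as used by these PaddleHub modules.
from paddle.fluid.core import AnalysisConfig


def build_config(modelpath, use_gpu=False, use_mkldnn=True, combined=True):
    # Hypothetical helper mirroring the patched load_model logic.
    if use_gpu:
        try:
            # Succeeds only when CUDA_VISIBLE_DEVICES is a single GPU id, e.g. "0" or "3".
            int(os.environ.get('CUDA_VISIBLE_DEVICES'))
        except Exception:
            print('Error! Unable to use GPU. Please set the environment variables '
                  '"CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.')
            use_gpu = False

    # Combined models ship a single __model__/__params__ pair; un-combined
    # models keep separate parameter files, so the directory is passed as-is.
    if combined:
        config = AnalysisConfig(os.path.join(modelpath, "__model__"),
                                os.path.join(modelpath, "__params__"))
    else:
        config = AnalysisConfig(modelpath)

    if use_gpu:
        # Always device 0: CUDA_VISIBLE_DEVICES already restricts the process
        # to the chosen card, which Paddle then numbers as 0.
        config.enable_use_gpu(100, 0)
    else:
        config.disable_gpu()
    if use_mkldnn:
        config.enable_mkldnn()
    return config

The last hunk additionally copies each input array before handing it to copy_from_cpu; presumably this gives the zero-copy input tensor its own contiguous buffer instead of aliasing an array the caller may still hold.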
-# coding=utf-8
 import os
 from paddlehub import Module
@@ -22,7 +21,7 @@ class Hand_Pose_Localization(Module):
         self.model_path = os.path.join(self.directory, "hand_pose_localization")
         # Load the model
-        self.model = Model(self.model_path, use_gpu)
+        self.model = Model(self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=True)
     # Keypoint detection function
     def keypoint_detection(
......
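For callers the change is backward compatible: use_gpu still defaults to False, and GPU selection is now driven entirely by CUDA_VISIBLE_DEVICES. A usage sketch, assuming the module is installed from PaddleHub and that hub.Module forwards keyword arguments such as use_gpu to the module's initializer, as the Model(...) call above suggests:

import os

# Choose the physical card before the predictor is created; inside the process
# the patched code always asks Paddle for device 0, which maps to this card.
os.environ["CUDA_VISIBLE_DEVICES"] = "2"

import paddlehub as hub

# Assumption: the published module name matches the directory name in the diff.
model = hub.Module(name="hand_pose_localization", use_gpu=True)
# model.keypoint_detection(...) then runs on the selected GPU.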
@@ -7,9 +7,9 @@ __all__ = ['Model']
 class Model():
     # Initialization function
-    def __init__(self, modelpath, use_gpu):
+    def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True):
         # Load the model predictor
-        self.predictor = self.load_model(modelpath, use_gpu)
+        self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined)
         # Get the model's inputs and outputs
         self.input_names = self.predictor.get_input_names()
@@ -18,24 +18,30 @@ class Model():
         self.output_tensor = self.predictor.get_output_tensor(self.output_names[0])
     # Model loading function
-    def load_model(self, modelpath, use_gpu):
+    def load_model(self, modelpath, use_gpu, use_mkldnn, combined):
         # Configure the running device
         if use_gpu:
             try:
-                places = os.environ["CUDA_VISIBLE_DEVICES"]
-                places = int(places[0])
-            except Exception as e:
-                print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e)
+                int(os.environ.get('CUDA_VISIBLE_DEVICES'))
+            except Exception:
+                print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.')
                 use_gpu = False
         # Load the model parameters
-        config = AnalysisConfig(modelpath)
+        if combined:
+            model = os.path.join(modelpath, "__model__")
+            params = os.path.join(modelpath, "__params__")
+            config = AnalysisConfig(model, params)
+        else:
+            config = AnalysisConfig(modelpath)
         # Set config options
         if use_gpu:
-            config.enable_use_gpu(100, places)
+            config.enable_use_gpu(100, 0)
         else:
             config.disable_gpu()
+        if use_mkldnn:
+            config.enable_mkldnn()
         config.disable_glog_info()
         config.switch_ir_optim(True)
         config.enable_memory_optim()
......
@@ -21,7 +21,7 @@ class UGATIT_100w(Module):
         self.model_path = os.path.join(self.directory, "UGATIT_100w")
         # Load the model
-        self.model = Model(self.model_path, use_gpu)
+        self.model = Model(self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False)
     # Keypoint detection function
     def style_transfer(
......
@@ -7,9 +7,9 @@ __all__ = ['Model']
 class Model():
     # Initialization function
-    def __init__(self, modelpath, use_gpu):
+    def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True):
         # Load the model predictor
-        self.predictor = self.load_model(modelpath, use_gpu)
+        self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined)
         # Get the model's inputs and outputs
         self.input_names = self.predictor.get_input_names()
@@ -18,24 +18,30 @@ class Model():
         self.output_tensor = self.predictor.get_output_tensor(self.output_names[0])
     # Model loading function
-    def load_model(self, modelpath, use_gpu):
+    def load_model(self, modelpath, use_gpu, use_mkldnn, combined):
         # Configure the running device
         if use_gpu:
             try:
-                places = os.environ["CUDA_VISIBLE_DEVICES"]
-                places = int(places[0])
-            except Exception as e:
-                print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e)
+                int(os.environ.get('CUDA_VISIBLE_DEVICES'))
+            except Exception:
+                print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.')
                 use_gpu = False
         # Load the model parameters
-        config = AnalysisConfig(modelpath)
+        if combined:
+            model = os.path.join(modelpath, "__model__")
+            params = os.path.join(modelpath, "__params__")
+            config = AnalysisConfig(model, params)
+        else:
+            config = AnalysisConfig(modelpath)
         # Set config options
         if use_gpu:
-            config.enable_use_gpu(100, places)
+            config.enable_use_gpu(100, 0)
         else:
             config.disable_gpu()
+        if use_mkldnn:
+            config.enable_mkldnn()
         config.disable_glog_info()
         config.switch_ir_optim(True)
         config.enable_memory_optim()
......
@@ -21,7 +21,7 @@ class UGATIT_83w(Module):
         self.model_path = os.path.join(self.directory, "UGATIT_83w")
         # Load the model
-        self.model = Model(self.model_path, use_gpu)
+        self.model = Model(self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False)
     # Keypoint detection function
     def style_transfer(
......
@@ -7,9 +7,9 @@ __all__ = ['Model']
 class Model():
     # Initialization function
-    def __init__(self, modelpath, use_gpu):
+    def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True):
         # Load the model predictor
-        self.predictor = self.load_model(modelpath, use_gpu)
+        self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined)
         # Get the model's inputs and outputs
         self.input_names = self.predictor.get_input_names()
@@ -18,24 +18,30 @@ class Model():
         self.output_tensor = self.predictor.get_output_tensor(self.output_names[0])
     # Model loading function
-    def load_model(self, modelpath, use_gpu):
+    def load_model(self, modelpath, use_gpu, use_mkldnn, combined):
         # Configure the running device
         if use_gpu:
             try:
-                places = os.environ["CUDA_VISIBLE_DEVICES"]
-                places = int(places[0])
-            except Exception as e:
-                print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e)
+                int(os.environ.get('CUDA_VISIBLE_DEVICES'))
+            except Exception:
+                print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.')
                 use_gpu = False
         # Load the model parameters
-        config = AnalysisConfig(modelpath)
+        if combined:
+            model = os.path.join(modelpath, "__model__")
+            params = os.path.join(modelpath, "__params__")
+            config = AnalysisConfig(model, params)
+        else:
+            config = AnalysisConfig(modelpath)
         # Set config options
         if use_gpu:
-            config.enable_use_gpu(100, places)
+            config.enable_use_gpu(100, 0)
         else:
             config.disable_gpu()
+        if use_mkldnn:
+            config.enable_mkldnn()
         config.disable_glog_info()
         config.switch_ir_optim(True)
         config.enable_memory_optim()
......
@@ -21,7 +21,7 @@ class UGATIT_92w(Module):
         self.model_path = os.path.join(self.directory, "UGATIT_92w")
         # Load the model
-        self.model = Model(self.model_path, use_gpu)
+        self.model = Model(self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False)
     # Keypoint detection function
     def style_transfer(
......
@@ -7,9 +7,9 @@ __all__ = ['Model']
 class Model():
     # Initialization function
-    def __init__(self, modelpath, use_gpu):
+    def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True):
         # Load the model predictor
-        self.predictor = self.load_model(modelpath, use_gpu)
+        self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined)
         # Get the model's inputs and outputs
         self.input_names = self.predictor.get_input_names()
@@ -18,24 +18,29 @@ class Model():
         self.output_tensor = self.predictor.get_output_tensor(self.output_names[0])
     # Model loading function
-    def load_model(self, modelpath, use_gpu):
+    def load_model(self, modelpath, use_gpu, use_mkldnn, combined):
         # Configure the running device
         if use_gpu:
             try:
-                places = os.environ["CUDA_VISIBLE_DEVICES"]
-                places = int(places[0])
-            except Exception as e:
-                print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e)
+                int(os.environ.get('CUDA_VISIBLE_DEVICES'))
+            except Exception:
+                print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.')
                 use_gpu = False
         # Load the model parameters
-        config = AnalysisConfig(modelpath)
+        if combined:
+            model = os.path.join(modelpath, "__model__")
+            params = os.path.join(modelpath, "__params__")
+            config = AnalysisConfig(model, params)
+        else:
+            config = AnalysisConfig(modelpath)
         # Set config options
         if use_gpu:
-            config.enable_use_gpu(100, places)
+            config.enable_use_gpu(100, 0)
         else:
             config.disable_gpu()
-        config.enable_mkldnn()
+        if use_mkldnn:
+            config.enable_mkldnn()
         config.disable_glog_info()
         config.switch_ir_optim(True)
@@ -55,7 +60,8 @@ class Model():
         # Iterate over the input data and predict
         for input_data in input_datas:
-            self.input_tensor.copy_from_cpu(input_data)
+            inputs = input_data.copy()
+            self.input_tensor.copy_from_cpu(inputs)
             self.predictor.zero_copy_run()
             output = self.output_tensor.copy_to_cpu()
             outputs.append(output)
......
@@ -21,7 +21,7 @@ class Animegan_V1_Hayao_60(Module):
         self.model_path = os.path.join(self.directory, "animegan_v1_hayao_60")
         # Load the model
-        self.model = Model(self.model_path, use_gpu)
+        self.model = Model(self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False)
     # Keypoint detection function
     def style_transfer(
......
@@ -7,9 +7,9 @@ __all__ = ['Model']
 class Model():
     # Initialization function
-    def __init__(self, modelpath, use_gpu):
+    def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True):
         # Load the model predictor
-        self.predictor = self.load_model(modelpath, use_gpu)
+        self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined)
         # Get the model's inputs and outputs
         self.input_names = self.predictor.get_input_names()
@@ -18,24 +18,29 @@ class Model():
         self.output_tensor = self.predictor.get_output_tensor(self.output_names[0])
     # Model loading function
-    def load_model(self, modelpath, use_gpu):
+    def load_model(self, modelpath, use_gpu, use_mkldnn, combined):
         # Configure the running device
         if use_gpu:
             try:
-                places = os.environ["CUDA_VISIBLE_DEVICES"]
-                places = int(places[0])
-            except Exception as e:
-                print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e)
+                int(os.environ.get('CUDA_VISIBLE_DEVICES'))
+            except Exception:
+                print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.')
                 use_gpu = False
         # Load the model parameters
-        config = AnalysisConfig(modelpath)
+        if combined:
+            model = os.path.join(modelpath, "__model__")
+            params = os.path.join(modelpath, "__params__")
+            config = AnalysisConfig(model, params)
+        else:
+            config = AnalysisConfig(modelpath)
         # Set config options
         if use_gpu:
-            config.enable_use_gpu(100, places)
+            config.enable_use_gpu(100, 0)
         else:
             config.disable_gpu()
-        config.enable_mkldnn()
+        if use_mkldnn:
+            config.enable_mkldnn()
         config.disable_glog_info()
         config.switch_ir_optim(True)
@@ -55,7 +60,8 @@ class Model():
         # Iterate over the input data and predict
         for input_data in input_datas:
-            self.input_tensor.copy_from_cpu(input_data)
+            inputs = input_data.copy()
+            self.input_tensor.copy_from_cpu(inputs)
             self.predictor.zero_copy_run()
             output = self.output_tensor.copy_to_cpu()
             outputs.append(output)
......
@@ -21,7 +21,7 @@ class Animegan_V2_Hayao_64(Module):
         self.model_path = os.path.join(self.directory, "animegan_v2_hayao_64")
         # Load the model
-        self.model = Model(self.model_path, use_gpu)
+        self.model = Model(self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False)
     # Keypoint detection function
     def style_transfer(
......
@@ -7,9 +7,9 @@ __all__ = ['Model']
 class Model():
     # Initialization function
-    def __init__(self, modelpath, use_gpu):
+    def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True):
         # Load the model predictor
-        self.predictor = self.load_model(modelpath, use_gpu)
+        self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined)
         # Get the model's inputs and outputs
         self.input_names = self.predictor.get_input_names()
@@ -18,24 +18,29 @@ class Model():
         self.output_tensor = self.predictor.get_output_tensor(self.output_names[0])
     # Model loading function
-    def load_model(self, modelpath, use_gpu):
+    def load_model(self, modelpath, use_gpu, use_mkldnn, combined):
         # Configure the running device
         if use_gpu:
             try:
-                places = os.environ["CUDA_VISIBLE_DEVICES"]
-                places = int(places[0])
-            except Exception as e:
-                print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e)
+                int(os.environ.get('CUDA_VISIBLE_DEVICES'))
+            except Exception:
+                print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.')
                 use_gpu = False
         # Load the model parameters
-        config = AnalysisConfig(modelpath)
+        if combined:
+            model = os.path.join(modelpath, "__model__")
+            params = os.path.join(modelpath, "__params__")
+            config = AnalysisConfig(model, params)
+        else:
+            config = AnalysisConfig(modelpath)
         # Set config options
         if use_gpu:
-            config.enable_use_gpu(100, places)
+            config.enable_use_gpu(100, 0)
         else:
             config.disable_gpu()
-        config.enable_mkldnn()
+        if use_mkldnn:
+            config.enable_mkldnn()
         config.disable_glog_info()
         config.switch_ir_optim(True)
@@ -55,7 +60,8 @@ class Model():
         # Iterate over the input data and predict
         for input_data in input_datas:
-            self.input_tensor.copy_from_cpu(input_data)
+            inputs = input_data.copy()
+            self.input_tensor.copy_from_cpu(inputs)
             self.predictor.zero_copy_run()
             output = self.output_tensor.copy_to_cpu()
             outputs.append(output)
......
@@ -21,7 +21,7 @@ class Animegan_V2_Hayao_99(Module):
         self.model_path = os.path.join(self.directory, "animegan_v2_hayao_99")
         # Load the model
-        self.model = Model(self.model_path, use_gpu)
+        self.model = Model(self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False)
     # Keypoint detection function
     def style_transfer(
......
@@ -7,9 +7,9 @@ __all__ = ['Model']
 class Model():
     # Initialization function
-    def __init__(self, modelpath, use_gpu):
+    def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True):
         # Load the model predictor
-        self.predictor = self.load_model(modelpath, use_gpu)
+        self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined)
         # Get the model's inputs and outputs
         self.input_names = self.predictor.get_input_names()
@@ -18,24 +18,29 @@ class Model():
         self.output_tensor = self.predictor.get_output_tensor(self.output_names[0])
     # Model loading function
-    def load_model(self, modelpath, use_gpu):
+    def load_model(self, modelpath, use_gpu, use_mkldnn, combined):
         # Configure the running device
         if use_gpu:
             try:
-                places = os.environ["CUDA_VISIBLE_DEVICES"]
-                places = int(places[0])
-            except Exception as e:
-                print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e)
+                int(os.environ.get('CUDA_VISIBLE_DEVICES'))
+            except Exception:
+                print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.')
                 use_gpu = False
         # Load the model parameters
-        config = AnalysisConfig(modelpath)
+        if combined:
+            model = os.path.join(modelpath, "__model__")
+            params = os.path.join(modelpath, "__params__")
+            config = AnalysisConfig(model, params)
+        else:
+            config = AnalysisConfig(modelpath)
         # Set config options
         if use_gpu:
-            config.enable_use_gpu(100, places)
+            config.enable_use_gpu(100, 0)
         else:
             config.disable_gpu()
-        config.enable_mkldnn()
+        if use_mkldnn:
+            config.enable_mkldnn()
         config.disable_glog_info()
         config.switch_ir_optim(True)
@@ -55,7 +60,8 @@ class Model():
         # Iterate over the input data and predict
        for input_data in input_datas:
-            self.input_tensor.copy_from_cpu(input_data)
+            inputs = input_data.copy()
+            self.input_tensor.copy_from_cpu(inputs)
             self.predictor.zero_copy_run()
             output = self.output_tensor.copy_to_cpu()
             outputs.append(output)
......
@@ -21,7 +21,7 @@ class Animegan_V2_Paprika_54(Module):
         self.model_path = os.path.join(self.directory, "animegan_v2_paprika_54")
         # Load the model
-        self.model = Model(self.model_path, use_gpu)
+        self.model = Model(self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False)
     # Keypoint detection function
     def style_transfer(
......
@@ -7,9 +7,9 @@ __all__ = ['Model']
 class Model():
     # Initialization function
-    def __init__(self, modelpath, use_gpu):
+    def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True):
         # Load the model predictor
-        self.predictor = self.load_model(modelpath, use_gpu)
+        self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined)
         # Get the model's inputs and outputs
         self.input_names = self.predictor.get_input_names()
@@ -18,24 +18,29 @@ class Model():
         self.output_tensor = self.predictor.get_output_tensor(self.output_names[0])
     # Model loading function
-    def load_model(self, modelpath, use_gpu):
+    def load_model(self, modelpath, use_gpu, use_mkldnn, combined):
         # Configure the running device
         if use_gpu:
             try:
-                places = os.environ["CUDA_VISIBLE_DEVICES"]
-                places = int(places[0])
-            except Exception as e:
-                print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e)
+                int(os.environ.get('CUDA_VISIBLE_DEVICES'))
+            except Exception:
+                print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.')
                 use_gpu = False
         # Load the model parameters
-        config = AnalysisConfig(modelpath)
+        if combined:
+            model = os.path.join(modelpath, "__model__")
+            params = os.path.join(modelpath, "__params__")
+            config = AnalysisConfig(model, params)
+        else:
+            config = AnalysisConfig(modelpath)
         # Set config options
         if use_gpu:
-            config.enable_use_gpu(100, places)
+            config.enable_use_gpu(100, 0)
         else:
             config.disable_gpu()
-        config.enable_mkldnn()
+        if use_mkldnn:
+            config.enable_mkldnn()
         config.disable_glog_info()
         config.switch_ir_optim(True)
@@ -55,7 +60,8 @@ class Model():
         # Iterate over the input data and predict
         for input_data in input_datas:
-            self.input_tensor.copy_from_cpu(input_data)
+            inputs = input_data.copy()
+            self.input_tensor.copy_from_cpu(inputs)
             self.predictor.zero_copy_run()
             output = self.output_tensor.copy_to_cpu()
             outputs.append(output)
......
@@ -21,7 +21,7 @@ class Animegan_V2_Paprika_74(Module):
         self.model_path = os.path.join(self.directory, "animegan_v2_paprika_74")
         # Load the model
-        self.model = Model(self.model_path, use_gpu)
+        self.model = Model(self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False)
     # Keypoint detection function
     def style_transfer(
......
@@ -7,9 +7,9 @@ __all__ = ['Model']
 class Model():
     # Initialization function
-    def __init__(self, modelpath, use_gpu):
+    def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True):
         # Load the model predictor
-        self.predictor = self.load_model(modelpath, use_gpu)
+        self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined)
         # Get the model's inputs and outputs
         self.input_names = self.predictor.get_input_names()
@@ -18,24 +18,29 @@ class Model():
         self.output_tensor = self.predictor.get_output_tensor(self.output_names[0])
     # Model loading function
-    def load_model(self, modelpath, use_gpu):
+    def load_model(self, modelpath, use_gpu, use_mkldnn, combined):
         # Configure the running device
         if use_gpu:
             try:
-                places = os.environ["CUDA_VISIBLE_DEVICES"]
-                places = int(places[0])
-            except Exception as e:
-                print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e)
+                int(os.environ.get('CUDA_VISIBLE_DEVICES'))
+            except Exception:
+                print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.')
                 use_gpu = False
         # Load the model parameters
-        config = AnalysisConfig(modelpath)
+        if combined:
+            model = os.path.join(modelpath, "__model__")
+            params = os.path.join(modelpath, "__params__")
+            config = AnalysisConfig(model, params)
+        else:
+            config = AnalysisConfig(modelpath)
         # Set config options
         if use_gpu:
-            config.enable_use_gpu(100, places)
+            config.enable_use_gpu(100, 0)
         else:
             config.disable_gpu()
-        config.enable_mkldnn()
+        if use_mkldnn:
+            config.enable_mkldnn()
         config.disable_glog_info()
         config.switch_ir_optim(True)
@@ -55,7 +60,8 @@ class Model():
         # Iterate over the input data and predict
         for input_data in input_datas:
-            self.input_tensor.copy_from_cpu(input_data)
+            inputs = input_data.copy()
+            self.input_tensor.copy_from_cpu(inputs)
             self.predictor.zero_copy_run()
             output = self.output_tensor.copy_to_cpu()
             outputs.append(output)
......
@@ -21,7 +21,7 @@ class Animegan_V2_Paprika_97(Module):
         self.model_path = os.path.join(self.directory, "animegan_v2_paprika_97")
         # Load the model
-        self.model = Model(self.model_path, use_gpu)
+        self.model = Model(self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False)
     # Keypoint detection function
     def style_transfer(
......
@@ -7,9 +7,9 @@ __all__ = ['Model']
 class Model():
     # Initialization function
-    def __init__(self, modelpath, use_gpu):
+    def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True):
         # Load the model predictor
-        self.predictor = self.load_model(modelpath, use_gpu)
+        self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined)
         # Get the model's inputs and outputs
         self.input_names = self.predictor.get_input_names()
@@ -18,24 +18,29 @@ class Model():
         self.output_tensor = self.predictor.get_output_tensor(self.output_names[0])
     # Model loading function
-    def load_model(self, modelpath, use_gpu):
+    def load_model(self, modelpath, use_gpu, use_mkldnn, combined):
         # Configure the running device
         if use_gpu:
             try:
-                places = os.environ["CUDA_VISIBLE_DEVICES"]
-                places = int(places[0])
-            except Exception as e:
-                print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e)
+                int(os.environ.get('CUDA_VISIBLE_DEVICES'))
+            except Exception:
+                print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.')
                 use_gpu = False
         # Load the model parameters
-        config = AnalysisConfig(modelpath)
+        if combined:
+            model = os.path.join(modelpath, "__model__")
+            params = os.path.join(modelpath, "__params__")
+            config = AnalysisConfig(model, params)
+        else:
+            config = AnalysisConfig(modelpath)
         # Set config options
         if use_gpu:
-            config.enable_use_gpu(100, places)
+            config.enable_use_gpu(100, 0)
         else:
             config.disable_gpu()
-        config.enable_mkldnn()
+        if use_mkldnn:
+            config.enable_mkldnn()
         config.disable_glog_info()
         config.switch_ir_optim(True)
@@ -55,7 +60,8 @@ class Model():
         # Iterate over the input data and predict
         for input_data in input_datas:
-            self.input_tensor.copy_from_cpu(input_data)
+            inputs = input_data.copy()
+            self.input_tensor.copy_from_cpu(inputs)
             self.predictor.zero_copy_run()
             output = self.output_tensor.copy_to_cpu()
             outputs.append(output)
......
@@ -21,7 +21,7 @@ class Animegan_V2_Paprika_98(Module):
         self.model_path = os.path.join(self.directory, "animegan_v2_paprika_98")
         # Load the model
-        self.model = Model(self.model_path, use_gpu)
+        self.model = Model(self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False)
     # Keypoint detection function
     def style_transfer(
......
@@ -7,9 +7,9 @@ __all__ = ['Model']
 class Model():
     # Initialization function
-    def __init__(self, modelpath, use_gpu):
+    def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True):
         # Load the model predictor
-        self.predictor = self.load_model(modelpath, use_gpu)
+        self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined)
         # Get the model's inputs and outputs
         self.input_names = self.predictor.get_input_names()
@@ -18,24 +18,29 @@ class Model():
         self.output_tensor = self.predictor.get_output_tensor(self.output_names[0])
     # Model loading function
-    def load_model(self, modelpath, use_gpu):
+    def load_model(self, modelpath, use_gpu, use_mkldnn, combined):
         # Configure the running device
         if use_gpu:
             try:
-                places = os.environ["CUDA_VISIBLE_DEVICES"]
-                places = int(places[0])
-            except Exception as e:
-                print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e)
+                int(os.environ.get('CUDA_VISIBLE_DEVICES'))
+            except Exception:
+                print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.')
                 use_gpu = False
         # Load the model parameters
-        config = AnalysisConfig(modelpath)
+        if combined:
+            model = os.path.join(modelpath, "__model__")
+            params = os.path.join(modelpath, "__params__")
+            config = AnalysisConfig(model, params)
+        else:
+            config = AnalysisConfig(modelpath)
         # Set config options
         if use_gpu:
-            config.enable_use_gpu(100, places)
+            config.enable_use_gpu(100, 0)
         else:
             config.disable_gpu()
-        config.enable_mkldnn()
+        if use_mkldnn:
+            config.enable_mkldnn()
         config.disable_glog_info()
         config.switch_ir_optim(True)
@@ -55,7 +60,8 @@ class Model():
         # Iterate over the input data and predict
         for input_data in input_datas:
-            self.input_tensor.copy_from_cpu(input_data)
+            inputs = input_data.copy()
+            self.input_tensor.copy_from_cpu(inputs)
             self.predictor.zero_copy_run()
             output = self.output_tensor.copy_to_cpu()
             outputs.append(output)
......
@@ -21,7 +21,7 @@ class Animegan_V2_Shinkai_33(Module):
         self.model_path = os.path.join(self.directory, "animegan_v2_shinkai_33")
         # Load the model
-        self.model = Model(self.model_path, use_gpu)
+        self.model = Model(self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False)
     # Keypoint detection function
     def style_transfer(
......
@@ -7,9 +7,9 @@ __all__ = ['Model']
 class Model():
     # Initialization function
-    def __init__(self, modelpath, use_gpu):
+    def __init__(self, modelpath, use_gpu=False, use_mkldnn=True, combined=True):
         # Load the model predictor
-        self.predictor = self.load_model(modelpath, use_gpu)
+        self.predictor = self.load_model(modelpath, use_gpu, use_mkldnn, combined)
         # Get the model's inputs and outputs
         self.input_names = self.predictor.get_input_names()
@@ -18,24 +18,29 @@ class Model():
         self.output_tensor = self.predictor.get_output_tensor(self.output_names[0])
     # Model loading function
-    def load_model(self, modelpath, use_gpu):
+    def load_model(self, modelpath, use_gpu, use_mkldnn, combined):
         # Configure the running device
         if use_gpu:
             try:
-                places = os.environ["CUDA_VISIBLE_DEVICES"]
-                places = int(places[0])
-            except Exception as e:
-                print('Error: %s. Please set the environment variables "CUDA_VISIBLE_DEVICES".' % e)
+                int(os.environ.get('CUDA_VISIBLE_DEVICES'))
+            except Exception:
+                print('Error! Unable to use GPU. Please set the environment variables "CUDA_VISIBLE_DEVICES=GPU_id" to use GPU.')
                 use_gpu = False
         # Load the model parameters
-        config = AnalysisConfig(modelpath)
+        if combined:
+            model = os.path.join(modelpath, "__model__")
+            params = os.path.join(modelpath, "__params__")
+            config = AnalysisConfig(model, params)
+        else:
+            config = AnalysisConfig(modelpath)
         # Set config options
         if use_gpu:
-            config.enable_use_gpu(100, places)
+            config.enable_use_gpu(100, 0)
         else:
             config.disable_gpu()
-        config.enable_mkldnn()
+        if use_mkldnn:
+            config.enable_mkldnn()
         config.disable_glog_info()
         config.switch_ir_optim(True)
@@ -55,7 +60,8 @@ class Model():
         # Iterate over the input data and predict
         for input_data in input_datas:
-            self.input_tensor.copy_from_cpu(input_data)
+            inputs = input_data.copy()
+            self.input_tensor.copy_from_cpu(inputs)
             self.predictor.zero_copy_run()
             output = self.output_tensor.copy_to_cpu()
             outputs.append(output)
......
@@ -21,7 +21,7 @@ class Animegan_V2_Shinkai_53(Module):
         self.model_path = os.path.join(self.directory, "animegan_v2_shinkai_53")
         # Load the model
-        self.model = Model(self.model_path, use_gpu)
+        self.model = Model(self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False)
     # Keypoint detection function
     def style_transfer(
......