From f09d1f730b71a9b220cceec14a765c24cd9c1d6f Mon Sep 17 00:00:00 2001
From: dyning
Date: Thu, 9 Jul 2020 20:34:42 +0800
Subject: [PATCH] add hub serving

---
 deploy/ocr_hubserving/ocr_det/__init__.py    |   0
 deploy/ocr_hubserving/ocr_det/config.json    |  14 ++
 deploy/ocr_hubserving/ocr_det/module.py      | 160 +++++++++++++++
 deploy/ocr_hubserving/ocr_rec/__init__.py    |   0
 deploy/ocr_hubserving/ocr_rec/config.json    |  13 ++
 deploy/ocr_hubserving/ocr_rec/module.py      | 136 +++++++++++++
 deploy/ocr_hubserving/ocr_system/__init__.py |   0
 deploy/ocr_hubserving/ocr_system/config.json |  16 ++
 deploy/ocr_hubserving/ocr_system/module.py   | 201 +++++++++++++++++++
 doc/doc_ch/hubserving.md                     | 106 ++++++++++
 tools/infer/utility.py                       |  19 ++
 tools/test_hubserving.py                     |  25 +++
 12 files changed, 690 insertions(+)
 create mode 100644 deploy/ocr_hubserving/ocr_det/__init__.py
 create mode 100644 deploy/ocr_hubserving/ocr_det/config.json
 create mode 100644 deploy/ocr_hubserving/ocr_det/module.py
 create mode 100644 deploy/ocr_hubserving/ocr_rec/__init__.py
 create mode 100644 deploy/ocr_hubserving/ocr_rec/config.json
 create mode 100644 deploy/ocr_hubserving/ocr_rec/module.py
 create mode 100644 deploy/ocr_hubserving/ocr_system/__init__.py
 create mode 100644 deploy/ocr_hubserving/ocr_system/config.json
 create mode 100644 deploy/ocr_hubserving/ocr_system/module.py
 create mode 100644 doc/doc_ch/hubserving.md
 create mode 100644 tools/test_hubserving.py

diff --git a/deploy/ocr_hubserving/ocr_det/__init__.py b/deploy/ocr_hubserving/ocr_det/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/deploy/ocr_hubserving/ocr_det/config.json b/deploy/ocr_hubserving/ocr_det/config.json
new file mode 100644
index 00000000..f995d0ed
--- /dev/null
+++ b/deploy/ocr_hubserving/ocr_det/config.json
@@ -0,0 +1,14 @@
+{
+    "modules_info": {
+        "ocr_det": {
+            "init_args": {
+                "version": "1.0.0",
+                "det_model_dir": "./inference/ch_det_mv3_db/",
+                "use_gpu": true
+            },
+            "predict_args": {
+                "visualization": false
+            }
+        }
+    }
+}
diff --git a/deploy/ocr_hubserving/ocr_det/module.py b/deploy/ocr_hubserving/ocr_det/module.py
new file mode 100644
index 00000000..0ee32d38
--- /dev/null
+++ b/deploy/ocr_hubserving/ocr_det/module.py
@@ -0,0 +1,160 @@
+# -*- coding:utf-8 -*-
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import ast
+import copy
+import math
+import os
+import time
+
+from paddle.fluid.core import AnalysisConfig, create_paddle_predictor, PaddleTensor
+from paddlehub.common.logger import logger
+from paddlehub.module.module import moduleinfo, runnable, serving
+from PIL import Image
+import cv2
+import numpy as np
+import paddle.fluid as fluid
+import paddlehub as hub
+
+from tools.infer.utility import draw_boxes, base64_to_cv2
+from tools.infer.predict_det import TextDetector
+
+
+class Config(object):
+    pass
+
+
+@moduleinfo(
+    name="ocr_det",
+    version="1.0.0",
+    summary="ocr detection service",
+    author="paddle-dev",
+    author_email="paddle-dev@baidu.com",
+    type="cv/text_recognition")
+class OCRDet(hub.Module):
+    def _initialize(self,
+                    det_model_dir="",
+                    det_algorithm="DB",
+                    use_gpu=False):
+        """
+        Initialize with the necessary elements.
+        """
+        self.config = Config()
+        self.config.use_gpu = use_gpu
+        if use_gpu:
+            try:
+                _places = os.environ["CUDA_VISIBLE_DEVICES"]
+                int(_places[0])
+                print("use gpu: ", use_gpu)
+                print("CUDA_VISIBLE_DEVICES: ", _places)
+            except Exception:
+                raise RuntimeError(
+                    "Environment variable CUDA_VISIBLE_DEVICES is not set correctly. If you want to use GPU, please set it via export CUDA_VISIBLE_DEVICES=cuda_device_id."
+                )
+        self.config.ir_optim = True
+        self.config.gpu_mem = 8000
+
+        # params for text detector
+        self.config.det_algorithm = det_algorithm
+        self.config.det_model_dir = det_model_dir
+        # self.config.det_model_dir = "./inference/det/"
+
+        # DB params
+        self.config.det_db_thresh = 0.3
+        self.config.det_db_box_thresh = 0.5
+        self.config.det_db_unclip_ratio = 2.0
+
+        # EAST params
+        self.config.det_east_score_thresh = 0.8
+        self.config.det_east_cover_thresh = 0.1
+        self.config.det_east_nms_thresh = 0.2
+
+    def read_images(self, paths=[]):
+        images = []
+        for img_path in paths:
+            assert os.path.isfile(
+                img_path), "The {} isn't a valid file.".format(img_path)
+            img = cv2.imread(img_path)
+            if img is None:
+                logger.info("error in loading image:{}".format(img_path))
+                continue
+            images.append(img)
+        return images
+
+    def det_text(self,
+                 images=[],
+                 paths=[],
+                 det_max_side_len=960,
+                 draw_img_save='ocr_det_result',
+                 visualization=False):
+        """
+        Get the text boxes in the predicted images.
+        Args:
+            images (list[numpy.ndarray]): image data, each with shape [H, W, C]; set either `images` or `paths`.
+            paths (list[str]): paths of the images to predict; set either `images` or `paths`.
+            det_max_side_len (int): the maximum length of the longer side of the input image; larger images are resized.
+            draw_img_save (str): the directory to store the visualized images.
+            visualization (bool): whether to save the visualized images.
+        Returns:
+            res (list): the detected text boxes and, if visualization is enabled, the save path of each image.
+        """
+
+        if images != [] and isinstance(images, list) and paths == []:
+            predicted_data = images
+        elif images == [] and isinstance(paths, list) and paths != []:
+            predicted_data = self.read_images(paths)
+        else:
+            raise TypeError("The input data is inconsistent with expectations.")
+
+        assert predicted_data != [], "There is no image to predict. Please check the input data."
+
+        self.config.det_max_side_len = det_max_side_len
+        text_detector = TextDetector(self.config)
+        all_results = []
+        for img in predicted_data:
+            result = {'save_path': ''}
+            if img is None:
+                logger.info("error in loading image")
+                result['data'] = []
+                all_results.append(result)
+                continue
+            dt_boxes, elapse = text_detector(img)
+            print("Predict time : ", elapse)
+            result['data'] = dt_boxes.astype(int).tolist()
+
+            if visualization:
+                image = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
+                draw_img = draw_boxes(image, dt_boxes)
+                draw_img = np.array(draw_img)
+                if not os.path.exists(draw_img_save):
+                    os.makedirs(draw_img_save)
+                saved_name = 'ndarray_{}.jpg'.format(time.time())
+                save_file_path = os.path.join(draw_img_save, saved_name)
+                cv2.imwrite(save_file_path, draw_img[:, :, ::-1])
+                print("The visualized image saved in {}".format(save_file_path))
+                result['save_path'] = save_file_path
+
+            all_results.append(result)
+        return all_results
+
+    @serving
+    def serving_method(self, images, **kwargs):
+        """
+        Run as a service.
+        """
+        images_decode = [base64_to_cv2(image) for image in images]
+        results = self.det_text(images_decode, **kwargs)
+        return results
+
+
+if __name__ == '__main__':
+    ocr = OCRDet()
+    image_path = [
+        './doc/imgs/11.jpg',
+        './doc/imgs/12.jpg',
+    ]
+    res = ocr.det_text(paths=image_path, visualization=True)
+    print(res)
\ No newline at end of file
diff --git a/deploy/ocr_hubserving/ocr_rec/__init__.py b/deploy/ocr_hubserving/ocr_rec/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/deploy/ocr_hubserving/ocr_rec/config.json b/deploy/ocr_hubserving/ocr_rec/config.json
new file mode 100644
index 00000000..2cfbc0b5
--- /dev/null
+++ b/deploy/ocr_hubserving/ocr_rec/config.json
@@ -0,0 +1,13 @@
+{
+    "modules_info": {
+        "ocr_rec": {
+            "init_args": {
+                "version": "1.0.0",
+                "rec_model_dir": "./inference/ch_rec_mv3_crnn/",
+                "use_gpu": true
+            },
+            "predict_args": {
+            }
+        }
+    }
+}
diff --git a/deploy/ocr_hubserving/ocr_rec/module.py b/deploy/ocr_hubserving/ocr_rec/module.py
new file mode 100644
index 00000000..b50016a3
--- /dev/null
+++ b/deploy/ocr_hubserving/ocr_rec/module.py
@@ -0,0 +1,136 @@
+# -*- coding:utf-8 -*-
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import ast
+import copy
+import math
+import os
+import time
+
+from paddle.fluid.core import AnalysisConfig, create_paddle_predictor, PaddleTensor
+from paddlehub.common.logger import logger
+from paddlehub.module.module import moduleinfo, runnable, serving
+from PIL import Image
+import cv2
+import numpy as np
+import paddle.fluid as fluid
+import paddlehub as hub
+
+from tools.infer.utility import base64_to_cv2
+from tools.infer.predict_rec import TextRecognizer
+
+
+class Config(object):
+    pass
+
+
+@moduleinfo(
+    name="ocr_rec",
+    version="1.0.0",
+    summary="ocr recognition service",
+    author="paddle-dev",
+    author_email="paddle-dev@baidu.com",
+    type="cv/text_recognition")
+class OCRRec(hub.Module):
+    def _initialize(self,
+                    rec_model_dir="",
+                    rec_algorithm="CRNN",
+                    rec_char_dict_path="./ppocr/utils/ppocr_keys_v1.txt",
+                    rec_batch_num=30,
+                    use_gpu=False):
+        """
+        Initialize with the necessary elements.
+        """
+        self.config = Config()
+        self.config.use_gpu = use_gpu
+        if use_gpu:
+            try:
+                _places = os.environ["CUDA_VISIBLE_DEVICES"]
+                int(_places[0])
+                print("use gpu: ", use_gpu)
+                print("CUDA_VISIBLE_DEVICES: ", _places)
+            except Exception:
+                raise RuntimeError(
+                    "Environment variable CUDA_VISIBLE_DEVICES is not set correctly. If you want to use GPU, please set it via export CUDA_VISIBLE_DEVICES=cuda_device_id."
+                )
+        self.config.ir_optim = True
+        self.config.gpu_mem = 8000
+
+        # params for text recognizer
+        self.config.rec_algorithm = rec_algorithm
+        self.config.rec_model_dir = rec_model_dir
+        # self.config.rec_model_dir = "./inference/rec/"
+
+        self.config.rec_image_shape = "3, 32, 320"
+        self.config.rec_char_type = 'ch'
+        self.config.rec_batch_num = rec_batch_num
+        self.config.rec_char_dict_path = rec_char_dict_path
+        self.config.use_space_char = True
+
+    def read_images(self, paths=[]):
+        images = []
+        for img_path in paths:
+            assert os.path.isfile(
+                img_path), "The {} isn't a valid file.".format(img_path)
+            img = cv2.imread(img_path)
+            if img is None:
+                logger.info("error in loading image:{}".format(img_path))
+                continue
+            images.append(img)
+        return images
+
+    def rec_text(self,
+                 images=[],
+                 paths=[]):
+        """
+        Get the recognized texts in the predicted images.
+        Args:
+            images (list[numpy.ndarray]): image data, each with shape [H, W, C]; set either `images` or `paths`.
+            paths (list[str]): paths of the images to predict; set either `images` or `paths`.
+        Returns:
+            res (list): the recognition result (text, confidence) for each input image.
+        """
+
+        if images != [] and isinstance(images, list) and paths == []:
+            predicted_data = images
+        elif images == [] and isinstance(paths, list) and paths != []:
+            predicted_data = self.read_images(paths)
+        else:
+            raise TypeError("The input data is inconsistent with expectations.")
+
+        assert predicted_data != [], "There is no image to predict. Please check the input data."
+
+        text_recognizer = TextRecognizer(self.config)
+        img_list = []
+        for img in predicted_data:
+            if img is None:
+                continue
+            img_list.append(img)
+        try:
+            rec_res, predict_time = text_recognizer(img_list)
+        except Exception as e:
+            print(e)
+            return []
+        return rec_res
+
+    @serving
+    def serving_method(self, images, **kwargs):
+        """
+        Run as a service.
+        """
+        images_decode = [base64_to_cv2(image) for image in images]
+        results = self.rec_text(images_decode, **kwargs)
+        return results
+
+
+if __name__ == '__main__':
+    ocr = OCRRec()
+    image_path = [
+        './doc/imgs_words/ch/word_1.jpg',
+        './doc/imgs_words/ch/word_2.jpg',
+        './doc/imgs_words/ch/word_3.jpg',
+    ]
+    res = ocr.rec_text(paths=image_path)
+    print(res)
\ No newline at end of file
diff --git a/deploy/ocr_hubserving/ocr_system/__init__.py b/deploy/ocr_hubserving/ocr_system/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/deploy/ocr_hubserving/ocr_system/config.json b/deploy/ocr_hubserving/ocr_system/config.json
new file mode 100644
index 00000000..364c7426
--- /dev/null
+++ b/deploy/ocr_hubserving/ocr_system/config.json
@@ -0,0 +1,16 @@
+{
+    "modules_info": {
+        "ocr_system": {
+            "init_args": {
+                "version": "1.0.0",
+                "det_model_dir": "./inference/ch_det_mv3_db/",
+                "rec_model_dir": "./inference/ch_rec_mv3_crnn/",
+                "use_gpu": true
+            },
+            "predict_args": {
+                "visualization": false
+            }
+        }
+    }
+}
+
diff --git a/deploy/ocr_hubserving/ocr_system/module.py b/deploy/ocr_hubserving/ocr_system/module.py
new file mode 100644
index 00000000..dc5ab211
--- /dev/null
+++ b/deploy/ocr_hubserving/ocr_system/module.py
@@ -0,0 +1,201 @@
+# -*- coding:utf-8 -*-
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import ast
+import copy
+import math
+import os
+import time
+
+from paddle.fluid.core import AnalysisConfig, create_paddle_predictor, PaddleTensor
+from paddlehub.common.logger import logger
+from paddlehub.module.module import moduleinfo, runnable, serving
+from PIL import Image
+import cv2
+import numpy as np
+import paddle.fluid as fluid
+import paddlehub as hub
+
+from tools.infer.utility import draw_ocr, base64_to_cv2
+from tools.infer.predict_system import TextSystem
+
+
+class Config(object):
+    pass
+
+
+@moduleinfo(
+    name="ocr_system",
+    version="1.0.0",
+    summary="ocr system service",
+    author="paddle-dev",
+    author_email="paddle-dev@baidu.com",
+    type="cv/text_recognition")
+class OCRSystem(hub.Module):
+    def _initialize(self,
+                    det_model_dir="",
+                    det_algorithm="DB",
+                    rec_model_dir="",
+                    rec_algorithm="CRNN",
+                    rec_char_dict_path="./ppocr/utils/ppocr_keys_v1.txt",
+                    rec_batch_num=30,
+                    use_gpu=False):
+        """
+        Initialize with the necessary elements.
+        """
+        self.config = Config()
+        self.config.use_gpu = use_gpu
+        if use_gpu:
+            try:
+                _places = os.environ["CUDA_VISIBLE_DEVICES"]
+                int(_places[0])
+                print("use gpu: ", use_gpu)
+                print("CUDA_VISIBLE_DEVICES: ", _places)
+            except Exception:
+                raise RuntimeError(
+                    "Environment variable CUDA_VISIBLE_DEVICES is not set correctly. If you want to use GPU, please set it via export CUDA_VISIBLE_DEVICES=cuda_device_id."
+                )
+        self.config.ir_optim = True
+        self.config.gpu_mem = 8000
+
+        # params for text detector
+        self.config.det_algorithm = det_algorithm
+        self.config.det_model_dir = det_model_dir
+        # self.config.det_model_dir = "./inference/det/"
+
+        # DB params
+        self.config.det_db_thresh = 0.3
+        self.config.det_db_box_thresh = 0.5
+        self.config.det_db_unclip_ratio = 2.0
+
+        # EAST params
+        self.config.det_east_score_thresh = 0.8
+        self.config.det_east_cover_thresh = 0.1
+        self.config.det_east_nms_thresh = 0.2
+
+        # params for text recognizer
+        self.config.rec_algorithm = rec_algorithm
+        self.config.rec_model_dir = rec_model_dir
+        # self.config.rec_model_dir = "./inference/rec/"
+
+        self.config.rec_image_shape = "3, 32, 320"
+        self.config.rec_char_type = 'ch'
+        self.config.rec_batch_num = rec_batch_num
+        self.config.rec_char_dict_path = rec_char_dict_path
+        self.config.use_space_char = True
+
+    def read_images(self, paths=[]):
+        images = []
+        for img_path in paths:
+            assert os.path.isfile(
+                img_path), "The {} isn't a valid file.".format(img_path)
+            img = cv2.imread(img_path)
+            if img is None:
+                logger.info("error in loading image:{}".format(img_path))
+                continue
+            images.append(img)
+        return images
+
+    def recognize_text(self,
+                       images=[],
+                       paths=[],
+                       det_max_side_len=960,
+                       draw_img_save='ocr_result',
+                       visualization=False,
+                       text_thresh=0.5):
+        """
+        Get the Chinese texts in the predicted images.
+        Args:
+            images (list[numpy.ndarray]): image data, each with shape [H, W, C]; set either `images` or `paths`.
+            paths (list[str]): paths of the images to predict; set either `images` or `paths`.
+            det_max_side_len (int): the maximum length of the longer side of the input image; larger images are resized.
+            draw_img_save (str): the directory to store the visualized images.
+            visualization (bool): whether to save the visualized images.
+            text_thresh (float): the confidence threshold of the recognized texts; results below it are dropped.
+        Returns:
+            res (list): the recognized Chinese texts and, if visualization is enabled, the save path of each image.
+        """
+
+        if images != [] and isinstance(images, list) and paths == []:
+            predicted_data = images
+        elif images == [] and isinstance(paths, list) and paths != []:
+            predicted_data = self.read_images(paths)
+        else:
+            raise TypeError("The input data is inconsistent with expectations.")
+
+        assert predicted_data != [], "There is no image to predict. Please check the input data."
+
+        self.config.det_max_side_len = det_max_side_len
+        text_sys = TextSystem(self.config)
+        cnt = 0
+        all_results = []
+        for img in predicted_data:
+            result = {'save_path': ''}
+            if img is None:
+                logger.info("error in loading image")
+                result['data'] = []
+                all_results.append(result)
+                continue
+            starttime = time.time()
+            dt_boxes, rec_res = text_sys(img)
+            elapse = time.time() - starttime
+            cnt += 1
+            print("Predict time of image %d: %.3fs" % (cnt, elapse))
+            dt_num = len(dt_boxes)
+            rec_res_final = []
+            for dno in range(dt_num):
+                text, score = rec_res[dno]
+                # if the recognized text confidence score is lower than text_thresh, drop it
+                if score >= text_thresh:
+                    rec_res_final.append({
+                        'text': text,
+                        'confidence': float(score),
+                        'text_box_position': dt_boxes[dno].astype(int).tolist()
+                    })
+            result['data'] = rec_res_final
+
+            if visualization:
+                image = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
+                boxes = dt_boxes
+                txts = [rec_res[i][0] for i in range(len(rec_res))]
+                scores = [rec_res[i][1] for i in range(len(rec_res))]
+
+                draw_img = draw_ocr(image, boxes, txts, scores, draw_txt=True, drop_score=0.5)
+                if not os.path.exists(draw_img_save):
+                    os.makedirs(draw_img_save)
+                saved_name = 'ndarray_{}.jpg'.format(time.time())
+                save_file_path = os.path.join(draw_img_save, saved_name)
+                cv2.imwrite(save_file_path, draw_img[:, :, ::-1])
+                print("The visualized image saved in {}".format(save_file_path))
+                result['save_path'] = save_file_path
+
+            all_results.append(result)
+        return all_results
+
+    @serving
+    def serving_method(self, images, **kwargs):
+        """
+        Run as a service.
+        """
+        images_decode = [base64_to_cv2(image) for image in images]
+        results = self.recognize_text(images_decode, **kwargs)
+        return results
+
+
+if __name__ == '__main__':
+    ocr = OCRSystem()
+    image_path = [
+        './doc/imgs/11.jpg',
+        './doc/imgs/12.jpg',
+    ]
+    res = ocr.recognize_text(paths=image_path, visualization=True)
+    print(res)
\ No newline at end of file
diff --git a/doc/doc_ch/hubserving.md b/doc/doc_ch/hubserving.md
new file mode 100644
index 00000000..d39cc178
--- /dev/null
+++ b/doc/doc_ch/hubserving.md
@@ -0,0 +1,106 @@
+# Service Deployment
+
+PaddleOCR provides two ways to deploy it as a service:
+- HubServing-based deployment: already integrated into PaddleOCR ([code](https://github.com/PaddlePaddle/PaddleOCR/tree/develop/deploy/hubserving)); follow this tutorial to use it;
+- PaddleServing-based deployment: see the official PaddleServing [demo](https://github.com/PaddlePaddle/Serving/tree/develop/python/examples/ocr); it will also be integrated into PaddleOCR later.
+
+The service deployment directory contains three service packages: detection, recognition, and the two-stage detection + recognition pipeline. Install and start the one that matches your needs. The directory layout is:
+```
+deploy/ocr_hubserving/
+  └─ ocr_det      detection module service package
+  └─ ocr_rec      recognition module service package
+  └─ ocr_system   detection + recognition pipeline service package
+```
+
+Each service package contains three files. Taking the two-stage pipeline package as an example, the layout is:
+```
+deploy/ocr_hubserving/ocr_system/
+  └─ __init__.py  empty file
+  └─ config.json  configuration file, passed in as a parameter when starting the service
+  └─ module.py    main module, containing the complete service logic
+```
+
+## Start the service
+The following steps take the detection + recognition two-stage pipeline service as an example. If you only need the detection or recognition service, just substitute the corresponding file paths.
+### 1. Install paddlehub
+```pip3 install paddlehub --upgrade -i https://pypi.tuna.tsinghua.edu.cn/simple```
+
+### 2. Install the service module
+PaddleOCR provides three service modules; install the ones you need. For example:
+Install the detection service module:
+```hub install deploy/ocr_hubserving/ocr_det/```
+Or, install the recognition service module:
+```hub install deploy/ocr_hubserving/ocr_rec/```
+Or, install the detection + recognition pipeline service module:
+```hub install deploy/ocr_hubserving/ocr_system/```
+
+### 3. Modify the configuration file
+Specify the model paths, whether to use GPU, whether to visualize the results, and other parameters in config.json. For example, the configuration of the pipeline service ocr_system is:
+```json
+{
+    "modules_info": {
+        "ocr_system": {
+            "init_args": {
+                "version": "1.0.0",
+                "det_model_dir": "./inference/det/",
+                "rec_model_dir": "./inference/rec/",
+                "use_gpu": true
+            },
+            "predict_args": {
+                "visualization": false
+            }
+        }
+    }
+}
+```
+The model paths must point to ```inference models```.
+
+### 4. Run the start command
+```hub serving start -m ocr_system --config deploy/ocr_hubserving/ocr_system/config.json```
+
+This completes the deployment of a service API, listening on port 8866 by default.
+
+**NOTE:** If you predict with GPU (i.e. use_gpu is set to true in the config), you need to set the CUDA_VISIBLE_DEVICES environment variable before starting the service, e.g. ```export CUDA_VISIBLE_DEVICES=0```; otherwise it does not need to be set.
+
+## Send a prediction request
+With the server configured, the few lines of code below send a prediction request and fetch the prediction result:
+
+```python
+import requests
+import json
+import cv2
+import base64
+
+def cv2_to_base64(image):
+    return base64.b64encode(image).decode('utf8')
+
+# Send an HTTP request
+data = {'images':[cv2_to_base64(open("./doc/imgs/11.jpg", 'rb').read())]}
+headers = {"Content-type": "application/json"}
+# url = "http://127.0.0.1:8866/predict/ocr_det"
+# url = "http://127.0.0.1:8866/predict/ocr_rec"
+url = "http://127.0.0.1:8866/predict/ocr_system"
+r = requests.post(url=url, headers=headers, data=json.dumps(data))
+
+# Print the prediction results
+print(r.json()["results"])
+```
+
+You may need to change the port number and the service module name in the ```url``` string according to your actual setup.
+
+The code above is already included in a test script that can be run directly with: ```python tools/test_hubserving.py```
+
+## Customize the service module
+If you need to modify the service logic, the general steps are:
+
+1. Stop the service
+```hub serving stop -m ocr_system```
+
+2. Modify the code in the corresponding module.py according to your actual needs
+
+3. Uninstall the old service package
+```hub uninstall ocr_system```
+
+4. Install the modified service package
+```hub install deploy/ocr_hubserving/ocr_system/```
+
diff --git a/tools/infer/utility.py b/tools/infer/utility.py
index 88a84fae..2d98ae70 100755
--- a/tools/infer/utility.py
+++ b/tools/infer/utility.py
@@ -302,6 +302,25 @@ def text_visual(texts, scores, img_h=400, img_w=600, threshold=0.):
     return np.array(blank_img)
 
 
+def base64_to_cv2(b64str):
+    import base64
+    data = base64.b64decode(b64str.encode('utf8'))
+    data = np.frombuffer(data, np.uint8)
+    data = cv2.imdecode(data, cv2.IMREAD_COLOR)
+    return data
+
+
+def draw_boxes(image, boxes, scores=None, drop_score=0.5):
+    if scores is None:
+        scores = [1] * len(boxes)
+    for (box, score) in zip(boxes, scores):
+        if score < drop_score:
+            continue
+        box = np.reshape(np.array(box), [-1, 1, 2]).astype(np.int64)
+        image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)
+    return image
+
+
 if __name__ == '__main__':
     test_img = "./doc/test_v2"
     predict_txt = "./doc/predict.txt"
diff --git a/tools/test_hubserving.py b/tools/test_hubserving.py
new file mode 100644
index 00000000..edf6ec8c
--- /dev/null
+++ b/tools/test_hubserving.py
@@ -0,0 +1,25 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+import requests
+import json
+import cv2
+import base64
+import time
+
+def cv2_to_base64(image):
+    return base64.b64encode(image).decode('utf8')
+
+start = time.time()
+# Send an HTTP request
+data = {'images':[cv2_to_base64(open("./doc/imgs/11.jpg", 'rb').read())]}
+headers = {"Content-type": "application/json"}
+# url = "http://127.0.0.1:8866/predict/ocr_det"
+# url = "http://127.0.0.1:8866/predict/ocr_rec"
+url = "http://127.0.0.1:8866/predict/ocr_system"
+r = requests.post(url=url, headers=headers, data=json.dumps(data))
+end = time.time()
+
+# Print the prediction results
+print(r.json()["results"])
+print("time cost: ", end - start)
-- 
GitLab
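
For reference, the `results` field returned by the ocr_system route mirrors the output of `recognize_text` in the patched module above: a list with one entry per input image, each holding a `data` list of `text`, `confidence`, and `text_box_position` items plus an optional `save_path`. A minimal sketch of unpacking that response (assuming the service from this patch is running on the default port 8866 and the sample image path exists) could look like:

```python
import base64
import json

import requests

# Assumed endpoint; adjust host, port, and module name to match your deployment.
url = "http://127.0.0.1:8866/predict/ocr_system"

with open("./doc/imgs/11.jpg", "rb") as f:
    payload = {"images": [base64.b64encode(f.read()).decode("utf8")]}

r = requests.post(url,
                  headers={"Content-type": "application/json"},
                  data=json.dumps(payload))
results = r.json()["results"]

# One result dict per input image; 'data' holds the per-box predictions.
for image_result in results:
    for item in image_result["data"]:
        print(item["text"], item["confidence"], item["text_box_position"])
```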