Commit 63328917 authored by: W wenlihaoyu

Optimize code: replace the cython/python/GPU NMS with opencv dnn NMS;

Parent 9e6f83cb
Files added
@@ -4,11 +4,14 @@
- [x] Text orientation detection for 0/90/180/270 degrees (supports dnn/tensorflow)
- [x] Text detection via darknet / opencv dnn / keras, with darknet/keras training
- [x] Variable-length OCR training (English, Chinese+English); crnn\dense OCR recognition and training; added pytorch-to-keras model conversion (tools/pytorch_to_keras.py)
- [x] Model conversion between darknet and keras, and from pytorch to keras
- [x] Structured data extraction for ID cards / train tickets
- [x] Added CNN+CTC model; OCR callable through the OpenCV DNN module, averaging under 0.02 s per single-line image
- [ ] Speed up the CPU version
- [ ] OCR with user-defined dictionaries
- [ ] Language-model correction of OCR results
- [ ] Real-time recognition on Raspberry Pi
## Environment setup
@@ -36,7 +39,6 @@ lib = CDLL(root+"chineseocr/darknet/libdarknet.so", RTLD_GLOBAL)
## Download model files
Model files:
* [baidu pan](https://pan.baidu.com/s/1gTW9gwJR6hlwTuyB6nCkzQ)
Copy all files in the folder into the models directory
@@ -65,11 +67,10 @@ pip install .
wget https://deepspeech.bj.bcebos.com/zh_lm/zh_giga.no_cna_cmn.prune01244.klm
mv zh_giga.no_cna_cmn.prune01244.klm chineseocr/models/
```
## Model selection
``` Bash
See the config.py file for the engine and model switches
```
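For reference, the switches that drive model selection live in config.py; a condensed excerpt of the values this commit ships (the full file appears further down):

```python
# excerpt of config.py — engine and model switches
yoloTextFlag = 'keras'  # text detection engine: keras / opencv / darknet
ocrFlag = 'torch'       # OCR engine: keras / torch / opencv
LSTMFLAG = True         # crnn with LSTM layer; False selects the dense variant
chineseModel = True     # True: Chinese+English model, False: English-only
GPU = True              # run OCR on the GPU
ocr_redis = False       # push OCR jobs through redis for concurrent recognition
```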
## Build the docker image
``` Bash
@@ -81,6 +82,18 @@ docker run -d -p 8080:8080 chineseocr /root/anaconda3/bin/python app.py
```
## Start the web service
``` Bash
cd chineseocr  ## enter the chineseocr directory
python app.py 8080  ## port 8080; any free port works
```
## Access the service
http://127.0.0.1:8080/ocr
<img width="500" height="300" src="https://github.com/chineseocr/chineseocr/blob/master/test/demo.png"/>
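A minimal client call might look like the sketch below. The field names (`imgString`, `textAngle`, `textLine`) and the response keys (`res`, `timeTake`) come from the OCR handler in app.py; how the request body is parsed is not shown in this diff, so the JSON encoding here is an assumption.

```python
import base64
import requests  # assumption: any HTTP client works

with open('test/demo.png', 'rb') as f:  # hypothetical sample image
    img_b64 = base64.b64encode(f.read()).decode()

payload = {
    'imgString': 'data:image/png;base64,' + img_b64,  # handler splits on ';base64,'
    'textAngle': True,   # detect text orientation first
    'textLine': False,   # True would run single-line recognition only
}
r = requests.post('http://127.0.0.1:8080/ocr', json=payload)
print(r.json()['timeTake'], r.json()['res'])
```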
## Sample results
@@ -90,11 +103,6 @@ docker run -d -p 8080:8080 chineseocr /root/anaconda3/bin/python app.py
<img width="500" height="300" src="https://github.com/chineseocr/chineseocr/blob/master/test/line-demo.png"/>
## References
1. yolo3 https://github.com/pjreddie/darknet.git
......
@@ -3,20 +3,103 @@
@author: lywen
"""
import os
import json
import time
import web
import numpy as np
from PIL import Image
web.config.debug = True
render = web.template.render('templates', base='base')
from config import *
from apphelper.image import union_rbox, adjust_box_to_origin, base64_to_PIL
from application import trainTicket, idcard
if yoloTextFlag == 'keras' or AngleModelFlag == 'tf' or ocrFlag == 'keras':
    if GPU:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(GPUID)
        import tensorflow as tf
        from keras import backend as K
        config = tf.ConfigProto()
        config.gpu_options.allocator_type = 'BFC'
        config.gpu_options.per_process_gpu_memory_fraction = 0.3  ## maximum GPU memory fraction
        config.gpu_options.allow_growth = True  ## allow GPU memory to grow on demand
        K.set_session(tf.Session(config=config))
        K.get_session().run(tf.global_variables_initializer())
    else:
        ## run on CPU
        os.environ["CUDA_VISIBLE_DEVICES"] = ''

if yoloTextFlag == 'opencv':
    scale, maxScale = IMGSIZE
    from text.opencv_dnn_detect import text_detect
elif yoloTextFlag == 'darknet':
    scale, maxScale = IMGSIZE
    from text.darknet_detect import text_detect
elif yoloTextFlag == 'keras':
    scale, maxScale = IMGSIZE[0], 2048
    from text.keras_detect import text_detect
else:
    print("err, text engine must be one of keras/opencv/darknet")

from text.opencv_dnn_detect import angle_detect

if ocr_redis:
    ## concurrent OCR through a redis job queue
    from apphelper.redisbase import redisDataBase
    ocr = redisDataBase().put_values
else:
    from crnn.keys import alphabetChinese, alphabetEnglish
    if ocrFlag == 'keras':
        from crnn.network_keras import CRNN
        if chineseModel:
            alphabet = alphabetChinese
            if LSTMFLAG:
                ocrModel = ocrModelKerasLstm
            else:
                ocrModel = ocrModelKerasDense
        else:
            ocrModel = ocrModelKerasEng
            alphabet = alphabetEnglish
            LSTMFLAG = True
    elif ocrFlag == 'torch':
        from crnn.network_torch import CRNN
        if chineseModel:
            alphabet = alphabetChinese
            if LSTMFLAG:
                ocrModel = ocrModelTorchLstm
            else:
                ocrModel = ocrModelTorchDense
        else:
            ocrModel = ocrModelTorchEng
            alphabet = alphabetEnglish
            LSTMFLAG = True
    elif ocrFlag == 'opencv':
        from crnn.network_dnn import CRNN
        ocrModel = ocrModelOpencv
        alphabet = alphabetChinese
    else:
        print("err, ocr engine must be one of keras/torch/opencv")

    nclass = len(alphabet) + 1
    if ocrFlag == 'opencv':
        crnn = CRNN(alphabet=alphabet)
    else:
        crnn = CRNN(32, 1, nclass, 256, leakyRelu=False, lstmFlag=LSTMFLAG, GPU=GPU, alphabet=alphabet)
    if os.path.exists(ocrModel):
        crnn.load_weights(ocrModel)
    else:
        print("download the model, or convert one with the tools directory!")
    ocr = crnn.predict_job

from main import TextOcrModel
model = TextOcrModel(ocr, text_detect, angle_detect)
billList = ['通用OCR','火车票','身份证']
@@ -30,7 +113,6 @@ class OCR:
        post['H'] = 1000
        post['width'] = 600
        post['W'] = 600
        post['billList'] = billList
        return render.ocr(post)
@@ -42,33 +124,32 @@ class OCR:
        textLine = data.get('textLine', False)  ## single-line recognition only
        imgString = data['imgString'].encode().split(b';base64,')[-1]
        img = base64_to_PIL(imgString)
        if img is not None:
            img = np.array(img)

        H, W = img.shape[:2]
        timeTake = time.time()
        if textLine:
            ## single-line recognition
            partImg = Image.fromarray(img)
            text = ocr.predict(partImg.convert('L'))
            res = [{'text': text, 'name': '0', 'box': [0, 0, W, 0, W, H, 0, H]}]
        else:
            detectAngle = textAngle
            result, angle = model.model(img,
                                        scale=scale,
                                        maxScale=maxScale,
                                        detectAngle=detectAngle,  ## detect text orientation; controlled by a web parameter
                                        MAX_HORIZONTAL_GAP=100,  ## max gap between characters when merging text lines
                                        MIN_V_OVERLAPS=0.6,
                                        MIN_SIZE_SIM=0.6,
                                        TEXT_PROPOSALS_MIN_SCORE=0.1,
                                        TEXT_PROPOSALS_NMS_THRESH=0.3,
                                        TEXT_LINE_NMS_THRESH=0.99,  ## IoU threshold between text lines
                                        LINE_MIN_SCORE=0.1,
                                        leftAdjustAlph=0.01,  ## extend detected text lines to the left
                                        rightAdjustAlph=0.01,  ## extend detected text lines to the right
                                        )
@@ -101,8 +182,6 @@ class OCR:
        timeTake = time.time() - timeTake

        return json.dumps({'res': res, 'timeTake': round(timeTake, 4)}, ensure_ascii=False)
......
# -*- coding: utf-8 -*-
"""
## image helper functions
@author: chineseocr
"""
import six
import base64
import numpy as np
import cv2
from PIL import Image
from io import BytesIO
def base64_to_PIL(string):
    """
    base64 string to PIL image
    """
    try:
        base64_data = base64.b64decode(string)
        buf = six.BytesIO()
        buf.write(base64_data)
        buf.seek(0)
        img = Image.open(buf).convert('RGB')
        return img
    except:
        return None


def PIL_to_base64(image):
    output = BytesIO()
    image.save(output, format='png')
    contents = output.getvalue()
    output.close()
    string = base64.b64encode(contents)
    return string
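A quick round-trip sanity check for the two helpers (a sketch; the blank test image is hypothetical):

```python
from PIL import Image

img = Image.new('RGB', (100, 32), (255, 255, 255))  # hypothetical blank image
s = PIL_to_base64(img)        # base64-encoded PNG bytes
img2 = base64_to_PIL(s)       # back to an RGB PIL image, or None on bad input
assert img2 is not None and img2.size == img.size
```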
def solve(box):
@@ -103,65 +61,6 @@ def solve(box):
    angle = np.arcsin(sinA)
    return angle, w, h, cx, cy
def xy_rotate_box(cx, cy, w, h, angle):
    """
@@ -181,7 +80,7 @@ def xy_rotate_box(cx,cy,w,h,angle):
    x4, y4 = rotate(cx - w / 2, cy + h / 2, angle, cx, cy)
    return x1, y1, x2, y2, x3, y3, x4, y4

from numpy import cos, sin
def rotate(x, y, angle, cx, cy):
    """
    rotate point (x,y) around (cx,cy)
@@ -238,64 +137,22 @@ def letterbox_image(image, size,fillValue=[128,128,128]):
    '''
    resize image with unchanged aspect ratio using padding
    '''
    image_h, image_w = image.shape[:2]
    w, h = size
    new_w = int(image_w * min(w * 1.0 / image_w, h * 1.0 / image_h))
    new_h = int(image_h * min(w * 1.0 / image_w, h * 1.0 / image_h))
    resized_image = cv2.resize(image, (new_w, new_h))
    if fillValue is None:
        fillValue = [int(x.mean()) for x in cv2.split(np.array(image))]
    boxed_image = np.zeros((size[1], size[0], 3), dtype=np.uint8)
    boxed_image[:] = fillValue
    boxed_image[:new_h, :new_w, :] = resized_image
    return boxed_image, new_w / image_w
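For example, letterboxing a 1280×720 frame into the (608,608) yolo input scales by min(608/1280, 608/720) ≈ 0.475, giving a 608×342 resize padded below with fillValue; the returned ratio new_w/image_w lets callers map boxes back to the source frame:

```python
import numpy as np

frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # hypothetical 1280x720 frame
boxed, ratio = letterbox_image(frame, (608, 608))
print(boxed.shape, round(ratio, 3))  # (608, 608, 3) 0.475
```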
@@ -327,96 +184,25 @@ def box_rotate(box,angle=0,imgH=0,imgW=0):
    return (x1_, y1_, x2_, y2_, x3_, y3_, x4_, y4_)
def rotate_cut_img(im, box, leftAdjustAlph=0.0, rightAdjustAlph=0.0):
    angle, w, h, cx, cy = solve(box)
    degree_ = angle * 180.0 / np.pi
    box = (max(1, cx - w / 2 - leftAdjustAlph * (w / 2)),  ## xmin
           cy - h / 2,  ## ymin
           min(cx + w / 2 + rightAdjustAlph * (w / 2), im.size[0] - 1),  ## xmax
           cy + h / 2)  ## ymax
    newW = box[2] - box[0]
    newH = box[3] - box[1]
    tmpImg = im.rotate(degree_, center=(cx, cy)).crop(box)
    box = {'cx': cx, 'cy': cy, 'w': newW, 'h': newH, 'degree': degree_}
    return tmpImg, box
from scipy.ndimage import filters, interpolation
from numpy import amin, amax
def estimate_skew_angle(raw):
    """
@@ -521,8 +307,17 @@ def get_boxes( bboxes):
        text_recs[index, 6] = x4
        text_recs[index, 7] = y4
        index = index + 1

    boxes = []
    for box in text_recs:
        x1, y1 = (box[0], box[1])
        x2, y2 = (box[2], box[3])
        x3, y3 = (box[6], box[7])
        x4, y4 = (box[4], box[5])
        boxes.append([x1, y1, x2, y2, x3, y3, x4, y4])
    boxes = np.array(boxes)

    return boxes
@@ -611,4 +406,6 @@ def adjust_box_to_origin(img,angle, result):
        box = x1, y1, x2, y2, x3, y3, x4, y4
        newresult.append({'name': line['name'], 'text': line['text'], 'box': box})
    return newresult
\ No newline at end of file
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 4 23:30:48 2019
@author: chineseocr
"""
import time
import uuid
import redis
from apphelper.image import PIL_to_base64, base64_to_PIL

# redis host; the redis server must be running; 6379 is the default redis port
pool = redis.ConnectionPool(host='localhost', port=6379, decode_responses=True)
conn = redis.Redis(connection_pool=pool)

jobListTable = 'job_list_table'      ## job queue
resSetTable = 'res_set_table'        ## recognition results table
imgStringTable = 'img_string_table'  ## image storage table


class redisDataBase:
    """
    redis helpers
    """
    def put_values(self, resJob, timeOut=10):
        ## push PIL images onto the queue and wait for the recognition results
        timeBegin = time.time()
        listName = uuid.uuid1().__str__()
        keys = []
        for ind, line in enumerate(resJob):
            img = line['img']
            value = PIL_to_base64(img)
            ind = '{}_{}'.format(listName, ind)
            keys.append(ind)
            self.set_dict(imgStringTable, ind, value)
            self.set_list(jobListTable, ind)

        res = [None for key in keys]
        while time.time() - timeBegin < timeOut:
            res = self.get_dict(resSetTable, keys)
            delres = [x for x in res if x is not None]
            if len(delres) == len(keys):
                break

        ## delete the cached records
        for key in keys:
            self.del_dict(imgStringTable, key)  ## delete cached image
            self.del_dict(resSetTable, key)     ## delete cached result

        for ind, text in enumerate(res):
            resJob[ind]['text'] = text
        return resJob

    def get_job(self, callback):
        ## fetch an image from the queue and run recognition on it
        ind = self.get_list(jobListTable)
        if ind is not None:
            value = self.get_dict(imgStringTable, [ind])
            value = value[0]
            print(ind)
            if value is not None:
                value = base64_to_PIL(value)
                value = callback(value)
                print(ind, value)
                self.set_dict(resSetTable, ind, value)  ## store the recognition result

    def set_list(self, name, value):
        """
        push a value onto the queue
        """
        conn.rpush(name, value)

    def get_list(self, name):
        """
        pop a value off the queue
        """
        value = conn.lpop(name)
        return value

    def set_dict(self, name, key, value):
        """
        set a key-value pair
        """
        conn.hset(name, key, value)

    def get_dict(self, name, keys):
        """
        batch get
        """
        return conn.hmget(name, keys)

    def del_dict(self, name, key):
        conn.hdel(name, key)
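The consumer side of this queue is presumably a loop that polls get_job with the recognizer as the callback (a sketch under that assumption; `crnn` is the engine instance built in app.py, not part of this file):

```python
import time
from apphelper.redisbase import redisDataBase

db = redisDataBase()
while True:                    # hypothetical worker loop
    db.get_job(crnn.predict)   # pop one image, run OCR, store the result
    time.sleep(0.01)           # avoid busy-waiting on an empty queue
```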
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 4 01:01:37 2019
ID card recognition
@author: chineseocr
"""
from apphelper.image import union_rbox
import re
......
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 4 01:01:37 2019
train ticket recognition
@author: chineseocr
"""
from apphelper.image import union_rbox
import re
@@ -28,8 +32,7 @@ class trainTicket:
        txt = txt.replace(' ', '')
        res = re.findall('[一-龥]+站', txt), re.findall('[一-龥]+站(.+?)[][一-龥]+站', txt)
        if len(res[0]) > 1:
            station['出发'], station['到达'] = res[0][0].replace('站', ''), res[0][1].replace('站', '')
        if len(res[1]) > 0:
            station['车次'] = res[1][0]
......
import os

pwd = os.getcwd()
########################Text detection################################################
## text detection engine
IMGSIZE = (608,608)  ## yolo3 input image size
yoloTextFlag = 'keras'  ## keras, opencv, or darknet; model quality: keras>darknet>opencv

############## keras yolo ##############
keras_anchors = '8,11, 8,16, 8,23, 8,33, 8,48, 8,97, 8,139, 8,198, 8,283'
class_names = ['none','text',]
kerasTextModel = os.path.join(pwd,"models","text.h5")  ## keras model weights
############## keras yolo ##############

############## darknet yolo ##############
darknetRoot = os.path.join(os.path.curdir,"darknet")  ## yolo install directory
yoloCfg = os.path.join(pwd,"models","text.cfg")
yoloWeights = os.path.join(pwd,"models","text.weights")
yoloData = os.path.join(pwd,"models","text.data")
############## darknet yolo ##############
########################Text detection################################################

## GPU selection and device index
GPU = True  ## whether OCR uses the GPU
GPUID = 0  ## GPU device index

## vgg text-orientation detection model
DETECTANGLE = True  ## whether to detect text orientation
AngleModelPb = os.path.join(pwd,"models","Angle-model.pb")
AngleModelPbtxt = os.path.join(pwd,"models","Angle-model.pbtxt")
AngleModelFlag = 'opencv'  ## opencv or tf

######################OCR models###################################################
ocr_redis = False  ## whether to run OCR as concurrent redis jobs
## whether the crnn model uses an LSTM layer
LSTMFLAG = True
ocrFlag = 'torch'  ## OCR engine: keras, torch, or opencv
## model choice — True: Chinese+English model, False: English-only model
chineseModel = True
## convert keras models with the tools directory
ocrModelKerasDense = os.path.join(pwd,"models","ocr-dense.h5")
ocrModelKerasLstm = os.path.join(pwd,"models","ocr-lstm.h5")
ocrModelKerasEng = os.path.join(pwd,"models","ocr-english.h5")

ocrModelTorchLstm = os.path.join(pwd,"models","ocr-lstm.pth")
ocrModelTorchDense = os.path.join(pwd,"models","ocr-dense.pth")
ocrModelTorchEng = os.path.join(pwd,"models","ocr-english.pth")

ocrModelOpencv = os.path.join(pwd,"models","ocr.pb")
######################OCR models###################################################
#coding:utf-8
from crnn.utils import strLabelConverter, resizeNormalize
from crnn.network_keras import keras_crnn as CRNN
from config import LSTMFLAG
import tensorflow as tf
graph = tf.get_default_graph()  ## work around web.py-related errors
from crnn import keys
from config import ocrModelKeras
import numpy as np

def crnnSource():
    alphabet = keys.alphabetChinese  ## Chinese+English model
    converter = strLabelConverter(alphabet)
    model = CRNN(32, 1, len(alphabet) + 1, 256, 1, lstmFlag=LSTMFLAG)
    model.load_weights(ocrModelKeras)
    return model, converter

## load the model
model, converter = crnnSource()

def crnnOcr(image):
    """
    crnn model, OCR recognition
    image: PIL.Image.convert("L")
    """
    scale = image.size[1] * 1.0 / 32
    w = image.size[0] / scale
    w = int(w)
    transformer = resizeNormalize((w, 32))
    image = transformer(image)
    image = image.astype(np.float32)
    image = np.array([[image]])
    global graph
    with graph.as_default():
        preds = model.predict(image)
    preds = preds[0]
    preds = np.argmax(preds, axis=2).reshape((-1,))
    sim_pred = converter.decode(preds)
    return sim_pred
#coding:utf-8
import torch
import numpy as np
from torch.autograd import Variable
from crnn.utils import strLabelConverter, resizeNormalize
from crnn.network_torch import CRNN
from crnn import keys
from collections import OrderedDict
from config import ocrModel, LSTMFLAG, GPU
from config import chinsesModel

def crnnSource():
    """
    load the model
    """
    if chinsesModel:
        alphabet = keys.alphabetChinese  ## Chinese+English model
    else:
        alphabet = keys.alphabetEnglish  ## English-only model
    converter = strLabelConverter(alphabet)
    if torch.cuda.is_available() and GPU:
        model = CRNN(32, 1, len(alphabet) + 1, 256, 1, lstmFlag=LSTMFLAG).cuda()  ## LSTMFLAG=True: crnn, otherwise dense ocr
    else:
        model = CRNN(32, 1, len(alphabet) + 1, 256, 1, lstmFlag=LSTMFLAG).cpu()
    trainWeights = torch.load(ocrModel, map_location=lambda storage, loc: storage)
    modelWeights = OrderedDict()
    for k, v in trainWeights.items():
        name = k.replace('module.', '')  # remove `module.`
        modelWeights[name] = v
    # load params
    model.load_state_dict(modelWeights)
    return model, converter

## load the model
model, converter = crnnSource()
model.eval()

def crnnOcr(image):
    """
    crnn model, OCR recognition
    image: PIL.Image.convert("L")
    """
    scale = image.size[1] * 1.0 / 32
    w = image.size[0] / scale
    w = int(w)
    transformer = resizeNormalize((w, 32))
    image = transformer(image)
    image = image.astype(np.float32)
    image = torch.from_numpy(image)
    if torch.cuda.is_available() and GPU:
        image = image.cuda()
    else:
        image = image.cpu()
    image = image.view(1, 1, *image.size())
    image = Variable(image)
    preds = model(image)
    _, preds = preds.max(2)
    preds = preds.transpose(1, 0).contiguous().view(-1)
    sim_pred = converter.decode(preds)
    return sim_pred
#!/usr/bin/python
# encoding: utf-8

import random
import torch
from torch.utils.data import Dataset
from torch.utils.data import sampler
import torchvision.transforms as transforms
import lmdb
import six
import sys
from PIL import Image
import numpy as np


class lmdbDataset(Dataset):

    def __init__(self, root=None, transform=None, target_transform=None):
        self.env = lmdb.open(
            root,
            max_readers=1,
            readonly=True,
            lock=False,
            readahead=False,
            meminit=False)

        if not self.env:
            print('cannot creat lmdb from %s' % (root))
            sys.exit(0)

        with self.env.begin(write=False) as txn:
            nSamples = int(txn.get('num-samples'))
            self.nSamples = nSamples

        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        return self.nSamples

    def __getitem__(self, index):
        assert index <= len(self), 'index range error'
        index += 1
        with self.env.begin(write=False) as txn:
            img_key = 'image-%09d' % index
            imgbuf = txn.get(img_key)

            buf = six.BytesIO()
            buf.write(imgbuf)
            buf.seek(0)
            try:
                img = Image.open(buf).convert('L')
            except IOError:
                print('Corrupted image for %d' % index)
                return self[index + 1]

            if self.transform is not None:
                img = self.transform(img)

            label_key = 'label-%09d' % index
            label = str(txn.get(label_key))

            if self.target_transform is not None:
                label = self.target_transform(label)

        return (img, label)


class resizeNormalize(object):

    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation
        self.toTensor = transforms.ToTensor()

    def __call__(self, img):
        img = img.resize(self.size, self.interpolation)
        img = self.toTensor(img)
        img.sub_(0.5).div_(0.5)
        return img


class randomSequentialSampler(sampler.Sampler):

    def __init__(self, data_source, batch_size):
        self.num_samples = len(data_source)
        self.batch_size = batch_size

    def __iter__(self):
        n_batch = len(self) // self.batch_size
        tail = len(self) % self.batch_size
        index = torch.LongTensor(len(self)).fill_(0)
        for i in range(n_batch):
            random_start = random.randint(0, len(self) - self.batch_size)
            batch_index = random_start + torch.range(0, self.batch_size - 1)
            index[i * self.batch_size:(i + 1) * self.batch_size] = batch_index
        # deal with tail
        if tail:
            random_start = random.randint(0, len(self) - self.batch_size)
            tail_index = random_start + torch.range(0, tail - 1)
            index[(i + 1) * self.batch_size:] = tail_index
        return iter(index)

    def __len__(self):
        return self.num_samples


class alignCollate(object):

    def __init__(self, imgH=32, imgW=128, keep_ratio=False, min_ratio=1):
        self.imgH = imgH
        self.imgW = imgW
        self.keep_ratio = keep_ratio
        self.min_ratio = min_ratio

    def __call__(self, batch):
        images, labels = zip(*batch)
        imgH = self.imgH
        imgW = self.imgW
        if self.keep_ratio:
            ratios = []
            for image in images:
                w, h = image.size
                ratios.append(w / float(h))
            ratios.sort()
            max_ratio = ratios[-1]
            imgW = int(np.floor(max_ratio * imgH))
            imgW = max(imgH * self.min_ratio, imgW)  # assure imgW >= imgH
        transform = resizeNormalize((imgW, imgH))
        images = [transform(image) for image in images]
        images = torch.cat([t.unsqueeze(0) for t in images], 0)
        return images, labels
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
alphabetChinese = \
u'\'疗绚诚娇溜题贿者廖更纳加奉公一就汴计与路房原妇208-7其>:],,骑刈全消昏傈安久钟嗅不影处驽蜿资关椤地瘸专问忖票嫉炎韵要月田节陂鄙捌备拳伺眼网盎大傍心东愉汇蹿科每业里航晏字平录先13彤鲶产稍督腴有象岳注绍在泺文定核名水过理让偷率等这发”为含肥酉相鄱七编猥锛日镀蒂掰倒辆栾栗综涩州雌滑馀了机块司宰甙兴矽抚保用沧秩如收息滥页疑埠!!姥异橹钇向下跄的椴沫国绥獠报开民蜇何分凇长讥藏掏施羽中讲派嘟人提浼间世而古多倪唇饯控庚首赛蜓味断制觉技替艰溢潮夕钺外摘枋动双单啮户枇确锦曜杜或能效霜盒然侗电晁放步鹃新杖蜂吒濂瞬评总隍对独合也是府青天诲墙组滴级邀帘示已时骸仄泅和遨店雇疫持巍踮境只亨目鉴崤闲体泄杂作般轰化解迂诿蛭璀腾告版服省师小规程线海办引二桧牌砺洄裴修图痫胡许犊事郛基柴呼食研奶律蛋因葆察戏褒戒再李骁工貂油鹅章啄休场给睡纷豆器捎说敏学会浒设诊格廓查来霓室溆¢诡寥焕舜柒狐回戟砾厄实翩尿五入径惭喹股宇篝|;美期云九祺扮靠锝槌系企酰阊暂蚕忻豁本羹执条钦H獒限进季楦于芘玖铋茯未答粘括样精欠矢甥帷嵩扣令仔风皈行支部蓉刮站蜡救钊汗松嫌成可.鹤院从交政怕活调球局验髌第韫谗串到圆年米/*友忿检区看自敢刃个兹弄流留同没齿星聆轼湖什三建蛔儿椋汕震颧鲤跟力情璺铨陪务指族训滦鄣濮扒商箱十召慷辗所莞管护臭横硒嗓接侦六露党馋驾剖高侬妪幂猗绺骐央酐孝筝课徇缰门男西项句谙瞒秃篇教碲罚声呐景前富嘴鳌稀免朋啬睐去赈鱼住肩愕速旁波厅健茼厥鲟谅投攸炔数方击呋谈绩别愫僚躬鹧胪炳招喇膨泵蹦毛结54谱识陕粽婚拟构且搜任潘比郢妨醪陀桔碘扎选哈骷楷亿明缆脯监睫逻婵共赴淝凡惦及达揖谩澹减焰蛹番祁柏员禄怡峤龙白叽生闯起细装谕竟聚钙上导渊按艾辘挡耒盹饪臀记邮蕙受各医搂普滇朗茸带翻酚(光堤墟蔷万幻〓瑙辈昧盏亘蛀吉铰请子假闻税井诩哨嫂好面琐校馊鬣缂营访炖占农缀否经钚棵趟张亟吏茶谨捻论迸堂玉信吧瞠乡姬寺咬溏苄皿意赉宝尔钰艺特唳踉都荣倚登荐丧奇涵批炭近符傩感道着菊虹仲众懈濯颞眺南释北缝标既茗整撼迤贲挎耱拒某妍卫哇英矶藩治他元领膜遮穗蛾飞荒棺劫么市火温拈棚洼转果奕卸迪伸泳斗邡侄涨屯萋胭氡崮枞惧冒彩斜手豚随旭淑妞形菌吲沱争驯歹挟兆柱传至包内响临红功弩衡寂禁老棍耆渍织害氵渑布载靥嗬虽苹咨娄库雉榜帜嘲套瑚亲簸欧边6腿旮抛吹瞳得镓梗厨继漾愣憨士策窑抑躯襟脏参贸言干绸鳄穷藜音折详)举悍甸癌黎谴死罩迁寒驷袖媒蒋掘模纠恣观祖蛆碍位稿主澧跌筏京锏帝贴证糠才黄鲸略炯饱四出园犀牧容汉杆浈汰瑷造虫瘩怪驴济应花沣谔夙旅价矿以考su呦晒巡茅准肟瓴詹仟褂译桌混宁怦郑抿些余鄂饴攒珑群阖岔琨藓预环洮岌宀杲瀵最常囡周踊女鼓袭喉简范薯遐疏粱黜禧法箔斤遥汝奥直贞撑置绱集她馅逗钧橱魉[恙躁唤9旺膘待脾惫购吗依盲度瘿蠖俾之镗拇鲵厝簧续款展啃表剔品钻腭损清锶统涌寸滨贪链吠冈伎迥咏吁览防迅失汾阔逵绀蔑列川凭努熨揪利俱绉抢鸨我即责膦易毓鹊刹玷岿空嘞绊排术估锷违们苟铜播肘件烫审鲂广像铌惰铟巳胍鲍康憧色恢想拷尤疳知SYFDA峄裕帮握搔氐氘难墒沮雨叁缥悴藐湫娟苑稠颛簇后阕闭蕤缚怎佞码嘤蔡痊舱螯帕赫昵升烬岫、疵蜻髁蕨隶烛械丑盂梁强鲛由拘揉劭龟撤钩呕孛费妻漂求阑崖秤甘通深补赃坎床啪承吼量暇钼烨阂擎脱逮称P神属矗华届狍葑汹育患窒蛰佼静槎运鳗庆逝曼疱克代官此麸耧蚌晟例础榛副测唰缢迹灬霁身岁赭扛又菡乜雾板读陷徉贯郁虑变钓菜圾现琢式乐维渔浜左吾脑钡警T啵拴偌漱湿硕止骼魄积燥联踢玛|则窿见振畿送班钽您赵刨印讨踝籍谡舌崧汽蔽沪酥绒怖财帖肱私莎勋羔霸励哼帐将帅渠纪婴娩岭厘滕吻伤坝冠戊隆瘁介涧物黍并姗奢蹑掣垸锴命箍捉病辖琰眭迩艘绌繁寅若毋思诉类诈燮轲酮狂重反职筱县委磕绣奖晋濉志徽肠呈獐坻口片碰几村柿劳料获亩惕晕厌号罢池正鏖煨家棕复尝懋蜥锅岛扰队坠瘾钬@卧疣镇譬冰彷频黯据垄采八缪瘫型熹砰楠襁箐但嘶绳啤拍盥穆傲洗盯塘怔筛丿台恒喂葛永¥烟酒桦书砂蚝缉态瀚袄圳轻蛛超榧遛姒奘铮右荽望偻卡丶氰附做革索戚坨桷唁垅榻岐偎坛莨山殊微骇陈爨推嗝驹澡藁呤卤嘻糅逛侵郓酌德摇※鬃被慨殡羸昌泡戛鞋河宪沿玲鲨翅哽源铅语照邯址荃佬顺鸳町霭睾瓢夸椁晓酿痈咔侏券噎湍签嚷离午尚社锤背孟使浪缦潍鞅军姹驶笑鳟鲁》孽钜绿洱礴焯椰颖囔乌孔巴互性椽哞聘昨早暮胶炀隧低彗昝铁呓氽藉喔癖瑗姨权胱韦堑蜜酋楝砝毁靓歙锲究屋喳骨辨碑武鸠宫辜烊适坡殃培佩供走蜈迟翼况姣凛浔吃飘债犟金促苛崇坂莳畔绂兵蠕斋根砍亢欢恬崔剁餐榫快扶‖濒缠鳜当彭驭浦篮昀锆秸钳弋娣瞑夷龛苫拱致%嵊障隐弑初娓抉汩累蓖"唬助苓昙押毙破城郧逢嚏獭瞻溱婿赊跨恼璧萃姻貉灵炉密氛陶砸谬衔点琛沛枳层岱诺脍榈埂征冷裁打蹴素瘘逞蛐聊激腱萘踵飒蓟吆取咙簋涓矩曝挺揣座你史舵焱尘苏笈脚溉榨诵樊邓焊义庶儋蟋蒲赦呷杞诠豪还试颓茉太除紫逃痴草充鳕珉祗墨渭烩蘸慕璇镶穴嵘恶骂险绋幕碉肺戳刘潞秣纾潜銮洛须罘销瘪汞兮屉r林厕质探划狸殚善煊烹〒锈逯宸辍泱柚袍远蹋嶙绝峥娥缍雀徵认镱谷=贩勉撩鄯斐洋非祚泾诒饿撬威晷搭芍锥笺蓦候琊档礁沼卵荠忑朝凹瑞头仪弧孵畏铆突衲车浩气茂悖厢枕酝戴湾邹飚攘锂写宵翁岷无喜丈挑嗟绛殉议槽具醇淞笃郴阅饼底壕砚弈询缕庹翟零筷暨舟闺甯撞麂茌蔼很珲捕棠角阉媛娲诽剿尉爵睬韩诰匣危糍镯立浏阳少盆舔擘匪申尬铣旯抖赘瓯居ˇ哮游锭茏歌坏甚秒舞沙仗劲潺阿燧郭嗖霏忠材奂耐跺砀输岖媳氟极摆灿今扔腻枝奎药熄吨话q额慑嘌协喀壳埭视著於愧陲翌峁颅佛腹聋侯咎叟秀颇存较罪哄岗扫栏钾羌己璨枭霉煌涸衿键镝益岢奏连夯睿冥均糖狞蹊稻爸刿胥煜丽肿璃掸跚灾垂樾濑乎莲窄犹撮战馄软络显鸢胸宾妲恕埔蝌份遇巧瞟粒恰剥桡博讯凯堇阶滤卖斌骚彬兑磺樱舷两娱福仃差找桁÷净把阴污戬雷碓蕲楚罡焖抽妫咒仑闱尽邑菁爱贷沥鞑牡嗉崴骤塌嗦订拮滓捡锻次坪杩臃箬融珂鹗宗枚降鸬妯阄堰盐毅必杨崃俺甬状莘货耸菱腼铸唏痤孚澳懒溅翘疙杷淼缙骰喊悉砻坷艇赁界谤纣宴晃茹归饭梢铡街抄肼鬟苯颂撷戈炒咆茭瘙负仰客琉铢封卑珥椿镧窨鬲寿御袤铃萎砖餮脒裳肪孕嫣馗嵇恳氯江石褶冢祸阻狈羞银靳透咳叼敷芷啥它瓤兰痘懊逑肌往捺坊甩呻〃沦忘膻祟菅剧崆智坯臧霍墅攻眯倘拢骠铐庭岙瓠′缺泥迢捶??郏喙掷沌纯秘种听绘固螨团香盗妒埚蓝拖旱荞铀血遏汲辰叩拽幅硬惶桀漠措泼唑齐肾念酱虚屁耶旗砦闵婉馆拭绅韧忏窝醋葺顾辞倜堆辋逆玟贱疾董惘倌锕淘嘀莽俭笏绑鲷杈择蟀粥嗯驰逾案谪褓胫哩昕颚鲢绠躺鹄崂儒俨丝尕泌啊萸彰幺吟骄苣弦脊瑰〈诛镁析闪剪侧哟框螃守嬗燕狭铈缮概迳痧鲲俯售笼痣扉挖满咋援邱扇歪便玑绦峡蛇叨〖泽胃斓喋怂坟猪该蚬炕弥赞棣晔娠挲狡创疖铕镭稷挫弭啾翔粉履苘哦楼秕铂土锣瘟挣栉习享桢袅磨桂谦延坚蔚噗署谟猬钎恐嬉雒倦衅亏璩睹刻殿王算雕麻丘柯骆丸塍谚添鲈垓桎蚯芥予飕镦谌窗醚菀亮搪莺蒿羁足J真轶悬衷靛翊掩哒炅掐冼妮l谐稚荆擒犯陵虏浓崽刍陌傻孜千靖演矜钕煽杰酗渗伞栋俗泫戍罕沾疽灏煦芬磴叱阱榉湃蜀叉醒彪租郡篷屎良垢隗弱陨峪砷掴颁胎雯绵贬沐撵隘篙暖曹陡栓填臼彦瓶琪潼哪鸡摩啦俟锋域耻蔫疯纹撇毒绶痛酯忍爪赳歆嘹辕烈册朴钱吮毯癜娃谀邵厮炽璞邃丐追词瓒忆轧芫谯喷弟半冕裙掖墉绮寝苔势顷褥切衮君佳嫒蚩霞佚洙逊镖暹唛&殒顶碗獗轭铺蛊废恹汨崩珍那杵曲纺夏薰傀闳淬姘舀拧卷楂恍讪厩寮篪赓乘灭盅鞣沟慎挂饺鼾杳树缨丛絮娌臻嗳篡侩述衰矛圈蚜匕筹匿濞晨叶骋郝挚蚴滞增侍描瓣吖嫦蟒匾圣赌毡癞恺百曳需篓肮庖帏卿驿遗蹬鬓骡歉芎胳屐禽烦晌寄媾狄翡苒船廉终痞殇々畦饶改拆悻萄£瓿乃訾桅匮溧拥纱铍骗蕃龋缬父佐疚栎醍掳蓄x惆颜鲆榆〔猎敌暴谥鲫贾罗玻缄扦芪癣落徒臾恿猩托邴肄牵春陛耀刊拓蓓邳堕寇枉淌啡湄兽酷萼碚濠萤夹旬戮梭琥椭昔勺蜊绐晚孺僵宣摄冽旨萌忙蚤眉噼蟑付契瓜悼颡壁曾窕颢澎仿俑浑嵌浣乍碌褪乱蔟隙玩剐葫箫纲围伐决伙漩瑟刑肓镳缓蹭氨皓典畲坍铑檐塑洞倬储胴淳戾吐灼惺妙毕珐缈虱盖羰鸿磅谓髅娴苴唷蚣霹抨贤唠犬誓逍庠逼麓籼釉呜碧秧氩摔霄穸纨辟妈映完牛缴嗷炊恩荔茆掉紊慌莓羟阙萁磐另蕹辱鳐湮吡吩唐睦垠舒圜冗瞿溺芾囱匠僳汐菩饬漓黑霰浸濡窥毂蒡兢驻鹉芮诙迫雳厂忐臆猴鸣蚪栈箕羡渐莆捍眈哓趴蹼埕嚣骛宏淄斑噜严瑛垃椎诱压庾绞焘廿抡迄棘夫纬锹眨瞌侠脐竞瀑孳骧遁姜颦荪滚萦伪逸粳爬锁矣役趣洒颔诏逐奸甭惠攀蹄泛尼拼阮鹰亚颈惑勒〉际肛爷刚钨丰养冶鲽辉蔻画覆皴妊麦返醉皂擀〗酶凑粹悟诀硖港卜z杀涕±舍铠抵弛段敝镐奠拂轴跛袱et沉菇俎薪峦秭蟹历盟菠寡液肢喻染裱悱抱氙赤捅猛跑氮谣仁尺辊窍烙衍架擦倏璐瑁币楞胖夔趸邛惴饕虔蝎§哉贝宽辫炮扩饲籽魏菟锰伍猝末琳哚蛎邂呀姿鄞却歧仙恸椐森牒寤袒婆虢雅钉朵贼欲苞寰故龚坭嘘咫礼硷兀睢汶’铲烧绕诃浃钿哺柜讼颊璁腔洽咐脲簌筠镣玮鞠谁兼姆挥梯蝴谘漕刷躏宦弼b垌劈麟莉揭笙渎仕嗤仓配怏抬错泯镊孰猿邪仍秋鼬壹歇吵炼<尧射柬廷胧霾凳隋肚浮梦祥株堵退L鹫跎凶毽荟炫栩玳甜沂鹿顽伯爹赔蛴徐匡欣狰缸雹蟆疤默沤啜痂衣禅wih辽葳黝钗停沽棒馨颌肉吴硫
悯劾娈马啧吊悌镑峭帆瀣涉咸疸滋泣翦拙癸钥蜒+尾庄凝泉婢渴谊乞陆锉糊鸦淮IBN晦弗乔庥葡尻席橡傣渣拿惩麋斛缃矮蛏岘鸽姐膏催奔镒喱蠡摧钯胤柠拐璋鸥卢荡倾^_珀逄萧塾掇贮笆聂圃冲嵬M滔笕值炙偶蜱搐梆汪蔬腑鸯蹇敞绯仨祯谆梧糗鑫啸豺囹猾巢柄瀛筑踌沭暗苁鱿蹉脂蘖牢热木吸溃宠序泞偿拜檩厚朐毗螳吞媚朽担蝗橘畴祈糟盱隼郜惜珠裨铵焙琚唯咚噪骊丫滢勤棉呸咣淀隔蕾窈饨挨煅短匙粕镜赣撕墩酬馁豌颐抗酣氓佑搁哭递耷涡桃贻碣截瘦昭镌蔓氚甲猕蕴蓬散拾纛狼猷铎埋旖矾讳囊糜迈粟蚂紧鲳瘢栽稼羊锄斟睁桥瓮蹙祉醺鼻昱剃跳篱跷蒜翎宅晖嗑壑峻癫屏狠陋袜途憎祀莹滟佶溥臣约盛峰磁慵婪拦莅朕鹦粲裤哎疡嫖琵窟堪谛嘉儡鳝斩郾驸酊妄胜贺徙傅噌钢栅庇恋匝巯邈尸锚粗佟蛟薹纵蚊郅绢锐苗俞篆淆膀鲜煎诶秽寻涮刺怀噶巨褰魅灶灌桉藕谜舸薄搀恽借牯痉渥愿亓耘杠柩锔蚶钣珈喘蹒幽赐稗晤莱泔扯肯菪裆腩豉疆骜腐倭珏唔粮亡润慰伽橄玄誉醐胆龊粼塬陇彼削嗣绾芽妗垭瘴爽薏寨龈泠弹赢漪猫嘧涂恤圭茧烽屑痕巾赖荸凰腮畈亵蹲偃苇澜艮换骺烘苕梓颉肇哗悄氤涠葬屠鹭植竺佯诣鲇瘀鲅邦移滁冯耕癔戌茬沁巩悠湘洪痹锟循谋腕鳃钠捞焉迎碱伫急榷奈邝卯辄皲卟醛畹忧稳雄昼缩阈睑扌耗曦涅捏瞧邕淖漉铝耦禹湛喽莼琅诸苎纂硅始嗨傥燃臂赅嘈呆贵屹壮肋亍蚀卅豹腆邬迭浊}童螂捐圩勐触寞汊壤荫膺渌芳懿遴螈泰蓼蛤茜舅枫朔膝眙避梅判鹜璜牍缅垫藻黔侥惚懂踩腰腈札丞唾慈顿摹荻琬~斧沈滂胁胀幄莜Z匀鄄掌绰茎焚赋萱谑汁铒瞎夺蜗野娆冀弯篁懵灞隽芡脘俐辩芯掺喏膈蝈觐悚踹蔗熠鼠呵抓橼峨畜缔禾崭弃熊摒凸拗穹蒙抒祛劝闫扳阵醌踪喵侣搬仅荧赎蝾琦买婧瞄寓皎冻赝箩莫瞰郊笫姝筒枪遣煸袋舆痱涛母〇启践耙绲盘遂昊搞槿诬纰泓惨檬亻越Co憩熵祷钒暧塔阗胰咄娶魔琶钞邻扬杉殴咽弓〆髻】吭揽霆拄殖脆彻岩芝勃辣剌钝嘎甄佘皖伦授徕憔挪皇庞稔芜踏溴兖卒擢饥鳞煲‰账颗叻斯捧鳍琮讹蛙纽谭酸兔莒睇伟觑羲嗜宜褐旎辛卦诘筋鎏溪挛熔阜晰鳅丢奚灸呱献陉黛鸪甾萨疮拯洲疹辑叙恻谒允柔烂氏逅漆拎惋扈湟纭啕掬擞哥忽涤鸵靡郗瓷扁廊怨雏钮敦E懦憋汀拚啉腌岸f痼瞅尊咀眩飙忌仝迦熬毫胯篑茄腺凄舛碴锵诧羯後漏汤宓仞蚁壶谰皑铄棰罔辅晶苦牟闽\烃饮聿丙蛳朱煤涔鳖犁罐荼砒淦妤黏戎孑婕瑾戢钵枣捋砥衩狙桠稣阎肃梏诫孪昶婊衫嗔侃塞蜃樵峒貌屿欺缫阐栖诟珞荭吝萍嗽恂啻蜴磬峋俸豫谎徊镍韬魇晴U囟猜蛮坐囿伴亭肝佗蝠妃胞滩榴氖垩苋砣扪馏姓轩厉夥侈禀垒岑赏钛辐痔披纸碳“坞蠓挤荥沅悔铧帼蒌蝇apyng哀浆瑶凿桶馈皮奴苜佤伶晗铱炬优弊氢恃甫攥端锌灰稹炝曙邋亥眶碾拉萝绔捷浍腋姑菖凌涞麽锢桨潢绎镰殆锑渝铬困绽觎匈糙暑裹鸟盔肽迷綦『亳佝俘钴觇骥仆疝跪婶郯瀹唉脖踞针晾忒扼瞩叛椒疟嗡邗肆跆玫忡捣咧唆艄蘑潦笛阚沸泻掊菽贫斥髂孢镂赂麝鸾屡衬苷恪叠希粤爻喝茫惬郸绻庸撅碟宄妹膛叮饵崛嗲椅冤搅咕敛尹垦闷蝉霎勰败蓑泸肤鹌幌焦浠鞍刁舰乙竿裔。茵函伊兄丨娜匍謇莪宥似蝽翳酪翠粑薇祢骏赠叫Q噤噻竖芗莠潭俊羿耜O郫趁嗪囚蹶芒洁笋鹑敲硝啶堡渲揩』携宿遒颍扭棱割萜蔸葵琴捂饰衙耿掠募岂窖涟蔺瘤柞瞪怜匹距楔炜哆秦缎幼茁绪痨恨楸娅瓦桩雪嬴伏榔妥铿拌眠雍缇‘卓搓哌觞噩屈哧髓咦巅娑侑淫膳祝勾姊莴胄疃薛蜷胛巷芙芋熙闰勿窃狱剩钏幢陟铛慧靴耍k浙浇飨惟绗祜澈啼咪磷摞诅郦抹跃壬吕肖琏颤尴剡抠凋赚泊津宕殷倔氲漫邺涎怠$垮荬遵俏叹噢饽蜘孙筵疼鞭羧牦箭潴c眸祭髯啖坳愁芩驮倡巽穰沃胚怒凤槛剂趵嫁v邢灯鄢桐睽檗锯槟婷嵋圻诗蕈颠遭痢芸怯馥竭锗徜恭遍籁剑嘱苡龄僧桑潸弘澶楹悲讫愤腥悸谍椹呢桓葭攫阀翰躲敖柑郎笨橇呃魁燎脓葩磋垛玺狮沓砜蕊锺罹蕉翱虐闾巫旦茱嬷枯鹏贡芹汛矫绁拣禺佃讣舫惯乳趋疲挽岚虾衾蠹蹂飓氦铖孩稞瑜壅掀勘妓畅髋W庐牲蓿榕练垣唱邸菲昆婺穿绡麒蚱掂愚泷涪漳妩娉榄讷觅旧藤煮呛柳腓叭庵烷阡罂蜕擂猖咿媲脉【沏貅黠熏哲烁坦酵兜×潇撒剽珩圹乾摸樟帽嗒襄魂轿憬锡〕喃皆咖隅脸残泮袂鹂珊囤捆咤误徨闹淙芊淋怆囗拨梳渤RG绨蚓婀幡狩麾谢唢裸旌伉纶裂驳砼咛澄樨蹈宙澍倍貔操勇蟠摈砧虬够缁悦藿撸艹摁淹豇虎榭ˉ吱d°喧荀踱侮奋偕饷犍惮坑璎徘宛妆袈倩窦昂荏乖K怅撰鳙牙袁酞X痿琼闸雁趾荚虻涝《杏韭偈烤绫鞘卉症遢蓥诋杭荨匆竣簪辙敕虞丹缭咩黟m淤瑕咂铉硼茨嶂痒畸敬涿粪窘熟叔嫔盾忱裘憾梵赡珙咯娘庙溯胺葱痪摊荷卞乒髦寐铭坩胗枷爆溟嚼羚砬轨惊挠罄竽菏氧浅楣盼枢炸阆杯谏噬淇渺俪秆墓泪跻砌痰垡渡耽釜讶鳎煞呗韶舶绷鹳缜旷铊皱龌檀霖奄槐艳蝶旋哝赶骞蚧腊盈丁`蜚矸蝙睨嚓僻鬼醴夜彝磊笔拔栀糕厦邰纫逭纤眦膊馍躇烯蘼冬诤暄骶哑瘠」臊丕愈咱螺擅跋搏硪谄笠淡嘿骅谧鼎皋姚歼蠢驼耳胬挝涯狗蒽孓犷凉芦箴铤孤嘛坤V茴朦挞尖橙诞搴碇洵浚帚蜍漯柘嚎讽芭荤咻祠秉跖埃吓糯眷馒惹娼鲑嫩讴轮瞥靶褚乏缤宋帧删驱碎扑俩俄偏涣竹噱皙佰渚唧斡#镉刀崎筐佣夭贰肴峙哔艿匐牺镛缘仡嫡劣枸堀梨簿鸭蒸亦稽浴{衢束槲j阁揍疥棋潋聪窜乓睛插冉阪苍搽「蟾螟幸仇樽撂慢跤幔俚淅覃觊溶妖帛侨曰妾泗'
alphabetEnglish='01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ*'
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 5 17:39:11 2019
opencv dnn ocr
@author: lywen
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 4 01:01:37 2019
main
@author: chineseocr
"""
import numpy as np
import cv2
from crnn.util import resizeNormalize, strLabelConverter


class CRNN:
    def __init__(self, alphabet=None):
        self.alphabet = alphabet

    def load_weights(self, path):
        ocrPath = path
        ocrPathtxt = path.replace('.pb', '.pbtxt')
        self.model = cv2.dnn.readNetFromTensorflow(ocrPath, ocrPathtxt)

    def predict(self, image):
        image = resizeNormalize(image, 32)
        image = image.astype(np.float32)
        image = np.array([[image]])
        self.model.setInput(image)
        preds = self.model.forward()
        preds = preds.transpose(0, 2, 3, 1)
        preds = preds[0]
        preds = np.argmax(preds, axis=2).reshape((-1,))
        raw = strLabelConverter(preds, self.alphabet)
        return raw

    def predict_job(self, boxes):
        n = len(boxes)
        for i in range(n):
            boxes[i]['text'] = self.predict(boxes[i]['img'])
        return boxes
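Wiring the class up with the frozen graph from config.py would look roughly like this (a sketch; the test image path is hypothetical):

```python
from PIL import Image
from crnn.keys import alphabetChinese
from crnn.network_dnn import CRNN
from config import ocrModelOpencv

crnn = CRNN(alphabet=alphabetChinese)
crnn.load_weights(ocrModelOpencv)  # loads models/ocr.pb plus its .pbtxt
line = Image.open('test/line-demo.png').convert('L')  # hypothetical line image
print(crnn.predict(line))
```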
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 4 01:01:37 2019
keras ocr model
@author: chineseocr
"""
from keras.layers import (Conv2D, BatchNormalization, MaxPool2D, Input, Permute, Reshape, Dense, LeakyReLU, Activation, Bidirectional, LSTM, TimeDistributed)
from keras.models import Model
from keras.layers import ZeroPadding2D
from keras.activations import relu
from crnn.util import resizeNormalize, strLabelConverter
import numpy as np
import tensorflow as tf
graph = tf.get_default_graph()  ## work around web.py-related errors

def keras_crnn(imgH, nc, nclass, nh, leakyRelu=False, lstmFlag=True):
    """
    keras crnn
    """
    data_format = 'channels_first'
    ks = [3, 3, 3, 3, 3, 3, 2]
@@ -79,6 +88,83 @@ def keras_crnn(imgH, nc, nclass, nh, n_rnn=2, leakyRelu=False,lstmFlag=True):
        out = TimeDistributed(Dense(nclass))(x)
    else:
        out = Dense(nclass, name='linear')(x)
    #out = Reshape((-1, nclass),name='out')(out)
    return Model(imgInput, out)
class CRNN:
    def __init__(self, imgH, nc, nclass, nh, leakyRelu=False, lstmFlag=True, GPU=False, alphabet=None):
        self.model = keras_crnn(imgH, nc, nclass, nh, leakyRelu=leakyRelu, lstmFlag=lstmFlag)
        self.alphabet = alphabet

    def load_weights(self, path):
        self.model.load_weights(path)

    def predict(self, image):
        image = resizeNormalize(image, 32)
        image = image.astype(np.float32)
        image = np.array([[image]])
        global graph
        with graph.as_default():
            preds = self.model.predict(image)
        #preds = preds[0]
        preds = np.argmax(preds, axis=2).reshape((-1,))
        raw = strLabelConverter(preds, self.alphabet)
        return raw

    def predict_job(self, boxes):
        n = len(boxes)
        for i in range(n):
            boxes[i]['text'] = self.predict(boxes[i]['img'])
        return boxes

    def predict_batch(self, boxes, batch_size=1):
        """
        predict on batch
        """
        N = len(boxes)
        res = []
        imgW = 0
        batch = N // batch_size
        if batch * batch_size != N:
            batch += 1
        for i in range(batch):
            tmpBoxes = boxes[i * batch_size:(i + 1) * batch_size]
            imageBatch = []
            imgW = 0
            for box in tmpBoxes:
                img = box['img']
                image = resizeNormalize(img, 32)
                h, w = image.shape[:2]
                imgW = max(imgW, w)
                imageBatch.append(np.array([image]))

            imageArray = np.zeros((len(imageBatch), 1, 32, imgW), dtype=np.float32)
            n = len(imageArray)
            for j in range(n):
                _, h, w = imageBatch[j].shape
                imageArray[j][:, :, :w] = imageBatch[j]

            global graph
            with graph.as_default():
                preds = self.model.predict(imageArray, batch_size=batch_size)
            preds = preds.argmax(axis=2)
            n = preds.shape[0]
            for j in range(n):
                res.append(strLabelConverter(preds[j, ].tolist(), self.alphabet))

        for i in range(N):
            boxes[i]['text'] = res[i]
        return boxes
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
torch ocr model
@author: chineseocr
"""
import numpy as np
from PIL import Image
import torch.nn as nn
import torch
from collections import OrderedDict
from torch.autograd import Variable
from crnn.util import resizeNormalize, strLabelConverter


class BidirectionalLSTM(nn.Module):
    def __init__(self, nIn, nHidden, nOut):
@@ -18,7 +31,7 @@ class BidirectionalLSTM(nn.Module):

class CRNN(nn.Module):
    def __init__(self, imgH, nc, nclass, nh, leakyRelu=False, lstmFlag=True, GPU=False, alphabet=None):
        """
        whether to include the LSTM feature layer
        """
@@ -30,7 +43,8 @@ class CRNN(nn.Module):
        ss = [1, 1, 1, 1, 1, 1, 1]
        nm = [64, 128, 256, 256, 512, 512, 512]
        self.lstmFlag = lstmFlag
        self.GPU = GPU
        self.alphabet = alphabet
        cnn = nn.Sequential()

        def convRelu(i, batchNormalization=False):
@@ -80,13 +94,107 @@ class CRNN(nn.Module):
        if self.lstmFlag:
            # rnn features
            output = self.rnn(conv)
            T, b, h = output.size()
            output = output.view(T, b, -1)
        else:
            T, b, h = conv.size()
            t_rec = conv.contiguous().view(T * b, h)
            output = self.linear(t_rec)  # [T * b, nOut]
            output = output.view(T, b, -1)

        return output

    def load_weights(self, path):
        trainWeights = torch.load(path, map_location=lambda storage, loc: storage)
        modelWeights = OrderedDict()
        for k, v in trainWeights.items():
            name = k.replace('module.', '')  # remove `module.`
            modelWeights[name] = v
        self.load_state_dict(modelWeights)
        if torch.cuda.is_available() and self.GPU:
            self.cuda()
        self.eval()

    def predict(self, image):
        image = resizeNormalize(image, 32)
        image = image.astype(np.float32)
        image = torch.from_numpy(image)
        if torch.cuda.is_available() and self.GPU:
            image = image.cuda()
        else:
            image = image.cpu()
        image = image.view(1, 1, *image.size())
        image = Variable(image)
        preds = self(image)
        _, preds = preds.max(2)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        raw = strLabelConverter(preds, self.alphabet)
        return raw

    def predict_job(self, boxes):
        n = len(boxes)
        for i in range(n):
            boxes[i]['text'] = self.predict(boxes[i]['img'])
        return boxes

    def predict_batch(self, boxes, batch_size=1):
        """
        predict on batch
        """
        N = len(boxes)
        res = []
        imgW = 0
        batch = N // batch_size
        if batch * batch_size != N:
            batch += 1
        for i in range(batch):
            tmpBoxes = boxes[i * batch_size:(i + 1) * batch_size]
            imageBatch = []
            imgW = 0
            for box in tmpBoxes:
                img = box['img']
                image = resizeNormalize(img, 32)
                h, w = image.shape[:2]
                imgW = max(imgW, w)
                imageBatch.append(np.array([image]))

            imageArray = np.zeros((len(imageBatch), 1, 32, imgW), dtype=np.float32)
            n = len(imageArray)
            for j in range(n):
                _, h, w = imageBatch[j].shape
                imageArray[j][:, :, :w] = imageBatch[j]

            image = torch.from_numpy(imageArray)
            image = Variable(image)
            if torch.cuda.is_available() and self.GPU:
                image = image.cuda()
            else:
                image = image.cpu()

            preds = self(image)
            preds = preds.argmax(2)
            n = preds.shape[1]
            for j in range(n):
                res.append(strLabelConverter(preds[:, j], self.alphabet))

        for i in range(N):
            boxes[i]['text'] = res[i]
        return boxes
\ No newline at end of file
#!/usr/bin/python
# encoding: utf-8
import numpy as np
from PIL import Image


def resizeNormalize(img, imgH=32):
    scale = img.size[1] * 1.0 / imgH
    w = img.size[0] / scale
    w = int(w)
    img = img.resize((w, imgH), Image.BILINEAR)
    w, h = img.size
    img = (np.array(img) / 255.0 - 0.5) / 0.5
    return img


def strLabelConverter(res, alphabet):
    N = len(res)
    raw = []
    for i in range(N):
        if res[i] != 0 and (not (i > 0 and res[i - 1] == res[i])):
            raw.append(alphabet[res[i] - 1])
    return ''.join(raw)
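strLabelConverter is a greedy CTC decode: index 0 is the blank, consecutive repeats collapse, and index i maps to alphabet[i-1]. A worked example:

```python
alphabet = 'abc'
# per-timestep argmax: blank, 'a', 'a', blank, 'b', 'b', 'c'
print(strLabelConverter([0, 1, 1, 0, 2, 2, 3], alphabet))  # -> 'abc'
```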
#!/usr/bin/python
# encoding: utf-8
from PIL import Image
import numpy as np


class strLabelConverter(object):

    def __init__(self, alphabet):
        self.alphabet = alphabet + 'ç'  # for `-1` index
        self.dict = {}
        for i, char in enumerate(alphabet):
            # NOTE: 0 is reserved for 'blank' required by wrap_ctc
            self.dict[char] = i + 1

    def decode(self, res):
        N = len(res)
        raw = []
        for i in range(N):
            if res[i] != 0 and (not (i > 0 and res[i - 1] == res[i])):
                raw.append(self.alphabet[res[i] - 1])
        return ''.join(raw)


class resizeNormalize(object):

    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation

    def __call__(self, img):
        size = self.size
        imgW, imgH = size
        scale = img.size[1] * 1.0 / imgH
        w = img.size[0] / scale
        w = int(w)
        img = img.resize((w, imgH), self.interpolation)
        w, h = img.size
        img = (np.array(img) / 255.0 - 0.5) / 0.5
        return img
\ No newline at end of file
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 4 01:01:37 2019
main
@author: chineseocr
"""
from text.detector.detectors import TextDetector
from apphelper.image import rotate_cut_img, sort_box
import numpy as np
from PIL import Image


class TextOcrModel(object):
    def __init__(self, ocrModel, textModel, angleModel):
        self.ocrModel = ocrModel
        self.textModel = textModel
        self.angleModel = angleModel

    def detect_angle(self, img):
        """
        detect text angle in [0,90,180,270]
        @@img: np.array
        """
        angle = self.angleModel(img)
        if angle == 90:
            im = Image.fromarray(img).transpose(Image.ROTATE_90)
            img = np.array(im)
        elif angle == 180:
            im = Image.fromarray(img).transpose(Image.ROTATE_180)
            img = np.array(im)
        elif angle == 270:
            im = Image.fromarray(img).transpose(Image.ROTATE_270)
            img = np.array(im)
        return img, angle

    def detect_box(self, img, scale=600, maxScale=900):
        """
        detect text boxes
        @@img: np.array
        """
        boxes, scores = self.textModel(img, scale, maxScale)
        return boxes, scores

    def box_cluster(self, img, boxes, scores, **args):
        MAX_HORIZONTAL_GAP = args.get('MAX_HORIZONTAL_GAP', 100)
        MIN_V_OVERLAPS = args.get('MIN_V_OVERLAPS', 0.6)
        MIN_SIZE_SIM = args.get('MIN_SIZE_SIM', 0.6)
        textdetector = TextDetector(MAX_HORIZONTAL_GAP, MIN_V_OVERLAPS, MIN_SIZE_SIM)

        shape = img.shape[:2]
        TEXT_PROPOSALS_MIN_SCORE = args.get('TEXT_PROPOSALS_MIN_SCORE', 0.7)
        TEXT_PROPOSALS_NMS_THRESH = args.get('TEXT_PROPOSALS_NMS_THRESH', 0.3)
        TEXT_LINE_NMS_THRESH = args.get('TEXT_LINE_NMS_THRESH', 0.3)
        LINE_MIN_SCORE = args.get('LINE_MIN_SCORE', 0.8)
        boxes, scores = textdetector.detect(boxes,
                                            scores[:, np.newaxis],
                                            shape,
                                            TEXT_PROPOSALS_MIN_SCORE,
                                            TEXT_PROPOSALS_NMS_THRESH,
                                            TEXT_LINE_NMS_THRESH,
                                            LINE_MIN_SCORE
                                            )
        return boxes, scores

    def ocr_batch(self, img, boxes, leftAdjustAlph=0.0, rightAdjustAlph=0.0):
        """
        batch OCR over the detected boxes
        """
        im = Image.fromarray(img)
        newBoxes = []
        for index, box in enumerate(boxes):
            partImg, box = rotate_cut_img(im, box, leftAdjustAlph, rightAdjustAlph)
            box['img'] = partImg.convert('L')
            newBoxes.append(box)

        res = self.ocrModel(newBoxes)
        return res

    def model(self, img, **args):
        detectAngle = args.get('detectAngle', False)
        if detectAngle:
            img, angle = self.detect_angle(img)
        else:
            angle = 0

        scale = args.get('scale', 608)
        maxScale = args.get('maxScale', 608)
        boxes, scores = self.detect_box(img, scale, maxScale)  ## text detection
        boxes, scores = self.box_cluster(img, boxes, scores, **args)
        boxes = sort_box(boxes)
        leftAdjustAlph = args.get('leftAdjustAlph', 0)
        rightAdjustAlph = args.get('rightAdjustAlph', 0)
        res = self.ocr_batch(img, boxes, leftAdjustAlph, rightAdjustAlph)
        return res, angle
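Put together as app.py does, the pipeline reads roughly as follows (a sketch; the detector imports assume the opencv engines, and `crnn` is the recognizer instance built from config):

```python
import cv2
from main import TextOcrModel
from text.opencv_dnn_detect import text_detect, angle_detect

model = TextOcrModel(crnn.predict_job, text_detect, angle_detect)
img = cv2.imread('test/demo.png')  # hypothetical test image
res, angle = model.model(img, detectAngle=True, scale=608, maxScale=608)
for line in res:
    print(line['text'])
```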
# -*- coding: utf-8 -*-
from config import opencvFlag,GPU,IMGSIZE,ocrFlag
if not GPU:
import os
os.environ["CUDA_VISIBLE_DEVICES"]=''##不启用GPU
if ocrFlag=='torch':
from crnn.crnn_torch import crnnOcr as crnnOcr ##torch版本ocr
elif ocrFlag=='keras':
from crnn.crnn_keras import crnnOcr as crnnOcr ##keras版本OCR
import time
import cv2
import numpy as np
from PIL import Image
from glob import glob
from text.detector.detectors import TextDetector
from apphelper.image import get_boxes,letterbox_image
from text.opencv_dnn_detect import angle_detect##text-direction detection, dnn/tensorflow backends
from apphelper.image import estimate_skew_angle ,rotate_cut_img,xy_rotate_box,sort_box,box_rotate,solve
if opencvFlag=='opencv':
from text import opencv_dnn_detect as detect ##opencv dnn model for darknet
elif opencvFlag=='darknet':
from text import darknet_detect as detect
else:
    ## keras version of the text detector
from text import keras_detect as detect
print("Text detect engine:{}".format(opencvFlag))
def text_detect(img,
MAX_HORIZONTAL_GAP=30,
MIN_V_OVERLAPS=0.6,
MIN_SIZE_SIM=0.6,
TEXT_PROPOSALS_MIN_SCORE=0.7,
TEXT_PROPOSALS_NMS_THRESH=0.3,
TEXT_LINE_NMS_THRESH = 0.3,
):
boxes, scores = detect.text_detect(np.array(img))
boxes = np.array(boxes,dtype=np.float32)
scores = np.array(scores,dtype=np.float32)
textdetector = TextDetector(MAX_HORIZONTAL_GAP,MIN_V_OVERLAPS,MIN_SIZE_SIM)
shape = img.shape[:2]
boxes = textdetector.detect(boxes,
scores[:, np.newaxis],
shape,
TEXT_PROPOSALS_MIN_SCORE,
TEXT_PROPOSALS_NMS_THRESH,
TEXT_LINE_NMS_THRESH,
)
text_recs = get_boxes(boxes)
newBox = []
rx = 1
ry = 1
for box in text_recs:
x1,y1 = (box[0],box[1])
x2,y2 = (box[2],box[3])
x3,y3 = (box[6],box[7])
x4,y4 = (box[4],box[5])
newBox.append([x1*rx,y1*ry,x2*rx,y2*ry,x3*rx,y3*ry,x4*rx,y4*ry])
return newBox
def crnnRec(im,boxes,leftAdjust=False,rightAdjust=False,alph=0.2,f=1.0):
"""
    crnn model, OCR recognition
    leftAdjust,rightAdjust: whether to widen the box left/right to absorb boundary error and avoid missed characters
"""
results = []
im = Image.fromarray(im)
for index,box in enumerate(boxes):
degree,w,h,cx,cy = solve(box)
partImg,newW,newH = rotate_cut_img(im,degree,box,w,h,leftAdjust,rightAdjust,alph)
text = crnnOcr(partImg.convert('L'))
if text.strip()!=u'':
results.append({'cx':cx*f,'cy':cy*f,'text':text,'w':newW*f,'h':newH*f,'degree':degree*180.0/np.pi})
return results
def eval_angle(im,detectAngle=False):
"""
    estimate the image skew angle
    @@param:im
    @@param:detectAngle whether to detect text orientation
"""
angle = 0
img = np.array(im)
if detectAngle:
        angle = angle_detect(img=np.copy(img))##text orientation detection
if angle==90:
im = Image.fromarray(im).transpose(Image.ROTATE_90)
elif angle==180:
im = Image.fromarray(im).transpose(Image.ROTATE_180)
elif angle==270:
im = Image.fromarray(im).transpose(Image.ROTATE_270)
img = np.array(im)
return angle,img
def model(img,detectAngle=False,config={},leftAdjust=False,rightAdjust=False,alph=0.2):
"""
@@param:img,
    @@param:detectAngle, whether to detect text orientation
"""
    angle,img = eval_angle(img,detectAngle=detectAngle)##text orientation detection
if opencvFlag!='keras':
img,f =letterbox_image(Image.fromarray(img), IMGSIZE)## pad
img = np.array(img)
else:
        f=1.0##keep box coordinates consistent with the original image
config['img'] = img
    text_recs = text_detect(**config)##text detection
    newBox = sort_box(text_recs)##sort boxes top-to-bottom, left-to-right before line recognition
result = crnnRec(np.array(img),newBox,leftAdjust,rightAdjust,alph,1.0/f)
return img,result,angle
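# A hedged driver for the legacy pipeline above (placeholder image path):
if __name__ == '__main__':
    img = np.array(Image.open('test/demo.png').convert('RGB'))
    drawImg, result, angle = model(img,
                                   detectAngle=True,
                                   config=dict(MAX_HORIZONTAL_GAP=30,
                                               MIN_V_OVERLAPS=0.6,
                                               MIN_SIZE_SIM=0.6,
                                               TEXT_PROPOSALS_MIN_SCORE=0.7,
                                               TEXT_PROPOSALS_NMS_THRESH=0.3,
                                               TEXT_LINE_NMS_THRESH=0.3),
                                   leftAdjust=True, rightAdjust=True, alph=0.2)
    for line in result:
        print(line['text'], line['cx'], line['cy'], line['degree'])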
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 22 02:02:29 2019
job
@author: chineseocr
"""
from helper.redisbase import redisDataBase
from config import *
from crnn.keys import alphabetChinese,alphabetEnglish
if ocrFlag=='keras':
if GPU:
os.environ["CUDA_VISIBLE_DEVICES"] = str(GPUID)
import tensorflow as tf
from keras import backend as K
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC'
        config.gpu_options.per_process_gpu_memory_fraction = 0.1##max fraction of GPU memory to occupy
        config.gpu_options.allow_growth = True##allow GPU memory usage to grow dynamically
K.set_session(tf.Session(config=config))
K.get_session().run(tf.global_variables_initializer())
else:
        ##CPU startup
os.environ["CUDA_VISIBLE_DEVICES"] = ''
if ocrFlag=='keras':
from crnn.network_keras import CRNN
if chineseModel:
alphabet = alphabetChinese
if LSTMFLAG:
ocrModel = ocrModelKerasLstm
else:
ocrModel = ocrModelKerasDense
else:
ocrModel = ocrModelKerasEng
alphabet = alphabetEnglish
LSTMFLAG = True
else:
from crnn.network_torch import CRNN
if chineseModel:
alphabet = alphabetChinese
if LSTMFLAG:
ocrModel = ocrModelTorchLstm
else:
ocrModel = ocrModelTorchDense
else:
ocrModel = ocrModelTorchEng
alphabet = alphabetEnglish
LSTMFLAG = True
nclass = len(alphabet)+1
ocr = CRNN( 32, 1, nclass, 256, leakyRelu=False,lstmFlag=LSTMFLAG,GPU=GPU,alphabet=alphabet)
if os.path.exists(ocrModel):
ocr.load_weights(ocrModel)
else:
print("download model or tranform model with tools!")
if __name__=='__main__':
redisJob = redisDataBase()
while True:
redisJob.get_job(ocr.predict)
##CPU environment setup, supports linux\macOs
conda create -n chineseocr python=3.6 pip scipy numpy jupyter ipython ##create the python environment with conda
source activate chineseocr
git submodule init && git submodule update
cd darknet/ && make && cd ..
pip install easydict opencv-contrib-python==4.0.0.21 Cython h5py lmdb mahotas pandas requests bs4 matplotlib lxml -i https://pypi.tuna.tsinghua.edu.cn/simple/
pip install -U pillow -i https://pypi.tuna.tsinghua.edu.cn/simple/
pip install keras==2.1.5 tensorflow==1.8
conda install pytorch torchvision -c pytorch
## linux
## conda install pytorch-cpu torchvision-cpu -c pytorch
##GPU environment setup
pip install keras==2.1.5 tensorflow==1.8 tensorflow-gpu==1.8
pip install web.py==0.40.dev0
conda install pytorch torchvision -c pytorch
## pip install torch torchvision
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
pwd = os.getcwd()
import numpy as np
from PIL import Image
from config import yoloCfg,yoloWeights,yoloData,darknetRoot,GPU,GPUID
os.chdir(darknetRoot)
......
def detect_np(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
    ......
    dn.free_detections(dets, num)
    return res

def to_box(r):
    boxes = []
    scores = []
    for rc in r:
        if rc[0]==b'text':
            cx,cy,w,h = rc[-1]
            scores.append(rc[1])
            xmin,ymin,xmax,ymax = cx-w/2,cy-h/2,cx+w/2,cy+h/2
            boxes.append([int(xmin),int(ymin),int(xmax),int(ymax)])
    return boxes,scores

if GPU:
    try:
        dn.set_gpu(GPUID)
    except:
        pass
net = dn.load_net(yoloCfg.encode('utf-8'), yoloWeights.encode('utf-8'), 0)
meta = dn.load_meta(yoloData.encode('utf-8'))
os.chdir(pwd)

def text_detect(img,scale,maxScale,prob = 0.05):
    r = detect_np(net, meta, img,thresh=prob, hier_thresh=0.5, nms=None)##emit all boxes, consistent with the opencv dnn detector
    bboxes = to_box(r)
    return bboxes
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from text.detector.nms import nms,rotate_nms
from apphelper.image import get_boxes
from text.detector.text_proposal_connector import TextProposalConnector

def normalize(data):
    if data.shape[0]==0:
        return data
    max_=data.max()
    min_=data.min()
    return (data-min_)/(max_-min_) if max_-min_!=0 else data-min_

class TextDetector:
    """
    Detect text from an image
    """
    def __init__(self,MAX_HORIZONTAL_GAP,MIN_V_OVERLAPS,MIN_SIZE_SIM):
        """
        pass
        """
        self.text_proposal_connector=TextProposalConnector(MAX_HORIZONTAL_GAP,MIN_V_OVERLAPS,MIN_SIZE_SIM)

    def detect(self, text_proposals,scores,size,
               TEXT_PROPOSALS_MIN_SCORE=0.7,
               TEXT_PROPOSALS_NMS_THRESH=0.3,
               TEXT_LINE_NMS_THRESH = 0.3,
               LINE_MIN_SCORE=0.8
               ):
        """
        Detecting texts from an image
        :return: the bounding boxes of the detected texts
        @@param:TEXT_PROPOSALS_MIN_SCORE=0.7##score threshold for filtering character boxes
        @@param:TEXT_PROPOSALS_NMS_THRESH=0.3##nms threshold for removing duplicate character boxes
        @@param:TEXT_LINE_NMS_THRESH=0.3##nms threshold for removing duplicate text lines
        @@param:LINE_MIN_SCORE##text line confidence threshold
        """
        keep_inds=np.where(scores>TEXT_PROPOSALS_MIN_SCORE)[0]##drop low-score proposals
        text_proposals, scores=text_proposals[keep_inds], scores[keep_inds]
        sorted_indices=np.argsort(scores.ravel())[::-1]
        text_proposals, scores=text_proposals[sorted_indices], scores[sorted_indices]
        # nms for text proposals
        if len(text_proposals)>0:
            text_proposals, scores = nms(text_proposals,scores,TEXT_PROPOSALS_MIN_SCORE,TEXT_PROPOSALS_NMS_THRESH)
            scores=normalize(scores)
            text_lines,scores = self.text_proposal_connector.get_text_lines(text_proposals, scores, size)##merge proposals into text lines
            text_lines = get_boxes(text_lines)
            text_lines, scores = rotate_nms(text_lines,scores,LINE_MIN_SCORE,TEXT_LINE_NMS_THRESH)
            return text_lines,scores
        else:
            return [],[]##mirror the two-value return of the non-empty branch so callers can always unpack
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 4 00:28:01 2019
replace gpu/python/cython nms with cv2.dnn.NMS
@author: chineseocr
"""
import cv2
from apphelper.image import solve
def nms(boxes, scores, score_threshold=0.5, nms_threshold=0.3):
def box_to_center(box):
xmin,ymin,xmax,ymax = [round(float(x),4) for x in box]
w = xmax-xmin
h = ymax-ymin
return [round(xmin,4),round(ymin,4),round(w,4),round(h,4)]
newBoxes = [ box_to_center(box) for box in boxes]
newscores = [ round(float(x),6) for x in scores]
    index = cv2.dnn.NMSBoxes(newBoxes, newscores, score_threshold=score_threshold, nms_threshold=nms_threshold)
    if len(index)>0:
        index = index.reshape((-1,))
        return boxes[index],scores[index]
    else:
        return [],[]##cv2.dnn.NMSBoxes returns an empty tuple when nothing survives; guard like rotate_nms
def rotate_nms(boxes, scores, score_threshold=0.5, nms_threshold=0.3):
"""
boxes.append((center, (w,h), angle * 180.0 / math.pi))
box:x1,y1,x2,y2,x3,y3,x4,y4
"""
def rotate_box(box):
angle,w,h,cx,cy = solve(box)
angle = round(angle,4)
w = round(w,4)
h = round(h,4)
cx = round(cx,4)
cy = round(cy,4)
return ((cx,cy),(w,h),angle)
newboxes = [rotate_box(box) for box in boxes]
newscores = [ round(float(x),6) for x in scores]
index = cv2.dnn.NMSBoxesRotated(newboxes, newscores, score_threshold=score_threshold, nms_threshold=nms_threshold)
if len(index)>0:
index = index.reshape((-1,))
return boxes[index],scores[index]
else:
return [],[]
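# Sanity check on synthetic boxes: nms takes axis-aligned xmin,ymin,xmax,ymax
# rows plus scores (rotate_nms takes 8-value quads); the near-duplicate
# lower-scored box is suppressed.
if __name__ == '__main__':
    import numpy as np
    boxes = np.array([[10, 10, 110, 40],
                      [12, 11, 112, 41],      # near-duplicate of box 0
                      [200, 10, 300, 40]], dtype=np.float32)
    scores = np.array([0.9, 0.8, 0.7], dtype=np.float32)
    keep_boxes, keep_scores = nms(boxes, scores, score_threshold=0.5, nms_threshold=0.3)
    print(len(keep_boxes))                    # 2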
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from text.detector.text_proposal_graph_builder import TextProposalGraphBuilder
......
        tp_groups=self.group_text_proposals(text_proposals, scores, im_size)##find the text line
        text_lines=np.zeros((len(tp_groups), 8), np.float32)
        newscores =np.zeros((len(tp_groups), ), np.float32)
        for index, tp_indices in enumerate(tp_groups):
            text_line_boxes=text_proposals[list(tp_indices)]
            #num = np.size(text_line_boxes)##find
            ......
            text_lines[index, 6]=z1[1]
            height = np.mean( (text_line_boxes[:,3]-text_line_boxes[:,1]) )
            text_lines[index, 7]= height + 2.5
            newscores[index] = score
        return text_lines,newscores
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np

class Graph:
    def __init__(self, graph):
......
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import numpy as np
cimport numpy as np
cdef inline np.float32_t max(np.float32_t a, np.float32_t b):
return a if a >= b else b
cdef inline np.float32_t min(np.float32_t a, np.float32_t b):
return a if a <= b else b
def nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh):
cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0]
cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1]
cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2]
cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3]
cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4]
cdef np.ndarray[np.float32_t, ndim=1] areas = (x2 - x1 + 1) * (y2 - y1 + 1)
cdef np.ndarray[np.int64_t, ndim=1] order = scores.argsort()[::-1]
cdef int ndets = dets.shape[0]
cdef np.ndarray[np.int_t, ndim=1] suppressed = \
np.zeros((ndets), dtype=np.int)
# nominal indices
cdef int _i, _j
# sorted indices
cdef np.int64_t i, j
# temp variables for box i's (the box currently under consideration)
cdef np.float32_t ix1, iy1, ix2, iy2, iarea
# variables for computing overlap with box j (lower scoring box)
cdef np.float32_t xx1, yy1, xx2, yy2
cdef np.float32_t w, h
cdef np.float32_t inter, ovr
keep = []
for _i in range(ndets):
i = order[_i]
if suppressed[i] == 1:
continue
keep.append(i)
ix1 = x1[i]
iy1 = y1[i]
ix2 = x2[i]
iy2 = y2[i]
iarea = areas[i]
for _j in range(_i + 1, ndets):
j = order[_j]
if suppressed[j] == 1:
continue
xx1 = max(ix1, x1[j])
yy1 = max(iy1, y1[j])
xx2 = min(ix2, x2[j])
yy2 = min(iy2, y2[j])
w = max(0.0, xx2 - xx1 + 1)
h = max(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (iarea + areas[j] - inter)
if ovr >= thresh:
suppressed[j] = 1
return keep
def nms_new(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh):
cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0]
cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1]
cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2]
cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3]
cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4]
cdef np.ndarray[np.float32_t, ndim=1] areas = (x2 - x1 + 1) * (y2 - y1 + 1)
cdef np.ndarray[np.int_t, ndim=1] order = scores.argsort()[::-1]
cdef int ndets = dets.shape[0]
cdef np.ndarray[np.int_t, ndim=1] suppressed = \
np.zeros((ndets), dtype=np.int)
# nominal indices
cdef int _i, _j
# sorted indices
cdef int i, j
# temp variables for box i's (the box currently under consideration)
cdef np.float32_t ix1, iy1, ix2, iy2, iarea
# variables for computing overlap with box j (lower scoring box)
cdef np.float32_t xx1, yy1, xx2, yy2
cdef np.float32_t w, h
cdef np.float32_t inter, ovr
keep = []
for _i in range(ndets):
i = order[_i]
if suppressed[i] == 1:
continue
keep.append(i)
ix1 = x1[i]
iy1 = y1[i]
ix2 = x2[i]
iy2 = y2[i]
iarea = areas[i]
for _j in range(_i + 1, ndets):
j = order[_j]
if suppressed[j] == 1:
continue
xx1 = max(ix1, x1[j])
yy1 = max(iy1, y1[j])
xx2 = min(ix2, x2[j])
yy2 = min(iy2, y2[j])
w = max(0.0, xx2 - xx1 + 1)
h = max(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (iarea + areas[j] - inter)
ovr1 = inter / iarea
ovr2 = inter / areas[j]
if ovr >= thresh or ovr1 > 0.95 or ovr2 > 0.95:
suppressed[j] = 1
return keep
void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
int boxes_dim, float nms_overlap_thresh, int device_id);
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import numpy as np
cimport numpy as np
assert sizeof(int) == sizeof(np.int32_t)
cdef extern from "gpu_nms.hpp":
void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int)
def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh,
np.int32_t device_id=0):
cdef int boxes_num = dets.shape[0]
cdef int boxes_dim = dets.shape[1]
cdef int num_out
cdef np.ndarray[np.int32_t, ndim=1] \
keep = np.zeros(boxes_num, dtype=np.int32)
cdef np.ndarray[np.float32_t, ndim=1] \
scores = dets[:, 4]
cdef np.ndarray[np.int_t, ndim=1] \
order = scores.argsort()[::-1]
cdef np.ndarray[np.float32_t, ndim=2] \
sorted_dets = dets[order, :]
_nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id)
keep = keep[:num_out]
return list(order[keep])
@echo off
cython cython_nms.pyx
python setup_cpu_win.py build_ext --inplace
MOVE /Y utils\*.* .\
RMDIR /S /Q build
RMDIR /S /Q utils
cython cython_nms.pyx
python setup_cpu.py build_ext --inplace
mv utils/* ./
rm -rf build
rm -rf utils
cython cython_nms.pyx
cython gpu_nms.pyx
python setup.py build_ext --inplace
mv utils/* ./
rm -rf build
rm -rf utils
// ------------------------------------------------------------------
// Faster R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Shaoqing Ren
// ------------------------------------------------------------------
#include "gpu_nms.hpp"
#include <vector>
#include <iostream>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _set_device(int device_id) {
int current_device;
CUDA_CHECK(cudaGetDevice(&current_device));
if (current_device == device_id) {
return;
}
// The call to cudaSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CUDA_CHECK(cudaSetDevice(device_id));
}
void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
int boxes_dim, float nms_overlap_thresh, int device_id) {
_set_device(device_id);
float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
CUDA_CHECK(cudaMalloc(&boxes_dev,
boxes_num * boxes_dim * sizeof(float)));
CUDA_CHECK(cudaMemcpy(boxes_dev,
boxes_host,
boxes_num * boxes_dim * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
CUDA_CHECK(cudaFree(boxes_dev));
CUDA_CHECK(cudaFree(mask_dev));
}
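# The CUDA kernel above emits, per box, a bitmask of later boxes it suppresses;
# the host then walks boxes in score order, OR-ing masks into a running bitmap.
# A hedged Python rendering of that host-side reduction (`mask[i][j]` stands in
# for the j-th 64-bit word the kernel wrote for box i):
def _reduce_masks_sketch(mask, n_boxes, threads_per_block=64):
    col_blocks = (n_boxes + threads_per_block - 1) // threads_per_block
    remv = [0] * col_blocks              # bitmap of already-suppressed boxes
    keep = []
    for i in range(n_boxes):
        nblock, inblock = divmod(i, threads_per_block)
        if not (remv[nblock] >> inblock) & 1:
            keep.append(i)               # box i survives
            for j in range(nblock, col_blocks):
                remv[j] |= mask[i][j]    # suppress everything box i overlaps
    return keep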
import numpy as np
def nms(boxes, threshold, method='Union'):
if boxes.size==0:
return np.empty((0,3))
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
s = boxes[:,4]
area = (x2-x1+1) * (y2-y1+1)
I = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while I.size>0:
i = I[-1]
pick[counter] = i
counter += 1
idx = I[0:-1]
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
w = np.maximum(0.0, xx2-xx1+1)
h = np.maximum(0.0, yy2-yy1+1)
inter = w * h
        if method == 'Min':##compare strings with ==, not identity
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
I = I[np.where(o<=threshold)]
pick = pick[0:counter]
return pick
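# Illustrative run of this pure-Python fallback; dets rows are x1,y1,x2,y2,score:
if __name__ == '__main__':
    dets = np.array([[10, 10, 110, 40, 0.9],
                     [12, 11, 112, 41, 0.8],   # near-duplicate, lower score
                     [200, 10, 300, 40, 0.7]], dtype=np.float32)
    print(sorted(nms(dets, 0.3, method='Union')))   # [0, 2]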
from Cython.Build import cythonize
import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
def find_in_path(name, path):
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
def locate_cuda():
# first check if the CUDAHOME env variable is in use
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
# otherwise, search the PATH for NVCC
default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be '
'located in your $PATH. Either add it to your path, or set $CUDAHOME')
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home':home, 'nvcc':nvcc,
'include': pjoin(home, 'include'),
'lib64': pjoin(home, 'lib64')}
for k, v in cudaconfig.items():
#for k, v in cudaconfig.iteritems():
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
return cudaconfig
CUDA = locate_cuda()
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
self.src_extensions.append('.cu')
default_compiler_so = self.compiler_so
super = self._compile
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
print(extra_postargs)
if os.path.splitext(src)[1] == '.cu':
# use the cuda for .cu files
self.set_executable('compiler_so', CUDA['nvcc'])
# use only a subset of the extra_postargs, which are 1-1 translated
# from the extra_compile_args in the Extension class
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['gcc']
super(obj, src, ext, cc_args, postargs, pp_opts)
# reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# inject our redefined _compile method into the class
self._compile = _compile
# run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
ext_modules = [
Extension(
"utils.cython_nms",
["cython_nms.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs = [numpy_include]
),
Extension('utils.gpu_nms',
['nms_kernel.cu', 'gpu_nms.pyx'],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
extra_compile_args={'gcc': ["-Wno-unused-function"],
'nvcc': ['-arch=sm_35',
'--ptxas-options=-v',
'-c',
'--compiler-options',
"'-fPIC'"]},
include_dirs = [numpy_include, CUDA['include']]
),
]
setup(
ext_modules=ext_modules,
cmdclass={'build_ext': custom_build_ext},
)
from Cython.Build import cythonize
import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
def find_in_path(name, path):
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
def locate_cuda():
# first check if the CUDAHOME env variable is in use
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
# otherwise, search the PATH for NVCC
default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be '
'located in your $PATH. Either add it to your path, or set $CUDAHOME')
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home':home, 'nvcc':nvcc,
'include': pjoin(home, 'include'),
'lib64': pjoin(home, 'lib64')}
for k, v in cudaconfig.items():
#for k, v in cudaconfig.iteritems():
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
return cudaconfig
#CUDA = locate_cuda()
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
self.src_extensions.append('.cu')
default_compiler_so = self.compiler_so
super = self._compile
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
print(extra_postargs)
"""
if os.path.splitext(src)[1] == '.cu':
# use the cuda for .cu files
self.set_executable('compiler_so', CUDA['nvcc'])
# use only a subset of the extra_postargs, which are 1-1 translated
# from the extra_compile_args in the Extension class
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['gcc']
"""
postargs = extra_postargs['gcc']
super(obj, src, ext, cc_args, postargs, pp_opts)
# reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# inject our redefined _compile method into the class
self._compile = _compile
# run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
ext_modules = [
Extension(
"utils.cython_nms",
["cython_nms.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs = [numpy_include]
),
]
setup(
ext_modules=ext_modules,
cmdclass={'build_ext': custom_build_ext},
)
from Cython.Build import cythonize
import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
ext_modules = cythonize([
Extension(
"utils.cython_nms",
sources=["cython_nms.pyx"],
language="c",
include_dirs = [numpy_include],
library_dirs=[],
libraries=[],
extra_compile_args=[],
extra_link_args=[]
# extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
),
])
setup(
ext_modules = ext_modules
# ,
# cmdclass = {'build_ext': build_ext},
)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
YOLO_v3 Model Defined in Keras.
Reference: https://github.com/qqwweee/keras-yolo3.git
"""
from config import kerasTextModel,keras_anchors,class_names
from text.keras_yolo3 import yolo_text,box_layer,K
from apphelper.image import resize_im,letterbox_image
from PIL import Image
import numpy as np
import tensorflow as tf

graph = tf.get_default_graph()##work around web.py threading errors
anchors = [float(x) for x in keras_anchors.split(',')]
anchors = np.array(anchors).reshape(-1, 2)
num_anchors = len(anchors)
num_classes = len(class_names)
textModel = yolo_text(num_classes,anchors)
textModel.load_weights(kerasTextModel)

sess = K.get_session()
image_shape = K.placeholder(shape=(2, ))##original image size: h,w
input_shape = K.placeholder(shape=(2, ))##resized image size: h,w
box_score = box_layer([*textModel.output,image_shape,input_shape],anchors, num_classes)

def text_detect(img,scale,maxScale,prob = 0.05):
    im = Image.fromarray(img)
    w,h = im.size
    w_,h_ = resize_im(w,h, scale=scale, max_scale=2048)##short side resized to scale, long side capped at max_scale
    #boxed_image,f = letterbox_image(im, (w_,h_))
    ......
    image_data = np.array(boxed_image, dtype='float32')
    image_data /= 255.
    image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
    global graph
    with graph.as_default():
        ##use the module-level graph to avoid web.py threading errors
        ......
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
YOLO_v3 Model Defined in Keras.
Reference: https://github.com/qqwweee/keras-yolo3.git
"""
......
def box_iou(b1, b2):
    ......
    return iou

def box_layer(inputs,anchors,num_classes):
    y1,y2,y3,image_shape,input_shape = inputs
    ......

def yolo_text(num_classes,anchors,train=False):
    ......
    x = Concatenate()([x,darknet.layers[92].output])
    x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5))
    out = [y1,y2,y3]
    textModel = Model([imgInput],out)
    return textModel
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from config import yoloCfg,yoloWeights,AngleModelFlag
from config import AngleModelPb,AngleModelPbtxt
import numpy as np
import cv2
from apphelper.image import letterbox_image
if AngleModelFlag=='tf':
    ##convert to a tf model so the GPU can be used
    import tensorflow as tf
    from tensorflow.python.platform import gfile
    ......
    inputImg = sess.graph.get_tensor_by_name('input_1:0')
    predictions = sess.graph.get_tensor_by_name('predictions/Softmax:0')
    keep_prob = tf.placeholder(tf.float32)
else:
    angleNet = cv2.dnn.readNetFromTensorflow(AngleModelPb,AngleModelPbtxt)##dnn text-direction detection
textNet = cv2.dnn.readNetFromDarknet(yoloCfg,yoloWeights)##text localization

def text_detect(img,scale,maxScale,prob = 0.05):
    thresh = prob
    img_height,img_width = img.shape[:2]
    inputBlob,f = letterbox_image(img,(scale,scale))
    inputBlob = cv2.dnn.blobFromImage(inputBlob, scalefactor=1.0, size=(scale,scale),swapRB=True ,crop=False);
    textNet.setInput(inputBlob/255.0)
    outputName = textNet.getUnconnectedOutLayersNames()
    outputs = textNet.forward(outputName)
    ......
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > thresh:
                center_x = int(detection[0] * scale/f)
                center_y = int(detection[1] * scale/f)
                width = int(detection[2] * scale/f)
                height = int(detection[3] * scale/f)
                left = int(center_x - width / 2)
                top = int(center_y - height / 2)
                if class_id==1:
                    ......
                    confidences.append(float(confidence))
                    boxes.append([left, top,left+width, top+height ])
    boxes = np.array(boxes)
    confidences = np.array(confidences)
    return boxes,confidences

......
def angle_detect(img,adjust=True):
    """
    text direction detection
    """
    if AngleModelFlag=='tf':
        return angle_detect_tf(img,adjust=adjust)
    else:
        return angle_detect_dnn(img,adjust=adjust)
## Data comes from ICDAR 2019; download at http://rrc.cvc.uab.es/?ch=14
import numpy as np
import cv2
def polylines(img,points):
im = np.zeros(img.shape[:2], dtype = "uint8")
for point in points:
b = np.array([point],dtype = np.int32)
cv2.fillPoly(im, b, 255)
return im
def check_points(points,w,h):
    ##flag annotations with points outside the image
check=False
for point in points:
for x,y in point:
if x>w or y>h:
check=True
break
if check:
break
return check
def get_points(res):
points =[]
for line in res:
points.append(line['points'])
return points
def resize_im(img, scale=416, max_scale=608):
h,w = img.shape[:2]
f=float(scale)/min(h, w)
if max_scale is not None:
if f*max(h, w)>max_scale:
f=float(max_scale)/max(h, w)
newW,newH = int(w*f),int(h*f)
newW,newH = newW-(newW%32),newH-(newH%32)
fw = w/newW
fh = h/newH
tmpImg = cv2.resize(img, None, None, fx=1/fw, fy=1/fh, interpolation=cv2.INTER_LINEAR)
return tmpImg,fw,fh
def cleam_im(im):
avg = 127
im[im>avg]=255
im[im<=avg]=0
y,x = np.where(im==255)
xmin,ymin,xmax,ymax = (min(x),min(y),max(x),max(y))
return xmin,ymin,xmax,ymax
def adjust_height(h):
"""
    adjust the box height to the nearest predefined bucket
"""
heights = [11, 16, 23, 33, 48, 68, 97, 139, 198, 283]
N = len(heights)
for i in range(N-1):
if h<=heights[i]+heights[i]*0.44/2:
return heights[i]
return h
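# e.g. adjust_height(12) -> 11 (12 <= 11 + 11*0.44/2 = 13.42) and
# adjust_height(30) -> 33: the first bucket i with h <= heights[i]*1.22 wins.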
def img_split_to_box(im,splitW = 15,adjust=True):
"""
    split the box into equal-width pieces
"""
tmpIm = im==255
h,w = tmpIm.shape[:2]
num = w//splitW+1
box = []
for i in range(num-1):
xmin,ymin,xmax,ymax = splitW*i,0,splitW*(i+1),h
        ##iteratively search for the best ymin,ymax
childIm = tmpIm[ymin:ymax,xmin:xmax]
checkYmin = False
checkYmax = False
for j in range(ymax):
if not checkYmin:
if childIm[j].max():
ymin = j
checkYmin = True
if not checkYmax:
if childIm[ymax-j-1].max():
ymax = ymax-j
checkYmax = True
if adjust:
childH = ymax-ymin+1
cy = (ymax+ymin)/2
childH = adjust_height(childH)
ymin = cy-childH/2
ymax = cy+childH/2
box.append([xmin,ymin,xmax,ymax])
return box
def resize_img_box(p,scale=416, max_scale=608,splitW=15,adjust=True):
path = root.format(p)
img = cv2.imread(path)
if img is None:
return None,[]
points = get_points(train_labels[f'{p}'])
h,w = img.shape[:2]
check = check_points(points,w,h)
if check:
return None,[]
img,fw,fh = resize_im(img, scale=scale, max_scale=max_scale)
boxes = []
for point in points:
point = [[bx[0]/fw,bx[1]/fh] for bx in point]
im = polylines(img,[point])
if im.max()==0:
continue
xmin,ymin,xmax,ymax = cleam_im(im)
tmp = im[ymin:ymax,xmin:xmax]
box = img_split_to_box(tmp,splitW = splitW,adjust=adjust)
childBoxes = []
for bx in box:
xmin_,ymin_,xmax_,ymax_ = bx
xmin_,ymin_,xmax_,ymax_ = xmin+xmin_,ymin_+ymin,xmax_+xmin,ymax_+ymin
boxes.append([xmin_,ymin_,xmax_,ymax_])
return img,boxes
def convert(size, box):
"""
box = xmin,ymin,xmax,ymax
    convert to the cx,cy,w,h format required for darknet training
"""
dw = 1./(size[0])
dh = 1./(size[1])
x = (box[0] + box[2])/2.0 - 1
y = (box[1] + box[3])/2.0 - 1
w = box[2] - box[0]
h = box[3] - box[1]
x = x*dw
w = w*dw
y = y*dh
h = h*dh
return [str(x),str(y),str(w),str(h)]
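# Worked example with made-up numbers: for an 800x800 image and
# box = [100, 200, 300, 400], dw = dh = 1/800, center = (199, 299),
# w = h = 200, so convert((800,800), [100,200,300,400]) returns
# ['0.24875', '0.37375', '0.25', '0.25'].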
def convert_annotation(p,scale=608, max_scale=1024,splitW=8,adjust=False):
    img,boxes = resize_img_box(p,scale=scale, max_scale=max_scale,splitW=splitW,adjust=adjust)##pass max_scale through instead of shadowing it with scale
if img is None or len(boxes)==0:
return None,''
h,w = img.shape[:2]
newBoxes = []
for bx in boxes:
cls_id = 1
bb = convert((w,h), bx)
newBoxes.append(' '.join([str(cls_id)]+bb))
return img,'\n'.join(newBoxes)
def write_for_darknet(img,newBoxes,filename):
imgP = os.path.join(JPEGP,filename+'.jpg')
txtP = os.path.join(labelP,filename+'.txt')
cv2.imwrite(imgP,img)
with open(txtP,'w') as f:
f.write(newBoxes)
if __name__=='__main__':
import os
import json
    dataRoot = '/tmp/ICDR2019/'##directory holding the ICDAR 2019 data
    darknetRoot = '/tmp/darknet'##darknet directory
    wP = '/tmp/darknet53.conv.74'##darknet pretrained weights
root = dataRoot+'train_images/{}.jpg'
with open(dataRoot+'train_labels.json') as f:
train_labels = json.loads(f.read())
    ##create the VOC directory layout
labelP = os.path.join(darknetRoot,'VOCdevkit','VOC2007','labels')
JPEGP = os.path.join(darknetRoot,'VOCdevkit','VOC2007','JPEGImages')
if not os.path.exists(labelP):
os.makedirs(labelP)
if not os.path.exists(JPEGP):
os.makedirs(JPEGP)
for p in train_labels.keys():
img,newBoxes = convert_annotation(p,scale=608, max_scale=1024,splitW=8,adjust=True)
if img is None or len(newBoxes)==0:
continue
write_for_darknet(img,newBoxes,p)
    ##train/test split
from sklearn.model_selection import train_test_split
from glob import glob
jpgPaths = glob(os.path.join(JPEGP,'*.jpg'))
train,test = train_test_split(jpgPaths,test_size=0.1)
trainP = os.path.join(darknetRoot,'VOCdevkit','VOC2007','train.txt')
testP = os.path.join(darknetRoot,'VOCdevkit','VOC2007','test.txt')
with open(trainP,'w') as f:
f.write('\n'.join(train))
with open(testP,'w') as f:
f.write('\n'.join(test))
    ## generate voc.name and voc.data
vocDatap = os.path.join(darknetRoot,'VOCdevkit','VOC2007','voc.data')
vocNameP = os.path.join(darknetRoot,'VOCdevkit','VOC2007','voc.name')
with open(vocDatap,'w') as f:
f.write('classes= 2\n')
f.write('train = {}\n'.format(trainP))
f.write('valid = {}\n'.format(testP))
f.write('names ={}\n'.format(vocNameP))
f.write('backup = backup')
with open(vocNameP,'w') as f:
f.write('none\n')
f.write('text\n')
    ## generate the training script
chineseocrP=os.getcwd()
textCfg = os.path.join(chineseocrP,'models/text.cfg')
with open(os.path.join(chineseocrP,'train/darknet/','train.sh'),'w') as f:
f.write('cd {}\n'.format(darknetRoot))
f.write('./darknet detector train {} {} {}'.format(vocDatap,textCfg,wP))
    ## train the model
## Download the ICDAR 2019 dataset from http://rrc.cvc.uab.es/?ch=14
## Baidu pan mirror: https://pan.baidu.com/s/1fmOpTYFmZ4f2UxGDsmKVJA  password: sh4e
## Download the darknet pretrained weights
wget https://pjreddie.com/media/files/darknet53.conv.74
## Edit lines 166-168 of train/darknet/data-ready.py
dataRoot = '/tmp/ICDR2019/'##directory holding the ICDAR 2019 data
darknetRoot = '/tmp/darknet'##darknet directory
wP = '/tmp/darknet53.conv.74'##darknet pretrained weights
## Run python train/darknet/data-ready.py to generate training data in darknet format
## Train the model: sh train/darknet/train.sh
cd /tmp/darknet
./darknet detector train /tmp/darknet/VOCdevkit/VOC2007/voc.data /tmp/chineseocr/models/text.cfg /tmp/darknet53.conv.74
Halal
Flame
Grilled
Chicken
I.Dammam:
0138171856
I-Khobar
LG
<annotation>
<path>TB24y0WcqLN8KJjSZFGXXbjrVXa_!!2439342654.jpg.txt</path>
<folder/>
<filename>TB24y0WcqLN8KJjSZFGXXbjrVXa_!!2439342654</filename>
<source>
<database>Unknown</database>
</source>
<size>
<width>800</width>
<height>800</height>
<depth>3</depth>
</size>
<segmented>segmented</segmented>
<object>
<type>robndbox</type>
<name>&#23478;&#22871;&#35013;20&#29255;</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>128.17499999999998</cx>
<cy>762.12</cy>
<w>229.34764620405667</w>
<h>50.0861598827398</h>
<angle>-0.0016</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>&#20912;&#40092;&#37197;&#36865;</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>374.18</cx>
<cy>772.4549999999999</cy>
<w>224.0</w>
<h>49.33000000000004</h>
<angle>-0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>&#183;</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>512.175</cx>
<cy>772.4549999999999</cy>
<w>14.670000000000016</w>
<h>9.330000000000041</h>
<angle>-0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>&#22351;&#21333;&#21253;&#36180;</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>650.175</cx>
<cy>770.4549999999999</cy>
<w>218.66999999999996</w>
<h>50.66999999999996</h>
<angle>-0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>|&#31435;&#21363;&#36141;&#20080;</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>153.51</cx>
<cy>675.4525</cy>
<w>150.66293519437562</w>
<h>28.66500000000002</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>27.9</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>160.845</cx>
<cy>585.79</cy>
<w>210.67000000000002</w>
<h>84.0</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>&#38044;&#24800;&#20215;&#65509;</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>163.51</cx>
<cy>500.45000000000005</cy>
<w>130.66</w>
<h>36.00000000000006</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>&#27873;&#27819;&#31665;</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>112.845</cx>
<cy>407.12</cy>
<w>157.32999999999998</w>
<h>49.34000000000003</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>&#20912;&#34955;</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>88.845</cx>
<cy>351.79</cy>
<w>106.67000000000002</w>
<h>48.0</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>&#36865;&#32440;&#34955;</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>112.51</cx>
<cy>288.45</cy>
<w>156.66</w>
<h>52.07680962081059</h>
<angle>-0.0004</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>&#21488;&#28286;&#25163;&#25235;&#39292;</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>173.845</cx>
<cy>48.12</cy>
<w>296.85854337228875</w>
<h>66.15955354293247</h>
<angle>-0.0265</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>&#39034;&#20016;&#21253;&#37038;</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>393.175</cx>
<cy>151.455</cy>
<w>752.1399660050681</w>
<h>155.59268805441155</h>
<angle>-0.0714</angle>
</robndbox>
</object>
</annotation>
<annotation verified="no">
<folder>0</folder>
<filename>1</filename>
<path>/Users/lywen/Desktop/2019/git/chineseocr/train/data/text/0/1.jpg</path>
<source>
<database>Unknown</database>
</source>
<size>
<width>700</width>
<height>700</height>
<depth>3</depth>
</size>
<segmented>0</segmented>
<object>
<type>robndbox</type>
<name>&#20080;&#22909;&#28783;&#183;&#27431;&#38597;&#30331;</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>123.4325</cx>
<cy>24.205</cy>
<w>245.0176</w>
<h>40.2823</h>
<angle>3.127593</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>&#35282;&#24230;&#21487;</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>126.64</cx>
<cy>190.8675</cy>
<w>193.087</w>
<h>60.7164</h>
<angle>0.0107</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>&#35843;&#33410;</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>122.555</cx>
<cy>255.325</cy>
<w>124.3703</w>
<h>61.5139</h>
<angle>3.072693</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>&#33322;&#31354;&#21697;&#36136;</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>127.225</cx>
<cy>469.115</cy>
<w>211.75</w>
<h>48.0362</h>
<angle>0.005</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>&#24615;&#33021;&#21331;&#36234;</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>128.68</cx>
<cy>528.615</cy>
<w>220.5</w>
<h>40.83</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>&#22825;&#33457;&#28783;/&#26684;&#26629;&#28783;/&#35910;&#32966;&#28783;</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>228.7275</cx>
<cy>624.575</cy>
<w>438.0951</w>
<h>72.1517</h>
<angle>3.138493</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>&#65509;</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>502.6</cx>
<cy>600.95</cy>
<w>23.34</w>
<h>24.5</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>28.</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>567.06</cx>
<cy>582.285</cy>
<w>105.58</w>
<h>78.2789</h>
<angle>3.140893</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>0</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>649.6</cx>
<cy>601.53</cy>
<w>28.0</w>
<h>42.0</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>元</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>669.43</cx>
<cy>621.95</cy>
<w>35.0</w>
<h>29.16</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>保质3年</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>579.31</cx>
<cy>654.035</cy>
<w>97.42</w>
<h>30.4306</h>
<angle>3.140693</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>买好灯</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>540.225</cx>
<cy>495.3675</cy>
<w>69.4794</w>
<h>20.536</h>
<angle>2.773793</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>欧雅登</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>619.265</cx>
<cy>496.4225</cy>
<w>22.7741</w>
<h>75.1482</h>
<angle>2.092493</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>Ouyaboard</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>124.6</cx>
<cy>65.92</cy>
<w>217.0</w>
<h>31.5</h>
<angle>0.0</angle>
</robndbox>
</object>
</annotation>
<annotation>
<path>TB1CaTseDTI8KJjSsphXXcFppXa_!!0-item_pic.jpg.txt</path>
<folder/>
<filename>TB1CaTseDTI8KJjSsphXXcFppXa_!!0-item_pic</filename>
<source>
<database>Unknown</database>
</source>
<size>
<width>430</width>
<height>430</height>
<depth>3</depth>
</size>
<segmented>segmented</segmented>
<object>
<type>robndbox</type>
<name>智能快充</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>305.03749999999997</cx>
<cy>41.99</cy>
<w>219.31258155014365</w>
<h>54.86777855298209</h>
<angle>-0.0125</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>电压检测</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>327.315</cx>
<cy>94.2625</cy>
<w>169.1330355429338</w>
<h>39.695871680678586</h>
<angle>-0.0002</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>80W单孔3.1A双USB</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>332.27</cx>
<cy>128.17000000000002</cy>
<w>160.54000000000002</w>
<h>14.340000000000003</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>送</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>272.78499999999997</cx>
<cy>178.6975</cy>
<w>49.71515285382611</w>
<h>49.0635314490438</h>
<angle>0.1152</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>数据线</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>388.885</cx>
<cy>233.88</cy>
<w>61.629999999999995</w>
<h>20.78</h>
<angle>-0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>手机架</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>388.885</cx>
<cy>404.085</cy>
<w>63.06999999999999</w>
<h>18.629999999999995</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>送</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>323.13250000000005</cx>
<cy>394.95000000000005</cy>
<w>41.392945310958396</w>
<h>40.33611938136691</h>
<angle>0.1528</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>12.8</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>176.035</cx>
<cy>235.135</cy>
<w>58.37048412578463</w>
<h>28.322460721803772</h>
<angle>-0.9071</angle>
</robndbox>
</object>
</annotation>
<annotation verified="no">
<folder>0</folder>
<filename>100</filename>
<path>/Users/lywen/Desktop/data/ocr/mtwi_2018_train/data/0/100.jpg</path>
<source>
<database>Unknown</database>
</source>
<size>
<width>750</width>
<height>750</height>
<depth>3</depth>
</size>
<segmented>0</segmented>
<object>
<type>robndbox</type>
<name>版权所有</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>320.16</cx>
<cy>323.44</cy>
<w>90.0</w>
<h>20.62</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>盗用必究</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>440.625</cx>
<cy>322.035</cy>
<w>96.57</w>
<h>17.81</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>http://shop115132252.taobao.com</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>380.625</cx>
<cy>296.2525</cy>
<w>379.6993</w>
<h>16.9337</h>
<angle>0.0097</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>新星铸造</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>378.75</cx>
<cy>251.25</cy>
<w>201.56</w>
<h>45.0</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>厂家销售</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>674.4264</cx>
<cy>72.9708</cy>
<w>46.0894</w>
<h>163.0735</h>
<angle>2.332993</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>健康锅</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>137.1075</cx>
<cy>591.795</cy>
<w>157.9664</w>
<h>37.9758</h>
<angle>3.140193</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>无涂层</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>96.095</cx>
<cy>547.5</cy>
<w>153.75</w>
<h>39.38</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>手工地模耳锅</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>336.565</cx>
<cy>719.53</cy>
<w>379.69</w>
<h>53.44</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>传统工艺</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>634.6875</cx>
<cy>648.75</cy>
<w>153.4552</w>
<h>35.3754</h>
<angle>2.588193</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>地模铸造</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>658.8275</cx>
<cy>680.6275</cy>
<w>155.7262</w>
<h>37.3525</h>
<angle>2.636393</angle>
</robndbox>
</object>
</annotation>
<annotation>
<path>TB1rwJFLXXXXXcUXpXXunYpLFXX.txt</path>
<folder/>
<filename>TB1rwJFLXXXXXcUXpXXunYpLFXX</filename>
<source>
<database>Unknown</database>
</source>
<size>
<width>300</width>
<height>300</height>
<depth>3</depth>
</size>
<segmented>segmented</segmented>
<object>
<type>robndbox</type>
<name>防水耐磨</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>102.75</cx>
<cy>288.5</cy>
<w>96.0</w>
<h>17.0</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>环保无味</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>214.0</cx>
<cy>289.0</cy>
<w>95.5</w>
<h>16.0</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>雅</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>146.0</cx>
<cy>216.5</cy>
<w>34.5</w>
<h>22.0</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>阁</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>144.25</cx>
<cy>254.0</cy>
<w>31.0</w>
<h>22.0</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>精准版型</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>223.5</cx>
<cy>149.0</cy>
<w>39.5</w>
<h>11.0</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>舒适贴合</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>75.25</cx>
<cy>149.25</cy>
<w>39.0</w>
<h>11.5</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>全包围</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>156.5</cx>
<cy>130.375</cy>
<w>44.51804457079659</w>
<h>14.33662519118695</h>
<angle>-0.308</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>专车脚垫</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>153.875</cx>
<cy>151.625</cy>
<w>90.92977091709807</w>
<h>21.193213092713833</h>
<angle>-0.3015</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>环保材质</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>158.875</cx>
<cy>166.875</cy>
<w>43.326941937161195</w>
<h>10.75413915974994</h>
<angle>-0.3144</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>专车</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>124.75</cx>
<cy>135.75</cy>
<w>15.341338761895148</w>
<h>8.326060996344449</h>
<angle>-0.4041</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>定制</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>127.875</cx>
<cy>143.875</cy>
<w>15.35662588171743</w>
<h>7.618328871169966</h>
<angle>-0.2811</angle>
</robndbox>
</object>
</annotation>
<annotation>
<path>TB1NnA3gInI8KJjSsziXXb8QpXa_!!0-item_pic.jpg.txt</path>
<folder/>
<filename>TB1NnA3gInI8KJjSsziXXb8QpXa_!!0-item_pic</filename>
<source>
<database>Unknown</database>
</source>
<size>
<width>200</width>
<height>281</height>
<depth>3</depth>
</size>
<segmented>segmented</segmented>
<object>
<type>robndbox</type>
<name>C</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>45.89</cx>
<cy>95.99999999999999</cy>
<w>50.22</w>
<h>65.33999999999999</h>
<angle>-0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>程序设计</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>128.2775</cx>
<cy>77.2775</cy>
<w>105.019076609156</w>
<h>30.57022854049137</h>
<angle>-0.0124</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>教程</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>142.555</cx>
<cy>114.88999999999999</cy>
<w>65.33</w>
<h>27.559999999999988</h>
<angle>-0.0</angle>
</robndbox>
</object>
</annotation>
<annotation>
<path>TB1ggJHOpXXXXavaFXXXXXXXXXX_!!0-item_pic.jpg.txt</path>
<folder/>
<filename>TB1ggJHOpXXXXavaFXXXXXXXXXX_!!0-item_pic</filename>
<source>
<database>Unknown</database>
</source>
<size>
<width>800</width>
<height>800</height>
<depth>3</depth>
</size>
<segmented>segmented</segmented>
<object>
<type>robndbox</type>
<name>出口品质</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>260.63</cx>
<cy>750.84</cy>
<w>210.0</w>
<h>51.0</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>100%适配</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>525.13</cx>
<cy>750.34</cy>
<w>253.0</w>
<h>58.0</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>CompatibleToner</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>373.63</cx>
<cy>464.09</cy>
<w>122.01014728887559</w>
<h>14.637440787076255</h>
<angle>0.0131</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>Ansn</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>561.13</cx>
<cy>468.09</cy>
<w>32.0641680702503</w>
<h>12.955714882252884</h>
<angle>-0.0007</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>B</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>539.38</cx>
<cy>466.59</cy>
<w>9.301162633521313</w>
<h>12.720153254455276</h>
<angle>-0.0008</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>专业版</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>398.13</cx>
<cy>347.34</cy>
<w>103.0</w>
<h>33.0</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>沈阳</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>140.63</cx>
<cy>172.84</cy>
<w>100.0</w>
<h>52.0</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>大连</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>140.13</cx>
<cy>234.34</cy>
<w>101.0</w>
<h>46.99999999999997</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>双仓发货</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>470.38</cx>
<cy>209.34</cy>
<w>523.5306495413901</w>
<h>125.01743950281967</h>
<angle>-0.0145</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>品牌升级</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>192.63</cx>
<cy>104.34</cy>
<w>172.0</w>
<h>45.0</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>极速体验</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>400.13</cx>
<cy>100.84</cy>
<w>177.0</w>
<h>46.0</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>当天发货</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>607.63</cx>
<cy>101.34</cy>
<w>174.0</w>
<h>45.0</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>大容量高品质</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>403.63</cx>
<cy>43.34</cy>
<w>210.0</w>
<h>39.0</h>
<angle>0.0</angle>
</robndbox>
</object>
</annotation>
<annotation>
<path>TB10.KXIVXXXXaCXXXXXXXXXXXX_!!0-item_pic.jpg.txt</path>
<folder/>
<filename>TB10</filename>
<source>
<database>Unknown</database>
</source>
<size>
<width>750</width>
<height>750</height>
<depth>3</depth>
</size>
<segmented>segmented</segmented>
<object>
<type>robndbox</type>
<name>®</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>150.6025</cx>
<cy>34.2675</cy>
<w>21.860889380822748</w>
<h>22.09844869717261</h>
<angle>-0.0207</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>LT</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>43.73</cx>
<cy>74.7225</cy>
<w>32.322973339416</w>
<h>15.397372366583983</h>
<angle>0.0258</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>LANTIAN</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>47.0875</cx>
<cy>96.12500000000001</cy>
<w>51.16762684430408</w>
<h>13.126516083023699</h>
<angle>-0.0084</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>ELASTOMER</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>142.51</cx>
<cy>96.325</cy>
<w>134.10239672775123</w>
<h>19.301365491152357</h>
<angle>-0.002</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>METAL</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>160.715</cx>
<cy>74.9725</cy>
<w>60.32638052779855</w>
<h>12.6547878162467</h>
<angle>-0.0018</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>蓝天实业</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>115.7325</cx>
<cy>125.41250000000001</cy>
<w>182.65805838672372</w>
<h>36.146454086501265</h>
<angle>0.0072</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>150</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>82.525</cx>
<cy>367.52000000000004</cy>
<w>14.59894688209435</w>
<h>49.64262737103526</h>
<angle>-0.2394</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>150</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>76.2125</cx>
<cy>468.41</cy>
<w>11.81515342494988</w>
<h>51.26640479830265</h>
<angle>0.1644</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>150</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>219.39999999999998</cx>
<cy>303.915</cy>
<w>20.355518453999075</w>
<h>56.551665926266786</h>
<angle>-0.0957</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>150</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>278.31</cx>
<cy>193.315</cy>
<w>19.084850604089404</w>
<h>45.29797590366785</h>
<angle>-0.1558</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>150</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>587.5725</cx>
<cy>129.10000000000002</cy>
<w>21.74205069480171</w>
<h>50.14251096740956</h>
<angle>-0.4</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>150</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>642.1374999999999</cx>
<cy>121.72749999999999</cy>
<w>14.14671040809138</w>
<h>42.56656664618349</h>
<angle>-0.6723</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>150</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>720.81</cx>
<cy>341.44</cy>
<w>15.536028967928457</w>
<h>54.223140978488004</h>
<angle>0.1288</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>150</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>369.73749999999995</cx>
<cy>377.4975</cy>
<w>41.14191680069028</w>
<h>21.233843954764556</h>
<angle>-0.1679</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>120</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>380.4025</cx>
<cy>482.1025</cy>
<w>41.765022783907554</w>
<h>24.444717282449744</h>
<angle>0.451</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>直接</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>47.06</cx>
<cy>635.19</cy>
<w>82.5</w>
<h>32.5</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>50</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>114.24249999999999</cx>
<cy>636.5025</cy>
<w>46.191412276819676</w>
<h>35.162096009553935</h>
<angle>0.0377</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>mm</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>178.31</cx>
<cy>642.69</cy>
<w>80.0</w>
<h>25.0</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>直接</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>262.685</cx>
<cy>637.065</cy>
<w>83.75</w>
<h>28.75</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>400</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>340.03499999999997</cx>
<cy>635.54</cy>
<w>64.57133722709212</w>
<h>31.92109853097559</h>
<angle>0.0151</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>mm</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>409.53</cx>
<cy>642.0075</cy>
<w>67.36148866047249</w>
<h>25.25968567780221</h>
<angle>0.0037</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>厚度</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>497.05999999999995</cx>
<cy>637.69</cy>
<w>82.49999999999994</w>
<h>35.0</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>30/40/50</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>618.855</cx>
<cy>636.2375</cy>
<w>153.926025342983</w>
<h>36.80246395778801</h>
<angle>-0.0245</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>mn</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>723.31</cx>
<cy>643.315</cy>
<w>50.0</w>
<h>28.75</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>上海蓝天</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>545.81</cx>
<cy>374.565</cy>
<w>149.99999999999994</w>
<h>38.75</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>旗舰店</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>644.985</cx>
<cy>427.89750000000004</cy>
<w>108.70921550014361</w>
<h>41.28847437279765</h>
<angle>-0.001</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>150</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>392.06</cx>
<cy>275.5025</cy>
<w>53.60670198821288</w>
<h>15.220563579889777</h>
<angle>-0.6257</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>聚氨酯冲垫</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>207.72000000000003</cx>
<cy>708.4875</cy>
<w>360.2960570393317</w>
<h>49.86599741438303</h>
<angle>-0.0063</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>PU</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>454.21000000000004</cx>
<cy>707.3299999999999</cy>
<w>98.66000000000003</w>
<h>55.95999999999992</h>
<angle>-0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>圆垫</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>602.1925</cx>
<cy>706.3225</cy>
<w>143.07557700267859</w>
<h>50.589434085518604</h>
<angle>-0.0108</angle>
</robndbox>
</object>
</annotation>
<annotation>
<path>TB2MFz5dAfb_uJjSsrbXXb6bVXa_!!931678339.jpg.txt</path>
<folder/>
<filename>TB2MFz5dAfb_uJjSsrbXXb6bVXa_!!931678339</filename>
<source>
<database>Unknown</database>
</source>
<size>
<width>750</width>
<height>750</height>
<depth>3</depth>
</size>
<segmented>segmented</segmented>
<object>
<type>robndbox</type>
<name>12T</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>485.625</cx>
<cy>237.66000000000003</cy>
<w>22.2804398520316</w>
<h>8.009126044706752</h>
<angle>-0.3811</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>CL21A226MAFNNNE</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>437.5775</cx>
<cy>247.5</cy>
<w>116.99743821080128</w>
<h>11.798791247104818</h>
<angle>-0.4615</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>深圳市启泰兴电子</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>462.655</cx>
<cy>594.375</cy>
<w>373.13000000000005</w>
<h>39.36999999999989</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>深圳市启泰兴电子</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>407.815</cx>
<cy>349.22</cy>
<w>372.18999999999994</w>
<h>42.18000000000001</h>
<angle>0.0</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>CL10A475KP8NNNC</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>179.53</cx>
<cy>254.2975</cy>
<w>113.92931660515562</w>
<h>8.771666014233709</h>
<angle>-0.5474</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>1608A4.7uF</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>152.10999999999999</cx>
<cy>242.34499999999997</cy>
<w>75.83187354504685</w>
<h>13.835163154774545</h>
<angle>-0.5652</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>熱蕒</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>144.375</cx>
<cy>120.705</cy>
<w>112.11928457219406</w>
<h>55.054082860311084</h>
<angle>-0.2313</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>SAMSUNG</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>618.985</cx>
<cy>57.66</cy>
<w>135.47000000000003</w>
<h>23.44942027552939</h>
<angle>-0.0012</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>2012A</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>398.4375</cx>
<cy>241.40750000000003</cy>
<w>45.09661247730976</w>
<h>17.33234562894386</h>
<angle>-0.3141</angle>
</robndbox>
</object>
<object>
<type>robndbox</type>
<name>22uF</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<robndbox>
<cx>439.91999999999996</cx>
<cy>221.71749999999997</cy>
<w>30.650335611814555</w>
<h>14.496400790090476</h>
<angle>-0.4315</angle>
</robndbox>
</object>
</annotation>
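These annotation files are plain XML, so a loader needs nothing beyond the standard library; an XML parser also resolves any remaining numeric character references in the `<name>` fields automatically. A minimal loading sketch under the tag layout shown above (the function is illustrative, not part of this commit):

``` python
import xml.etree.ElementTree as ET

def load_annotation(path):
    """Read one rotated-box annotation file into a list of dicts."""
    root = ET.parse(path).getroot()
    boxes = []
    for obj in root.iter('object'):
        rb = obj.find('robndbox')
        boxes.append({
            'text': obj.findtext('name'),          # ground-truth string
            'cx': float(rb.findtext('cx')),        # box center
            'cy': float(rb.findtext('cy')),
            'w': float(rb.findtext('w')),          # box size
            'h': float(rb.findtext('h')),
            'angle': float(rb.findtext('angle')),  # rotation in radians
        })
    return boxes
```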