Commit 4ec41ed5 authored by HypoX64

Add NikeMouth

Parent e7f65ad9
.gitignore
......
@@ -146,6 +146,10 @@ result/
 /OneImage2Video/pixel_imgs/university
+/NikeMouth/media
+/NikeMouth/test_media
+/NikeMouth/test.py
 *.mp4
 *.flv
 *.mp3
......
NikeMouth/main.py

import os
import sys
import numpy as np
import cv2
import normal2nike
sys.path.append("..")
from Util import util, ffmpeg
from options import Options

opt = Options().getparse()
util.file_init(opt)

# Collect the input image(s): a whole directory or a single file.
if os.path.isdir(opt.media):
    files = util.Traversal(opt.media)
else:
    files = [opt.media]

for file in files:
    img = cv2.imread(file)
    h, w = img.shape[:2]
    if opt.output == 'image':
        img = normal2nike.convert(img, opt.size, opt.intensity, opt.aspect_ratio, opt.ex_move, opt.mode)
        cv2.imwrite(os.path.join(opt.result_dir, os.path.basename(file)), img)
    elif opt.output == 'video':
        frame = int(opt.time * opt.fps)
        for i in range(frame):
            # Ramp the intensity from 0 to opt.intensity across the clip, and crop
            # to a multiple of 4 pixels so the video encoder accepts the frames.
            tmp = normal2nike.convert(img, opt.size, i * opt.intensity / frame, opt.aspect_ratio,
                                      opt.ex_move, opt.mode)[:4 * (h // 4), :4 * (w // 4)]
            cv2.imwrite(os.path.join('./tmp/output_imgs', '%05d.jpg' % i), tmp)
        # Keep the last generated frame as a preview image.
        cv2.imwrite(os.path.join(opt.result_dir, os.path.basename(file)), tmp)
        ffmpeg.image2video(
            opt.fps,
            './tmp/output_imgs/%05d.jpg',
            None,
            os.path.join(opt.result_dir, os.path.splitext(os.path.basename(file))[0] + '.mp4'))

# cv2.namedWindow('image', cv2.WINDOW_NORMAL)
# cv2.imshow('image', img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
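The conversion can also be driven directly from Python without the CLI wrapper. A minimal sketch, assuming an image with one detectable face (the paths are placeholders; `convert` is defined in normal2nike.py below):

```python
import cv2
import normal2nike

face = cv2.imread('./imgs/test.jpg')        # placeholder input path
out = normal2nike.convert(face, size=1.0, intensity=1.0,
                          aspect_ratio=1.0, ex_move=[0, 0], mode='all_face')
cv2.imwrite('./result/test_nike.jpg', out)  # placeholder output path
```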
NikeMouth/normal2nike.py

import numpy as np
import cv2
import face_recognition

# Draw the Delaunay triangles (debug helper).
def draw_delaunay(img, TriangleList, delaunay_color):
    size = img.shape
    r = (0, 0, size[1], size[0])
    for t in TriangleList:
        pt1 = (int(t[0]), int(t[1]))
        pt2 = (int(t[2]), int(t[3]))
        pt3 = (int(t[4]), int(t[5]))
        # if rect_contains(r, pt1) and rect_contains(r, pt2) and rect_contains(r, pt3):
        cv2.line(img, pt1, pt2, delaunay_color, 1)
        cv2.line(img, pt2, pt3, delaunay_color, 1)
        cv2.line(img, pt3, pt1, delaunay_color, 1)

# One affine transform per triangle pair (source -> destination).
def get_Ms(t1, t2):
    Ms = []
    for i in range(len(t1)):
        pts1 = np.array([[t1[i][0], t1[i][1]], [t1[i][2], t1[i][3]], [t1[i][4], t1[i][5]]]).astype(np.float32)
        pts2 = np.array([[t2[i][0], t2[i][1]], [t2[i][2], t2[i][3]], [t2[i][4], t2[i][5]]]).astype(np.float32)
        Ms.append(cv2.getAffineTransform(pts1, pts2))
    return Ms

# Delaunay-triangulate the points inside an h x w frame; OpenCV returns each
# triangle as a flat 6-tuple (x1, y1, x2, y2, x3, y3).
def delaunay(h, w, points):
    subdiv = cv2.Subdiv2D((0, 0, w, h))
    for i in range(len(points)):
        subdiv.insert(tuple(points[i]))
    TriangleList = subdiv.getTriangleList()
    return TriangleList

# Eight anchor points on the image border keep the triangulation covering the whole frame.
def get_borderpoints(img):
    h, w = img.shape[:2]
    h, w = h - 1, w - 1
    points = [[0, 0], [w // 2, 0], [w, 0], [w, h // 2], [w, h], [w // 2, h], [0, h], [0, h // 2]]
    return np.array(points)

# One binary mask per destination triangle, thickened along the edges
# to avoid seams between neighbouring triangles.
def get_masks(img, t):
    masks = np.zeros((len(t), img.shape[0], img.shape[1]), dtype=np.uint8)
    for i in range(len(t)):
        points = np.array([[t[i][0], t[i][1]], [t[i][2], t[i][3]], [t[i][4], t[i][5]]]).astype(np.int64)
        masks[i] = cv2.fillConvexPoly(masks[i], points, (255))
        cv2.line(masks[i], tuple(points[0]), tuple(points[1]), (255), 3)
        cv2.line(masks[i], tuple(points[0]), tuple(points[2]), (255), 3)
        cv2.line(masks[i], tuple(points[1]), tuple(points[2]), (255), 3)
    return masks

# Move every triangle vertex that coincides with `point` by `move`.
def changeTriangleList(t, point, move):
    t = t.astype(np.int64)
    for i in range(len(t)):
        if t[i][0] == point[0] and t[i][1] == point[1]:
            t[i][0], t[i][1] = t[i][0] + move[0], t[i][1] + move[1]
        elif t[i][2] == point[0] and t[i][3] == point[1]:
            t[i][2], t[i][3] = t[i][2] + move[0], t[i][3] + move[1]
        elif t[i][4] == point[0] and t[i][5] == point[1]:
            t[i][4], t[i][5] = t[i][4] + move[0], t[i][5] + move[1]
    return t

# Warp the image triangle by triangle and stitch the pieces together.
def replace_delaunay(img, Ms, masks):
    img_new = np.zeros_like(img)
    h, w = img.shape[:2]
    for i in range(len(Ms)):
        mask = cv2.merge([masks[i], masks[i], masks[i]])
        mask_inv = cv2.bitwise_not(mask)
        tmp = cv2.warpAffine(img, Ms[i], (w, h), borderMode=cv2.BORDER_REFLECT_101, flags=cv2.INTER_CUBIC)
        tmp = cv2.bitwise_and(mask, tmp)
        img_new = cv2.bitwise_and(mask_inv, img_new)
        img_new = cv2.add(tmp, img_new)
    return img_new

# Fit the landmark template detected on ./imgs/nike.png to the source face.
def get_nikemouth_landmark(src_landmark, alpha=1, aspect_ratio=1.0, mode='only_mouth'):
    nike = cv2.imread('./imgs/nike.png')
    landmark = face_recognition.face_landmarks(nike)[0]
    if mode == 'only_mouth':
        src_landmark = src_landmark[56:]
        nikemouth = np.array(landmark['top_lip'] + landmark['bottom_lip'])
    else:
        src_landmark = src_landmark[25:]
        nikemouth = np.array(landmark['left_eyebrow'] + landmark['right_eyebrow'] + landmark['nose_bridge'] +
                             landmark['nose_tip'] + landmark['left_eye'] + landmark['right_eye'] +
                             landmark['top_lip'] + landmark['bottom_lip'])
    # Center the template at the origin.
    nikemouth = nikemouth - [np.mean(nikemouth[:, 0]), np.mean(nikemouth[:, 1])]
    nikemouth[:, 0] = nikemouth[:, 0] * aspect_ratio
    # Measure the template and source bounding boxes.
    nikemouth_h = np.max(nikemouth[:, 1]) - np.min(nikemouth[:, 1])
    nikemouth_w = np.max(nikemouth[:, 0]) - np.min(nikemouth[:, 0])
    src_h = np.max(src_landmark[:, 1]) - np.min(src_landmark[:, 1])
    src_w = np.max(src_landmark[:, 0]) - np.min(src_landmark[:, 0])
    # Rescale to the source width and move to the source centroid.
    beta = nikemouth_w / src_w
    nikemouth = alpha * nikemouth / beta + [np.mean(src_landmark[:, 0]), np.mean(src_landmark[:, 1])]
    return np.around(nikemouth, 0)

def convert(face, size=1, intensity=1, aspect_ratio=1.0, ex_move=[0, 0], mode='all_face'):
    h, w = face.shape[:2]
    landmark = face_recognition.face_landmarks(face)[0]
    landmark_src = np.array(landmark['chin'] + landmark['left_eyebrow'] + landmark['right_eyebrow'] +
                            landmark['nose_bridge'] + landmark['nose_tip'] + landmark['left_eye'] +
                            landmark['right_eye'] + landmark['top_lip'] + landmark['bottom_lip'])
    landmark_src = np.concatenate((get_borderpoints(face), landmark_src), axis=0)
    TriangleList_src = delaunay(h, w, landmark_src)
    TriangleList_dst = TriangleList_src.copy()
    nikemouth_landmark = get_nikemouth_landmark(landmark_src, alpha=size, aspect_ratio=aspect_ratio, mode=mode)
    # Indices into landmark_src: 0-7 border, 8-24 chin, 25-55 other features, 56-79 lips.
    if mode == 'only_mouth':
        for i in range(24):
            move = ex_move + (nikemouth_landmark[i] - landmark_src[56 + i]) * intensity
            TriangleList_dst = changeTriangleList(TriangleList_dst, landmark_src[56 + i], move)
    else:
        for i in range(80 - 25):
            move = ex_move + (nikemouth_landmark[i] - landmark_src[25 + i]) * intensity
            TriangleList_dst = changeTriangleList(TriangleList_dst, landmark_src[25 + i], move)
    Ms = get_Ms(TriangleList_src, TriangleList_dst)
    masks = get_masks(face, TriangleList_dst)
    face_new = replace_delaunay(face, Ms, masks)
    return face_new
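
# Pipeline summary: detect the face landmarks, Delaunay-triangulate them
# together with the border anchor points, move the selected vertices toward
# the template landmarks (scaled by intensity), then warp every source
# triangle onto its moved counterpart and stitch the pieces back together.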

# Debug display:
# draw_delaunay(img, TriangleList, delaunay_color)
# cv2.namedWindow('image', cv2.WINDOW_NORMAL)
# cv2.imshow('image', face_new)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
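The per-triangle machinery above is plain OpenCV: compute one affine map per triangle pair, warp the whole image with it, and keep only the pixels inside the destination triangle's mask. A self-contained sketch on synthetic data, independent of any face landmarks (all coordinates are made-up illustration values):

```python
import numpy as np
import cv2

# Synthetic 200x200 gradient image so the warp is visible.
img = cv2.merge([np.tile(np.arange(200, dtype=np.uint8), (200, 1))] * 3)

# One source triangle and where its vertices should move to.
src_tri = np.float32([[20, 20], [180, 40], [60, 160]])
dst_tri = np.float32([[30, 30], [170, 60], [80, 150]])

# Affine map for this pair, as in get_Ms().
M = cv2.getAffineTransform(src_tri, dst_tri)
warped = cv2.warpAffine(img, M, (200, 200),
                        borderMode=cv2.BORDER_REFLECT_101, flags=cv2.INTER_CUBIC)

# Destination-triangle mask, as in get_masks(), selects this triangle's pixels.
mask = np.zeros((200, 200), dtype=np.uint8)
cv2.fillConvexPoly(mask, dst_tri.astype(np.int32), 255)
piece = cv2.bitwise_and(cv2.merge([mask] * 3), warped)
```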

NikeMouth/options.py

import argparse

class Options():
    def __init__(self):
        self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        self.initialized = False

    def initialize(self):
        self.parser.add_argument('-m', '--media', type=str, default='./imgs/test.jpg', help='your image path or dir')
        self.parser.add_argument('-mode', '--mode', type=str, default='all_face', help='all_face | only_mouth')
        self.parser.add_argument('-o', '--output', type=str, default='image', help='output image or video')
        self.parser.add_argument('-t', '--time', type=float, default=2.0, help='duration of the output video in seconds')
        self.parser.add_argument('-f', '--fps', type=float, default=25, help='fps of the output video')
        self.parser.add_argument('-s', '--size', type=float, default=1.0, help='size of the mouth')
        self.parser.add_argument('-i', '--intensity', type=float, default=1.0, help='effect intensity')
        self.parser.add_argument('-a', '--aspect_ratio', type=float, default=1.0, help='aspect ratio of the mouth')
        self.parser.add_argument('-e', '--ex_move', type=str, default='[0,0]', help='extra [x,y] offset applied to the mouth')
        self.parser.add_argument('-r', '--result_dir', type=str, default='./result', help='output directory')
        # self.parser.add_argument('--temp_dir', type=str, default='./tmp', help='')
        self.initialized = True

    def getparse(self):
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()
        self.opt.ex_move = eval(self.opt.ex_move)  # parse the "[x,y]" string into a list
        return self.opt
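One caveat in `getparse`: `eval` on a command-line string will execute arbitrary Python. If that matters, `ast.literal_eval` is a drop-in replacement for this `[x,y]` case; a minimal sketch, not part of the commit (`parse_ex_move` is a hypothetical helper name):

```python
import ast

def parse_ex_move(s):
    # Accepts only Python literals such as "[10,-5]"; rejects arbitrary expressions.
    value = ast.literal_eval(s)
    assert isinstance(value, list) and len(value) == 2, 'ex_move must be [x,y]'
    return value

print(parse_ex_move('[10,-5]'))  # -> [10, -5]
```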

OneImage2Video/README.md

# OneImage2Video
Generate a video from one or more images.<br>
## Getting Started
### Prerequisites
* Linux or Windows
* python3
* [ffmpeg](http://ffmpeg.org/)
```bash
sudo apt-get install ffmpeg
```
### Dependencies
The code depends on opencv-python, which can be installed via pip:
```bash
pip install opencv-python
```
### Clone this repo
```bash
git clone https://github.com/HypoX64/bilibili.git
cd bilibili/OneImage2Video
```
### Run
* Edit your video path and pixel-image directory in main.py (a filled-in sample follows the run command below).
```python
# Watch 香蕉君 on GitHub
# pixel_imgs_dir = './pixel_imgs/github'
# pixel_imgs_resize = 0 # resize pixel_imgs; if 0, do not resize
# output_pixel_num = 52 # how many pixels in the output video's width
# video_path = '../Video/素材/香蕉君/香蕉君_3.mp4'
# inverse = False
```
* run
```bash
python main.py
```
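As referenced above, a filled-in configuration might look like this; the values simply uncomment the sample block, and the paths are the repo's own examples rather than requirements:

```python
pixel_imgs_dir = './pixel_imgs/github'  # directory of "pixel" tile images
pixel_imgs_resize = 0                   # 0 keeps the tile images at their original size
output_pixel_num = 52                   # tiles across the output video's width
video_path = '../Video/素材/香蕉君/香蕉君_3.mp4'
inverse = False
```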
Util/ffmpeg.py
......
@@ -27,31 +27,36 @@ def run(args,mode = 0):
         return sout
     elif mode == 2:
-        p = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        cmd = args2cmd(args)
+        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         sout = p.stdout.readlines()
         return sout

-def video2image(videopath, imagepath, fps=0, start_time=0, last_time=0):
+def video2image(videopath, imagepath, fps=0, start_time='00:00:00', last_time='00:00:00'):
     args = ['ffmpeg', '-i', '"'+videopath+'"']
-    if last_time!=0:
+    if last_time != '00:00:00':
         args += ['-ss', start_time]
         args += ['-t', last_time]
     if fps != 0:
-        args += ['-r', fps]
+        args += ['-r', str(fps)]
     args += ['-f', 'image2','-q:v','-0',imagepath]
     run(args)

-def video2voice(videopath,voicepath,samplingrate=0):
-    args = ['ffmpeg', '-i', '"'+videopath+'"']
-    if samplingrate != 0:
-        args += ['-ar', str(samplingrate)]
+def video2voice(videopath, voicepath, start_time='00:00:00', last_time='00:00:00'):
+    args = ['ffmpeg', '-i', '"'+videopath+'"','-f mp3','-b:a 320k']
+    if last_time != '00:00:00':
+        args += ['-ss', start_time]
+        args += ['-t', last_time]
     args += [voicepath]
     run(args)

 def image2video(fps,imagepath,voicepath,videopath):
-    os.system('ffmpeg -y -r '+str(fps)+' -i '+imagepath+' -vcodec libx264 '+'./tmp/video_tmp.mp4')
-    os.system('ffmpeg -i ./tmp/video_tmp.mp4 -i "'+voicepath+'" -vcodec copy -acodec aac '+videopath)
+    if voicepath != None:
+        os.system('ffmpeg -y -r '+str(fps)+' -i '+imagepath+' -vcodec libx264 '+'./tmp/video_tmp.mp4')
+        os.system('ffmpeg -i ./tmp/video_tmp.mp4 -i "'+voicepath+'" -vcodec copy -acodec aac '+videopath)
+    else:
+        os.system('ffmpeg -y -r '+str(fps)+' -i '+imagepath+' -vcodec libx264 '+videopath)

 def get_video_infos(videopath):
     args = ['ffprobe -v quiet -print_format json -show_format -show_streams', '-i', '"'+videopath+'"']
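A quick usage sketch of the two changed helpers (paths and times are placeholders; passing `voicepath=None` exercises the new silent-video branch):

```python
from Util import ffmpeg

# Extract ten seconds of 320k mp3 audio starting at one minute (new signature).
ffmpeg.video2voice('./input.mp4', './tmp/voice.mp3',
                   start_time='00:01:00', last_time='00:00:10')

# Encode numbered frames into a silent mp4; None skips the audio mux step.
ffmpeg.image2video(25, './tmp/output_imgs/%05d.jpg', None, './result/out.mp4')
```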
......