Commit 6d92f3fa authored by hypox64

First commit

Parent 9874c1ab
@@ -127,3 +127,9 @@ dmypy.json
# Pyre type checker
.pyre/
#my
/pythontest.py
/testmedia
/tmp
*.exe
\ No newline at end of file
![image](./imgs/logo_char.png)
# ShellPlayer
| English | [Chinese](./README_CN.md) |
You can play colorful video with sound in your shell!!!
You can play colorful video with sound in your shell!!!
You can play colorful video with sound in your shell!!!
## Getting Started
### Prerequisites
* Linux
* python3
* [ffmpeg](http://ffmpeg.org/)
```bash
sudo apt-get install ffmpeg
```
### Dependencies
This code depends on opencv-python, available via pip install.
```bash
pip install opencv-python
```
### Clone this repo
```bash
git clone https://github.com/HypoX64/ShellPlayer.git
cd ShellPlayer
```
### Run the program
```bash
python play.py -m "your_video_or_image_path"
```
![image](./imgs/kun.gif)<br>
## More parameters
| Option | Description | Default |
| :----------: | :------------------------: | :-------------------------------------: |
| -m | path of the video or image to play | './imgs/test.jpg' |
| -g | if specified, play in grayscale | |
| -f | playback fps; 0 = auto | 0 |
| -c | character style of the output: 1 \| 2 \| 3 | 3 |
| -s | shell size: 1 = 80x24, 2 = 132x43, 3 = 203x55 | 1 |
| --frame_num | number of frames to play; 0 = all | 0 |
| --char_scale | character aspect ratio in the shell | 2.0 |
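
For example, a combined invocation might look like the sketch below (the media path `./testmedia/demo.mp4` is a placeholder, not a file shipped with this repo):
```bash
# Play a video (placeholder path) in grayscale at 24 fps on a 132x43 shell,
# stopping after the first 500 frames.
python play.py -m "./testmedia/demo.mp4" -g -f 24 -s 2 --frame_num 500
```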
![image](./imgs/logo_char.png)
# ShellPlayer
| [English](./README.md) | Chinese |
You can play colorful video with sound in your shell!!!
You can play colorful video with sound in your shell!!!
You can play colorful video with sound in your shell!!!
## Getting Started
### Prerequisites
* Linux
* python3
* [ffmpeg](http://ffmpeg.org/)
```bash
sudo apt-get install ffmpeg
```
### Dependencies
This code depends on opencv-python, which can be installed via pip.
```bash
pip install opencv-python
```
### Clone this repo
```bash
git clone https://github.com/HypoX64/ShellPlayer.git
cd ShellPlayer
```
### Run the program
```bash
python play.py -m "your_video_or_image_path"
```
![image](./imgs/kun.gif)<br>
## More parameters
| Option | Description | Default |
| :----------: | :------------------------: | :-------------------------------------: |
| -m | path of the video or image to play | './imgs/test.jpg' |
| -g | if specified, play in grayscale | |
| -f | playback fps; 0 = auto | 0 |
| -c | character style of the output: 1 \| 2 \| 3 | 3 |
| -s | shell size: 1 = 80x24, 2 = 132x43, 3 = 203x55 | 1 |
| --frame_num | number of frames to play; 0 = all | 0 |
| --char_scale | character aspect ratio in the shell | 2.0 |
import numpy as np
import cv2
import time
'''
#norm
red 31m 204,0,0
green 32m 78,145,6
brown 33m 196,160,0
blue 34m 52,101,164
cyan-blue 36m 6,152,154
#highlight
#gray 30m 85, 87, 83
red 31m 239,41 ,41
green 32m 138,226,52
yellow 33m 253,233,79
blue 34m 114,159,207
purple 35m 173,127,168
blue_sky 36m 52 ,226,226
white 37m 238,238,238
'''
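# ANSI SGR escape sequences: '\033[3Xm' selects a normal foreground color
# (31=red ... 37=white), '\033[1;3Xm' the bright/bold variant, and '\033[0m'
# resets all attributes.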
def char_add_color(char,color_num):
if color_num > 4:
return '\033[1;3'+str(color_num+1-5)+'m'+char+'\033[0m'
elif color_num == 4:
return '\033[36m'+char+'\033[0m'
elif color_num == 3:
return '\033[34m'+char+'\033[0m'
elif color_num == 2:
return '\033[33m'+char+'\033[0m'
elif color_num == 1:
return '\033[32m'+char+'\033[0m'
elif color_num == 0:
return '\033[31m'+char+'\033[0m'
class Transformer(object):
def __init__(self, strshape,scshape,charstyle=3):
super(Transformer, self).__init__()
self.strshape = strshape
self.scshape = scshape
self.strh,self.strw = self.strshape[:2]
self.sch,self.scw = self.scshape[:2]
self.ord = 2
if charstyle == 1:
self.chars=[' ', ',', '+', '1', 'n','D','&','M','@']
elif charstyle == 2:
self.chars=[' ', '▏', '▎', '▍', '▌','▋','▊','▉','█']
elif charstyle == 3:
self.chars=[' ', '▏', '▂', '▍', '▅','▋','▇','▉','█']
self.char_length = len(self.chars)
#self.colors = np.array([[204,0,0],[78,145,6],[196,160,0],[52,101,164],[6,152,154],[239,41 ,41],[138,226,52],[253,233,79],[114,159,207],[173,127,168],[52 ,226,226],[238,238,238]])
self.colors = np.array([[204,0,0],[78,145,6],[196,160,0],[52,101,164],[6,152,154],[239,41 ,70],[170,226,52],[253,233,100],[114,159,207],[173,127,168],[52 ,226,226],[238,238,238]])
self.color_length = len(self.colors)
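        # Perceived brightness of each palette color using Rec. 601 luma
        # weights (Y = 0.299*R + 0.587*G + 0.114*B); dividing each color by its
        # own luma below gives hue vectors that can be matched independently of
        # intensity.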
self.brightness = self.colors[:,0]*0.299+self.colors[:,1]*0.587+self.colors[:,2]*0.114
self.brightness_divisor = self.brightness/8
self.colors_hue = self.colors.astype(np.float64)
for i in range(self.color_length):
#self.colors_hue[i] = self.colors_hue[i]/np.mean(self.colors_hue[i])
self.colors_hue[i] = self.colors_hue[i]/self.brightness[i]
self.color_chars=[]
for i in range(self.char_length):
tmp=[]
for j in range(self.color_length):
tmp.append(char_add_color(self.chars[i],j))
self.color_chars.append(tmp)
self.norm_matrix = np.zeros((self.strh,self.strw,3))
self.color_img_matrix = np.zeros((self.color_length,self.strh,self.strw,3))
self.color_contrast_matrix = np.zeros((self.color_length,self.strh,self.strw,3))
for i in range(self.color_length):
self.color_contrast_matrix[i,:,:] = self.colors_hue[i]
self.color_contrast_distance = np.zeros((self.color_length,self.strh,self.strw))
def blank(self,string):
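        # Vertically center the rendered text by padding blank lines above and
        # below when the terminal is taller than the converted image.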
if self.sch>self.strh:
for i in range((self.sch-self.strh)//2):
string = '\n'+string+'\n'
return string
def gray(self,img):
img = cv2.resize(img,(self.strw,self.strh),interpolation=cv2.INTER_AREA)
if img.ndim == 3:
if img.shape[2] == 4:
                img = img[:,:,:3]  # keep only the BGR channels (drop alpha)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
string = ''
for i in range(self.strh):
for j in range(self.strw):
string += self.chars[img[i][j]//32]
if i != self.strh-1:
string += '\n'
string = self.blank(string)
return string
def pixel_color(self,Y,dis):
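        # Very dark pixels (luma below 24) become a blank space; otherwise pick
        # the palette color with the smallest hue distance and a character
        # whose fill level matches the pixel brightness.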
if Y<24:
return ' '
# color
color_num = np.where(dis==np.min(dis))[0][0]
# brightness
#Y = pixel[0]*0.299+pixel[1]*0.587+pixel[2]*0.114
brightness_level = int(Y/self.brightness_divisor[color_num])
if brightness_level>8:
brightness_level = 8
char = self.color_chars[brightness_level][color_num]
return char
def color(self,img):
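        # Resize to the character grid, drop alpha, convert BGR to RGB, then
        # choose a palette color and character for every cell.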
img = cv2.resize(img,(self.strw,self.strh),interpolation=cv2.INTER_AREA)
if img.shape[2] == 4:
            img = img[:,:,:3]  # keep only the BGR channels (drop alpha)
img = img[:,:,::-1]
img = img.astype(np.float64)
img = np.clip(img, 1, 255)
# get color distance matrix
bright = img[:,:,0]*0.299+img[:,:,1]*0.587+img[:,:,2]*0.114
for i in range(3):self.norm_matrix[:,:,i] = bright
self.color_img_matrix[:] = img/self.norm_matrix
# for i in range(3):self.norm_matrix[:,:,i] = np.mean(img,axis=2)
# self.color_img_matrix[:] = img/self.norm_matrix
self.color_contrast_distance = np.linalg.norm(self.color_img_matrix-self.color_contrast_matrix,ord=self.ord,axis=3)
string = ''
for i in range(self.strh):
for j in range(self.strw):
string += self.pixel_color(bright[i,j],self.color_contrast_distance[:,i,j])
if i != self.strh-1:
string += '\n'
string = self.blank(string)
return string
def convert(self,img,isgray):
if isgray:
return self.gray(img)
else :
return self.color(img)
def eval_performance(self,isgray):
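        # Rough benchmark: print the logo once, then convert a 1280x720 test
        # image ten times to estimate how many frames per second this
        # terminal/CPU can render.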
t1 = time.time()
img = cv2.imread('./imgs/logo.png')
print(self.convert(img,isgray))
for i in range(10):
img = cv2.imread('./imgs/test.jpg')
img = cv2.resize(img,(1280,720))
self.convert(img,isgray)
t2 = time.time()
recommend_fps = int(1/((t2-t1)/10))-1
return recommend_fps
def main():
pass
if __name__ == '__main__':
main()
import argparse
class Options():
def __init__(self):
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.initialized = False
def initialize(self):
self.parser.add_argument('-m','--media', type=str, default='./imgs/test.jpg',help='your video or image path')
self.parser.add_argument('-g','--gray', action='store_true', help='if specified, play gray video')
self.parser.add_argument('-f','--fps', type=int, default=0,help='playing fps, 0-> auto')
self.parser.add_argument('-c','--charstyle', type=int, default=3,help='style of output')
self.parser.add_argument('-s','--screen', type=int, default=1,help='size of shell 1:80*24 2:132*43 3:203*55')
self.parser.add_argument('--ori_fps', type=int, default=0,help='original fps for video, 0-> auto')
self.parser.add_argument('--frame_num', type=int, default=0,help='how many frames want to play 0->all')
        self.parser.add_argument('--char_scale', type=float, default=2.0,help='character aspect ratio in shell')
self.initialized = True
def getparse(self):
if not self.initialized:
self.initialize()
self.opt = self.parser.parse_args()
return self.opt
import os
import sys
import time
from multiprocessing import Process, Queue
import threading
import subprocess
import numpy as np
import cv2
from util import util,ffmpeg
from img2shell import Transformer
from options import Options
def readvideo(opt,imgQueue):
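    # Producer process: decode the video with OpenCV and push only the frames
    # selected by play_index, resampling from the source frame rate (ori_fps)
    # down to the playback fps.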
cap = cv2.VideoCapture(opt.media)
play_index = np.linspace(0, opt.frame_num-1,num=int(opt.frame_num*opt.fps/opt.ori_fps),dtype=np.int64)
frame_cnt = 0; play_cnt =0
while(cap.isOpened()):
        ret, frame = cap.read()
        if not ret:
            break
if frame_cnt == play_index[play_cnt]:
imgQueue.put(frame)
if play_cnt < len(play_index)-1:
play_cnt+= 1
frame_cnt += 1
def timer(opt,timerQueue):
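    # Pacing process: emit one tick every 1/fps seconds; the main loop blocks
    # on these ticks so frames are printed at a steady rate.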
while True:
t = 1.0/opt.fps
time.sleep(t)
        timerQueue.put(True)
opt = Options().getparse()
#-------------------------------Media Init-------------------------------
if util.is_img(opt.media):
img = cv2.imread(opt.media)
h_media,w_media = img.shape[:2]
elif util.is_video(opt.media):
fps,endtime,h_media,w_media = ffmpeg.get_video_infos(opt.media)
if opt.frame_num == 0:
opt.frame_num = int(endtime*fps-5)
if opt.ori_fps == 0:
opt.ori_fps = fps
util.clean_tempfiles(tmp_init=True)
else:
    print('Cannot load this file!')
    sys.exit(1)
#-------------------------------Image Shape Init-------------------------------
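# Fit the media into the terminal grid: char_scale compensates for character
# cells being taller than they are wide, then the image fills either the full
# height or the full width of the chosen screen size.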
if opt.screen==1:
limw = 80;limh = 24
if opt.screen==2:
limw = 132;limh = 43
if opt.screen==3:
limw = 203;limh = 55
screen_scale = limh/limw
img_scale = h_media/w_media/opt.char_scale
if img_scale >= screen_scale:
strshape = (limh,int(limh/img_scale))
else:
strshape = (int(limw*img_scale),limw)
#-------------------------------img2shell Init-------------------------------
transformer = Transformer(strshape,(limh,limw),opt.charstyle)
if util.is_video(opt.media):
recommend_fps = transformer.eval_performance(opt.gray)
if opt.fps == 0:
opt.fps = np.clip(recommend_fps,1,opt.ori_fps)
else:
opt.fps = np.clip(opt.fps,1,opt.ori_fps)
ffmpeg.video2voice(opt.media,'-ar 16000 ./tmp/tmp.wav')
#-------------------------------main-------------------------------
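# For videos: a decoder process fills imgQueue, a timer process paces playback,
# paplay plays the audio extracted above, and each converted frame is printed
# from its own thread.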
if util.is_img(opt.media):
print(transformer.convert(img,opt.gray))
elif util.is_video(opt.media):
imgQueue = Queue(1)
timerQueue = Queue()
imgload_p = Process(target=readvideo,args=(opt,imgQueue))
imgload_p.daemon = True
imgload_p.start()
timer_p = Process(target=timer,args=(opt,timerQueue))
timer_p.daemon = True
timer_p.start()
time.sleep(0.5)
subprocess.Popen('paplay ./tmp/tmp.wav', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for i in range(int(opt.frame_num*opt.fps/opt.ori_fps)-1):
timerQueue.get()
img = imgQueue.get()
string = transformer.convert(img,opt.gray)
t=threading.Thread(target=print,args=(string,))
t.start()
import os,json
import subprocess
# ffmpeg 3.4.6
def run(cmd_str):
#out_string = os.popen(cmd_str).read()
#For chinese path in Windows
#https://blog.csdn.net/weixin_43903378/article/details/91979025
stream = os.popen(cmd_str)._stream
out_string = stream.buffer.read().decode(encoding='utf-8')
return out_string
def video2voice(videopath,voicepath):
p = subprocess.Popen('ffmpeg -i "'+videopath+'" '+voicepath, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
sout = p.stdout.readlines()
# run(cmd_str)
def get_video_infos(videopath):
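    # Parse ffprobe's JSON output for fps, duration and frame size; if the
    # first stream lacks video metadata, fall back to the second stream.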
cmd_str = 'ffprobe -v quiet -print_format json -show_format -show_streams -i "' + videopath + '"'
out_string = run(cmd_str)
infos = json.loads(out_string)
try:
fps = eval(infos['streams'][0]['avg_frame_rate'])
endtime = float(infos['format']['duration'])
width = int(infos['streams'][0]['width'])
height = int(infos['streams'][0]['height'])
except Exception as e:
fps = eval(infos['streams'][1]['r_frame_rate'])
endtime = float(infos['format']['duration'])
width = int(infos['streams'][1]['width'])
height = int(infos['streams'][1]['height'])
return fps,endtime,height,width
\ No newline at end of file
import os
import shutil
def Traversal(filedir):
file_list=[]
for root,dirs,files in os.walk(filedir):
for file in files:
file_list.append(os.path.join(root,file))
return file_list
def is_img(path):
ext = os.path.splitext(path)[1]
ext = ext.lower()
if ext in ['.jpg','.png','.jpeg','.bmp']:
return True
else:
return False
def is_video(path):
ext = os.path.splitext(path)[1]
ext = ext.lower()
if ext in ['.mp4','.flv','.avi','.mov','.mkv','.wmv','.rmvb','.mts']:
return True
else:
return False
def is_imgs(paths):
tmp = []
for path in paths:
if is_img(path):
tmp.append(path)
return tmp
def is_videos(paths):
tmp = []
for path in paths:
if is_video(path):
tmp.append(path)
return tmp
def is_dirs(paths):
tmp = []
for path in paths:
if os.path.isdir(path):
tmp.append(path)
return tmp
def writelog(path,log,isprint=False):
f = open(path,'a+')
f.write(log+'\n')
f.close()
if isprint:
print(log)
def makedirs(path):
if os.path.isdir(path):
pass
# print(path,'existed')
else:
os.makedirs(path)
# print('makedir:',path)
def clean_tempfiles(tmp_init=True):
if os.path.isdir('./tmp'):
shutil.rmtree('./tmp')
if tmp_init:
os.makedirs('./tmp')
def second2stamp(s):
h = int(s/3600)
s = int(s%3600)
m = int(s/60)
s = int(s%60)
return "%02d:%02d:%02d" % (h, m, s)
def counttime(t1,t2,now_num,all_num):
'''
t1,t2: time.time()
'''
used_time = int(t2-t1)
all_time = int(used_time/now_num*all_num)
return second2stamp(used_time)+'/'+second2stamp(all_time)
def get_bar(percent,num = 25):
bar = '['
for i in range(num):
if i < round(percent/(100/num)):
bar += '#'
else:
bar += '-'
bar += ']'
return bar+' '+"%.2f"%percent+'%'
def copyfile(src,dst):
try:
shutil.copyfile(src, dst)
except Exception as e:
print(e)
def opt2str(opt):
message = ''
message += '---------------------- Options --------------------\n'
for k, v in sorted(vars(opt).items()):
message += '{:>25}: {:<35}\n'.format(str(k), str(v))
message += '----------------- End -------------------'
return message