diff --git a/README.md b/README.md
index 2ed73889bece66823a019459f2ab87c10d8fa647..cf73c9bde318898351f3aa709d77d39e80010058 100644
--- a/README.md
+++ b/README.md
@@ -16,6 +16,9 @@
 * Run read_datasests.py to parse and visualize the dataset's annotation information.
 ### 2. Static gesture recognition dataset
+
+ ![video_gesture](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/gesture.gif)
+
 * The dataset comes from three sources, with online data making up the vast majority. Specifically:
 * 1) Collected from online data and annotated by ourselves.
 * 2) Captured with our own camera and annotated by ourselves.
@@ -29,23 +32,23 @@
   The kinect_leap_dataset was contributed by the project team of the following paper.
 ```
 * Dataset download
-  The project's dataset is named handpose_x_gesture_v1: [dataset download link (Baidu Netdisk Password: )]()
+  The project's dataset is named handpose_x_gesture_v1: [dataset download link (Baidu Netdisk Password: psi1 )](https://pan.baidu.com/s/10jN4DOYcu9Pfv6gd5c3y4A)
   The dataset contains 2850 samples in 14 classes.
 * Gesture definitions
-  ![one](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/one.jpg) :000-one
-  ![five](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/five.jpg) :001-five
-  ![fist](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/fist.jpg) :002-fist
-  ![ok](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/ok.jpg) :003-ok
-  ![heartSingle](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/heartSingle.jpg) :004-heartSingle
-  ![yearh](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/yearh.jpg) :005-yearh
-  ![three](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/three.jpg) :006-three
+  ![one](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/one.jpg) :000-one |
+  ![five](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/five.jpg) :001-five |
+  ![fist](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/fist.jpg) :002-fist |
+  ![ok](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/ok.jpg) :003-ok
+  ![heartSingle](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/heartSingle.jpg) :004-heartSingle |
+  ![yearh](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/yearh.jpg) :005-yearh |
+  ![three](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/three.jpg) :006-three |
   ![four](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/four.jpg) :007-four
-  ![six](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/six.jpg) :008-six
-  ![Iloveyou](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/Iloveyou.jpg) :009-Iloveyou
-  ![gun](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/gun.jpg) :010-gun
+  ![six](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/six.jpg) :008-six |
+  ![Iloveyou](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/Iloveyou.jpg) :009-Iloveyou |
+  ![gun](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/gun.jpg) :010-gun |
   ![thumbUp](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/thumbUp.jpg) :011-thumbUp
-  ![nine](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/nine.jpg) :012-nine
+  ![nine](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/nine.jpg) :012-nine |
   ![pink](https://codechina.csdn.net/EricLee/classification/-/raw/master/samples/pink.jpg) :013-pink

 ## Pretrained models
diff --git a/inference.py b/inference.py
index 681c1bf20713de778e22277a5a61445ded6dcaff..531ef5f502ca0c5445c607899adeaabbe719c6eb 100644
--- a/inference.py
+++ b/inference.py
@@ -41,7 +41,7 @@ def get_xml_msg(path):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description=' Project Classification top1 Test')
-    parser.add_argument('--test_model', type=str, default = './model_exp/2021-03-08_01-16-46/resnet_34-192_epoch-176.pth',
+    parser.add_argument('--test_model', type=str, default = './model_exp/2021-03-08_02-38-39/resnet_34-192_epoch-10.pth',
         help = 'test_model') # model path
     parser.add_argument('--model', type=str, default = 'resnet_34',
         help = 'model : resnet_18,resnet_34,resnet_50,resnet_101,resnet_152') # model type
diff --git a/samples/gesture.gif b/samples/gesture.gif
new file mode 100644
index 0000000000000000000000000000000000000000..ed1f90874096a1d32df0a8224165da6966177c40
Binary files /dev/null and b/samples/gesture.gif differ
diff --git a/train.py b/train.py
index 4bb38748c1c5f0881ab6f5e243703095250e71b5..82998e1f9cc438dd7b0add3d4b3e8cd63c7c36ea 100644
--- a/train.py
+++ b/train.py
@@ -230,7 +230,7 @@ def trainer(ops,f_log):
         torch.save(model_.state_dict(), ops.model_exp + 'latest.pth')
         # save a checkpoint every 5 epochs
         if (epoch%5) == 0 and (epoch > 9):
-            torch.save(model_.state_dict(), ops.model_exp + '{}-{}_epoch-{}.pth'.format(ops.model,ops.img_size[0],epoch))
+            torch.save(model_.state_dict(), ops.model_exp + '{}-size-{}_epoch-{}.pth'.format(ops.model,ops.img_size[0],epoch))

         if len(val_split) > 0 and (epoch%ops.test_interval==0): # test
@@ -291,7 +291,7 @@ if __name__ == "__main__":
         help = 'init_learningRate') # initial learning rate
     parser.add_argument('--lr_decay', type=float, default = 0.96,
         help = 'learningRate_decay') # learning-rate decay factor
-    parser.add_argument('--weight_decay', type=float, default = 1e-8,
+    parser.add_argument('--weight_decay', type=float, default = 1e-6,
         help = 'weight_decay') # weight of the optimizer's regularization loss
     parser.add_argument('--batch_size', type=int, default = 32,
         help = 'batch_size') # number of images per training batch
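
The train.py hunks above rename the periodic checkpoints to a `{model}-size-{img_size}_epoch-{epoch}.pth` pattern and raise the default `--weight_decay` from 1e-8 to 1e-6. Below is a minimal, hypothetical sketch of how those two settings typically fit together in a PyTorch training loop; it is not the repository's trainer, and `model_`, `model_exp`, the stand-in classifier, and the learning rate are illustrative placeholders.

```python
# Hedged sketch of the checkpoint naming and weight_decay usage changed in the
# train.py hunks above; model_, model_exp and the optimizer are illustrative
# placeholders, not the project's actual training code.
import os

import torch
import torch.nn as nn
import torch.optim as optim

model_name = "resnet_34"          # matches the --model choices in inference.py
img_size = (192, 192)             # assumed input size (see the old checkpoint path)
model_exp = "./model_exp/demo/"   # hypothetical experiment directory
os.makedirs(model_exp, exist_ok=True)

model_ = nn.Linear(128, 14)       # stand-in for the real 14-class classifier
# weight_decay is the optimizer's L2 penalty; the diff raises the default to 1e-6
optimizer = optim.Adam(model_.parameters(), lr=1e-3, weight_decay=1e-6)

for epoch in range(11):
    # ... forward pass, loss.backward() and optimizer.step() would run here ...

    # always overwrite the most recent weights
    torch.save(model_.state_dict(), os.path.join(model_exp, "latest.pth"))
    # periodic snapshot every 5 epochs once epoch > 9, using the new name pattern
    if epoch % 5 == 0 and epoch > 9:
        ckpt = "{}-size-{}_epoch-{}.pth".format(model_name, img_size[0], epoch)
        torch.save(model_.state_dict(), os.path.join(model_exp, ckpt))
```

With these placeholder values the first periodic snapshot lands at epoch 10 as ./model_exp/demo/resnet_34-size-192_epoch-10.pth, which is the filename shape the renamed pattern in train.py produces.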