Commit 227fd4e1 authored by Eric.Lee2021

add elif ops.model == "squeezenet1_0":

Parent 66204355
......@@ -18,6 +18,7 @@ import cv2
import torch.nn.functional as F
from models.resnet import resnet50, resnet34
from models.squeezenet import squeezenet1_1,squeezenet1_0
from utils.common_utils import *
import copy
from hand_data_iter.datasets import draw_bd_handpose
......@@ -27,10 +28,10 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser(description=' Project Hand Pose Inference')
parser.add_argument('--model_path', type=str, default = './weights/resnet50_2021-478.pth',
parser.add_argument('--model_path', type=str, default = './model_exp/2021-02-21_23-25-14/model_epoch-2.pth',
help = 'model_path') # model path
parser.add_argument('--model', type=str, default = 'resnet_50',
help = 'model : resnet_x,') # model type
help = 'model : resnet_x,squeezenet_x') # model type
parser.add_argument('--num_classes', type=int , default = 42,
help = 'num_classes') # 21 hand keypoints, (x,y)*2 = 42
parser.add_argument('--GPUS', type=str, default = '0',
......@@ -64,6 +65,10 @@ if __name__ == "__main__":
model_ = resnet50(num_classes = ops.num_classes,img_size=ops.img_size[0])
elif ops.model == 'resnet_34':
model_ = resnet34(num_classes = ops.num_classes,img_size=ops.img_size[0])
elif ops.model == "squeezenet1_0":
model_ = squeezenet1_0(num_classes=ops.num_classes)
elif ops.model == "squeezenet1_1":
model_ = squeezenet1_1(num_classes=ops.num_classes)
use_cuda = torch.cuda.is_available()
......
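For reference, a minimal sketch of how the new squeezenet branch could be exercised at inference time. The checkpoint path, input image, and preprocessing below are placeholders (use the same preprocessing as the inference script itself); squeezenet1_1 and its num_classes argument come from models/squeezenet.py, and the reshape to 21 (x, y) pairs follows from num_classes = 42.

import cv2
import numpy as np
import torch
from models.squeezenet import squeezenet1_1

# build the model the same way the new elif branch does (42 = 21 keypoints * (x, y))
model_ = squeezenet1_1(num_classes=42)
# hypothetical checkpoint path - replace with a real squeezenet weight file
chkpt = torch.load("./model_exp/squeezenet1_1-model_epoch-0.pth", map_location="cpu")
model_.load_state_dict(chkpt)
model_.eval()

img = cv2.imread("./image/hand.jpg")                              # placeholder input image
img = cv2.resize(img, (256, 256)).astype(np.float32) / 255.0      # illustrative preprocessing only
tensor = torch.from_numpy(img.transpose(2, 0, 1)).unsqueeze(0)    # NCHW, batch of 1

with torch.no_grad():
    output = model_(tensor)            # shape: (1, 42)
keypoints = output.view(21, 2)         # 21 (x, y) keypoint predictions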
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.utils.model_zoo as model_zoo
__all__ = ['SqueezeNet', 'squeezenet1_0', 'squeezenet1_1']
model_urls = {
'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
}
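# Fire module: a 1x1 "squeeze" conv reduces the channel count, then parallel 1x1 and 3x3
# "expand" convs are applied and concatenated along the channel dimension, so the output
# has expand1x1_planes + expand3x3_planes channels.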
class Fire(nn.Module):
def __init__(self, inplanes, squeeze_planes,
expand1x1_planes, expand3x3_planes):
super(Fire, self).__init__()
self.inplanes = inplanes
self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
self.squeeze_activation = nn.ReLU(inplace=True)
self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,
kernel_size=1)
self.expand1x1_activation = nn.ReLU(inplace=True)
self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,
kernel_size=3, padding=1)
self.expand3x3_activation = nn.ReLU(inplace=True)
def forward(self, x):
x = self.squeeze_activation(self.squeeze(x))
return torch.cat([
self.expand1x1_activation(self.expand1x1(x)),
self.expand3x3_activation(self.expand3x3(x))
], 1)
class SqueezeNet(nn.Module):
def __init__(self, version=1.0, num_classes=1000,dropout_factor = 1.):
super(SqueezeNet, self).__init__()
if version not in [1.0, 1.1]:
raise ValueError("Unsupported SqueezeNet version {version}:"
"1.0 or 1.1 expected".format(version=version))
self.num_classes = num_classes
if version == 1.0:
self.features = nn.Sequential(
nn.Conv2d(3, 96, kernel_size=7, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(96, 16, 64, 64),
Fire(128, 16, 64, 64),
Fire(128, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 32, 128, 128),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(512, 64, 256, 256),
)
else:
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(64, 16, 64, 64),
Fire(128, 16, 64, 64),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(128, 32, 128, 128),
Fire(256, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
Fire(512, 64, 256, 256),
)
# Final convolution is initialized differently from the rest
final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)
self.classifier = nn.Sequential(
nn.Dropout(p=dropout_factor),
final_conv,
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool2d(1)
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m is final_conv:
init.normal_(m.weight.data, mean=0.0, std=0.01)
else:
init.kaiming_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.features(x)
# print("features(x):", x.size())
x = self.classifier(x)
# print("features(x):", x.size())
return x.view(x.size(0), self.num_classes)
def squeezenet1_0(pretrained=False, **kwargs):
r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
accuracy with 50x fewer parameters and <0.5MB model size"
<https://arxiv.org/abs/1602.07360>`_ paper.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = SqueezeNet(version=1.0, **kwargs)
model_dict = model.state_dict()
if pretrained:
pretrained_state_dict = model_zoo.load_url(model_urls['squeezenet1_0'])
pretrained_dict = {k: v for k, v in pretrained_state_dict.items() if
k in model_dict and model_dict[k].size() == pretrained_state_dict[k].size()}
model.load_state_dict(pretrained_dict,strict=False)
return model
def squeezenet1_1(pretrained=False, **kwargs):
r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = SqueezeNet(version=1.1, **kwargs)
model_dict = model.state_dict()
if pretrained:
pretrained_state_dict = model_zoo.load_url(model_urls['squeezenet1_1'])
pretrained_dict = {k: v for k, v in pretrained_state_dict.items() if
k in model_dict and model_dict[k].size() == pretrained_state_dict[k].size()}
model.load_state_dict(pretrained_dict,strict=False)
return model
if __name__ == "__main__":
from thop import profile
dummy = torch.from_numpy(np.random.random([16, 3, 256, 256]).astype(np.float32))
model = squeezenet1_0(pretrained=True, num_classes=42,dropout_factor = 0.5)
print(model)
flops, params = profile(model, inputs=(dummy, ))
model.eval()
output = model(dummy)
print(output.size())
print("flops: {}, params: {}".format(flops, params))
......@@ -15,6 +15,7 @@ from utils.common_utils import *
from hand_data_iter.datasets import *
from models.resnet import resnet50,resnet101
from models.squeezenet import squeezenet1_1,squeezenet1_0
from loss.loss import *
import cv2
import time
......@@ -33,8 +34,16 @@ def trainer(ops,f_log):
if ops.model == 'resnet_50':
model_ = resnet50(pretrained = True,num_classes = ops.num_classes,img_size = ops.img_size[0],dropout_factor=ops.dropout)
else:
elif ops.model == 'resnet_34':
model_ = resnet34(pretrained = True,num_classes = ops.num_classes,img_size = ops.img_size[0],dropout_factor=ops.dropout)
elif ops.model == 'resnet_101':
model_ = resnet101(pretrained = True,num_classes = ops.num_classes,img_size = ops.img_size[0],dropout_factor=ops.dropout)
elif ops.model == "squeezenet1_0":
model_ = squeezenet1_0(pretrained=True, num_classes=ops.num_classes,dropout_factor=ops.dropout)
elif ops.model == "squeezenet1_1":
model_ = squeezenet1_1(pretrained=True, num_classes=ops.num_classes,dropout_factor=ops.dropout)
else:
print(" no support the model")
use_cuda = torch.cuda.is_available()
......@@ -94,7 +103,7 @@ def trainer(ops,f_log):
else:
flag_change_lr_cnt += 1
if flag_change_lr_cnt > 20:
if flag_change_lr_cnt > 50:
init_lr = init_lr*ops.lr_decay
set_learning_rate(optimizer, init_lr)
flag_change_lr_cnt = 0
......@@ -119,7 +128,7 @@ def trainer(ops,f_log):
loc_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print(' %s - %s - epoch [%s/%s] (%s/%s):'%(loc_time,ops.model,epoch,ops.epochs,i,int(dataset.__len__()/ops.batch_size)),\
'Mean Loss : %.6f - Loss: %.6f'%(loss_mean/loss_idx,loss.item()),\
' lr : %.5f'%init_lr,' bs :',ops.batch_size,\
' lr : %.8f'%init_lr,' bs :',ops.batch_size,\
' img_size: %s x %s'%(ops.img_size[0],ops.img_size[1]),' best_loss: %.6f'%best_loss)
# compute gradients
loss.backward()
......@@ -130,7 +139,7 @@ def trainer(ops,f_log):
step += 1
torch.save(model_.state_dict(), ops.model_exp + 'model_epoch-{}.pth'.format(epoch))
torch.save(model_.state_dict(), ops.model_exp + '{}-model_epoch-{}.pth'.format(ops.model,epoch))
except Exception as e:
print('Exception : ',e) # print the exception
......@@ -144,15 +153,15 @@ if __name__ == "__main__":
help = 'seed') # random seed
parser.add_argument('--model_exp', type=str, default = './model_exp',
help = 'model_exp') # model output directory
parser.add_argument('--model', type=str, default = 'resnet_50',
help = 'model : resnet_50,resnet_101') # model type
parser.add_argument('--model', type=str, default = 'squeezenet1_1',
help = 'model : resnet_34,resnet_50,resnet_101,squeezenet1_0,squeezenet1_1') # model type
parser.add_argument('--num_classes', type=int , default = 42,
help = 'num_classes') # number of landmarks * 2
parser.add_argument('--GPUS', type=str, default = '1',
parser.add_argument('--GPUS', type=str, default = '0',
help = 'GPUS') # GPU selection
parser.add_argument('--train_path', type=str,
default = "./handpose_datasets/",
default = "./handpose_datasets_v1/",
help = 'datasets')# training set annotations
parser.add_argument('--pretrained', type=bool, default = True,
......@@ -173,7 +182,7 @@ if __name__ == "__main__":
help = 'batch_size') # images per training batch
parser.add_argument('--dropout', type=float, default = 0.5,
help = 'dropout') # dropout
parser.add_argument('--epochs', type=int, default = 2000,
parser.add_argument('--epochs', type=int, default = 3000,
help = 'epochs') # training epochs
parser.add_argument('--num_workers', type=int, default = 10,
help = 'num_workers') # number of dataloader worker threads
......
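The learning-rate hunk above raises the patience of the plateau-style decay from 20 to 50 non-improving steps. A minimal sketch of that scheme follows; the improvement test itself is outside the visible diff, so it is assumed here to reset the counter, and set_learning_rate stands in for the helper imported from utils.common_utils.

def set_learning_rate(optimizer, lr):
    # assumed equivalent of the utils.common_utils helper
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr

def step_lr_on_plateau(loss_mean, best_loss, init_lr, lr_decay,
                       optimizer, flag_change_lr_cnt, patience=50):
    if loss_mean < best_loss:            # improvement: record it and reset the counter (assumed)
        best_loss = loss_mean
        flag_change_lr_cnt = 0
    else:                                # no improvement: count towards the patience limit
        flag_change_lr_cnt += 1
        if flag_change_lr_cnt > patience:
            init_lr = init_lr * lr_decay             # decay the learning rate
            set_learning_rate(optimizer, init_lr)
            flag_change_lr_cnt = 0
    return best_loss, init_lr, flag_change_lr_cnt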