Pytorch_starter

From: https://www.kaggle.com/parmarsuraj99/pytorch-starter

Author: Suraj Parmar

Score: 0.037

In [1]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in 

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory

import os
print(os.listdir("../input"))

# Any results you write to the current directory are saved as output.
['vgg16-pytorch', 'imet-2019-fgvc6']
In [2]:
!ls ..
input  lib  src  working
In [3]:
import PIL
In [4]:
from skimage import io, transform
In [5]:
import torch
import torchvision
In [6]:
import cv2
from collections import OrderedDict
In [7]:
import matplotlib.pyplot as plt
from PIL import Image
from skimage import io
In [8]:
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils, models
from torch import nn
In [9]:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Assuming that we are on a CUDA machine, this should print a CUDA device:

print(device)
cuda:0
In [10]:
root_dir = '../input/imet-2019-fgvc6/'
In [11]:
labels = pd.read_csv(root_dir+'labels.csv')
df_train = pd.read_csv(root_dir+'train.csv')
df_sub = pd.read_csv(root_dir+'sample_submission.csv')
In [12]:
print(df_train.shape)
df_train.head()
(109237, 2)
Out[12]:
id attribute_ids
0 1000483014d91860 147 616 813
1 1000fe2e667721fe 51 616 734 813
2 1001614cb89646ee 776
3 10041eb49b297c08 51 671 698 813 1092
4 100501c227f8beea 13 404 492 903 1093
In [13]:
labels.head()
Out[13]:
attribute_id attribute_name
0 0 culture::abruzzi
1 1 culture::achaemenid
2 2 culture::aegean
3 3 culture::afghan
4 4 culture::after british
In [14]:
labels.shape
Out[14]:
(1103, 2)
In [15]:
n_categories = len(labels['attribute_name'].unique())
print(n_categories)
1103
In [16]:
img_name = df_train.iloc[65, 0]
plt.imshow(cv2.cvtColor(cv2.imread(root_dir+'train/'+img_name+'.png'), cv2.COLOR_BGR2RGB))
print(df_train.iloc[65, 1])
79 568
In [17]:
df_train.iloc[3, 1].split(' ')
Out[17]:
['51', '671', '698', '813', '1092']
In [18]:
arr = list(map(int, df_train.iloc[3, 1].split(' ')))
print(type(arr[0]))
<class 'int'>
In [19]:
arr =[]
for i in range(df_train.shape[0]):
    arr.append(list(map(int, df_train.iloc[i, 1].split(' '))))
df_train['attributes_int'] = arr
In [20]:
class IMetDataset(Dataset):
    def __init__(self, label_file, 
                 train_csv, 
                 train_dir,
                 transform=None):
        
        self.label_file = label_file
        self.train_df = train_csv
        self.train_dir = train_dir
        self.transform = transform
        
        
    def __len__(self):
        return self.train_df.shape[0]
        
    def __getitem__(self, idx):
        img_name = os.path.join(self.train_dir, self.train_df.iloc[idx, 0]+'.png')
        
        #img = io.imread(img_name)
         
        img = cv2.imread(img_name)
        img = cv2.cvtColor(cv2.imread(img_name), cv2.COLOR_BGR2RGB) 
        img=img.transpose((2, 0, 1)) 
        
        if self.transform:
            img = self.transform(img)
        
        labs = self.train_df.iloc[idx, 2]
      #  print("Labels: ", labs)
        ans = np.zeros((1103, 1))
        for label in labs:
            ans[label]=1
        #print(ans.shape)
     #   print("One hot indices: ", np.where(ans==1)[0])
        
        return [img, ans]
In [21]:
class IMetTestDataset(Dataset):
    def __init__(self, test_dir, transformations=None):
        self.test_dir =  test_dir
        self.img_list = os.listdir(root_dir+'test')
        self.transform = transformations
            
    def __len__(self):
        'Denotes the total number of samples'
        return len(self.img_list)

    def __getitem__(self, idx):
        'Generates one sample of data'
        # Select sample
        img_loc = os.path.join(self.test_dir, self.img_list[idx])
      #  print(img_loc)
      #  img = io.imread(img_loc)
        
        img = cv2.imread(img_loc)
        img = cv2.cvtColor(cv2.imread(img_loc), cv2.COLOR_BGR2RGB) 
        img=img.transpose((2, 0, 1)) 
        
        if self.transform:
            img = self.transform(img)
        
        img_name = self.img_list[idx].split('.')[0]
        
        return [img, img_name]
In [22]:
transformations = transforms.Compose([
                                transforms.ToPILImage(),
                                transforms.Resize((224, 224)),
                                transforms.RandomHorizontalFlip(),
                                transforms.ToTensor(),
                                transforms.Normalize(
                                    [0.485, 0.456, 0.406], 
                                    [0.229, 0.224, 0.225]
                                )
                            ])

train_dataset = IMetDataset(labels, df_train, root_dir+'train/', 
                            transformations)

test_dataset = IMetTestDataset(root_dir+'test/', transformations)
In [23]:
trainloader = DataLoader(train_dataset, batch_size=64,
                        shuffle=True, num_workers=0)
testloader = DataLoader(test_dataset, batch_size=1,
                        shuffle=False, num_workers=0)
In [24]:
plt.imshow(train_dataset[1001][0].numpy().transpose(1, 2, 0))
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Out[24]:
<matplotlib.image.AxesImage at 0x7fd3736ed470>
In [25]:
os.listdir("../input/vgg16-pytorch/")
Out[25]:
['vgg16-397923af.pth']
In [26]:
model = models.vgg16(pretrained=False)
model.load_state_dict(torch.load("../input/vgg16-pytorch/vgg16-397923af.pth"))
model
Out[26]:
VGG(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU(inplace)
    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU(inplace)
    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (6): ReLU(inplace)
    (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (8): ReLU(inplace)
    (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace)
    (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (13): ReLU(inplace)
    (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (15): ReLU(inplace)
    (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (18): ReLU(inplace)
    (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (20): ReLU(inplace)
    (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (22): ReLU(inplace)
    (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (25): ReLU(inplace)
    (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (27): ReLU(inplace)
    (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (29): ReLU(inplace)
    (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU(inplace)
    (2): Dropout(p=0.5)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU(inplace)
    (5): Dropout(p=0.5)
    (6): Linear(in_features=4096, out_features=1000, bias=True)
  )
)
In [27]:
model.classifier[-1] = nn.Linear(in_features=4096, out_features=n_categories)
model.classifier.add_module('sigmoid', nn.Sigmoid())
model.classifier.named_parameters
Out[27]:
<bound method Module.named_parameters of Sequential(
  (0): Linear(in_features=25088, out_features=4096, bias=True)
  (1): ReLU(inplace)
  (2): Dropout(p=0.5)
  (3): Linear(in_features=4096, out_features=4096, bias=True)
  (4): ReLU(inplace)
  (5): Dropout(p=0.5)
  (6): Linear(in_features=4096, out_features=1103, bias=True)
  (sigmoid): Sigmoid()
)>
In [28]:
for param in model.parameters():
    param.require_grad=False
for param in model.classifier.parameters():
    param.require_grad=True
for param in model.features[-1: -4]:
    param.require_grad=True
In [29]:
#model.load_state_dict(torch.load("../models/model1.pth"))
In [30]:
model.to(device)
Out[30]:
VGG(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU(inplace)
    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU(inplace)
    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (6): ReLU(inplace)
    (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (8): ReLU(inplace)
    (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace)
    (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (13): ReLU(inplace)
    (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (15): ReLU(inplace)
    (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (18): ReLU(inplace)
    (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (20): ReLU(inplace)
    (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (22): ReLU(inplace)
    (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (25): ReLU(inplace)
    (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (27): ReLU(inplace)
    (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (29): ReLU(inplace)
    (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU(inplace)
    (2): Dropout(p=0.5)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU(inplace)
    (5): Dropout(p=0.5)
    (6): Linear(in_features=4096, out_features=1103, bias=True)
    (sigmoid): Sigmoid()
  )
)
In [31]:
import torch.optim as optim
from torch.autograd import Variable
criterion = nn.BCELoss(reduction='mean').to(device)
optimizer = optim.SGD(model.parameters(), lr=0.001)
In [32]:
def show_cuda():
    if device.type == 'cuda':
        print(torch.cuda.get_device_name(0))
        print('Memory Usage:')
        print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')
        print('Cached:   ', round(torch.cuda.memory_cached(0)/1024**3,1), 'GB')
show_cuda()
Tesla P100-PCIE-16GB
Memory Usage:
Allocated: 0.5 GB
Cached:    0.5 GB
In [33]:
for epoch in range(15):
    print("Epoch", epoch, "Started...")
    running_loss=0
    for i, data in enumerate(trainloader, 0):
        optimizer.zero_grad()
        
        images, label = data
        
        inputs = images.type(torch.FloatTensor)
        '''
        inputs=Variable(inputs).cuda()
     #   labels = labels.view()
        label = Variable(label).cuda()
        '''
        inputs, label = Variable(inputs.to(device)), Variable(label.to(device))
        
        outputs = model(inputs)
        '''  
        print(outputs.shape)
        print(label.shape)
        
        print(type(outputs))
        print(type(label))
        
        '''
        loss=criterion(outputs.type(torch.FloatTensor), label.type(torch.FloatTensor))
        
        loss.backward()
        optimizer.step()
        loss=loss.item()
        running_loss+=float(loss)
    #    if running_loss<1.5:
    #        break
        if i%200==0:
            print("Epoch: ", epoch,"  Running Loss:",running_loss)
            
            running_loss=0
    print("Epoch", epoch, "Completed")   
    show_cuda()
Epoch 0 Started...
/opt/conda/lib/python3.6/site-packages/torch/nn/functional.py:2016: UserWarning: Using a target size (torch.Size([64, 1103, 1])) that is different to the input size (torch.Size([64, 1103])) is deprecated. Please ensure they have the same size.
  "Please ensure they have the same size.".format(target.size(), input.size()))
Epoch:  0   Running Loss: 0.7052268385887146
Epoch:  0   Running Loss: 139.08171886205673
Epoch:  0   Running Loss: 136.28155678510666
Epoch:  0   Running Loss: 132.1236664056778
Epoch:  0   Running Loss: 117.94519710540771
Epoch:  0   Running Loss: 73.93161261081696
Epoch:  0   Running Loss: 30.93942330777645
Epoch:  0   Running Loss: 16.830761641263962
Epoch:  0   Running Loss: 12.18664924800396
/opt/conda/lib/python3.6/site-packages/torch/nn/functional.py:2016: UserWarning: Using a target size (torch.Size([53, 1103, 1])) that is different to the input size (torch.Size([53, 1103])) is deprecated. Please ensure they have the same size.
  "Please ensure they have the same size.".format(target.size(), input.size()))
Epoch 0 Completed
Tesla P100-PCIE-16GB
Memory Usage:
Allocated: 1.1 GB
Cached:    11.9 GB
Epoch 1 Started...
Epoch:  1   Running Loss: 0.0487612821161747
Epoch:  1   Running Loss: 9.33392058685422
Epoch:  1   Running Loss: 8.377468310296535
Epoch:  1   Running Loss: 7.731079556047916
Epoch:  1   Running Loss: 7.280074805021286
Epoch:  1   Running Loss: 6.929635014384985
Epoch:  1   Running Loss: 6.6296462416648865
Epoch:  1   Running Loss: 6.395642884075642
Epoch:  1   Running Loss: 6.210741797462106
Epoch 1 Completed
Tesla P100-PCIE-16GB
Memory Usage:
Allocated: 1.1 GB
Cached:    11.9 GB
Epoch 2 Started...
Epoch:  2   Running Loss: 0.028866782784461975
Epoch:  2   Running Loss: 5.97527763620019
Epoch:  2   Running Loss: 5.857981149107218
Epoch:  2   Running Loss: 5.7340781688690186
Epoch:  2   Running Loss: 5.642286840826273
Epoch:  2   Running Loss: 5.530422903597355
Epoch:  2   Running Loss: 5.473425107076764
Epoch:  2   Running Loss: 5.384594114497304
Epoch:  2   Running Loss: 5.32579255849123
Epoch 2 Completed
Tesla P100-PCIE-16GB
Memory Usage:
Allocated: 1.1 GB
Cached:    11.9 GB
Epoch 3 Started...
Epoch:  3   Running Loss: 0.02676411345601082
Epoch:  3   Running Loss: 5.242587259039283
Epoch:  3   Running Loss: 5.168191304430366
Epoch:  3   Running Loss: 5.120507838204503
Epoch:  3   Running Loss: 5.029807569459081
Epoch:  3   Running Loss: 5.001701932400465
Epoch:  3   Running Loss: 4.992075677961111
Epoch:  3   Running Loss: 4.968971824273467
Epoch:  3   Running Loss: 4.902806401252747
Epoch 3 Completed
Tesla P100-PCIE-16GB
Memory Usage:
Allocated: 1.1 GB
Cached:    11.9 GB
Epoch 4 Started...
Epoch:  4   Running Loss: 0.02267553098499775
Epoch:  4   Running Loss: 4.850136712193489
Epoch:  4   Running Loss: 4.8263904713094234
Epoch:  4   Running Loss: 4.787792114540935
Epoch:  4   Running Loss: 4.7535990457981825
Epoch:  4   Running Loss: 4.739057091996074
Epoch:  4   Running Loss: 4.708709172904491
Epoch:  4   Running Loss: 4.695199279114604
Epoch:  4   Running Loss: 4.637678522616625
Epoch 4 Completed
Tesla P100-PCIE-16GB
Memory Usage:
Allocated: 1.1 GB
Cached:    11.9 GB
Epoch 5 Started...
Epoch:  5   Running Loss: 0.02299860492348671
Epoch:  5   Running Loss: 4.611716764047742
Epoch:  5   Running Loss: 4.614151243120432
Epoch:  5   Running Loss: 4.573998708277941
Epoch:  5   Running Loss: 4.556024162098765
Epoch:  5   Running Loss: 4.524611057713628
Epoch:  5   Running Loss: 4.5637379083782434
Epoch:  5   Running Loss: 4.512708563357592
Epoch:  5   Running Loss: 4.473118586465716
Epoch 5 Completed
Tesla P100-PCIE-16GB
Memory Usage:
Allocated: 1.1 GB
Cached:    11.9 GB
Epoch 6 Started...
Epoch:  6   Running Loss: 0.02212982438504696
Epoch:  6   Running Loss: 4.4593673553317785
Epoch:  6   Running Loss: 4.450991058722138
Epoch:  6   Running Loss: 4.444116907194257
Epoch:  6   Running Loss: 4.443137906491756
Epoch:  6   Running Loss: 4.410469437018037
Epoch:  6   Running Loss: 4.366828862577677
Epoch:  6   Running Loss: 4.366677483543754
Epoch:  6   Running Loss: 4.378308456391096
Epoch 6 Completed
Tesla P100-PCIE-16GB
Memory Usage:
Allocated: 1.1 GB
Cached:    11.9 GB
Epoch 7 Started...
Epoch:  7   Running Loss: 0.02184033952653408
Epoch:  7   Running Loss: 4.3458356484770775
Epoch:  7   Running Loss: 4.325332287698984
Epoch:  7   Running Loss: 4.319599540904164
Epoch:  7   Running Loss: 4.303710790351033
Epoch:  7   Running Loss: 4.302817305549979
Epoch:  7   Running Loss: 4.267500594258308
Epoch:  7   Running Loss: 4.253794405609369
Epoch:  7   Running Loss: 4.2650536466389894
Epoch 7 Completed
Tesla P100-PCIE-16GB
Memory Usage:
Allocated: 1.1 GB
Cached:    11.9 GB
Epoch 8 Started...
Epoch:  8   Running Loss: 0.021188681945204735
Epoch:  8   Running Loss: 4.241446858271956
Epoch:  8   Running Loss: 4.244071641936898
Epoch:  8   Running Loss: 4.21626603603363
Epoch:  8   Running Loss: 4.208911323919892
Epoch:  8   Running Loss: 4.198675464838743
Epoch:  8   Running Loss: 4.221047615632415
Epoch:  8   Running Loss: 4.18433210067451
Epoch:  8   Running Loss: 4.1602477096021175
Epoch 8 Completed
Tesla P100-PCIE-16GB
Memory Usage:
Allocated: 1.1 GB
Cached:    11.9 GB
Epoch 9 Started...
Epoch:  9   Running Loss: 0.022156769409775734
Epoch:  9   Running Loss: 4.135144930332899
Epoch:  9   Running Loss: 4.152865935117006
Epoch:  9   Running Loss: 4.14080442674458
Epoch:  9   Running Loss: 4.135097762569785
Epoch:  9   Running Loss: 4.1273831166327
Epoch:  9   Running Loss: 4.125909766182303
Epoch:  9   Running Loss: 4.114565700292587
Epoch:  9   Running Loss: 4.111151047050953
Epoch 9 Completed
Tesla P100-PCIE-16GB
Memory Usage:
Allocated: 1.1 GB
Cached:    11.9 GB
Epoch 10 Started...
Epoch:  10   Running Loss: 0.020824376493692398
Epoch:  10   Running Loss: 4.088263479992747
Epoch:  10   Running Loss: 4.0976390819996595
Epoch:  10   Running Loss: 4.089338671416044
Epoch:  10   Running Loss: 4.067875614389777
Epoch:  10   Running Loss: 4.076598474755883
Epoch:  10   Running Loss: 4.046134322881699
Epoch:  10   Running Loss: 4.057412041351199
Epoch:  10   Running Loss: 4.028603808954358
Epoch 10 Completed
Tesla P100-PCIE-16GB
Memory Usage:
Allocated: 1.1 GB
Cached:    11.9 GB
Epoch 11 Started...
Epoch:  11   Running Loss: 0.02078869193792343
Epoch:  11   Running Loss: 4.0572508331388235
Epoch:  11   Running Loss: 4.031976947560906
Epoch:  11   Running Loss: 4.016277015209198
Epoch:  11   Running Loss: 4.015590766444802
Epoch:  11   Running Loss: 4.030005054548383
Epoch:  11   Running Loss: 3.9975266214460135
Epoch:  11   Running Loss: 3.9767398796975613
Epoch:  11   Running Loss: 4.005596868693829
Epoch 11 Completed
Tesla P100-PCIE-16GB
Memory Usage:
Allocated: 1.1 GB
Cached:    11.9 GB
Epoch 12 Started...
Epoch:  12   Running Loss: 0.017951499670743942
Epoch:  12   Running Loss: 4.011764807626605
Epoch:  12   Running Loss: 3.9812746308743954
Epoch:  12   Running Loss: 3.9806036595255136
Epoch:  12   Running Loss: 3.9632973428815603
Epoch:  12   Running Loss: 3.967312891036272
Epoch:  12   Running Loss: 3.987637812271714
Epoch:  12   Running Loss: 3.9626688957214355
Epoch:  12   Running Loss: 3.925647161900997
Epoch 12 Completed
Tesla P100-PCIE-16GB
Memory Usage:
Allocated: 1.1 GB
Cached:    11.9 GB
Epoch 13 Started...
Epoch:  13   Running Loss: 0.019984664395451546
Epoch:  13   Running Loss: 3.9401056468486786
Epoch:  13   Running Loss: 3.9596101194620132
Epoch:  13   Running Loss: 3.929850732907653
Epoch:  13   Running Loss: 3.9275804813951254
Epoch:  13   Running Loss: 3.9197246935218573
Epoch:  13   Running Loss: 3.931005021557212
Epoch:  13   Running Loss: 3.8984250258654356
Epoch:  13   Running Loss: 3.9170430060476065
Epoch 13 Completed
Tesla P100-PCIE-16GB
Memory Usage:
Allocated: 1.1 GB
Cached:    11.9 GB
Epoch 14 Started...
Epoch:  14   Running Loss: 0.01915057934820652
Epoch:  14   Running Loss: 3.895632093772292
Epoch:  14   Running Loss: 3.907653832808137
Epoch:  14   Running Loss: 3.8724518325179815
Epoch:  14   Running Loss: 3.9154386576265097
Epoch:  14   Running Loss: 3.8918325901031494
Epoch:  14   Running Loss: 3.8939753528684378
Epoch:  14   Running Loss: 3.884835669770837
Epoch:  14   Running Loss: 3.8673188611865044
Epoch 14 Completed
Tesla P100-PCIE-16GB
Memory Usage:
Allocated: 1.1 GB
Cached:    11.9 GB
In [34]:
torch.cuda.empty_cache()
In [35]:
import gc
gc.collect()
Out[35]:
15
In [36]:
output=[]
names = []
with torch.no_grad():
    for i, data in enumerate(testloader, 0):
        images, name = data    
        inputs = images.type(torch.FloatTensor)
        inputs, label = Variable(inputs.to(device)), Variable(label.to(device))
        output.append(model(inputs).cpu().numpy())
        names.append(name[0])
In [37]:
ops = output
In [38]:
indices_op=[]
for i in range(len(ops)):
    indices_op.append(np.where(ops[i]>=0.383)[1])
indices_op_str=[]
for i in range(len(indices_op)):
    indices_op_str.append(' '.join(map(str, indices_op[i])))
In [39]:
!mkdir ../models
torch.save(model.state_dict(), '../models/model1.pth')
In [40]:
d = {'id':names, 'attribute_ids':indices_op_str}
df = pd.DataFrame(d)
In [41]:
df
Out[41]:
id attribute_ids
0 a2a95283eb58b94c
1 e4e955b48481a7d6 409
2 d647fca60389d8a2 1059
3 1f01df9cbc938b76 13 671
4 54b586c17a235b73 1092
5 c8478ea019a1ca59
6 291edd354e9b6673
7 cc4d7e36b987f9de
8 cdf83ea3cb44c7ec 79 147
9 ad44f27a088a6d73 1092
10 e1d3fb43ce743d71
11 a7db7ed18cf49804 189
12 f43d6b493f0c1c79
13 e408becf9e950af4 813
14 13ecc4305970b120
15 9f731feb8f18b558
16 43d55f04571b6312 950
17 f171db4af61b5906
18 af718dcf75451928 147
19 21ad17656f29b3c5
20 20d2e9d66b1142cf 189
21 266b6f0b61f0853f
22 539a1694b77de7f8
23 f5ea594f34971f2e 304
24 9f937e231be7a57c
25 1ce990f6a4ac7eb2 13
26 199e955bc361cade
27 fdbdbcaac91ac764 189
28 bdb6c5e559b209d7 663
29 39fb0b18807af500 698
... ... ...
7413 c456c0eb6b58a770 79 188 1062
7414 46f494d49a68f310
7415 d816683360f65530
7416 b52cd8628d7ac210 477 671 1092
7417 240ccb069c92dc7d 489
7418 3f4ba23154c40e65
7419 3603c62cce01020a
7420 e86ef8a55ca90556
7421 1725073e66a23994 813
7422 965e5f07905b543f
7423 e24cc486590c33bc
7424 4bd5d40a5a7201f9
7425 83a9aeca41c5716c 147 553 796
7426 4749573b0311616e
7427 303243516bbd7d07 189 671 813
7428 c9750852ce9a844c 554
7429 3d21e5ad7fdbb71 813
7430 30509a0f97bc3b1b
7431 b04bdcde13bce3c6 744 1092
7432 6947b32179ed586d
7433 75a9533ee12a482d
7434 c1ce8878b1387201 718
7435 b72552d8cba9ce1c 79
7436 a49734bc93c5212e 1059
7437 b464440543b931d0 813
7438 3c3ed1554556a536
7439 2d608758319f2832
7440 a3c22537defb14eb
7441 7c2ba183bee9bfc
7442 fa8f5298d174e3f9 188

7443 rows × 2 columns

In [42]:
df.to_csv('submission.csv', index=False)