iMetv3

From: https://www.kaggle.com/bigyellower/imetv3

Author: BigYellower

Score: 0.503

In [1]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in 

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory

import os
print(os.listdir("../input"))

# Any results you write to the current directory are saved as output.
['pytorch-pretrained-image-models', 'imet-2019-fgvc6']
In [2]:
import gc
import os
import sys
import time
import random
import logging
import datetime as dt

import numpy as np
import pandas as pd

import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import torch.nn.functional as F
import torchvision as vision

from torch.optim.lr_scheduler import CosineAnnealingLR

from pathlib import Path
from PIL import Image
from contextlib import contextmanager

from joblib import Parallel, delayed
from tqdm import tqdm
from fastprogress import master_bar, progress_bar

from sklearn.model_selection import KFold
from sklearn.metrics import fbeta_score

torch.multiprocessing.set_start_method("spawn")
In [3]:
@contextmanager
def timer(name="Main", logger=None):
    t0 = time.time()
    yield
    msg = f"[{name}] done in {time.time() - t0} s"
    if logger is not None:
        logger.info(msg)
    else:
        print(msg)
        

def get_logger(name="Main", tag="exp", log_dir="log/"):
    log_path = Path(log_dir)
    path = log_path / tag
    path.mkdir(exist_ok=True, parents=True)

    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)

    fh = logging.FileHandler(
        path / (dt.datetime.now().strftime("%Y-%m-%d-%H-%M-%S") + ".log"))
    sh = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter(
        "%(asctime)s %(name)s %(levelname)s %(message)s")

    fh.setFormatter(formatter)
    sh.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(sh)
    return logger


def seed_torch(seed=1029):
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
In [4]:
logger = get_logger(name="Main", tag="Pytorch-VGG16")

labels = pd.read_csv("../input/imet-2019-fgvc6/labels.csv")
train = pd.read_csv("../input/imet-2019-fgvc6/train.csv")
sample = pd.read_csv("../input/imet-2019-fgvc6/sample_submission.csv")
In [5]:
# This loader is to extract 1024d features from the images.
class ImageDataLoader(data.DataLoader):
    def __init__(self, root_dir: Path, 
                 df: pd.DataFrame, 
                 mode="train", 
                 transforms=None):
        self._root = root_dir
        self.transform = transforms[mode]
        self._img_id = (df["id"] + ".png").values
        
    def __len__(self):
        return len(self._img_id)
    
    def __getitem__(self, idx):
        img_id = self._img_id[idx]
        file_name = self._root / img_id
        img = Image.open(file_name)
        
        if self.transform:
            img = self.transform(img)
            
        return [img]
    
    
data_transforms = {
    'train': vision.transforms.Compose([
        vision.transforms.RandomResizedCrop(224),
        vision.transforms.RandomHorizontalFlip(),
        vision.transforms.ToTensor(),
        vision.transforms.Normalize(
            [0.485, 0.456, 0.406], 
            [0.229, 0.224, 0.225])
    ]),
    'val': vision.transforms.Compose([
        vision.transforms.Resize(256),
        vision.transforms.CenterCrop(224),
        vision.transforms.ToTensor(),
        vision.transforms.Normalize(
            [0.485, 0.456, 0.406], 
            [0.229, 0.224, 0.225])
    ]),
}

data_transforms["test"] = data_transforms["val"]

# This loader is to be used for serving image tensors for the MLP.
class IMetDataset(data.Dataset):
    def __init__(self, tensor, device="cuda:0", labels=None):
        self.tensor = tensor
        self.labels = labels
        self.device= device
        
    def __len__(self):
        return self.tensor.size(0)
    
    def __getitem__(self, idx):
        tensor = self.tensor[idx, :]
        if self.labels is not None:
            label = self.labels[idx]
            label_tensor = torch.zeros((1, 1103))
            for i in label:
                label_tensor[0, int(i)] = 1
            label_tensor = label_tensor.to(self.device)
            return [tensor, label_tensor]
        else:
            return [tensor]
In [6]:
class Classifier(nn.Module):
    def __init__(self):
        super(Classifier, self).__init__()
        
    def forward(self, x):
        return x


class Densenet201(nn.Module):
    def __init__(self, pretrained: Path):
        super(Densenet201, self).__init__()
        self.densenet201 = vision.models.densenet201()
        self.densenet201.load_state_dict(torch.load(pretrained))
        self.densenet201.classifier = Classifier()
        
        dense = nn.Sequential(*list(self.densenet201.children())[:-1])
        for param in dense.parameters():
            param.requires_grad = False
        
    def forward(self, x):
        return self.densenet201(x)

class MultiLayerPerceptron(nn.Module):
    def __init__(self):
        super(MultiLayerPerceptron, self).__init__()
        self.linear1 = nn.Linear(1920, 1024)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(1024, 1024)
        self.linear3 = nn.Linear(1024, 1103)
        self.dropout = nn.Dropout(0.5)
        self.sigmoid = nn.Sigmoid()
        
    def forward(self, x):
        x = self.relu(self.linear1(x))
        x = self.relu(self.linear2(x))
        x = self.dropout(x)
        return self.sigmoid(self.linear3(x))
In [7]:
train_dataset = ImageDataLoader(
    root_dir=Path("../input/imet-2019-fgvc6/train/"),
    df=train,
    mode="train",
    transforms=data_transforms)
train_loader = data.DataLoader(dataset=train_dataset,
                               shuffle=False,
                               batch_size=128)
test_dataset = ImageDataLoader(
    root_dir=Path("../input/imet-2019-fgvc6/test/"),
    df=sample,
    mode="test",
    transforms=data_transforms)
test_loader = data.DataLoader(dataset=test_dataset,
                              shuffle=False,
                              batch_size=128)

def get_feature_vector(df, loader, device):
    matrix = torch.zeros((df.shape[0], 1920)).to(device)
    model = Densenet201("../input/pytorch-pretrained-image-models/densenet201.pth")
    model.to(device)
    batch = loader.batch_size
    for i, (i_batch,) in tqdm(enumerate(loader)):
        i_batch = i_batch.to(device)
        pred = model(i_batch).detach()
        matrix[i * batch:(i + 1) * batch] = pred
    return matrix

train_tensor = get_feature_vector(train, train_loader, "cuda:0")
test_tensor = get_feature_vector(sample, test_loader, "cuda:0")

del train_dataset, train_loader
del test_dataset, test_loader
gc.collect()
854it [19:31,  1.11s/it]
59it [01:26,  1.12s/it]
Out[7]:
623
In [8]:
class Trainer:
    def __init__(self, 
                 model, 
                 logger,
                 n_splits=5,
                 seed=42,
                 device="cuda:0",
                 train_batch=32,
                 valid_batch=128,
                 kwargs={}):
        self.model = model
        self.logger = logger
        self.device = device
        self.n_splits = n_splits
        self.seed = seed
        self.train_batch = train_batch
        self.valid_batch = valid_batch
        self.kwargs = kwargs
        
        self.best_score = None
        self.tag = dt.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
        self.loss_fn = nn.BCELoss(reduction="mean").to(self.device)
        
        path = Path(f"bin/{self.tag}")
        path.mkdir(exist_ok=True, parents=True)
        self.path = path
        
    def fit(self, X, y, n_epochs=10):
        train_preds = np.zeros((len(X), 1103))
        fold = KFold(n_splits=self.n_splits, random_state=self.seed)
        for i, (trn_idx, val_idx) in enumerate(fold.split(X)):
            self.fold_num = i
            self.logger.info(f"Fold {i + 1}")
            X_train, X_val = X[trn_idx, :], X[val_idx, :]
            y_train, y_val = y[trn_idx], y[val_idx]
            
            valid_preds = self._fit(X_train, y_train, X_val, y_val, n_epochs)
            train_preds[val_idx] = valid_preds
        return train_preds
    
    def _fit(self, X_train, y_train, X_val, y_val, n_epochs):
        seed_torch(self.seed)
        train_dataset = IMetDataset(X_train, labels=y_train, device=self.device)
        train_loader = data.DataLoader(train_dataset, 
                                       batch_size=self.train_batch,
                                       shuffle=True)

        valid_dataset = IMetDataset(X_val, labels=y_val, device=self.device)
        valid_loader = data.DataLoader(valid_dataset,
                                       batch_size=self.valid_batch,
                                       shuffle=False)
        
        model = self.model(**self.kwargs)
        model.to(self.device)
        
        optimizer = optim.Adam(params=model.parameters(), 
                                lr=0.0001)
        scheduler = CosineAnnealingLR(optimizer, T_max=n_epochs)
        best_score = np.inf
        mb = master_bar(range(n_epochs))
        for epoch in mb:
            model.train()
            avg_loss = 0.0
            for i_batch, y_batch in progress_bar(train_loader, parent=mb):
                y_pred = model(i_batch)
                loss = self.loss_fn(y_pred, y_batch)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                avg_loss += loss.item() / len(train_loader)
            valid_preds, avg_val_loss = self._val(valid_loader, model)
            scheduler.step()

            self.logger.info("=========================================")
            self.logger.info(f"Epoch {epoch + 1} / {n_epochs}")
            self.logger.info("=========================================")
            self.logger.info(f"avg_loss: {avg_loss:.8f}")
            self.logger.info(f"avg_val_loss: {avg_val_loss:.8f}")
            
            if best_score > avg_val_loss:
                torch.save(model.state_dict(),
                           self.path / f"best{self.fold_num}.pth")
                self.logger.info(f"Save model at Epoch {epoch + 1}")
                best_score = avg_val_loss
        model.load_state_dict(torch.load(self.path / f"best{self.fold_num}.pth"))
        valid_preds, avg_val_loss = self._val(valid_loader, model)
        self.logger.info(f"Best Validation Loss: {avg_val_loss:.8f}")
        return valid_preds
    
    def _val(self, loader, model):
        model.eval()
        valid_preds = np.zeros((len(loader.dataset), 1103))
        avg_val_loss = 0.0
        for i, (i_batch, y_batch) in enumerate(loader):
            with torch.no_grad():
                y_pred = model(i_batch).detach()
                avg_val_loss += self.loss_fn(y_pred, y_batch).item() / len(loader)
                valid_preds[i * self.valid_batch:(i + 1) * self.valid_batch] = \
                    y_pred.cpu().numpy()
        return valid_preds, avg_val_loss
    
    def predict(self, X):
        dataset = IMetDataset(X, labels=None)
        loader = data.DataLoader(dataset, 
                                 batch_size=self.valid_batch, 
                                 shuffle=False)
        model = self.model(**self.kwargs)
        preds = np.zeros((X.size(0), 1103))
        for path in self.path.iterdir():
            with timer(f"Using {str(path)}", self.logger):
                model.load_state_dict(torch.load(path))
                model.to(self.device)
                model.eval()
                temp = np.zeros_like(preds)
                for i, (i_batch, ) in enumerate(loader):
                    with torch.no_grad():
                        y_pred = model(i_batch).detach()
                        temp[i * self.valid_batch:(i + 1) * self.valid_batch] = \
                            y_pred.cpu().numpy()
                preds += temp / self.n_splits
        return preds
In [9]:
trainer = Trainer(MultiLayerPerceptron, logger, train_batch=64, kwargs={})
y = train.attribute_ids.map(lambda x: x.split()).values
valid_preds = trainer.fit(train_tensor, y, n_epochs=40)

def threshold_search(y_pred, y_true):
    score = []
    candidates = np.arange(0, 1.0, 0.01)
    for th in progress_bar(candidates):
        yp = (y_pred > th).astype(int)
        score.append(fbeta_score(y_pred=yp, y_true=y_true, beta=2, average="samples"))
    score = np.array(score)
    pm = score.argmax()
    best_th, best_score = candidates[pm], score[pm]
    return best_th, best_score

y_true = np.zeros((train.shape[0], 1103)).astype(int)
for i, row in enumerate(y):
    for idx in row:
        y_true[i, int(idx)] = 1

best_threshold, best_score = threshold_search(valid_preds, y_true)
test_preds = trainer.predict(test_tensor)
preds = (test_preds > best_threshold).astype(int)

prediction = []
for i in range(preds.shape[0]):
    pred1 = np.argwhere(preds[i] == 1.0).reshape(-1).tolist()
    pred_str = " ".join(list(map(str, pred1)))
    prediction.append(pred_str)
    
sample.attribute_ids = prediction
sample.to_csv("submission.csv", index=False)
sample.head()
2019-04-30 09:43:20,907 Main INFO Fold 1
/opt/conda/lib/python3.6/site-packages/torch/nn/functional.py:2016: UserWarning: Using a target size (torch.Size([64, 1, 1103])) that is different to the input size (torch.Size([64, 1103])) is deprecated. Please ensure they have the same size.
  "Please ensure they have the same size.".format(target.size(), input.size()))
/opt/conda/lib/python3.6/site-packages/torch/nn/functional.py:2016: UserWarning: Using a target size (torch.Size([29, 1, 1103])) that is different to the input size (torch.Size([29, 1103])) is deprecated. Please ensure they have the same size.
  "Please ensure they have the same size.".format(target.size(), input.size()))
/opt/conda/lib/python3.6/site-packages/torch/nn/functional.py:2016: UserWarning: Using a target size (torch.Size([128, 1, 1103])) that is different to the input size (torch.Size([128, 1103])) is deprecated. Please ensure they have the same size.
  "Please ensure they have the same size.".format(target.size(), input.size()))
2019-04-30 09:43:31,697 Main INFO =========================================
2019-04-30 09:43:31,698 Main INFO Epoch 1 / 40
2019-04-30 09:43:31,699 Main INFO =========================================
2019-04-30 09:43:31,699 Main INFO avg_loss: 0.02363032
2019-04-30 09:43:31,700 Main INFO avg_val_loss: 0.01269507
2019-04-30 09:43:31,721 Main INFO Save model at Epoch 1
/opt/conda/lib/python3.6/site-packages/torch/nn/functional.py:2016: UserWarning: Using a target size (torch.Size([88, 1, 1103])) that is different to the input size (torch.Size([88, 1103])) is deprecated. Please ensure they have the same size.
  "Please ensure they have the same size.".format(target.size(), input.size()))
2019-04-30 09:43:42,488 Main INFO =========================================
2019-04-30 09:43:42,489 Main INFO Epoch 2 / 40
2019-04-30 09:43:42,490 Main INFO =========================================
2019-04-30 09:43:42,491 Main INFO avg_loss: 0.01265483
2019-04-30 09:43:42,492 Main INFO avg_val_loss: 0.01164715
2019-04-30 09:43:42,515 Main INFO Save model at Epoch 2
2019-04-30 09:43:53,275 Main INFO =========================================
2019-04-30 09:43:53,276 Main INFO Epoch 3 / 40
2019-04-30 09:43:53,277 Main INFO =========================================
2019-04-30 09:43:53,277 Main INFO avg_loss: 0.01175407
2019-04-30 09:43:53,278 Main INFO avg_val_loss: 0.01108318
2019-04-30 09:43:53,301 Main INFO Save model at Epoch 3
2019-04-30 09:44:04,995 Main INFO =========================================
2019-04-30 09:44:04,996 Main INFO Epoch 4 / 40
2019-04-30 09:44:04,997 Main INFO =========================================
2019-04-30 09:44:04,998 Main INFO avg_loss: 0.01122237
2019-04-30 09:44:04,999 Main INFO avg_val_loss: 0.01077207
2019-04-30 09:44:05,024 Main INFO Save model at Epoch 4
2019-04-30 09:44:15,676 Main INFO =========================================
2019-04-30 09:44:15,677 Main INFO Epoch 5 / 40
2019-04-30 09:44:15,678 Main INFO =========================================
2019-04-30 09:44:15,678 Main INFO avg_loss: 0.01084254
2019-04-30 09:44:15,679 Main INFO avg_val_loss: 0.01049902
2019-04-30 09:44:15,702 Main INFO Save model at Epoch 5
2019-04-30 09:44:26,361 Main INFO =========================================
2019-04-30 09:44:26,362 Main INFO Epoch 6 / 40
2019-04-30 09:44:26,362 Main INFO =========================================
2019-04-30 09:44:26,363 Main INFO avg_loss: 0.01054277
2019-04-30 09:44:26,364 Main INFO avg_val_loss: 0.01030598
2019-04-30 09:44:26,387 Main INFO Save model at Epoch 6
2019-04-30 09:44:37,019 Main INFO =========================================
2019-04-30 09:44:37,021 Main INFO Epoch 7 / 40
2019-04-30 09:44:37,021 Main INFO =========================================
2019-04-30 09:44:37,022 Main INFO avg_loss: 0.01029242
2019-04-30 09:44:37,023 Main INFO avg_val_loss: 0.01017515
2019-04-30 09:44:37,047 Main INFO Save model at Epoch 7
2019-04-30 09:44:47,699 Main INFO =========================================
2019-04-30 09:44:47,700 Main INFO Epoch 8 / 40
2019-04-30 09:44:47,701 Main INFO =========================================
2019-04-30 09:44:47,702 Main INFO avg_loss: 0.01007195
2019-04-30 09:44:47,703 Main INFO avg_val_loss: 0.01006054
2019-04-30 09:44:47,729 Main INFO Save model at Epoch 8
2019-04-30 09:44:59,626 Main INFO =========================================
2019-04-30 09:44:59,627 Main INFO Epoch 9 / 40
2019-04-30 09:44:59,628 Main INFO =========================================
2019-04-30 09:44:59,628 Main INFO avg_loss: 0.00988417
2019-04-30 09:44:59,629 Main INFO avg_val_loss: 0.00995587
2019-04-30 09:44:59,655 Main INFO Save model at Epoch 9
2019-04-30 09:45:10,312 Main INFO =========================================
2019-04-30 09:45:10,313 Main INFO Epoch 10 / 40
2019-04-30 09:45:10,313 Main INFO =========================================
2019-04-30 09:45:10,314 Main INFO avg_loss: 0.00971114
2019-04-30 09:45:10,315 Main INFO avg_val_loss: 0.00988468
2019-04-30 09:45:10,338 Main INFO Save model at Epoch 10
2019-04-30 09:45:21,640 Main INFO =========================================
2019-04-30 09:45:21,641 Main INFO Epoch 11 / 40
2019-04-30 09:45:21,642 Main INFO =========================================
2019-04-30 09:45:21,643 Main INFO avg_loss: 0.00955036
2019-04-30 09:45:21,644 Main INFO avg_val_loss: 0.00980393
2019-04-30 09:45:21,669 Main INFO Save model at Epoch 11
2019-04-30 09:45:32,731 Main INFO =========================================
2019-04-30 09:45:32,732 Main INFO Epoch 12 / 40
2019-04-30 09:45:32,733 Main INFO =========================================
2019-04-30 09:45:32,734 Main INFO avg_loss: 0.00940797
2019-04-30 09:45:32,734 Main INFO avg_val_loss: 0.00975774
2019-04-30 09:45:32,758 Main INFO Save model at Epoch 12
2019-04-30 09:45:43,359 Main INFO =========================================
2019-04-30 09:45:43,360 Main INFO Epoch 13 / 40
2019-04-30 09:45:43,361 Main INFO =========================================
2019-04-30 09:45:43,361 Main INFO avg_loss: 0.00926395
2019-04-30 09:45:43,362 Main INFO avg_val_loss: 0.00979886
2019-04-30 09:45:53,986 Main INFO =========================================
2019-04-30 09:45:53,987 Main INFO Epoch 14 / 40
2019-04-30 09:45:53,987 Main INFO =========================================
2019-04-30 09:45:53,988 Main INFO avg_loss: 0.00913396
2019-04-30 09:45:53,989 Main INFO avg_val_loss: 0.00969291
2019-04-30 09:45:54,012 Main INFO Save model at Epoch 14
2019-04-30 09:46:04,633 Main INFO =========================================
2019-04-30 09:46:04,634 Main INFO Epoch 15 / 40
2019-04-30 09:46:04,635 Main INFO =========================================
2019-04-30 09:46:04,635 Main INFO avg_loss: 0.00900742
2019-04-30 09:46:04,636 Main INFO avg_val_loss: 0.00964757
2019-04-30 09:46:04,660 Main INFO Save model at Epoch 15
2019-04-30 09:46:15,275 Main INFO =========================================
2019-04-30 09:46:15,276 Main INFO Epoch 16 / 40
2019-04-30 09:46:15,277 Main INFO =========================================
2019-04-30 09:46:15,278 Main INFO avg_loss: 0.00888827
2019-04-30 09:46:15,278 Main INFO avg_val_loss: 0.00964685
2019-04-30 09:46:15,302 Main INFO Save model at Epoch 16
2019-04-30 09:46:25,918 Main INFO =========================================
2019-04-30 09:46:25,920 Main INFO Epoch 17 / 40
2019-04-30 09:46:25,921 Main INFO =========================================
2019-04-30 09:46:25,921 Main INFO avg_loss: 0.00877817
2019-04-30 09:46:25,922 Main INFO avg_val_loss: 0.00961453
2019-04-30 09:46:25,945 Main INFO Save model at Epoch 17
2019-04-30 09:46:36,638 Main INFO =========================================
2019-04-30 09:46:36,639 Main INFO Epoch 18 / 40
2019-04-30 09:46:36,640 Main INFO =========================================
2019-04-30 09:46:36,641 Main INFO avg_loss: 0.00866776
2019-04-30 09:46:36,642 Main INFO avg_val_loss: 0.00957892
2019-04-30 09:46:36,668 Main INFO Save model at Epoch 18
2019-04-30 09:46:48,270 Main INFO =========================================
2019-04-30 09:46:48,271 Main INFO Epoch 19 / 40
2019-04-30 09:46:48,272 Main INFO =========================================
2019-04-30 09:46:48,272 Main INFO avg_loss: 0.00857282
2019-04-30 09:46:48,273 Main INFO avg_val_loss: 0.00959377
2019-04-30 09:46:58,850 Main INFO =========================================
2019-04-30 09:46:58,851 Main INFO Epoch 20 / 40
2019-04-30 09:46:58,851 Main INFO =========================================
2019-04-30 09:46:58,852 Main INFO avg_loss: 0.00846500
2019-04-30 09:46:58,853 Main INFO avg_val_loss: 0.00953359
2019-04-30 09:46:58,880 Main INFO Save model at Epoch 20
2019-04-30 09:47:09,487 Main INFO =========================================
2019-04-30 09:47:09,488 Main INFO Epoch 21 / 40
2019-04-30 09:47:09,489 Main INFO =========================================
2019-04-30 09:47:09,490 Main INFO avg_loss: 0.00837721
2019-04-30 09:47:09,491 Main INFO avg_val_loss: 0.00953084
2019-04-30 09:47:09,514 Main INFO Save model at Epoch 21
2019-04-30 09:47:20,164 Main INFO =========================================
2019-04-30 09:47:20,165 Main INFO Epoch 22 / 40
2019-04-30 09:47:20,166 Main INFO =========================================
2019-04-30 09:47:20,166 Main INFO avg_loss: 0.00828490
2019-04-30 09:47:20,167 Main INFO avg_val_loss: 0.00953900
2019-04-30 09:47:30,793 Main INFO =========================================
2019-04-30 09:47:30,794 Main INFO Epoch 23 / 40
2019-04-30 09:47:30,795 Main INFO =========================================
2019-04-30 09:47:30,796 Main INFO avg_loss: 0.00820802
2019-04-30 09:47:30,797 Main INFO avg_val_loss: 0.00954508
2019-04-30 09:47:41,996 Main INFO =========================================
2019-04-30 09:47:41,997 Main INFO Epoch 24 / 40
2019-04-30 09:47:41,998 Main INFO =========================================
2019-04-30 09:47:41,999 Main INFO avg_loss: 0.00814082
2019-04-30 09:47:41,999 Main INFO avg_val_loss: 0.00956160
2019-04-30 09:47:52,622 Main INFO =========================================
2019-04-30 09:47:52,623 Main INFO Epoch 25 / 40
2019-04-30 09:47:52,624 Main INFO =========================================
2019-04-30 09:47:52,625 Main INFO avg_loss: 0.00805962
2019-04-30 09:47:52,625 Main INFO avg_val_loss: 0.00954950
2019-04-30 09:48:04,023 Main INFO =========================================
2019-04-30 09:48:04,024 Main INFO Epoch 26 / 40
2019-04-30 09:48:04,025 Main INFO =========================================
2019-04-30 09:48:04,026 Main INFO avg_loss: 0.00799714
2019-04-30 09:48:04,026 Main INFO avg_val_loss: 0.00955674
2019-04-30 09:48:14,887 Main INFO =========================================
2019-04-30 09:48:14,888 Main INFO Epoch 27 / 40
2019-04-30 09:48:14,889 Main INFO =========================================
2019-04-30 09:48:14,890 Main INFO avg_loss: 0.00793900
2019-04-30 09:48:14,891 Main INFO avg_val_loss: 0.00954770
2019-04-30 09:48:25,484 Main INFO =========================================
2019-04-30 09:48:25,485 Main INFO Epoch 28 / 40
2019-04-30 09:48:25,485 Main INFO =========================================
2019-04-30 09:48:25,486 Main INFO avg_loss: 0.00788250
2019-04-30 09:48:25,487 Main INFO avg_val_loss: 0.00955169
2019-04-30 09:48:36,067 Main INFO =========================================
2019-04-30 09:48:36,068 Main INFO Epoch 29 / 40
2019-04-30 09:48:36,068 Main INFO =========================================
2019-04-30 09:48:36,069 Main INFO avg_loss: 0.00783460
2019-04-30 09:48:36,070 Main INFO avg_val_loss: 0.00952471
2019-04-30 09:48:36,093 Main INFO Save model at Epoch 29
2019-04-30 09:48:46,821 Main INFO =========================================
2019-04-30 09:48:46,822 Main INFO Epoch 30 / 40
2019-04-30 09:48:46,823 Main INFO =========================================
2019-04-30 09:48:46,823 Main INFO avg_loss: 0.00779483
2019-04-30 09:48:46,824 Main INFO avg_val_loss: 0.00952882
2019-04-30 09:48:57,441 Main INFO =========================================
2019-04-30 09:48:57,442 Main INFO Epoch 31 / 40
2019-04-30 09:48:57,443 Main INFO =========================================
2019-04-30 09:48:57,444 Main INFO avg_loss: 0.00774793
2019-04-30 09:48:57,444 Main INFO avg_val_loss: 0.00953918
2019-04-30 09:49:08,031 Main INFO =========================================
2019-04-30 09:49:08,032 Main INFO Epoch 32 / 40
2019-04-30 09:49:08,033 Main INFO =========================================
2019-04-30 09:49:08,033 Main INFO avg_loss: 0.00771882
2019-04-30 09:49:08,034 Main INFO avg_val_loss: 0.00955244
2019-04-30 09:49:18,862 Main INFO =========================================
2019-04-30 09:49:18,863 Main INFO Epoch 33 / 40
2019-04-30 09:49:18,864 Main INFO =========================================
2019-04-30 09:49:18,865 Main INFO avg_loss: 0.00768284
2019-04-30 09:49:18,866 Main INFO avg_val_loss: 0.00956469
2019-04-30 09:49:30,356 Main INFO =========================================
2019-04-30 09:49:30,357 Main INFO Epoch 34 / 40
2019-04-30 09:49:30,358 Main INFO =========================================
2019-04-30 09:49:30,358 Main INFO avg_loss: 0.00766387
2019-04-30 09:49:30,359 Main INFO avg_val_loss: 0.00954313
2019-04-30 09:49:40,946 Main INFO =========================================
2019-04-30 09:49:40,947 Main INFO Epoch 35 / 40
2019-04-30 09:49:40,947 Main INFO =========================================
2019-04-30 09:49:40,948 Main INFO avg_loss: 0.00764861
2019-04-30 09:49:40,949 Main INFO avg_val_loss: 0.00954270
2019-04-30 09:49:51,542 Main INFO =========================================
2019-04-30 09:49:51,543 Main INFO Epoch 36 / 40
2019-04-30 09:49:51,544 Main INFO =========================================
2019-04-30 09:49:51,545 Main INFO avg_loss: 0.00762351
2019-04-30 09:49:51,546 Main INFO avg_val_loss: 0.00955160
2019-04-30 09:50:02,463 Main INFO =========================================
2019-04-30 09:50:02,464 Main INFO Epoch 37 / 40
2019-04-30 09:50:02,465 Main INFO =========================================
2019-04-30 09:50:02,465 Main INFO avg_loss: 0.00760933
2019-04-30 09:50:02,466 Main INFO avg_val_loss: 0.00956268
2019-04-30 09:50:14,081 Main INFO =========================================
2019-04-30 09:50:14,082 Main INFO Epoch 38 / 40
2019-04-30 09:50:14,083 Main INFO =========================================
2019-04-30 09:50:14,084 Main INFO avg_loss: 0.00760035
2019-04-30 09:50:14,085 Main INFO avg_val_loss: 0.00955363
2019-04-30 09:50:24,667 Main INFO =========================================
2019-04-30 09:50:24,668 Main INFO Epoch 39 / 40
2019-04-30 09:50:24,669 Main INFO =========================================
2019-04-30 09:50:24,670 Main INFO avg_loss: 0.00759353
2019-04-30 09:50:24,670 Main INFO avg_val_loss: 0.00956115
2019-04-30 09:50:35,265 Main INFO =========================================
2019-04-30 09:50:35,266 Main INFO Epoch 40 / 40
2019-04-30 09:50:35,267 Main INFO =========================================
2019-04-30 09:50:35,267 Main INFO avg_loss: 0.00758889
2019-04-30 09:50:35,268 Main INFO avg_val_loss: 0.00955709
2019-04-30 09:50:37,055 Main INFO Best Validation Loss: 0.00952471
2019-04-30 09:50:37,207 Main INFO Fold 2
35.00% [14/40 02:31<04:42]
25.11% [343/1366 00:02<00:06]
2019-04-30 09:50:48,912 Main INFO =========================================
2019-04-30 09:50:48,913 Main INFO Epoch 1 / 40
2019-04-30 09:50:48,913 Main INFO =========================================
2019-04-30 09:50:48,914 Main INFO avg_loss: 0.02360141
2019-04-30 09:50:48,915 Main INFO avg_val_loss: 0.01279735
2019-04-30 09:50:48,939 Main INFO Save model at Epoch 1
2019-04-30 09:50:59,596 Main INFO =========================================
2019-04-30 09:50:59,597 Main INFO Epoch 2 / 40
2019-04-30 09:50:59,598 Main INFO =========================================
2019-04-30 09:50:59,599 Main INFO avg_loss: 0.01263097
2019-04-30 09:50:59,599 Main INFO avg_val_loss: 0.01171250
2019-04-30 09:50:59,623 Main INFO Save model at Epoch 2
2019-04-30 09:51:10,267 Main INFO =========================================
2019-04-30 09:51:10,268 Main INFO Epoch 3 / 40
2019-04-30 09:51:10,269 Main INFO =========================================
2019-04-30 09:51:10,269 Main INFO avg_loss: 0.01172976
2019-04-30 09:51:10,270 Main INFO avg_val_loss: 0.01118806
2019-04-30 09:51:10,293 Main INFO Save model at Epoch 3
2019-04-30 09:51:20,967 Main INFO =========================================
2019-04-30 09:51:20,968 Main INFO Epoch 4 / 40
2019-04-30 09:51:20,968 Main INFO =========================================
2019-04-30 09:51:20,969 Main INFO avg_loss: 0.01120052
2019-04-30 09:51:20,970 Main INFO avg_val_loss: 0.01086266
2019-04-30 09:51:20,993 Main INFO Save model at Epoch 4
2019-04-30 09:51:31,631 Main INFO =========================================
2019-04-30 09:51:31,632 Main INFO Epoch 5 / 40
2019-04-30 09:51:31,633 Main INFO =========================================
2019-04-30 09:51:31,634 Main INFO avg_loss: 0.01081811
2019-04-30 09:51:31,634 Main INFO avg_val_loss: 0.01060326
2019-04-30 09:51:31,660 Main INFO Save model at Epoch 5
2019-04-30 09:51:42,255 Main INFO =========================================
2019-04-30 09:51:42,256 Main INFO Epoch 6 / 40
2019-04-30 09:51:42,257 Main INFO =========================================
2019-04-30 09:51:42,258 Main INFO avg_loss: 0.01052030
2019-04-30 09:51:42,258 Main INFO avg_val_loss: 0.01040898
2019-04-30 09:51:42,282 Main INFO Save model at Epoch 6
2019-04-30 09:51:52,930 Main INFO =========================================
2019-04-30 09:51:52,931 Main INFO Epoch 7 / 40
2019-04-30 09:51:52,932 Main INFO =========================================
2019-04-30 09:51:52,932 Main INFO avg_loss: 0.01027040
2019-04-30 09:51:52,933 Main INFO avg_val_loss: 0.01029070
2019-04-30 09:51:52,956 Main INFO Save model at Epoch 7
2019-04-30 09:52:04,266 Main INFO =========================================
2019-04-30 09:52:04,268 Main INFO Epoch 8 / 40
2019-04-30 09:52:04,268 Main INFO =========================================
2019-04-30 09:52:04,269 Main INFO avg_loss: 0.01005002
2019-04-30 09:52:04,270 Main INFO avg_val_loss: 0.01014570
2019-04-30 09:52:04,296 Main INFO Save model at Epoch 8
2019-04-30 09:52:15,319 Main INFO =========================================
2019-04-30 09:52:15,320 Main INFO Epoch 9 / 40
2019-04-30 09:52:15,321 Main INFO =========================================
2019-04-30 09:52:15,321 Main INFO avg_loss: 0.00985590
2019-04-30 09:52:15,322 Main INFO avg_val_loss: 0.01003471
2019-04-30 09:52:15,346 Main INFO Save model at Epoch 9
2019-04-30 09:52:25,946 Main INFO =========================================
2019-04-30 09:52:25,948 Main INFO Epoch 10 / 40
2019-04-30 09:52:25,948 Main INFO =========================================
2019-04-30 09:52:25,949 Main INFO avg_loss: 0.00967915
2019-04-30 09:52:25,950 Main INFO avg_val_loss: 0.00994328
2019-04-30 09:52:25,973 Main INFO Save model at Epoch 10
2019-04-30 09:52:36,624 Main INFO =========================================
2019-04-30 09:52:36,625 Main INFO Epoch 11 / 40
2019-04-30 09:52:36,626 Main INFO =========================================
2019-04-30 09:52:36,627 Main INFO avg_loss: 0.00951604
2019-04-30 09:52:36,627 Main INFO avg_val_loss: 0.00988495
2019-04-30 09:52:36,651 Main INFO Save model at Epoch 11
2019-04-30 09:52:47,882 Main INFO =========================================
2019-04-30 09:52:47,883 Main INFO Epoch 12 / 40
2019-04-30 09:52:47,884 Main INFO =========================================
2019-04-30 09:52:47,884 Main INFO avg_loss: 0.00937189
2019-04-30 09:52:47,885 Main INFO avg_val_loss: 0.00982616
2019-04-30 09:52:47,909 Main INFO Save model at Epoch 12
2019-04-30 09:52:58,512 Main INFO =========================================
2019-04-30 09:52:58,514 Main INFO Epoch 13 / 40
2019-04-30 09:52:58,514 Main INFO =========================================
2019-04-30 09:52:58,515 Main INFO avg_loss: 0.00922523
2019-04-30 09:52:58,516 Main INFO avg_val_loss: 0.00983884
2019-04-30 09:53:09,109 Main INFO =========================================
2019-04-30 09:53:09,111 Main INFO Epoch 14 / 40
2019-04-30 09:53:09,111 Main INFO =========================================
2019-04-30 09:53:09,112 Main INFO avg_loss: 0.00909850
2019-04-30 09:53:09,113 Main INFO avg_val_loss: 0.00976357
2019-04-30 09:53:09,137 Main INFO Save model at Epoch 14
In [10]:
!ls ../input/
imet-2019-fgvc6  pytorch-pretrained-image-models