!pip install jovian --upgrade --quiet
import torch
import torchvision
from torchvision.datasets import KMNIST
from torchvision.transforms import ToTensor
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
dataset = KMNIST('/data', train=True, download=True, transform=ToTensor())
dataset
Dataset KMNIST
    Number of datapoints: 60000
    Root location: /data
    Split: Train
    StandardTransform
Transform: ToTensor()
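A quick sanity check on the data before modelling (a minimal sketch; the sample index and grayscale colormap are illustrative choices, not from the notebook). With ToTensor(), each item is an (image, label) pair where the image is a 1x28x28 float tensor:
image, label = dataset[0]
print(image.shape, label)
plt.imshow(image.squeeze(), cmap='gray')
plt.title('Label: {}'.format(label))
plt.show()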
import numpy as np
from torch.utils.data import random_split, DataLoader
from torch.optim import SGD

class KmnistModel(nn.Module):
    
    @staticmethod
    def accuracy(outputs, labels):
        _, preds = torch.max(outputs, dim=1)
        return torch.tensor(torch.sum(preds == labels).item() / len(preds))
    
    @staticmethod
    def _split_training_set(dataset):
        val_ds_ratio = np.random.uniform(0.1, 0.2)
        dataset_size = len(dataset)
        val_size = int(val_ds_ratio * dataset_size)
        train_size = dataset_size - val_size
        print('Training set size: ', train_size)
        print('Validation set size: ', val_size)
        return random_split(dataset, [train_size, val_size])
    
    input_size = 28 * 28
    output_size = 10
    
    def __init__(self, dataset, batch_size=16):
        super().__init__()
        self.loss_fn = F.cross_entropy
        self.model = nn.Linear(KmnistModel.input_size, KmnistModel.output_size)
        train_ds, val_ds = KmnistModel._split_training_set(dataset)
        self.train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True)
        self.val_loader = DataLoader(val_ds, batch_size=batch_size)
        self.batch_size = batch_size
    
    def forward(self, inputs):
        return self.model(inputs.reshape(-1, self.input_size))
    
    def _validate(self):
        losses = []
        accuracies = []
        # Gradients aren't needed during validation, so disable them to save memory
        with torch.no_grad():
            for batch in self.val_loader:
                inputs, labels = batch
                preds = self(inputs)
                losses.append(self.loss_fn(preds, labels))
                accuracies.append(self.accuracy(preds, labels))
        avg_loss = torch.stack(losses).mean().item()
        avg_acc = torch.stack(accuracies).mean().item()
        return {'loss_avg': avg_loss, 'acc_avg': avg_acc}
        
    
    def train(self, epochs, lr, opt_func=SGD):
        # Note: this shadows nn.Module.train(); acceptable here, since we never
        # need to toggle train/eval mode on a plain linear model.
        training_history = []
        optimizer = opt_func(self.model.parameters(), lr=lr)
        for epoch in range(epochs):
            epoch_losses = []
            for batch in self.train_loader:
                optimizer.zero_grad()
                images, labels = batch
                preds = self(images)
                loss = self.loss_fn(preds, labels)
                loss.backward()
                optimizer.step()
                # Detach so stored losses don't keep the computation graph alive
                epoch_losses.append(loss.detach())
            result = self._validate()
            print('Epoch {}: average loss: {:.4f}, average accuracy: {:.4f}'.format(
                epoch, result['loss_avg'], result['acc_avg']))
            training_history.append(result)
        return training_history
def plot_accuracy(history):
    accuracy = [x['acc_avg'] for x in history]
    plt.plot(accuracy)
    plt.xlabel('epoch')
    plt.ylabel('validation accuracy')
    plt.show()

model = KmnistModel(dataset)
Training set size:  53346
Validation set size:  6654
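With the model instantiated, training and plotting come next; a minimal sketch is shown below (the epoch count and learning rate are illustrative, not values taken from the notebook):
print(model._validate())  # untrained baseline: accuracy near 0.1 (random guessing over 10 classes)
history = model.train(epochs=5, lr=0.05)
plot_accuracy(history)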