# python imports
import os
from tqdm import tqdm
# torch imports
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
# helper functions for computer vision
import torchvision
import torchvision.transforms as transforms
# {1: [1, 6, 14, 14], 2: [1, 16, 5, 5], 3: [1, 400], 4: [1, 256], 5: [1, 128], 6: [1, 100]}
class LeNet(nn.Module):
    """LeNet-5 style CNN for 3-channel images.

    forward() returns a pair: the logits and a dict mapping stage index
    (1..6) to the output shape of that stage, e.g. for a (1, 3, 32, 32)
    input: {1: [1, 6, 14, 14], 2: [1, 16, 5, 5], 3: [1, 400],
            4: [1, 256], 5: [1, 128], 6: [1, 100]}.
    """

    def __init__(self, input_shape=(32, 32), num_classes=100):
        super(LeNet, self).__init__()
        # Each (conv k=5 valid, pool /2) stage maps spatial size s -> s//4 - 3,
        # so the flattened feature count after conv2+pool is:
        flat_features = 16 * (input_shape[0] // 4 - 3) * (input_shape[1] // 4 - 3)
        self.conv1 = nn.Conv2d(3, 6, kernel_size=5, stride=1)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5, stride=1)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()
        self.fc1 = nn.Linear(flat_features, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, num_classes)

    def forward(self, x):
        """Return (logits, {stage_index: output_shape_list})."""
        shapes = {}
        h = self.maxpool(self.relu(self.conv1(x)))
        shapes[1] = list(h.size())
        h = self.maxpool(self.relu(self.conv2(h)))
        shapes[2] = list(h.size())
        h = self.flatten(h)
        shapes[3] = list(h.size())
        h = self.relu(self.fc1(h))
        shapes[4] = list(h.size())
        h = self.relu(self.fc2(h))
        shapes[5] = list(h.size())
        h = self.fc3(h)
        shapes[6] = list(h.size())
        return h, shapes
def count_model_params():
    '''
    Return the number of trainable parameters of LeNet, in millions.
    '''
    model = LeNet()
    # numel() gives the element count per tensor; the requires_grad filter
    # restricts the total to trainable parameters, matching the docstring.
    # (Previously this used np.sum over a generator — a deprecated NumPy
    # fallback — and left a stray no-op `breakpoint` debug leftover.)
    n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    return n_params / 1e6
class SimpleConvNet(nn.Module):
    """
    A simple convolutional neural network

    LeNet-5-shaped stack: two conv/relu/pool stages followed by three
    fully-connected layers. In training mode forward() returns raw logits
    (softmax is folded into the loss); in eval mode it returns softmax
    probabilities.
    """

    def __init__(self, input_shape=(32, 32), num_classes=100):
        super(SimpleConvNet, self).__init__()
        # Flattened feature count after two (conv k=5 valid, pool /2) stages:
        # spatial size s -> s//4 - 3 per axis, 16 channels out of conv2.
        flat_dim = 16 * (input_shape[0] // 4 - 3) * (input_shape[1] // 4 - 3)
        self.layers = nn.Sequential(
            # feature extractor: two conv/relu/pool stages
            nn.Conv2d(3, 6, kernel_size=5, stride=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(6, 16, kernel_size=5, stride=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Flatten(),
            # classifier head: three fully-connected layers
            nn.Linear(flat_dim, 256),
            nn.ReLU(inplace=True),
            nn.Linear(256, 128),
            nn.ReLU(inplace=True),
            nn.Linear(128, num_classes),
        )

    def forward(self, x):
        """Return logits when training, softmax probabilities when evaluating."""
        logits = self.layers(x)
        if not self.training:
            # CrossEntropyLoss applies log-softmax internally during training,
            # so the explicit softmax is attached only at inference time.
            return nn.functional.softmax(logits, dim=1)
        return logits
def train_model(model, train_loader, optimizer, criterion, epoch):
    """
    Run one training epoch and return the average loss over its batches.

    model (torch.nn.module): The model created to train
    train_loader (pytorch data loader): Training data loader
    optimizer (optimizer.*): An instance of some sort of optimizer, usually SGD
    criterion (nn.CrossEntropyLoss) : Loss function used to train the network
    epoch (int): Current epoch number
    """
    model.train()
    running_loss = 0.0
    for inputs, targets in tqdm(train_loader, total=len(train_loader)):
        # Clear gradients accumulated from the previous step.
        optimizer.zero_grad()
        # Forward pass, loss computation, backward pass, parameter update.
        preds = model(inputs)
        loss = criterion(preds, targets)
        loss.backward()
        optimizer.step()
        # .item() detaches the scalar from the computational graph.
        running_loss += loss.item()
    running_loss /= len(train_loader)
    print('[Training set] Epoch: {:d}, Average loss: {:.4f}'.format(epoch+1, running_loss))
    return running_loss
def test_model(model, test_loader, epoch):
    model.eval()
    correct = 0
    with torch.no_grad():
        for input, target in test_loader:
            ###################################
            # fill in the code here
            ###################################
            output = model(input)
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_acc = correct / len(test_loader.dataset)
    print('[Test set] Epoch: {:d}, Accuracy: {:.2f}%\n'.format(
        epoch+1, 100. * test_acc))
    return test_acc
if __name__ == '__main__':
    # Smoke test: one dummy forward pass plus a parameter count. The prints
    # replace a leftover breakpoint() debug call (and website text accidentally
    # pasted onto the same line, which made the file a syntax error).
    model = LeNet()
    outputs1, outputs2 = model(torch.rand(1, 3, 32, 32))
    params2 = count_model_params()
    print(outputs2)
    print(params2)
OneCompiler's Python online editor supports stdin, and users can give inputs to programs using the STDIN textbox under the I/O tab. Following is a sample Python program which takes a name as input and prints your name with hello.
import sys
name = sys.stdin.readline()
print("Hello "+ name)
Python is a very popular general-purpose programming language which was created by Guido van Rossum, and released in 1991. It is very popular for web development and you can build almost anything like mobile apps, web apps, tools, data analytics, machine learning etc. It is designed to be simple and easy like the English language. It is highly productive and efficient, making it a very popular language.
Whenever you want to perform a set of operations based on a condition, IF-ELSE is used.
if conditional-expression:
    #code
elif conditional-expression:
    #code
else:
    #code
Indentation is very important in Python, make sure the indentation is followed correctly
For loop is used to iterate over arrays (list, tuple, set, dictionary) or strings.
mylist=("Iphone","Pixel","Samsung")
for i in mylist:
    print(i)
While is also used to iterate a set of statements based on a condition. Usually while is preferred when the number of iterations is not known in advance.
while condition:
    #code
There are four types of collections in Python.
List is a collection which is ordered and can be changed. Lists are specified in square brackets.
mylist=["iPhone","Pixel","Samsung"]
print(mylist)
Tuple is a collection which is ordered and can not be changed. Tuples are specified in round brackets.
myTuple=("iPhone","Pixel","Samsung")
print(myTuple)
Below throws an error if you assign another value to tuple again.
myTuple=("iPhone","Pixel","Samsung")
print(myTuple)
myTuple[1]="onePlus"
print(myTuple)
Set is a collection which is unordered and unindexed. Sets are specified in curly brackets.
myset = {"iPhone","Pixel","Samsung"}
print(myset)
Dictionary is a collection of key value pairs which is unordered, can be changed, and indexed. They are written in curly brackets with key - value pairs.
mydict = {
    "brand" :"iPhone",
    "model": "iPhone 11"
}
print(mydict)
Following are the libraries supported by OneCompiler's Python compiler
| Name | Description | 
|---|---|
| NumPy | NumPy python library helps users to work on arrays with ease | 
| SciPy | SciPy is a scientific computation library which depends on NumPy for convenient and fast N-dimensional array manipulation | 
| SKLearn/Scikit-learn | Scikit-learn or Scikit-learn is the most useful library for machine learning in Python | 
| Pandas | Pandas is the most efficient Python library for data manipulation and analysis | 
| DOcplex | DOcplex is IBM Decision Optimization CPLEX Modeling for Python, is a library composed of Mathematical Programming Modeling and Constraint Programming Modeling |