@@ -1,48 +1,42 @@
-import torch
-import torch.nn as nn
-import torch.nn.Functional as F
-
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Python version: 3.6

-# MLP Arch with 1 Hidden layer
+import torch
+from torch import nn
+import torch.nn.functional as F


class MLP(nn.Module):
-    def __init__(self, input_dim, hidden, out_dim):
-
+    def __init__(self, dim_in, dim_hidden, dim_out):
        super(MLP, self).__init__()
-        self.linear1 = nn.Linear(input_dim, hidden)
-        self.linear2 = nn.Linear(hidden, out_dim)
+        self.layer_input = nn.Linear(dim_in, dim_hidden)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout()
+        self.layer_hidden = nn.Linear(dim_hidden, dim_out)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        x = x.view(-1, x.shape[1]*x.shape[-2]*x.shape[-1])
-        x = self.linear1(x)
+        x = self.layer_input(x)
        x = self.dropout(x)
        x = self.relu(x)
-        x = self.linear2(x)
+        x = self.layer_hidden(x)
        return self.softmax(x)
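
For reference, a minimal usage sketch of the renamed MLP (the dimension values below are illustrative, not taken from this diff; for 28x28 MNIST images dim_in would be 784):

    # Sketch only -- dim_hidden=200 is an assumed value.
    import torch

    model = MLP(dim_in=784, dim_hidden=200, dim_out=10)
    images = torch.randn(64, 1, 28, 28)   # (batch, channels, height, width)
    probs = model(images)                 # forward() flattens to (64, 1*28*28)
    print(probs.shape)                    # torch.Size([64, 10])
    print(probs.sum(dim=1))               # rows sum to 1: nn.Softmax output

Note that MLP returns probabilities through nn.Softmax, while both CNNs below return F.log_softmax; a training loop pairing this model with nn.NLLLoss would need to take the log of the output first (or the model could return log-probabilities directly).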

-# CNN Arch for MNIST
-
-
-class CNN_Mnist(nn.Module):
+class CNNMnist(nn.Module):
    def __init__(self, args):
-
-        super(CNN_Mnist, self).__init__()
+        super(CNNMnist, self).__init__()
        self.conv1 = nn.Conv2d(args.num_channels, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
-        self.dropout_2d = nn.Dropout2d()
+        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, args.num_classes)

    def forward(self, x):
-        x = F.max_pool2d(self.conv1(x), 2)
-        x = F.relu(x)
-        x = F.max_pool2d(nn.Dropout2d(self.conv2(x)), 2)
-        x = F.relu(x)
+        x = F.relu(F.max_pool2d(self.conv1(x), 2))
+        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, x.shape[1]*x.shape[2]*x.shape[3])
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
@@ -50,27 +44,20 @@ class CNN_Mnist(nn.Module):
        return F.log_softmax(x, dim=1)
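
Two things worth noting about this hunk: the old forward pass called nn.Dropout2d(self.conv2(x)), which constructs a new Dropout2d module (passing a tensor where the drop probability belongs) rather than applying dropout, so routing through the registered self.conv2_drop is a genuine bug fix, not just a rename. And the hard-coded nn.Linear(320, 50) follows from 28x28 MNIST inputs. A shape-check sketch, with args stubbed out using assumed values (num_channels=1, num_classes=10):

    # 28 -> conv1(k=5) -> 24 -> max_pool2d(2) -> 12 -> conv2(k=5) -> 8 -> pool -> 4,
    # so the flattened features are 20 channels * 4 * 4 = 320, matching fc1.
    from types import SimpleNamespace
    import torch

    args = SimpleNamespace(num_channels=1, num_classes=10)  # assumed stub
    model = CNNMnist(args)
    out = model(torch.randn(8, 1, 28, 28))
    print(out.shape)   # torch.Size([8, 10]); log-probabilities from F.log_softmax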

-# CNN Arch -- CIFAR
-
-
-class CNN_Cifar(nn.Module):
-
+class CNNCifar(nn.Module):
    def __init__(self, args):
-
-        super(CNN_Cifar, self).__init__()
+        super(CNNCifar, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
-        self.fc1 = nn.Linear(16*5*5, 120)
+        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, args.num_classes)

    def forward(self, x):
-        x = F.relu(self.conv1(x))
-        x = self.pool(x)
-        x = F.relu(self.conv2(x))
-        x = self.pool(x)
-        x = x.view(-1, 16*5*5)  # Dim of fc1
+        x = self.pool(F.relu(self.conv1(x)))
+        x = self.pool(F.relu(self.conv2(x)))
+        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
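
The same sanity check for the CIFAR model, whose 16 * 5 * 5 flatten size comes from 32x32 inputs (the args stub is assumed, and the final return of forward falls outside this excerpt):

    # 32 -> conv1(k=5) -> 28 -> pool(2) -> 14 -> conv2(k=5) -> 10 -> pool(2) -> 5,
    # so the flattened features are 16 channels * 5 * 5 = 400, matching fc1.
    from types import SimpleNamespace
    import torch

    args = SimpleNamespace(num_classes=10)  # assumed stub
    model = CNNCifar(args)
    out = model(torch.randn(4, 3, 32, 32))  # assumes forward returns after fc3
    print(out.shape)   # torch.Size([4, 10])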