#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
from torch import nn
import torch.nn.functional as F


# class MLP(nn.Module):
#     def __init__(self, dim_in, dim_hidden, dim_out):
#         super(MLP, self).__init__()
#         self.layer_input = nn.Linear(dim_in, dim_hidden)
#         self.relu = nn.ReLU()
#         self.dropout = nn.Dropout()
#         self.layer_hidden = nn.Linear(dim_hidden, dim_out)
#         self.softmax = nn.Softmax(dim=1)
#
#     def forward(self, x):
#         x = x.view(-1, x.shape[1]*x.shape[-2]*x.shape[-1])
#         x = self.layer_input(x)
#         x = self.dropout(x)
#         x = self.relu(x)
#         x = self.layer_hidden(x)
#         return self.softmax(x)


# Change MLP model to 2 hidden layers with 200 units
class MLP(nn.Module):
    def __init__(self, dim_in, dim_hidden, dim_out):
        super(MLP, self).__init__()
        self.layer_input = nn.Linear(dim_in, dim_hidden)
        # ReLU and Dropout hold no weights, so single instances are
        # shared by both hidden layers.
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout()
        self.layer_hidden1 = nn.Linear(dim_hidden, dim_hidden)
        self.layer_hidden2 = nn.Linear(dim_hidden, dim_out)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        # Flatten (batch, C, H, W) images into (batch, C*H*W) vectors.
        x = x.view(-1, x.shape[1]*x.shape[-2]*x.shape[-1])
        x = self.layer_input(x)
        x = self.dropout(x)
        x = self.relu(x)
        x = self.layer_hidden1(x)
        x = self.dropout(x)
        x = self.relu(x)
        x = self.layer_hidden2(x)
        return self.softmax(x)
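

# A minimal usage sketch (illustrative, not part of the original file): for
# 28x28 MNIST images dim_in would be 784, the comment above fixes dim_hidden
# at 200, and dim_out=10 assumes the usual ten-class setup. The helper name
# `_mlp_example` is hypothetical.
def _mlp_example():
    import torch
    model = MLP(dim_in=28*28, dim_hidden=200, dim_out=10)
    return model(torch.randn(4, 1, 28, 28))  # -> probabilities of shape (4, 10)

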
# class CNNMnist(nn.Module):
#     def __init__(self, args):
#         super(CNNMnist, self).__init__()
#         self.conv1 = nn.Conv2d(args.num_channels, 10, kernel_size=5)
#         self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
#         self.conv2_drop = nn.Dropout2d()
#         self.fc1 = nn.Linear(320, 50)
#         self.fc2 = nn.Linear(50, args.num_classes)
#
#     def forward(self, x):
#         x = F.relu(F.max_pool2d(self.conv1(x), 2))
#         x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
#         x = x.view(-1, x.shape[1]*x.shape[2]*x.shape[3])
#         x = F.relu(self.fc1(x))
#         x = F.dropout(x, training=self.training)
#         x = self.fc2(x)
#         return F.log_softmax(x, dim=1)


# Change CNN model to
class CNNMnist(nn.Module):
    def __init__(self, args):
        super(CNNMnist, self).__init__()
        self.conv1 = nn.Conv2d(args.num_channels, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, args.num_classes)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, x.shape[1]*x.shape[2]*x.shape[3])
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
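

# Shape sketch (an assumption: 1x28x28 MNIST inputs, so args.num_channels is
# 1): conv1 (k=5) gives 24x24, pool -> 12x12; conv2 (k=5) gives 8x8,
# pool -> 4x4; hence the hard-coded fc1 input of 20*4*4 = 320 features. The
# SimpleNamespace stands in for the repo's argparse args.
def _cnn_mnist_example():
    import torch
    from types import SimpleNamespace
    model = CNNMnist(SimpleNamespace(num_channels=1, num_classes=10))
    return model(torch.randn(4, 1, 28, 28))  # -> log-probs of shape (4, 10)

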
class CNNFashion_Mnist(nn.Module):
    def __init__(self, args):
        super(CNNFashion_Mnist, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.fc = nn.Linear(7*7*32, 10)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out
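

# Shape sketch for 1x28x28 Fashion-MNIST inputs (an assumption): padding=2
# with k=5 preserves the spatial size, and the two 2x2 max-pools reduce
# 28 -> 14 -> 7, which is where the 7*7*32 input of the final linear layer
# comes from. `args` is unused by this class, so None suffices here.
def _cnn_fashion_example():
    import torch
    model = CNNFashion_Mnist(args=None)
    return model(torch.randn(4, 1, 28, 28))  # -> logits of shape (4, 10)

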
class CNNCifar(nn.Module):
    def __init__(self, args):
        super(CNNCifar, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 64, 5)
        self.fc1 = nn.Linear(64 * 5 * 5, 512)
        self.fc2 = nn.Linear(512, 84)
        self.fc3 = nn.Linear(84, args.num_classes)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 64 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return F.log_softmax(x, dim=1)


# class CNNCifar(nn.Module):
#     def __init__(self, args):
#         super(CNNCifar, self).__init__()
#         self.conv1 = nn.Conv2d(3, 6, 5)
#         self.pool = nn.MaxPool2d(2, 2)
#         self.conv2 = nn.Conv2d(6, 16, 5)
#         self.fc1 = nn.Linear(16 * 5 * 5, 120)
#         self.fc2 = nn.Linear(120, 84)
#         self.fc3 = nn.Linear(84, args.num_classes)
#
#     def forward(self, x):
#         x = self.pool(F.relu(self.conv1(x)))
#         x = self.pool(F.relu(self.conv2(x)))
#         x = x.view(-1, 16 * 5 * 5)
#         x = F.relu(self.fc1(x))
#         x = F.relu(self.fc2(x))
#         x = self.fc3(x)
#         return F.log_softmax(x, dim=1)
# Change CNNCifar model to 917350 params
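

# Parameter-count sketch for the comment above (illustrative helper, not in
# the original file): with num_classes=10 the layers contribute
# 2432 (conv1) + 51264 (conv2) + 819712 (fc1) + 43092 (fc2) + 850 (fc3)
# = 917350 parameters. The SimpleNamespace stands in for the repo's argparse
# args.
def _cnn_cifar_param_count():
    from types import SimpleNamespace
    model = CNNCifar(SimpleNamespace(num_classes=10))
    return sum(p.numel() for p in model.parameters())  # -> 917350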