# Added Basic Neural Network
from torch import nn
class BasicNeuralNet(nn.Module):
    """A minimal fully-connected classifier: 784 -> 256 -> 10.

    Flattened 28x28 inputs pass through one sigmoid-activated hidden
    layer, then a linear output layer followed by a softmax over the
    10 class scores.
    """

    def __init__(self):
        super().__init__()
        # Linear map from the 784 input features to 256 hidden units.
        self.hidden = nn.Linear(784, 256)
        # Linear map from the hidden units to the 10 class outputs.
        self.output = nn.Linear(256, 10)
        # Activation for the hidden layer and normalization of the outputs.
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Run x of shape (batch, 784) through the network.

        Returns a (batch, 10) tensor of class probabilities that sum
        to 1 along dim=1.
        """
        return self.softmax(self.output(self.sigmoid(self.hidden(x))))
import torch.nn as nn
import torch.nn.functional as F
# CNN architecture definition | |||||
class Net(nn.Module):
    """A single conv + max-pool feature extractor.

    One 3x3 convolution (3 -> 16 channels, padding preserves spatial
    size) with ReLU, followed by 2x2 max pooling that halves height
    and width.
    """

    def __init__(self):
        super().__init__()
        # 3 input channels -> 16 feature maps, 3x3 kernel, padding=1.
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        # 2x2 window, stride 2: halves each spatial dimension.
        self.pool = nn.MaxPool2d(2, 2)

    def forward(self, x):
        """Apply conv -> ReLU -> pool to x of shape (batch, 3, H, W).

        Returns a (batch, 16, H//2, W//2) tensor of non-negative
        activations.
        """
        features = F.relu(self.conv1(x))
        return self.pool(features)