diff --git a/ML Cookbook/BasicNeuralNet.py b/ML Cookbook/BasicNeuralNet.py
new file mode 100644
index 0000000..92e6d2f
--- /dev/null
+++ b/ML Cookbook/BasicNeuralNet.py
@@ -0,0 +1,23 @@
+from torch import nn
+
+class BasicNeuralNet(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+        # Inputs to hidden layer linear transformation
+        self.hidden = nn.Linear(784, 256)
+        # Output layer, 10 units
+        self.output = nn.Linear(256, 10)
+
+        # Define sigmoid activation and softmax output
+        self.sigmoid = nn.Sigmoid()
+        self.softmax = nn.Softmax(dim=1)
+
+    def forward(self, x):
+        # Pass the input tensor through each of the operations
+        x = self.hidden(x)
+        x = self.sigmoid(x)
+        x = self.output(x)
+        x = self.softmax(x)
+
+        return x
\ No newline at end of file
diff --git a/ML Cookbook/CNNs/BasicCNN.py b/ML Cookbook/CNNs/BasicCNN.py
new file mode 100644
index 0000000..178ba57
--- /dev/null
+++ b/ML Cookbook/CNNs/BasicCNN.py
@@ -0,0 +1,16 @@
+import torch.nn as nn
+import torch.nn.functional as F
+
+# CNN architecture definition
+class Net(nn.Module):
+    def __init__(self):
+        super(Net, self).__init__()
+        # convolutional layer
+        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
+        # max pooling layer
+        self.pool = nn.MaxPool2d(2, 2)
+
+    def forward(self, x):
+        # add sequence of convolutional and max pooling layers
+        x = self.pool(F.relu(self.conv1(x)))
+        return x
\ No newline at end of file