2c. PyTorch
Building a Net from Individual Components

import torch

class MyModel(torch.nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        self.in_to_hid  = torch.nn.Linear(2, 2)
        self.hid_to_out = torch.nn.Linear(2, 1)

    def forward(self, input):
        hid_sum = self.in_to_hid(input)     # weighted sum into hidden layer
        hidden  = torch.tanh(hid_sum)       # hidden activation
        out_sum = self.hid_to_out(hidden)   # weighted sum into output layer
        output  = torch.sigmoid(out_sum)    # output activation
        return output

Defining a Sequential Network

import torch
import torch.nn as nn

class MyModel(torch.nn.Module):
    def __init__(self, num_input, num_hid, num_out):
        super(MyModel, self).__init__()
        self.main = nn.Sequential(
            nn.Linear(num_input, num_hid),
            nn.Tanh(),
            nn.Linear(num_hid, num_out),
            nn.Sigmoid()
        )

    def forward(self, input):
        output = self.main(input)
        return output
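Either class is used the same way once defined; a minimal usage sketch of the Sequential version (the 2-2-1 sizes are assumed to match the XOR task used later in these slides):

net = MyModel(2, 2, 1)      # 2 inputs, 2 hidden nodes, 1 output (assumed sizes)
x = torch.Tensor([[0,0],[0,1],[1,0],[1,1]])
print(net(x).shape)         # torch.Size([4, 1]), one output per input row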
Network layers:
➛ nn.Linear()
➛ nn.Conv2d() (Week 4)
Intermediate Operators:
➛ nn.Dropout()
➛ nn.BatchNorm2d() (Week 4)
Activation Functions:
➛ nn.Sigmoid()
➛ nn.Tanh()
➛ nn.ReLU() (Week 3)

import torch.utils.data

# input and target values for the XOR task
input  = torch.Tensor([[0,0],[0,1],[1,0],[1,1]])
target = torch.Tensor([[0],[1],[1],[0]])

xdata = torch.utils.data.TensorDataset(input, target)
train_loader = torch.utils.data.DataLoader(xdata, batch_size=4)
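Iterating over the DataLoader yields one batch at a time; with batch_size=4, all four XOR patterns arrive in a single batch. A quick sketch:

for batch_input, batch_target in train_loader:
    print(batch_input.shape, batch_target.shape)   # torch.Size([4, 2]) torch.Size([4, 1])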
Loading Data from a .csv File
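The body of this slide did not survive extraction; what follows is a minimal sketch of one common pattern, not the lecture's exact code. The file name xor.csv, its comma-separated layout, and the header row are all assumptions:

import numpy as np
import torch
import torch.utils.data

raw = np.loadtxt("xor.csv", delimiter=",", skiprows=1)  # hypothetical file with a header row
input  = torch.Tensor(raw[:, 0:2])    # first two columns as network inputs
target = torch.Tensor(raw[:, 2:3])    # final column as target
dataset = torch.utils.data.TensorDataset(input, target)
loader  = torch.utils.data.DataLoader(dataset, batch_size=4)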
Custom Datasets
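Again only the title survives; a minimal sketch of the standard recipe, reusing the XOR data purely for illustration: subclass torch.utils.data.Dataset and implement __len__() and __getitem__():

import torch
import torch.utils.data

class XORDataset(torch.utils.data.Dataset):   # hypothetical example class
    def __init__(self):
        self.input  = torch.Tensor([[0,0],[0,1],[1,0],[1,1]])
        self.target = torch.Tensor([[0],[1],[1],[0]])
    def __len__(self):
        return self.input.shape[0]             # number of samples
    def __getitem__(self, idx):
        return self.input[idx], self.target[idx]

loader = torch.utils.data.DataLoader(XORDataset(), batch_size=4)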
# SGD stands for "Stochastic Gradient Descent"
optimizer = torch.optim.SGD(net.parameters(),
                            lr=0.01, momentum=0.9,
                            weight_decay=0.0001)

# Adam = Adaptive Moment Estimation (good for deep networks)
optimizer = torch.optim.Adam(net.parameters(), eps=0.000001,
                             lr=0.01, betas=(0.5,0.999),
                             weight_decay=0.0001)

def train(args, net, device, train_loader, optimizer):
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()   # zero the gradients
        output = net(data)      # apply network
        loss = ...              # compute loss function
        loss.backward()         # compute gradients
        optimizer.step()        # update weights
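The slide deliberately leaves loss = ... open. As an illustration only (the lecture does not fix this choice here), binary cross-entropy suits the sigmoid output of the XOR model; a self-contained sketch wiring the earlier pieces together:

import torch
import torch.nn.functional as F

net = MyModel(2, 2, 1)        # Sequential version defined earlier
optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)

for epoch in range(2000):     # assumed epoch count
    for data, target in train_loader:
        optimizer.zero_grad()                          # zero the gradients
        output = net(data)                             # apply network
        loss = F.binary_cross_entropy(output, target)  # assumed loss choice
        loss.backward()                                # compute gradients
        optimizer.step()                               # update weights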
Loss Functions
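This slide's content was lost in extraction; a short sketch of loss functions commonly introduced at this point (the particular selection is an assumption):

import torch.nn as nn

mse  = nn.MSELoss()           # mean squared error, for regression targets
bce  = nn.BCELoss()           # binary cross entropy, expects outputs in (0,1)
cent = nn.CrossEntropyLoss()  # multi-class; combines LogSoftmax with NLLLoss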
Testing
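A minimal evaluation sketch, assuming a binary-output network like the XOR model above; net.eval() and torch.no_grad() are the standard pieces, while test_loader and the accuracy bookkeeping are placeholders:

import torch

def test(net, test_loader):
    net.eval()                        # evaluation mode: disables Dropout, freezes BatchNorm stats
    correct, total = 0, 0
    with torch.no_grad():             # gradients are not needed for testing
        for data, target in test_loader:
            output = net(data)
            pred = (output >= 0.5).float()       # threshold the sigmoid output
            correct += (pred == target).sum().item()
            total   += target.numel()
    return correct / total            # classification accuracy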