Deep Learning Lab Practicals
January 3, 2025
def predict(self, X):
    predictions = []
    for i in range(len(X)):
        linear_output = np.dot(X[i], self.weights) + self.bias
        prediction = self.activation(linear_output)
        predictions.append(prediction)
    return predictions
Predictions: [0, 1, 1, 1]
Weights: [0.1 0.1]
Bias: -0.1
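The predict() method above belongs to a Perceptron class whose remaining code did not survive the export. Below is a minimal sketch of such a class, assuming the classic perceptron learning rule, a step activation, a learning rate of 0.1, and the OR truth table as training data; these assumptions are consistent with the printed weights, bias, and predictions, but the original notebook may differ.

import numpy as np

class Perceptron:
    def __init__(self, n_inputs, learning_rate=0.1, epochs=10):
        self.weights = np.zeros(n_inputs)   # Start from zero weights (assumption)
        self.bias = 0.0
        self.lr = learning_rate
        self.epochs = epochs

    def activation(self, x):
        # Step function: output 1 when the weighted sum is non-negative
        return 1 if x >= 0 else 0

    def fit(self, X, y):
        # Perceptron learning rule: nudge weights by lr * error * input
        for _ in range(self.epochs):
            for xi, target in zip(X, y):
                error = target - self.activation(np.dot(xi, self.weights) + self.bias)
                self.weights += self.lr * error * xi
                self.bias += self.lr * error

    def predict(self, X):
        # Same predict() as shown above
        predictions = []
        for i in range(len(X)):
            linear_output = np.dot(X[i], self.weights) + self.bias
            predictions.append(self.activation(linear_output))
        return predictions

# Hypothetical usage on the OR gate
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 1])
p = Perceptron(n_inputs=2)
p.fit(X, y)
print("Predictions:", p.predict(X))
print("Weights:", p.weights)
print("Bias:", p.bias)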
2.0.1 Steps for Implementation:
1. Load the MNIST dataset.
2. Preprocess the data (normalize, reshape, etc.).
3. Build the neural network model.
4. Compile the model with an optimizer, loss function, and metrics.
5. Train the model on the MNIST dataset.
6. Evaluate the model on the test set.
7. Make predictions on new data.
Install Required Libraries
!pip install tensorflow
              loss='categorical_crossentropy',  # Loss function for multi-class classification
              metrics=['accuracy'])             # Metric to track
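Only the loss and metrics arguments of the compile call survive above. The following is a minimal end-to-end sketch covering steps 1 through 7; the layer sizes, the Adam optimizer, and the one-hot label encoding are assumptions rather than the original notebook's exact code.

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.utils import to_categorical

# 1-2. Load and preprocess MNIST
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.astype("float32") / 255.0   # Normalize pixel values to [0, 1]
x_test = x_test.astype("float32") / 255.0
y_train = to_categorical(y_train, 10)          # One-hot labels for categorical_crossentropy
y_test = to_categorical(y_test, 10)

# 3. Build a simple fully connected model (layer sizes are assumptions)
model = models.Sequential([
    layers.Flatten(input_shape=(28, 28)),
    layers.Dense(128, activation='relu'),
    layers.Dense(10, activation='softmax')
])

# 4. Compile
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# 5. Train
model.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test))

# 6. Evaluate
test_loss, test_acc = model.evaluate(x_test, y_test)
print("Test accuracy:", test_acc)

# 7. Predict on new data
predictions = model.predict(x_test)
print("First prediction:", np.argmax(predictions[0]))
print("True label:", np.argmax(y_test[0]))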
Epoch 1/5
1875/1875 [==============================] - 6s 3ms/step - loss: 0.3030 -
accuracy: 0.9121 - val_loss: 0.1391 - val_accuracy: 0.9576
Epoch 2/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.1467 -
accuracy: 0.9569 - val_loss: 0.0991 - val_accuracy: 0.9700
Epoch 3/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.1088 -
accuracy: 0.9672 - val_loss: 0.0861 - val_accuracy: 0.9737
Epoch 4/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.0908 -
accuracy: 0.9724 - val_loss: 0.0780 - val_accuracy: 0.9766
Epoch 5/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.0751 -
accuracy: 0.9768 - val_loss: 0.0765 - val_accuracy: 0.9756
313/313 [==============================] - 0s 1ms/step - loss: 0.0765 -
accuracy: 0.9756
Test accuracy: 0.975600004196167
313/313 [==============================] - 0s 1ms/step
First prediction: 7
True label: 7
3 Practical 3: Implement a simple CNN starting from filtering, convolution, and pooling operations and their arithmetic, with visualization in PyTorch and TensorFlow
Implementing a simple Convolutional Neural Network (CNN) that demonstrates the filtering, convolution, and pooling operations, along with the arithmetic involved, in both PyTorch and TensorFlow, is a great way to understand how CNNs work. Below is a step-by-step approach to building this in both frameworks.
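Only the plotting code from the PyTorch part survives below, so here is a minimal sketch that produces the tensors it displays. The toy input image, the hand-made edge filter, and the variable names image, conv, and pooled_image are assumptions chosen to match the plot.

import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt

# Toy 8x8 input with a bright square, shaped (batch, channels, height, width)
image = torch.zeros(1, 1, 8, 8)
image[:, :, 2:6, 2:6] = 1.0

# A 3x3 vertical-edge filter, shaped (out_channels, in_channels, kH, kW)
kernel = torch.tensor([[[[-1.0, 0.0, 1.0],
                         [-1.0, 0.0, 1.0],
                         [-1.0, 0.0, 1.0]]]])

# Convolution arithmetic: output size = (8 - 3) + 1 = 6 per spatial dimension
conv = F.conv2d(image, kernel)

# 2x2 max pooling with stride 2: 6 / 2 = 3 per spatial dimension
pooled_image = F.max_pool2d(conv, kernel_size=2).squeeze()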
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].imshow(image.squeeze().numpy(), cmap='gray')
ax[0].set_title('Input Image')
ax[0].axis('off')
ax[1].imshow(conv.squeeze().detach().numpy(), cmap='gray')
ax[1].set_title('After Convolution')
ax[1].axis('off')
ax[2].imshow(pooled_image.detach().numpy(), cmap='gray')
ax[2].set_title('After Max Pooling')
ax[2].axis('off')
plt.show()
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import numpy as np

... concepts in fewer lines of code than would be possible in languages such as C++ or Java.
"""

    def __len__(self):
        return len(self.text_as_int) - self.seq_length
seq_length = 30
dataset = TextDataset(text_as_int, seq_length)
dataloader = DataLoader(dataset, batch_size=64, shuffle=True)
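Only __len__ of the dataset class survives above. A plausible sketch of the full TextDataset follows; the character-level encoding (chars, char2idx, and text_as_int built from a training string text assumed to be defined earlier) and the __getitem__ layout, where the target is the input window shifted by one character, are assumptions.

# Character-level encoding of the training string text
chars = sorted(set(text))
char2idx = {ch: i for i, ch in enumerate(chars)}
text_as_int = [char2idx[ch] for ch in text]

class TextDataset(Dataset):
    def __init__(self, text_as_int, seq_length):
        self.text_as_int = text_as_int
        self.seq_length = seq_length

    def __len__(self):
        return len(self.text_as_int) - self.seq_length

    def __getitem__(self, idx):
        # Input: seq_length characters; target: the same window shifted by one
        x = torch.tensor(self.text_as_int[idx:idx + self.seq_length])
        y = torch.tensor(self.text_as_int[idx + 1:idx + self.seq_length + 1])
        return x, y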
# Step 3: Define the RNN Language Model
class RNNLanguageModel(nn.Module):
    def __init__(self, vocab_size, embedding_dim, hidden_dim, seq_length):
        super(RNNLanguageModel, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)         # Token embeddings
        self.rnn = nn.RNN(embedding_dim, hidden_dim, batch_first=True)   # RNN layer
        self.fc = nn.Linear(hidden_dim, vocab_size)                      # Output layer

    def forward(self, x):
        x = self.embedding(x)
        # RNN layer
        out, _ = self.rnn(x)
        # Output layer
        out = self.fc(out)
        return out
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
total_loss += loss.item()
print(f"Epoch [{epoch+1}/{epochs}], Loss: {total_loss/len(dataloader)}")
    model.eval()
    generated_text = start_text
    with torch.no_grad():
        for _ in range(length):
            output = model(input_seq)
            output = output.squeeze(0)  # Remove batch dimension
    return generated_text
print("\nGenerated Text:\n")
print(generated)
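The indented lines above are fragments of a text-generation helper. A sketch of such a function, assuming greedy decoding, a sliding input window, and the char2idx/idx2char lookups from the dataset sketch earlier, would be:

def generate(model, start_text, length, char2idx, idx2char, seq_length):
    model.eval()
    generated_text = start_text
    # Start from the ids of the seed text, keeping at most seq_length of them
    input_ids = [char2idx[ch] for ch in start_text][-seq_length:]
    with torch.no_grad():
        for _ in range(length):
            input_seq = torch.tensor(input_ids).unsqueeze(0)   # Add batch dimension
            output = model(input_seq)
            output = output.squeeze(0)                         # Remove batch dimension
            next_id = torch.argmax(output[-1]).item()          # Greedy choice of next character
            generated_text += idx2char[next_id]
            input_ids = (input_ids + [next_id])[-seq_length:]  # Slide the window forward
    return generated_text

# Hypothetical usage
idx2char = {i: ch for ch, i in char2idx.items()}
generated = generate(model, start_text="Python", length=200,
                     char2idx=char2idx, idx2char=idx2char, seq_length=seq_length)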
Epoch [3/10], Loss: 2.1279394328594208
Epoch [4/10], Loss: 1.8134359568357468
Epoch [5/10], Loss: 1.537606194615364
Epoch [6/10], Loss: 1.2762676626443863
Epoch [7/10], Loss: 1.0253751650452614
Epoch [8/10], Loss: 0.8073843643069267
Epoch [9/10], Loss: 0.6257664263248444
Epoch [10/10], Loss: 0.49387771263718605
Generated Text:
# Convert data to a PyTorch tensor
data_tensor = torch.tensor(data_normalized, dtype=torch.float32)
# Model parameters
input_size = 1 # One feature (sine wave value)
hidden_size = 64
output_size = 1 # Predicting one value
learning_rate = 0.001
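The model definition itself did not survive the export. Below is a minimal sketch consistent with the parameters above and with the training function that follows: a single nn.RNN layer with batch_first=True and a linear head that predicts from the last time step. The class name SineRNN and the Adam/MSE choices are assumptions.

import torch.nn as nn

class SineRNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        out, _ = self.rnn(x)            # out: (batch, seq_len, hidden_size)
        return self.fc(out[:, -1, :])   # Predict one value from the last time step

model = SineRNN(input_size, hidden_size, output_size)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)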
# Step 3: Training the Model
def train_model(model, X_train, y_train, criterion, optimizer, epochs=100):
    model.train()
    for epoch in range(epochs):
        optimizer.zero_grad()
        output = model(X_train.unsqueeze(-1))  # Add feature dimension
        loss = criterion(output, y_train.unsqueeze(-1))
        loss.backward()
        optimizer.step()
        if (epoch + 1) % 10 == 0:
            print(f"Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}")
# Inverse transform the predictions and actual values back to original scale
predictions = scaler.inverse_transform(predictions.numpy())
y_test_original = scaler.inverse_transform(y_test.numpy().reshape(-1, 1))
# Step 5: Visualization
plt.figure(figsize=(10, 6))
plt.plot(time[train_size + seq_length:], y_test_original, label="Actual")
plt.plot(time[train_size + seq_length:], predictions, label="Predicted", linestyle='--')
6 Practical 6: Implement Sentiment Analysis using LSTM
Implementing sentiment analysis using an LSTM (Long Short-Term Memory) model in PyTorch
involves the following steps:
1. Data Preprocessing: Tokenizing the text and converting it to numerical representations (like
word embeddings).
2. Model Building: Defining the architecture using LSTM layers for sequential processing.
3. Training the Model: Training the model with the dataset and monitoring its performance.
4. Evaluation: Evaluating the model on unseen test data.
5. Prediction: Using the trained model to predict the sentiment of new text inputs.
import re
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from nltk.tokenize import word_tokenize
from collections import Counter
from torch.nn.utils.rnn import pad_sequence

def preprocess_text(text):
    text = text.lower()
    text = re.sub(r'\W', ' ', text)   # Replace non-word characters with spaces
    text = re.sub(r'\s+', ' ', text)  # Collapse repeated whitespace
    return text
negative_reviews = [
"The movie was boring and predictable.",
"Waste of time. Terrible plot and bad acting.",
"Not worth watching, I fell asleep halfway through.",
"This movie is awful. Don't waste your time."
]
# Create labels
positive_labels = [1] * len(positive_reviews) # Positive reviews labeled as 1
negative_labels = [0] * len(negative_reviews) # Negative reviews labeled as 0
tokenized_reviews = tokenize_text(reviews)
word_counter = Counter([word for review in tokenized_reviews for word in review])
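The tokenize_text() helper used above and the vocabulary/padding step were lost in extraction. A plausible sketch follows; reserving index 0 for padding and assuming reviews = positive_reviews + negative_reviews are both guesses.

def tokenize_text(texts):
    return [word_tokenize(preprocess_text(t)) for t in texts]

# Build a word -> index vocabulary; index 0 is reserved for the padding token
vocab = {word: idx + 1 for idx, (word, _) in enumerate(word_counter.most_common())}

# Encode every review as a sequence of indices and pad to a common length
encoded = [torch.tensor([vocab[w] for w in review]) for review in tokenized_reviews]
padded_sequences = pad_sequence(encoded, batch_first=True, padding_value=0)
labels = torch.tensor(positive_labels + negative_labels)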
    def __len__(self):
        return len(self.data)
# Create a DataLoader
dataset = SentimentDataset(padded_sequences, labels)
train_data, test_data = train_test_split(dataset, test_size=0.2, random_state=42)
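Only __len__ of the dataset class survives above, and the DataLoaders used later (train_loader, test_loader) are missing entirely. A minimal sketch, with the batch size as an assumption:

class SentimentDataset(Dataset):
    def __init__(self, sequences, labels):
        self.data = sequences
        self.labels = labels

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx], self.labels[idx]

train_loader = DataLoader(train_data, batch_size=2, shuffle=True)
test_loader = DataLoader(test_data, batch_size=2, shuffle=False)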
    def forward(self, x):
        embedded = self.embedding(x)
        lstm_out, (hidden, cell) = self.lstm(embedded)
        output = self.fc(hidden[-1])  # Use the last hidden state
        return self.softmax(output)
# Hyperparameters
vocab_size = len(vocab) + 1 # Add 1 for padding token
embedding_dim = 100
hidden_dim = 128
output_dim = 2 # Positive (1) or Negative (0)
learning_rate = 0.001
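Only the forward() method of the model survives above. Below is a sketch of the full class and its instantiation with the hyperparameters; the class name SentimentLSTM, the single-layer LSTM, and the LogSoftmax/NLLLoss pairing (which keeps the softmax in forward() consistent with the loss) are assumptions.

class SentimentLSTM(nn.Module):
    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, x):
        embedded = self.embedding(x)
        lstm_out, (hidden, cell) = self.lstm(embedded)
        output = self.fc(hidden[-1])   # Last hidden state summarizes the sequence
        return self.softmax(output)

model = SentimentLSTM(vocab_size, embedding_dim, hidden_dim, output_dim)
criterion = nn.NLLLoss()   # Pairs with LogSoftmax above (assumption)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)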
        epoch_loss += loss.item()
        preds = torch.argmax(output, dim=1)
        epoch_accuracy += accuracy_score(labels.numpy(), preds.numpy())
    with torch.no_grad():
        for data, target in test_loader:
            output = model(data)
            preds = torch.argmax(output, dim=1)
            predictions.extend(preds.numpy())
            labels.extend(target.numpy())
evaluate(model, test_loader)
new_review = "This movie is awful. Don't waste your time for this."
print(f"Predicted Sentiment: {predict_sentiment(model, new_review, vocab)}")
As training progresses, the Generator learns to produce increasingly realistic images.
7.0.1 Steps:
1. Define the Generator Network: This network takes random noise as input and outputs an
image.
2. Define the Discriminator Network: This network takes an image as input and outputs a
probability of whether the image is real or fake.
3. Train the GAN: The Generator and Discriminator are trained together. The Generator tries
to fool the Discriminator, while the Discriminator tries to correctly classify real and fake
images.
# Hyperparameters
batch_size = 64
latent_dim = 100
epochs = 50
lr = 0.0002
beta1 = 0.5
    nn.Linear(1024, 28*28),
    nn.Tanh()  # Output the image in the range [-1, 1]
)
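Only the last two layers of the generator survive above. Below is a minimal sketch of both fully connected networks for flattened 28x28 MNIST images; the hidden-layer widths, the LeakyReLU slopes, and the BCE loss are assumptions chosen to be consistent with the visible tail of the generator and the optimizers that follow.

import torch
import torch.nn as nn

generator = nn.Sequential(
    nn.Linear(latent_dim, 256),
    nn.LeakyReLU(0.2),
    nn.Linear(256, 512),
    nn.LeakyReLU(0.2),
    nn.Linear(512, 1024),
    nn.LeakyReLU(0.2),
    nn.Linear(1024, 28*28),
    nn.Tanh()              # Output the image in the range [-1, 1]
)

discriminator = nn.Sequential(
    nn.Linear(28*28, 512),
    nn.LeakyReLU(0.2),
    nn.Linear(512, 256),
    nn.LeakyReLU(0.2),
    nn.Linear(256, 1),
    nn.Sigmoid()           # Probability that the input image is real
)

criterion = nn.BCELoss()   # Binary real/fake loss (assumption)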
# Optimizers
optimizer_G = optim.Adam(generator.parameters(), lr=lr, betas=(beta1, 0.999))
optimizer_D = optim.Adam(discriminator.parameters(), lr=lr, betas=(beta1, 0.999))
fig, axes = plt.subplots(4, 4, figsize=(4, 4))
for i in range(4):
    for j in range(4):
        axes[i, j].imshow(generated_images[i*4 + j, 0], cmap='gray')
        axes[i, j].axis('off')
plt.tight_layout()
plt.savefig(f'gan_generated_images_epoch_{epoch}.png')
# Training loop
for epoch in range(epochs):
    for i, (imgs, _) in enumerate(dataloader):
        # Train the Discriminator
        real_imgs = imgs
        batch_size = real_imgs.size(0)

        # Real images
        optimizer_D.zero_grad()
        outputs = discriminator(real_imgs)
        d_loss_real = criterion(outputs, real_labels[:batch_size])
        d_loss_real.backward()

        # Fake images
        z = torch.randn(batch_size, latent_dim)
        fake_imgs = generator(z)
        outputs = discriminator(fake_imgs.detach())  # Detach to avoid updating the generator

        optimizer_D.step()

        g_loss.backward()
        optimizer_G.step()
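Several pieces of the loop above (the label tensors, the fake-image discriminator loss, and the start of the generator update) did not survive the export. Below is a sketch of one complete training step consistent with the surviving fragments; flattening the images and the non-saturating generator loss are assumptions.

for imgs, _ in dataloader:
    batch_size = imgs.size(0)
    real_imgs = imgs.view(batch_size, -1)          # Flatten to 784-dim vectors
    real_labels = torch.ones(batch_size, 1)
    fake_labels = torch.zeros(batch_size, 1)

    # Discriminator update: real loss + fake loss
    optimizer_D.zero_grad()
    d_loss_real = criterion(discriminator(real_imgs), real_labels)
    z = torch.randn(batch_size, latent_dim)
    fake_imgs = generator(z)
    d_loss_fake = criterion(discriminator(fake_imgs.detach()), fake_labels)  # Detach: don't update G here
    (d_loss_real + d_loss_fake).backward()
    optimizer_D.step()

    # Generator update: make the discriminator classify fake images as real
    optimizer_G.zero_grad()
    g_loss = criterion(discriminator(fake_imgs), real_labels)
    g_loss.backward()
    optimizer_G.step()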
# Save final generator model
torch.save(generator.state_dict(), "generator.pth")
Epoch [46/50], D Loss: 100.0, G Loss: 0.0
Epoch [47/50], D Loss: 100.0, G Loss: 0.0
Epoch [48/50], D Loss: 100.0, G Loss: 0.0
Epoch [49/50], D Loss: 100.0, G Loss: 0.0
Epoch [50/50], D Loss: 100.0, G Loss: 0.0