Deep Learning LAB
Deep Learning LAB
import numpy as np
# Load dataset
# CNN Model
# CNN classifier head producing 10-class softmax probabilities.
# NOTE(review): a CNN normally has a Conv2D layer before each pooling stage;
# the Conv2D lines appear to have been lost in the PDF extraction — as
# written the model opens with two bare MaxPooling2D layers and no
# input_shape. TODO: restore the convolution layers (the build_model
# function later in this file uses 28x28x1 MNIST-style input).
model = Sequential([
MaxPooling2D((2,2)),
MaxPooling2D((2,2)),
Flatten(),
Dense(128, activation='relu'),
Dropout(0.5),
Dense(10, activation='softmax')
])
# Compile Model
# Train Model
# Evaluate Model
# Predictions
# Run the trained model on the first five test images and display each one.
predictions = model.predict(x_test[:5])
for i in range(5):
    plt.figure(figsize=(4, 4), dpi=300)
    # BUGFIX(review): the original used curly quotes (”” / “off”), which are
    # invalid Python, and an empty interpolation string, which matplotlib
    # rejects; 'none' shows raw pixels — confirm against the intended output.
    plt.imshow(x_test[i], interpolation="none")
    plt.axis("off")
    plt.show()
def build_model(hp):
    """Build and compile a small CNN for 28x28x1 images with KerasTuner.

    The search space covers both conv layers' filter counts and kernel
    sizes, the dense layer width, the dropout rate, and the Adam
    learning rate.

    Args:
        hp: keras_tuner.HyperParameters handle used to declare the space.

    Returns:
        A compiled keras.Sequential model with a 10-class softmax output.
    """
    model = keras.Sequential()
    model.add(layers.Conv2D(
        filters=hp.Int('filters', min_value=32, max_value=128, step=32),
        kernel_size=hp.Choice('kernel_size', values=[3, 5]),
        activation='relu',
        input_shape=(28, 28, 1)))
    model.add(layers.MaxPooling2D(pool_size=2))
    model.add(layers.Conv2D(
        filters=hp.Int('filters_2', min_value=32, max_value=128, step=32),
        kernel_size=hp.Choice('kernel_size_2', values=[3, 5]),
        activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=2))
    model.add(layers.Flatten())
    model.add(layers.Dense(
        units=hp.Int('units', min_value=32, max_value=128, step=32),
        activation='relu'))
    model.add(layers.Dropout(
        rate=hp.Float('dropout', min_value=0.1, max_value=0.5, step=0.1)))
    model.add(layers.Dense(10, activation='softmax'))
    # BUGFIX(review): 'learning_rate' was split mid-identifier by a line wrap
    # in the extraction; rejoined here. sampling='log' (lowercase, per the
    # KerasTuner API) searches the rate on a logarithmic scale.
    model.compile(
        optimizer=keras.optimizers.Adam(
            learning_rate=hp.Float('learning_rate', min_value=1e-4,
                                   max_value=1e-2, sampling='log')),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])
    return model
def early_stopping_callback(monitor='val_loss', patience=3):
    """Return an EarlyStopping callback for model.fit.

    Generalized from the original hard-coded values; the defaults preserve
    the previous behavior, so existing callers are unaffected.

    Args:
        monitor: Metric to watch for improvement (default 'val_loss').
        patience: Epochs without improvement before stopping (default 3).

    Returns:
        A configured keras.callbacks.EarlyStopping instance.
    """
    return keras.callbacks.EarlyStopping(monitor=monitor, patience=patience)
EXERCISE-4:
Implement a Recurrent Neural Network for
Predicting Sequential Data
Code:
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# Run the trained RNN over the held-out test sequences.
predictions = model.predict(X_test)

# Overlay ground truth and model output on a single labeled figure.
for series, series_label in ((y_test, 'Actual Data'),
                             (predictions, 'Predicted Data')):
    plt.plot(series, label=series_label)
plt.legend()
plt.title("RNN Predictions vs Actual Data")
plt.show()
output:
/usr/local/lib/python3.11/dist-packages/keras/src/layers/rnn/rnn.py:200:
UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer.
When using Sequential models, prefer using an `Input(shape)` object as the
first layer in the model instead.
super().__init__(**kwargs)
Epoch 1/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 5s 9ms/step - loss: 0.3625
Epoch 2/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - loss: 0.0150
Epoch 3/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 15ms/step - loss: 0.0016
Epoch 4/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - loss: 3.2351e-04
Epoch 5/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 10ms/step - loss: 1.8168e-04
Epoch 6/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 1.0661e-04
Epoch 7/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 6.9849e-05
Epoch 8/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 4.6706e-05
Epoch 9/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - loss: 4.3910e-05
Epoch 10/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 3.5222e-05
Epoch 11/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 2.4322e-05
Epoch 12/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 1.9496e-05
Epoch 13/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 1.5485e-05
Epoch 14/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 1.5900e-05
Epoch 15/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 1.2644e-05
Epoch 16/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 1.3633e-05
Epoch 17/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 1.3374e-05
Epoch 18/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 8.4114e-06
Epoch 19/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.1619e-05
Epoch 20/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 1.3083e-05
Epoch 21/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 5.9959e-06
Epoch 22/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 7.2074e-06
Epoch 23/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 5.8178e-06
Epoch 24/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 5.3313e-06
Epoch 25/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 4.8431e-06
Epoch 26/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 4.3399e-06
Epoch 27/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 4.3617e-06
Epoch 28/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 3.7868e-06
Epoch 29/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 3.7098e-06
Epoch 30/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 4.0948e-06
Epoch 31/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 3.1495e-06
Epoch 32/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 3.6084e-06
Epoch 33/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 3.8976e-06
Epoch 34/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 3.4221e-06
Epoch 35/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 3.2972e-06
Epoch 36/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 3.5364e-06
Epoch 37/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 2.8882e-06
Epoch 38/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 2.9125e-06
Epoch 39/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 3.7844e-06
Epoch 40/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 2.1490e-06
Epoch 41/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 2.9625e-06
Epoch 42/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - loss: 1.8659e-06
Epoch 43/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 1.9545e-06
Epoch 44/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 2.4747e-06
Epoch 45/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - loss: 2.5827e-06
Epoch 46/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 2.1454e-06
Epoch 47/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 2.0774e-06
Epoch 48/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 2.2902e-06
Epoch 49/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.9725e-06
Epoch 50/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 2.2797e-06
7/7 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.3730e-06
Test Loss: 1.3749261142947944e-06
7/7 ━━━━━━━━━━━━━━━━━━━━ 0s 20ms/step
EXERCISE-5:
Implement a Multi-Layer Perceptron for
image denoising with hyperparameter tuning.
Code:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import keras_tuner as kt
import matplotlib.pyplot as plt
# Load the MNIST dataset
# Load MNIST digits; labels are discarded (denoising is unsupervised).
# BUGFIX(review): the assignment and the trailing comment were each split
# across lines by the PDF extraction; rejoined into valid statements.
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
# Scale pixel values from [0, 255] down to [0, 1].
x_train, x_test = x_train / 255.0, x_test / 255.0
model.compile(optimizer=keras.optimizers.Adam(hp.C
hoice('learning_rate', [0.001, 0.0005, 0.0001])),
loss='mse')
return model
# NOTE(review): this appears to be the body of a `for i in range(n):`
# display loop whose header (and first row's subplot) was lost in the
# extraction; `i`, `n`, `x_test_noisy`, and `x_test_denoised` come from
# that missing code — TODO confirm against the original notebook.
# Middle row of the 3-row figure: the noisy input image.
plt.subplot(3, n, i+n+1)
plt.imshow(x_test_noisy[i].reshape(28, 28),
cmap='gray')
plt.axis('off')
# Bottom row: the model's denoised reconstruction.
plt.subplot(3, n, i+2*n+1)
plt.imshow(x_test_denoised[i], cmap='gray')
plt.axis('off')
plt.show()
OUTPUT:
Trial 1 Complete [00h 02m 48s]
val_loss: 0.017095204442739487
Epoch 1/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 12s 6ms/step
- loss: 0.0843 - val_loss: 0.0387
Epoch 2/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 22s 7ms/step
- loss: 0.0360 - val_loss: 0.0291
Epoch 3/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 12s 6ms/step
- loss: 0.0278 - val_loss: 0.0242
Epoch 4/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 20s 6ms/step
- loss: 0.0238 - val_loss: 0.0216
Epoch 5/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 20s 6ms/step
- loss: 0.0215 - val_loss: 0.0200
Epoch 6/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 22s 6ms/step
- loss: 0.0199 - val_loss: 0.0190
Epoch 7/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 12s 6ms/step
- loss: 0.0189 - val_loss: 0.0182
Epoch 8/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 12s 6ms/step
- loss: 0.0181 - val_loss: 0.0177
Epoch 9/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 21s 6ms/step
- loss: 0.0176 - val_loss: 0.0172
Epoch 10/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 19s 6ms/step
- loss: 0.0171 - val_loss: 0.0169
EXERCISE-6:
Code: Implement Object Detection Using YOLO.
# Install Ultralytics YOLOv8 (if not installed)
!pip install ultralytics
# NOTE(review): `image` / `image_path` come from an image-loading step
# (presumably cv2.imread) lost in the extraction — TODO confirm.
# BUGFIX(review): the extraction flattened the if/else bodies to column 0,
# which is a SyntaxError in Python; indentation restored here.
if image is None:
    # Loading failed; report the path so the user can check file/format.
    print(f"Error: Could not read '{image_path}'! Check the file format.")
else:
    # Load YOLO model
    model = YOLO("yolov8n.pt")  # Using YOLOv8 nano model
output:
first time execution:
# Model configuration for a CIFAR-10-style problem:
# 32x32 RGB inputs and ten target classes.
input_shape = (32, 32, 3)  # (height, width, channels)
num_classes = 10
output:
EXERCISE-8:
Code: Build AlexNet using Advanced CNN
# Imports for the AlexNet-style CIFAR-10 classifier.
import tensorflow as tf
from tensorflow.keras import layers, models, Input
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
# BUGFIX(review): this import list was wrapped onto a second line by the
# PDF extraction (a SyntaxError); parenthesized into one valid statement.
from tensorflow.keras.layers import (Dense, Dropout,
                                     BatchNormalization, Flatten, Activation)
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.datasets import cifar10

# Load dataset: CIFAR-10's 50k train / 10k test 32x32 RGB images.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Classifier head: two structurally identical 4096-unit ReLU blocks,
# each regularized with dropout and batch normalization.
for _ in range(2):
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.4))
    model.add(BatchNormalization())
# Output layer: one softmax unit per CIFAR-10 class.
model.add(Dense(10, activation='softmax'))
# Score the first 100 test images, then take the most probable class
# for the first sample.
predictions = model.predict(x_test[:100])
predicted_class = predictions[0].argmax()
output:
Epoch 1/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 300s 423ms/step - accuracy:
0.2408 - loss: 2.7379 - val_accuracy: 0.2472 - val_loss: 2.2091
Epoch 2/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 294s 418ms/step - accuracy:
0.2989 - loss: 2.0863 - val_accuracy: 0.3036 - val_loss: 2.0798
Epoch 3/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 323s 419ms/step - accuracy:
0.3067 - loss: 1.9623 - val_accuracy: 0.3048 - val_loss: 2.3234
Epoch 4/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 329s 430ms/step - accuracy:
0.3201 - loss: 1.9092 - val_accuracy: 0.3684 - val_loss: 2.6944
Epoch 5/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 319s 426ms/step - accuracy:
0.3229 - loss: 1.8966 - val_accuracy: 0.3454 - val_loss: 2.9305
Epoch 6/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 323s 427ms/step - accuracy:
0.3256 - loss: 1.8717 - val_accuracy: 0.3846 - val_loss: 3.0424
Epoch 7/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 298s 424ms/step - accuracy:
0.3290 - loss: 1.8508 - val_accuracy: 0.3576 - val_loss: 2.8947
Epoch 8/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 325s 429ms/step - accuracy:
0.3315 - loss: 1.8492 - val_accuracy: 0.4048 - val_loss: 3.8287
Epoch 9/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 302s 430ms/step - accuracy:
0.3436 - loss: 1.8211 - val_accuracy: 0.2302 - val_loss: 12.9044
Epoch 10/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 303s 431ms/step - accuracy:
0.3450 - loss: 1.8126 - val_accuracy: 0.3766 - val_loss: 3.2894
313/313 ━━━━━━━━━━━━━━━━━━━━ 11s 34ms/step - accuracy:
0.3874 - loss: 1.9723
Evaluation Loss: 2.0299267768859863, Accuracy:
0.3774000108242035
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 48ms/step
EXERCISE-9:
CODE: Demonstration of Application of Autoencoders.
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Input, Dense, Flatten, Reshape
from tensorflow.keras.datasets import mnist
# Fetch MNIST; class labels are unused for autoencoder training.
(x_train, _), (x_test, _) = mnist.load_data()
# Rescale pixel intensities into the unit interval.
x_train = x_train / 255.0
x_test = x_test / 255.0
# Train Autoencoder: noisy images in, clean images as targets, validated
# on the noisy/clean test pair. NOTE(review): `autoencoder`,
# `x_train_noisy`, and `x_test_noisy` are built in code lost from this
# extraction — TODO confirm the noise-injection step.
autoencoder.fit(x_train_noisy, x_train, epochs=10, batch_size=256,
shuffle=True, validation_data=(x_test_noisy, x_test))
# Reconstruct (denoise) the noisy test images for display below.
decoded_imgs = autoencoder.predict(x_test_noisy)
# Denoised Image
# NOTE(review): appears to be part of a `for i in range(n):` display loop
# whose header (and the first row's noisy-image subplot) was lost in the
# extraction; `i` and `n` come from that missing code. Second row of the
# 2-row figure shows the autoencoder's reconstruction.
plt.subplot(2, n, i + 1 + n)
plt.imshow(decoded_imgs[i], cmap='gray')
plt.axis('off')
plt.show()
OUTPUT:
EXERCISE-10:
Code: Demonstration of GAN
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
# Hyperparameters
latent_dim = 100  # length of the noise vector z fed to the generator
batch_size = 64   # configured batch size (shadowed per-batch in the training loop)
epochs = 10       # number of full passes over the dataset
lr = 0.0002       # learning rate — presumably for optimizer_D/optimizer_G; confirm
# Define Discriminator
class Discriminator(nn.Module):
    """MLP discriminator: maps a (flattened) 784-pixel image to a
    real/fake probability in (0, 1)."""

    def __init__(self):
        super(Discriminator, self).__init__()
        # 784 -> 256 -> 128 -> 1, ReLU hidden activations, sigmoid output
        # so the score is a probability.
        self.model = nn.Sequential(
            nn.Linear(784, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        # BUGFIX(review): the original class defined no forward(), yet the
        # training loop calls discriminator(imgs) — nn.Module would raise
        # NotImplementedError. Flatten to (batch, 784) because the first
        # Linear expects 784 features.
        return self.model(x.view(x.size(0), -1))
# Initialize models
# NOTE(review): Generator is defined in code missing from this extraction;
# the training loop feeds it latent_dim noise vectors and scores its
# output with the discriminator — confirm its output is 784-dim.
generator = Generator()
discriminator = Discriminator()
# Training Loop: alternate one discriminator step and one generator step
# per minibatch (standard GAN recipe). NOTE(review): `dataloader`,
# `optimizer_D`, `optimizer_G`, and `criterion` are created in code lost
# from this extraction — presumably Adam(lr) and BCELoss; confirm.
# BUGFIX(review): the extraction flattened all indentation (a SyntaxError)
# and split the final f-string across two lines; both are repaired here.
for epoch in range(epochs):
    for real_imgs, _ in dataloader:
        # Actual size of this batch (the last one may be smaller); this
        # deliberately shadows the module-level batch_size setting.
        batch_size = real_imgs.size(0)

        # Generate fake images from Gaussian noise.
        z = torch.randn(batch_size, latent_dim)
        fake_imgs = generator(z)

        # Target labels: 1 for real samples, 0 for generated ones.
        real_labels = torch.ones(batch_size, 1)
        fake_labels = torch.zeros(batch_size, 1)

        # --- Train Discriminator ---
        optimizer_D.zero_grad()
        real_loss = criterion(discriminator(real_imgs), real_labels)
        # detach() stops discriminator gradients from reaching the generator.
        fake_loss = criterion(discriminator(fake_imgs.detach()), fake_labels)
        d_loss = real_loss + fake_loss
        d_loss.backward()
        optimizer_D.step()

        # --- Train Generator ---
        # The generator improves when the discriminator labels its fakes real.
        optimizer_G.zero_grad()
        g_loss = criterion(discriminator(fake_imgs), real_labels)
        g_loss.backward()
        optimizer_G.step()

    # Report the last batch's losses once per epoch.
    print(f"Epoch {epoch+1}/{epochs} | D Loss: {d_loss:.4f} | G Loss: {g_loss:.4f}")