Deep Learning Practical File
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_dir = 'data/train'
validation_dir = 'data/validation'
test_dir = 'data/test'
datagen = ImageDataGenerator(rescale=1./255)
batch_size = 20
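# NOTE: the original listing omits the convolutional base and the feature-extraction
# step that produces train_features; the sketch below is an assumption (the 150x150
# input size and the 2000/1000 sample counts are illustrative, not from the file).
import numpy as np
from tensorflow.keras import models, layers
from tensorflow.keras.applications import VGG16

conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(150, 150, 3))

def extract_features(directory, sample_count):
    # run the frozen VGG16 base over the directory and cache its outputs
    features = np.zeros((sample_count, 4, 4, 512))
    labels = np.zeros((sample_count,))
    generator = datagen.flow_from_directory(directory, target_size=(150, 150),
                                            batch_size=batch_size, class_mode='binary')
    i = 0
    for inputs_batch, labels_batch in generator:
        features_batch = conv_base.predict(inputs_batch)
        features[i * batch_size:(i + 1) * batch_size] = features_batch
        labels[i * batch_size:(i + 1) * batch_size] = labels_batch
        i += 1
        if i * batch_size >= sample_count:
            break
    return features, labels

train_features, train_labels = extract_features(train_dir, 2000)
validation_features, validation_labels = extract_features(validation_dir, 1000)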
model = models.Sequential()
model.add(layers.Flatten(input_shape=train_features.shape[1:]))
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
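# NOTE: assumed training step for the feature-extraction classifier defined above
# (optimizer and epoch count are illustrative)
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(train_features, train_labels, epochs=10, batch_size=batch_size,
                    validation_data=(validation_features, validation_labels))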
# fine-tuning: unfreeze only the last convolutional block of the base
conv_base.trainable = True
set_trainable = False
for layer in conv_base.layers:
    if layer.name == 'block5_conv1':
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False
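A minimal sketch of how the fine-tuned model might then be assembled and trained end-to-end (the directory generators and the low learning rate are assumptions, not shown in the original listing):
from tensorflow.keras import optimizers

ft_model = models.Sequential()
ft_model.add(conv_base)
ft_model.add(layers.Flatten())
ft_model.add(layers.Dense(256, activation='relu'))
ft_model.add(layers.Dense(1, activation='sigmoid'))
ft_model.compile(optimizer=optimizers.RMSprop(learning_rate=1e-5),
                 loss='binary_crossentropy', metrics=['accuracy'])

train_generator = datagen.flow_from_directory(train_dir, target_size=(150, 150),
                                              batch_size=batch_size, class_mode='binary')
validation_generator = datagen.flow_from_directory(validation_dir, target_size=(150, 150),
                                                   batch_size=batch_size, class_mode='binary')
ft_model.fit(train_generator, epochs=10, validation_data=validation_generator)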
import tensorflow as tf
import numpy as np
import cv2
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

PATH_TO_MODEL = 'path/to/your/frozen_inference_graph.pb'
PATH_TO_LABELS = 'path/to/your/label_map.pbtxt'
NUM_CLASSES = 90
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_MODEL, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
with detection_graph.as_default():
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
    detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')
image = cv2.imread('path/to/your/image.jpg')
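# NOTE: assumed inference step (not shown in the original fragment) -- run the TF1
# session on a batch of one image to obtain the detections used below
image_expanded = np.expand_dims(image, axis=0)
with tf.Session(graph=detection_graph) as sess:
    (boxes, scores, classes, num) = sess.run(
        [detection_boxes, detection_scores, detection_classes, num_detections],
        feed_dict={image_tensor: image_expanded})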
vis_util.visualize_boxes_and_labels_on_image_array(
    image, np.squeeze(boxes), np.squeeze(classes).astype(np.int32),
    np.squeeze(scores), category_index,
    use_normalized_coordinates=True, line_thickness=8)
Practical 3: Improve the Deep Learning Model by Tuning Hyperparameters
import numpy as np
from sklearn.model_selection import train_test_split, GridSearchCV
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier

X = np.load('X.npy')
y = np.load('y.npy')
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

def create_model(num_neurons=64, dropout_rate=0.5, learning_rate=0.001):
    model = Sequential()
    model.add(Dropout(dropout_rate))
    model.add(Dense(num_neurons, activation='relu'))
    model.add(Dropout(dropout_rate))
    model.add(Dense(1, activation='sigmoid'))
    optimizer = Adam(learning_rate=learning_rate)
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model
model = KerasClassifier(build_fn=create_model)
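# NOTE: assumed search setup -- the original file truncates the parameter grid,
# so the values below are illustrative only
params = {
    'num_neurons': [32, 64, 128],
    'dropout_rate': [0.3, 0.5],
    'learning_rate': [1e-2, 1e-3],
    'batch_size': [32, 64],
    'epochs': [10],
}
search = GridSearchCV(estimator=model, param_grid=params, cv=3, scoring='accuracy')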
search.fit(X_train, y_train)
y_pred = search.predict(X_val)
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from scipy.sparse.linalg import svds
from tensorflow.keras.layers import Input, Dense, Dropout
from tensorflow.keras.models import Model

sales_df = pd.read_csv('sales_data.csv')
user_encoder = LabelEncoder()
item_encoder = LabelEncoder()
sales_df['user_id'] = user_encoder.fit_transform(sales_df['user_id'])
sales_df['item_id'] = item_encoder.fit_transform(sales_df['item_id'])
X = sales_df[['user_id', 'item_id']].values
y = sales_df['sales_count'].values
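# NOTE: assumed step (not in the original fragment) -- build the dense user-item
# matrix that svds() factorizes, with users as rows and items as columns
# (assumes more than 20 distinct users and items, so k=20 is valid)
sales_matrix = sales_df.pivot_table(index='user_id', columns='item_id',
                                    values='sales_count', fill_value=0).values.astype(float)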
U, S, Vt = svds(sales_matrix, k=20)
# Construct the user and item embeddings
user_embeddings = U
item_embeddings = Vt.T
user_input = Input(shape=(1,))
item_input = Input(shape=(1,))
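# NOTE: the original fragment omits the layers between the inputs and the dropouts;
# a minimal assumed sketch -- embed each id, flatten, and add one dense branch per input
from tensorflow.keras.layers import Embedding, Flatten

n_users = sales_df['user_id'].nunique()
n_items = sales_df['item_id'].nunique()
dense1 = Dense(64, activation='relu')(Flatten()(Embedding(n_users, 20)(user_input)))
dense2 = Dense(64, activation='relu')(Flatten()(Embedding(n_items, 20)(item_input)))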
dropout1 = Dropout(0.5)(dense1)
dropout2 = Dropout(0.5)(dense2)
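# NOTE: assumed completion of the model -- merge the two branches and regress the sales count
from tensorflow.keras.layers import Concatenate

merged = Concatenate()([dropout1, dropout2])
output = Dense(1)(merged)
rec_model = Model([user_input, item_input], output)
rec_model.compile(optimizer='adam', loss='mse')
rec_model.fit([X[:, 0].reshape(-1, 1), X[:, 1].reshape(-1, 1)], y,
              epochs=5, batch_size=64, validation_split=0.1)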
import numpy as np
user_items = user_items.set_index('user_id')
user_items['item_scores'] = user_item_scores_normalized
# Get the top-scoring items for each user
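A sketch of the truncated final step, assuming user_item_scores_normalized holds one normalized score per (user, item) row of user_items:
top_n = 5
# highest-scoring items per user, best first
top_items = (user_items.sort_values('item_scores', ascending=False)
                       .groupby(level='user_id')
                       .head(top_n))
print(top_items)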
Practical 5: Perform Sentiment Analysis in Network Graph Using RNN
from tensorflow.keras.layers import SimpleRNN, LSTM, GRU, Bidirectional, Dense, Embedding
from tensorflow.keras.datasets import imdb
from tensorflow.keras.models import Sequential
import numpy as np
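# NOTE: assumed setup -- the original fragment omits the data loading and the model
# definition; the vocabulary size, sequence length, and layer sizes are illustrative
from tensorflow.keras.preprocessing.sequence import pad_sequences

vocab_size = 10000
max_len = 200
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=vocab_size)
x_train = pad_sequences(x_train, maxlen=max_len)
x_test = pad_sequences(x_test, maxlen=max_len)

RNN_model = Sequential([
    Embedding(vocab_size, 32, input_length=max_len),
    SimpleRNN(32),
    Dense(1, activation='sigmoid'),
])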
print(x_train[0])
# Compiling model
RNN_model.compile(
    loss="binary_crossentropy",
    optimizer='adam',
    metrics=['accuracy']
)
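# NOTE: assumed training/evaluation step (epochs and batch size are illustrative)
history = RNN_model.fit(x_train, y_train, epochs=3, batch_size=64, validation_split=0.2)
loss, acc = RNN_model.evaluate(x_test, y_test)
print(f"Test accuracy: {acc:.4f}")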
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.layers import Input, Dense, LeakyReLU, Activation, Reshape
from tensorflow.keras.models import Model

def define_generator(latent_dim):
    init = RandomNormal(stddev=0.02)
    in_lat = Input(shape=(latent_dim,))
    gen = Dense(256, kernel_initializer=init)(in_lat)
    gen = LeakyReLU(alpha=0.2)(gen)
    gen = Dense(512, kernel_initializer=init)(gen)
    gen = LeakyReLU(alpha=0.2)(gen)
    gen = Dense(1024, kernel_initializer=init)(gen)
    gen = LeakyReLU(alpha=0.2)(gen)
    gen = Dense(28 * 28 * 1, kernel_initializer=init)(gen)
    # apply the tanh activation first, then reshape to a 28x28 image
    out_layer = Activation('tanh')(gen)
    out_layer = Reshape((28, 28, 1))(out_layer)
    model = Model(in_lat, out_layer)
    return model
generator = define_generator(100)
latent_dim = 100
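# NOTE: the discriminator, the composite GAN, the training data, and the training loop
# are not shown in the original file; the definitions below are an assumed sketch so
# that the train() call underneath can run (layer sizes and optimizer settings are illustrative)
import numpy as np
from tensorflow.keras.layers import Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.datasets.mnist import load_data

def define_discriminator(in_shape=(28, 28, 1)):
    init = RandomNormal(stddev=0.02)
    in_image = Input(shape=in_shape)
    d = Flatten()(in_image)
    d = Dense(512, kernel_initializer=init)(d)
    d = LeakyReLU(alpha=0.2)(d)
    d = Dense(256, kernel_initializer=init)(d)
    d = LeakyReLU(alpha=0.2)(d)
    out = Dense(1, activation='sigmoid')(d)
    model = Model(in_image, out)
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(learning_rate=0.0002, beta_1=0.5), metrics=['accuracy'])
    return model

def define_gan(generator, discriminator):
    # only the generator is updated through the composite model
    discriminator.trainable = False
    model = Model(generator.input, discriminator(generator.output))
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(learning_rate=0.0002, beta_1=0.5))
    return model

def generate_latent_points(latent_dim, n_samples):
    return np.random.randn(n_samples, latent_dim)

def train(generator, discriminator, gan_model, dataset, latent_dim, n_epochs=20, n_batch=64):
    batches_per_epoch = dataset.shape[0] // n_batch
    half_batch = n_batch // 2
    for epoch in range(n_epochs):
        for _ in range(batches_per_epoch):
            # train the discriminator on half a batch of real and half a batch of fake images
            idx = np.random.randint(0, dataset.shape[0], half_batch)
            X_real, y_real = dataset[idx], np.ones((half_batch, 1))
            z = generate_latent_points(latent_dim, half_batch)
            X_fake, y_fake = generator.predict(z, verbose=0), np.zeros((half_batch, 1))
            discriminator.train_on_batch(X_real, y_real)
            discriminator.train_on_batch(X_fake, y_fake)
            # train the generator through the composite model with flipped ("real") labels
            z = generate_latent_points(latent_dim, n_batch)
            gan_model.train_on_batch(z, np.ones((n_batch, 1)))
        print(f'epoch {epoch + 1}/{n_epochs} done')
    generator.save('model_18740.h5')

# MNIST images scaled to the generator's tanh range [-1, 1] (assumed preprocessing)
(trainX, _), (_, _) = load_data()
X_train = (trainX.astype('float32').reshape(-1, 28, 28, 1) - 127.5) / 127.5

discriminator = define_discriminator()
gan_model = define_gan(generator, discriminator)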
train(generator, discriminator, gan_model, X_train, latent_dim, n_epochs=20, n_batch=64)
import math
from tensorflow.keras.models import load_model

model = load_model('model_18740.h5')
latent_dim = 100
n_examples = 100
latent_points = generate_latent_points(latent_dim, n_examples)
X = model.predict(latent_points)
X = (X + 1) / 2.0
# Calculate the number of rows needed for the plot
n_rows = math.ceil(n_examples / 10)
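# NOTE: save_plot is not defined in the original fragment; a minimal assumed helper
# that draws the generated digits on an n_rows x 10 grid
from matplotlib import pyplot as plt

def save_plot(examples, n_rows, n_cols=10):
    for i in range(n_rows * n_cols):
        plt.subplot(n_rows, n_cols, i + 1)
        plt.axis('off')
        plt.imshow(examples[i, :, :, 0], cmap='gray_r')
    plt.savefig('generated_plot.png')
    plt.show()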
save_plot(X, n_rows)
Practical 7: Implement a Convolutional Neural Network Application Using TensorFlow and Keras
import numpy as np
import pandas as pd
from numpy import unique, argmax
from tensorflow.keras.datasets.mnist import load_data
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.utils import plot_model
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
(train_x, train_y), (test_x, test_y) = load_data()
# add a channel dimension so the Conv2D layers accept the images
train_x = train_x.reshape((train_x.shape[0], 28, 28, 1))
test_x = test_x.reshape((test_x.shape[0], 28, 28, 1))
print(train_x.shape, train_y.shape)
print(test_x.shape, test_y.shape)
train_x = train_x.astype('float32')/255.0
test_x = test_x.astype('float32')/255.0
fig, axes = plt.subplots(4, 5, figsize=(10, 8))
for i, ax in enumerate(axes.flat):
    ax.imshow(np.squeeze(train_x[i]), cmap='gray')
    ax.set_title(train_y[i])
plt.show()
shape = train_x.shape[1:]
shape
#CNN Model
model = Sequential()
# convolutional feature extractor (filter counts here are illustrative)
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()
#compiling model
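# NOTE: assumed compile/train/evaluate step (epochs and batch size are illustrative);
# sparse_categorical_crossentropy is used because the labels are plain integers
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_x, train_y, epochs=5, batch_size=128, validation_split=0.1)
loss, accuracy = model.evaluate(test_x, test_y)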
print(f'Accuracy: {accuracy*100}')
Practical 8: Implement Object Detection Using Transfer Learning of CNN Architectures
# data handling
import pandas as pd
# image processing
import matplotlib.image as mpimg
# utility functions
from tensorflow.keras.utils import to_categorical
# sequential model
from tensorflow.keras.models import Sequential
# layers
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout
train = pd.read_csv("/content/train.csv")
test = pd.read_csv("/content/test.csv")
train.head()
test.head()
print(train.isna().sum().sum())
print(test.isna().sum().sum())
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(8, 5))
sns.countplot(x='label', data=train, palette='Dark2')  # assumes the label column is named 'label'
plt.title('Train labels count')
plt.show()
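The fragment above only loads and inspects the data. A minimal sketch of how the practical might continue is given below; it assumes the CSV follows the Kaggle MNIST-style layout (a 'label' column followed by 784 pixel columns) and uses only the layers imported above (a plain CNN rather than a pre-trained base, since no pre-trained model is imported in the fragment); the layer sizes are illustrative.
X = train.drop('label', axis=1).values.reshape(-1, 28, 28, 1).astype('float32') / 255.0
y = to_categorical(train['label'].values)

model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    MaxPool2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPool2D((2, 2)),
    Flatten(),
    Dropout(0.5),
    Dense(y.shape[1], activation='softmax'),
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X, y, epochs=5, batch_size=64, validation_split=0.1)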
Practical 9: Implement Hyperparameter Tuning and Regularization Practice
• Mini-batch Gradient Descent
import torch
import numpy as np
import matplotlib.pyplot as plt
...
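# NOTE: the elided setup presumably creates the synthetic data, the Dataset, the
# DataLoader, and the loss; a minimal assumed sketch is given below
from torch.utils.data import Dataset, DataLoader

X = torch.arange(-5, 5, 0.1).view(-1, 1)
func = -5 * X                              # the underlying linear function
Y = func + 0.4 * torch.randn(X.size())     # noisy observations

class Build_Data(Dataset):
    def __init__(self):
        self.x = X
        self.y = Y
        self.len = self.x.shape[0]
    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]
    def __len__(self):
        return self.len

dataset = Build_Data()
train_loader = DataLoader(dataset=dataset, batch_size=1)   # batch size 1 for SGD
criterion = torch.nn.MSELoss()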
# Plot and visualizing the data points in blue
plt.plot(X.numpy(), Y.numpy(), 'b+', label='Y')
plt.plot(X.numpy(), func.numpy(), 'r', label='func')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.grid(True, color='y')
plt.show()
...
# defining the function for the forward pass (prediction)
def forward(x):
    return w * x + b
...
# Stochastic gradient descent (batch size 1)
w = torch.tensor(-10.0, requires_grad=True)
b = torch.tensor(-20.0, requires_grad=True)
step_size = 0.1
loss_SGD = []
n_iter = 20
for i in range(n_iter):
    # calculating the loss at the beginning of an epoch and storing it
    y_pred = forward(X)
    loss_SGD.append(criterion(y_pred, Y).tolist())
    for x, y in train_loader:
        # making a prediction in the forward pass
        y_hat = forward(x)
        # calculating the loss between the original and predicted data points
        loss = criterion(y_hat, y)
        # backward pass for computing the gradients of the loss w.r.t. the learnable parameters
        loss.backward()
        # updating the parameters after each iteration
        w.data = w.data - step_size * w.grad.data
        b.data = b.data - step_size * b.grad.data
        # zeroing gradients after each iteration
        w.grad.data.zero_()
        b.grad.data.zero_()
# Mini-batch gradient descent with batch size 10
train_loader_10 = DataLoader(dataset=dataset, batch_size=10)
# Reset w and b
w = torch.tensor(-10.0, requires_grad=True)
b = torch.tensor(-20.0, requires_grad=True)
loss_MBGD_10 = []
for i in range(n_iter):
    # calculating the loss at the beginning of an epoch and storing it
    y_pred = forward(X)
    loss_MBGD_10.append(criterion(y_pred, Y).tolist())
    for x, y in train_loader_10:
        # making a prediction in the forward pass
        y_hat = forward(x)
        # calculating the loss between the original and predicted data points
        loss = criterion(y_hat, y)
        # backward pass for computing the gradients of the loss w.r.t. the learnable parameters
        loss.backward()
        # updating the parameters after each iteration
        w.data = w.data - step_size * w.grad.data
        b.data = b.data - step_size * b.grad.data
        # zeroing gradients after each iteration
        w.grad.data.zero_()
        b.grad.data.zero_()
# Mini-batch gradient descent with batch size 20
train_loader_20 = DataLoader(dataset=dataset, batch_size=20)
# Reset w and b
w = torch.tensor(-10.0, requires_grad=True)
b = torch.tensor(-20.0, requires_grad=True)
loss_MBGD_20 = []
for i in range(n_iter):
    # calculating the loss at the beginning of an epoch and storing it
    y_pred = forward(X)
    loss_MBGD_20.append(criterion(y_pred, Y).tolist())
    for x, y in train_loader_20:
        # making a prediction in the forward pass
        y_hat = forward(x)
        # calculating the loss between the original and predicted data points
        loss = criterion(y_hat, y)
        # backward pass for computing the gradients of the loss w.r.t. the learnable parameters
        loss.backward()
        # updating the parameters after each iteration
        w.data = w.data - step_size * w.grad.data
        b.data = b.data - step_size * b.grad.data
        # zeroing gradients after each iteration
        w.grad.data.zero_()
        b.grad.data.zero_()
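Finally, the three loss histories can be compared on one plot (a short addition; not part of the original listing):
plt.plot(loss_SGD, label='SGD (batch size 1)')
plt.plot(loss_MBGD_10, label='mini-batch, size 10')
plt.plot(loss_MBGD_20, label='mini-batch, size 20')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()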