
AIML Lab Manual


CS3491-ARTIFICIAL INTELLIGENCE AND MACHINE LEARNING

LABORATORY

II CSE/IV
LIST OF EXPERIMENTS

1. Implementation of Uninformed search algorithms (BFS, DFS)

2. Implementation of Informed search algorithms (A*, memory-bounded A*)

3. Implement naïve Bayes models

4. Implement Bayesian Networks

5. Build Regression models

6. Build decision trees and random forests

7. Build SVM models

8. Implement ensembling techniques

9. Implement clustering algorithms

10. Implement EM for Bayesian networks

11. Build simple NN models

12. Build deep learning NN models

EX.NO: 1 IMPLEMENTATION OF UNINFORMED SEARCH ALGORITHMS (BFS, DFS)
DATE:

AIM:
To implement the uninformed search algorithms BFS (Breadth-First Search) and DFS (Depth-First Search) using Python.

a) Breadth-First Search (BFS)
PROGRAM
graph = {'5': ['3', '7'], '3': ['2', '4'], '7': ['8'], '2': [], '4': ['8'], '8': []}
visited = []  # List for visited nodes
queue = []    # Initialize a queue

def bfs(visited, graph, node):  # function for BFS
    visited.append(node)
    queue.append(node)
    while queue:  # loop to visit each node
        m = queue.pop(0)
        print(m, end=" ")
        for neighbour in graph[m]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)

# Driver Code
print("The Breadth-First Search of the graph is:")
bfs(visited, graph, '5')  # function call
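
Note: queue.pop(0) is O(n) on a Python list. A minimal sketch of the same traversal using collections.deque, whose popleft() is O(1) (the name bfs_deque is illustrative, not part of the original program):

from collections import deque

def bfs_deque(graph, start):
    visited = [start]
    queue = deque([start])
    while queue:
        m = queue.popleft()  # O(1) instead of list.pop(0)
        print(m, end=" ")
        for neighbour in graph[m]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)

With the adjacency list above, both versions print the traversal 5 3 7 2 4 8.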

b) Depth-First Search (DFS)
PROGRAM
# Using a Python dictionary to act as an adjacency list
graph = {'5': ['3', '7'], '3': ['2', '4'], '7': ['8'], '2': [], '4': ['8'], '8': []}
visited = set()  # Set to keep track of visited nodes of the graph

def dfs(visited, graph, node):  # function for DFS
    if node not in visited:
        print(node)
        visited.add(node)
        for neighbour in graph[node]:
            dfs(visited, graph, neighbour)

# Driver Code
print("The Depth-First Search of the graph is:")
dfs(visited, graph, '5')  # function call
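
For comparison, a sketch of an iterative DFS that replaces the recursion with an explicit stack (dfs_iterative is an illustrative name; neighbours are pushed in reverse so the visit order matches the recursive version):

def dfs_iterative(graph, start):
    visited = set()
    stack = [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            print(node)
            visited.add(node)
            # push in reverse so the first neighbour is processed first
            for neighbour in reversed(graph[node]):
                stack.append(neighbour)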

OUTPUT:

RESULT:
Thus the uninformed search algorithms BFS and DFS were implemented in Python and executed successfully.

EX.NO: 2 IMPLEMENTATION OF INFORMED SEARCH ALGORITHMS (A*, MEMORY-BOUNDED A*)
DATE:

AIM:
To implement the informed search algorithms A* and memory-bounded A* using Python.

a) A*
PROGRAM
print("A* Implementation in Python:\n")
def f(g, h, n):
return g[n] + h[n]
#remove front, add to visited
def update(to_remove, to_add, m):
to_remove.remove(m)
to_add.append(m)
def a_star_algo(cost, heuristic, start, goals):
path = [] #optimal path
pathSet = []
## closed list
closed_list = [] # ex: S, A, ...
## open list
open_list = [start]
path_len = {}
path_len[start] = 0
#for back-tracking:
parent_node = {}
parent_node[start]=start
while len(open_list) > 0:
#get node with least f
node = None
6
for n in open_list:
if node == None or f(path_len, heuristic, n) < f(path_len, heuristic, node):
node = n
if node == None: #path does not exist
break
if node in goals: #[6, 7, 10]
f_n = f(path_len, heuristic, node)
reconstruct = []
aux = node
while parent_node[aux] != aux: # [(S, 9, S), (A, 6, S)]
reconstruct.append(aux) #[ A, S]
aux = parent_node[aux]
reconstruct.append(start)
reconstruct.reverse()
pathSet.append((reconstruct, f_n))
update(open_list, closed_list, node)
continue
#explore the current node
path_cost = cost[node] #[0, 0, 5, 9, -1, 6, -1, -1, -1, -1, -1]
for adj_node in range(0, len(path_cost)):
weight = path_cost[adj_node]
if weight > 0:
if adj_node not in open_list and adj_node not in closed_list:
open_list.append(adj_node)
parent_node[adj_node] = node
path_len[adj_node] = path_len[node] + weight
else:
if path_len[adj_node] > path_len[node] + weight:
path_len[adj_node] = path_len[node] + weight
parent_node[adj_node] = node
if adj_node in closed_list:
7
update(closed_list, open_list, adj_node)
update(open_list, closed_list, node)
if len(pathSet) > 0:
pathSet = sorted(pathSet, key=lambda x: x[1]) #[([1,5,7], 8), ([1,2,3], 10)]
path = pathSet[0][0]
return path
#driver code
#Input
give_cost = [[0,1,2.1],[1,0,1],[3.1,1,0]]
start=0
give_goals=[2,3]
heuristic = [1,2.1,0]
getPath = a_star_algo(give_cost, heuristic, start, give_goals)
print(getPath)
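
In this formulation, cost is an adjacency matrix (cost[i][j] > 0 is the edge weight from node i to node j; 0 or a negative value means no edge), heuristic[i] is the estimated cost from node i to a goal, and goals lists the goal node indices; the function returns the cheapest reconstructed path found.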

OUTPUT:

b) Memory-Bounded A*
PROGRAM
import copy
from heapq import heappush, heappop

# we have defined a 3 x 3 board, therefore n = 3
n = 3

# bottom, left, top, right moves of the blank tile
row = [1, 0, -1, 0]
col = [0, -1, 0, 1]

class priorityQueue:

    def __init__(self):
        self.heap = []

    # Insert a new key 'k'
    def push(self, k):
        heappush(self.heap, k)

    # Remove the minimum element
    def pop(self):
        return heappop(self.heap)

    # Check if the queue is empty
    def empty(self):
        return len(self.heap) == 0

class node:

    def __init__(self, parent, mat, empty_tile_pos, cost, level):
        self.parent = parent                   # parent node of the current node
        self.mat = mat                         # puzzle matrix
        self.empty_tile_pos = empty_tile_pos   # position of the empty tile
        self.cost = cost                       # total misplaced tiles
        self.level = level                     # number of moves so far

    def __lt__(self, nxt):
        return self.cost < nxt.cost

# Calculate the number of non-blank tiles not in their goal position
def calculateCost(mat, final) -> int:
    count = 0
    for i in range(n):
        for j in range(n):
            if mat[i][j] and mat[i][j] != final[i][j]:
                count += 1
    return count

def newNode(mat, empty_tile_pos, new_empty_tile_pos,
            level, parent, final) -> node:
    new_mat = copy.deepcopy(mat)
    x1, y1 = empty_tile_pos
    x2, y2 = new_empty_tile_pos
    new_mat[x1][y1], new_mat[x2][y2] = new_mat[x2][y2], new_mat[x1][y1]
    # Set the number of misplaced tiles
    cost = calculateCost(new_mat, final)
    return node(parent, new_mat, new_empty_tile_pos, cost, level)

# Print the n x n matrix
def printMatrix(mat):
    for i in range(n):
        for j in range(n):
            print("%d " % mat[i][j], end=" ")
        print()

def isSafe(x, y):
    return x >= 0 and x < n and y >= 0 and y < n

def printPath(root):
    if root is None:
        return
    printPath(root.parent)
    printMatrix(root.mat)
    print()

def solve(initial, empty_tile_pos, final):
    pq = priorityQueue()

    # Create the root node
    cost = calculateCost(initial, final)
    root = node(None, initial, empty_tile_pos, cost, 0)
    pq.push(root)

    while not pq.empty():
        minimum = pq.pop()

        # If minimum is the answer node
        if minimum.cost == 0:
            # Print the path from root to destination
            printPath(minimum)
            return

        # Produce all possible children
        for i in range(4):
            new_tile_pos = [minimum.empty_tile_pos[0] + row[i],
                            minimum.empty_tile_pos[1] + col[i]]
            if isSafe(new_tile_pos[0], new_tile_pos[1]):
                # Create a child node
                child = newNode(minimum.mat, minimum.empty_tile_pos,
                                new_tile_pos, minimum.level + 1,
                                minimum, final)
                # Add child to the list of live nodes
                pq.push(child)

# Driver Code
# 0 represents the blank space
# Initial state
initial = [[2, 8, 3],
           [1, 6, 4],
           [7, 0, 5]]

# Final state
final = [[1, 2, 3],
         [8, 0, 4],
         [7, 6, 5]]

# Position of the blank tile in the initial state
empty_tile_pos = [2, 1]

# Function call
solve(initial, empty_tile_pos, final)

OUTPUT:

RESULT:
Thus the A* and memory-bounded A* algorithms were implemented in Python and executed successfully.

EX.NO: 3 IMPLEMENT NAIVE BAYES MODELS
DATE:

AIM:
To implement a naïve Bayes model using Python.

PROGRAM:
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=0)
gnb = GaussianNB()
y_pred = gnb.fit(X_train, y_train).predict(X_test)
print("Number of mislabeled points out of a total %d points : %d"
% (X_test.shape[0], (y_test != y_pred).sum()))
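
The same predictions can also be summarised as an accuracy score, a small optional addition using scikit-learn's metrics module:

from sklearn.metrics import accuracy_score
print("Accuracy:", accuracy_score(y_test, y_pred))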

OUTPUT:

RESULT:
Thus the naïve Bayes model was implemented in Python and executed successfully.

EX.NO: 4 IMPLEMENT BAYESIAN NETWORKS
DATE:

AIM:
To implement Bayesian Networks using Python.

PROGRAM:
import pandas as pd              # for data manipulation
import networkx as nx            # for drawing graphs
import matplotlib.pyplot as plt  # for drawing graphs
# for creating Bayesian Belief Networks (BBN)
from pybbn.graph.dag import Bbn
from pybbn.graph.edge import Edge, EdgeType
from pybbn.graph.jointree import EvidenceBuilder
from pybbn.graph.node import BbnNode
from pybbn.graph.variable import Variable
from pybbn.pptc.inferencecontroller import InferenceController

# Set Pandas options to display more columns
pd.options.display.max_columns = 50

# Read in the weather data csv
df = pd.read_csv('weatherAUS.csv', encoding='utf-8')

# Drop records where the target RainTomorrow is NaN
df = df[pd.isnull(df['RainTomorrow']) == False]

# For other columns with missing values, fill them in with the column mean
df = df.fillna(df.mean())

# Create bands for the variables we want to use in the model
df['WindGustSpeedCat'] = df['WindGustSpeed'].apply(
    lambda x: '0.<=40' if x <= 40 else '1.40-50' if 40 < x <= 50 else '2.>50')
df['Humidity9amCat'] = df['Humidity9am'].apply(lambda x: '1.>60' if x > 60 else '0.<=60')
df['Humidity3pmCat'] = df['Humidity3pm'].apply(lambda x: '1.>60' if x > 60 else '0.<=60')

# Create nodes by manually typing in probabilities
H9am = BbnNode(Variable(0, 'H9am', ['<=60', '>60']), [0.30658, 0.69342])
H3pm = BbnNode(Variable(1, 'H3pm', ['<=60', '>60']),
               [0.92827, 0.07173, 0.55760, 0.44240])
W = BbnNode(Variable(2, 'W', ['<=40', '40-50', '>50']),
            [0.58660, 0.24040, 0.17300])
RT = BbnNode(Variable(3, 'RT', ['No', 'Yes']),
             [0.92314, 0.07686, 0.89072, 0.10928, 0.76008, 0.23992,
              0.64250, 0.35750, 0.49168, 0.50832, 0.32182, 0.67818])
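
The listing above stops after defining the nodes. A minimal sketch of how the imported Bbn, Edge, EdgeType, and InferenceController classes would typically wire these nodes into a network and build a join tree for inference (the edge structure H9am -> H3pm, with H3pm and W feeding RT, is assumed from the shapes of the conditional probability tables above):

# Connect the nodes into a DAG (assumed structure)
bbn = Bbn() \
    .add_node(H9am) \
    .add_node(H3pm) \
    .add_node(W) \
    .add_node(RT) \
    .add_edge(Edge(H9am, H3pm, EdgeType.DIRECTED)) \
    .add_edge(Edge(H3pm, RT, EdgeType.DIRECTED)) \
    .add_edge(Edge(W, RT, EdgeType.DIRECTED))

# Convert the BBN to a join tree so queries can be answered
join_tree = InferenceController.apply(bbn)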

OUTPUT:
<ipython-input-26-4b316ae6c79a>:23: FutureWarning: Dropping of nuisance
columns in DataFrame reductions (with 'numeric_only=None') is deprecated;
in a future version this will raise TypeError. Select only valid columns
before calling the reduction.
df=df.fillna(df.mean())

RESULT:
Thus the Bayesian network was implemented in Python and executed successfully.
EX.NO: 5 BUILD REGRESSION MODELS
DATE:

AIM:
To build Regression Models using Python.

PROGRAM:
import numpy as np
from sklearn.linear_model import LinearRegression
x = [[0, 1], [5, 1], [15, 2], [25, 5], [35, 11], [45, 15], [55, 34], [60, 35]]
y = [4, 5, 20, 14, 32, 22, 38, 43]
x, y = np.array(x), np.array(y)
model = LinearRegression().fit(x, y)
r_sq = model.score(x, y)
print(f"coefficient of determination: {r_sq}")
print(f"intercept: {model.intercept_}")
print(f"coefficients: {model.coef_}")
y_pred = model.predict(x)
print(f"predicted response:\n{y_pred}")

OUTPUT:

RESULT:
Thus the regression model was built in Python and executed successfully.

EX.NO: 6 BUILD DECISION TREES AND RANDOM FORESTS
DATE:

AIM:
To build decision trees and random forests using Python.

PROGRAM:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
from sklearn.datasets import make_blobs
from sklearn.tree import DecisionTreeClassifier

X, y = make_blobs(n_samples=300, centers=4,
                  random_state=0, cluster_std=1.0)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='rainbow')

tree = DecisionTreeClassifier().fit(X, y)

def visualize_classifier(model, X, y, ax=None, cmap='rainbow'):
    ax = ax or plt.gca()

    # Plot the training points
    ax.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap=cmap,
               clim=(y.min(), y.max()), zorder=3)
    ax.axis('tight')
    ax.axis('off')
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()

    # Fit the estimator
    model.fit(X, y)
    xx, yy = np.meshgrid(np.linspace(*xlim, num=200),
                         np.linspace(*ylim, num=200))
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

    # Create a color plot with the results
    n_classes = len(np.unique(y))
    contours = ax.contourf(xx, yy, Z, alpha=0.3,
                           levels=np.arange(n_classes + 1) - 0.5,
                           cmap=cmap, clim=(y.min(), y.max()),
                           zorder=1)

    ax.set(xlim=xlim, ylim=ylim)

visualize_classifier(DecisionTreeClassifier(), X, y)
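
The experiment title also asks for a random forest. A minimal sketch that reuses the visualize_classifier helper above on the same X and y (the hyperparameter values are illustrative):

from sklearn.ensemble import RandomForestClassifier

forest = RandomForestClassifier(n_estimators=100, random_state=0)
visualize_classifier(forest, X, y)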

OUTPUT:

RESULT:
Thus the decision tree and random forest models were built in Python and executed successfully.
EX.NO: 7 BUILD SVM MODELS
DATE:

AIM:
To build SVM models using Python.

PROGRAM:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix, accuracy_score
from matplotlib.colors import ListedColormap

dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
                                                    random_state=0)
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

classifier = SVC(kernel='rbf', random_state=0)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)

cm = confusion_matrix(y_test, y_pred)
print(cm)
print(accuracy_score(y_test, y_pred))

X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(
    np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),
    np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))
plt.contourf(X1, X2,
             classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha=0.75, cmap=ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c=ListedColormap(('red', 'green'))(i), label=j)
plt.title('SVM (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
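
For comparison, the same pipeline can be re-run with a linear kernel (a sketch; the rbf model above is the one evaluated in this experiment):

linear_clf = SVC(kernel='linear', random_state=0)
linear_clf.fit(X_train, y_train)
print(accuracy_score(y_test, linear_clf.predict(X_test)))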

OUTPUT:

RESULT:
Thus the SVM model was built in Python and executed successfully.
EX.NO: 8 IMPLEMENT ENSEMBLING TECHNIQUES
DATE:

AIM:
To implement ensembling techniques using Python.

PROGRAM:
# importing utility modules
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

# importing machine learning models for prediction
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from sklearn.linear_model import LinearRegression

# loading the train data set into a dataframe from the train_data.csv file
df = pd.read_csv("train_data.csv")

# getting the target data from the dataframe
target = df["target"]

# getting the train data from the dataframe
train = df.drop("target", axis=1)

# splitting the train data into training and validation datasets
X_train, X_test, y_train, y_test = train_test_split(
    train, target, test_size=0.20)

# initializing all the model objects with default parameters
model_1 = LinearRegression()
model_2 = xgb.XGBRegressor()
model_3 = RandomForestRegressor()

# training all the models on the training dataset
model_1.fit(X_train, y_train)
model_2.fit(X_train, y_train)
model_3.fit(X_train, y_train)

# predicting the output on the validation dataset
pred_1 = model_1.predict(X_test)
pred_2 = model_2.predict(X_test)
pred_3 = model_3.predict(X_test)

# final prediction: the average of the predictions of all 3 models
pred_final = (pred_1 + pred_2 + pred_3) / 3.0

# printing the mean squared error between the real and predicted values
print(mean_squared_error(y_test, pred_final))
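
If train_data.csv is not available, a synthetic regression dataset can be generated first to stand in for it (a sketch using scikit-learn's make_regression; the column names are illustrative):

from sklearn.datasets import make_regression

X_syn, y_syn = make_regression(n_samples=500, n_features=10, noise=10.0, random_state=0)
df = pd.DataFrame(X_syn, columns=[f"f{i}" for i in range(10)])
df["target"] = y_syn
df.to_csv("train_data.csv", index=False)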

OUTPUT:

RESULT:
Thus the ensembling technique was implemented in Python and executed successfully.
EX.NO: 9 IMPLEMENT CLUSTERING ALGORITHMS
DATE:

AIM:
To implement clustering algorithms using Python.

PROGRAM:
# affinity propagation clustering
from numpy import unique
from numpy import where
from sklearn.datasets import make_classification
from sklearn.cluster import AffinityPropagation
from matplotlib import pyplot

# define dataset
X, _ = make_classification(n_samples=1000, n_features=2, n_informative=2,
                           n_redundant=0, n_clusters_per_class=1, random_state=4)
# define the model
model = AffinityPropagation(damping=0.9)
# fit the model
model.fit(X)
# assign a cluster to each example
yhat = model.predict(X)
# retrieve unique clusters
clusters = unique(yhat)
# create a scatter plot of the samples from each cluster
for cluster in clusters:
    # get row indexes of the samples in this cluster
    row_ix = where(yhat == cluster)
    # create a scatter of these samples
    pyplot.scatter(X[row_ix, 0], X[row_ix, 1])
# show the plot
pyplot.show()
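
The same dataset can also be clustered with k-means for comparison (a sketch; n_clusters=2 matches the two generated classes, and the value is a choice, not part of the original program):

from sklearn.cluster import KMeans

kmeans = KMeans(n_clusters=2, n_init=10, random_state=4)
yhat_km = kmeans.fit_predict(X)
for cluster in unique(yhat_km):
    row_ix = where(yhat_km == cluster)
    pyplot.scatter(X[row_ix, 0], X[row_ix, 1])
pyplot.show()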

OUTPUT:

RESULT:
Thus the clustering algorithm was implemented in Python and executed successfully.

EX.NO: 10 IMPLEMENT EM FOR BAYESIAN NETWORKS
DATE:

AIM:
To implement EM for Bayesian Networks using Python.

PROGRAM:

import numpy as np
import pandas as pd
import csv
from pgmpy.estimators import MaximumLikelihoodEstimator
from pgmpy.models import BayesianModel
from pgmpy.inference import VariableElimination

heartDisease = pd.read_csv('heart.csv')
heartDisease = heartDisease.replace('?', np.nan)

print('Sample instances from the dataset are given below')
print(heartDisease.head())

print('\n Attributes and datatypes')
print(heartDisease.dtypes)

model = BayesianModel([('age', 'heartdisease'), ('sex', 'heartdisease'),
                       ('exang', 'heartdisease'), ('cp', 'heartdisease'),
                       ('heartdisease', 'restecg'), ('heartdisease', 'chol')])

print('\nLearning CPD using Maximum likelihood estimators')
model.fit(heartDisease, estimator=MaximumLikelihoodEstimator)

print('\n Inferencing with Bayesian Network:')
HeartDiseasetest_infer = VariableElimination(model)

print('\n 1. Probability of HeartDisease given evidence= restecg')
q1 = HeartDiseasetest_infer.query(variables=['heartdisease'], evidence={'restecg': 1})
print(q1)

print('\n 2. Probability of HeartDisease given evidence= cp ')
q2 = HeartDiseasetest_infer.query(variables=['heartdisease'], evidence={'cp': 2})
print(q2)
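
The listing above learns the CPDs with maximum likelihood estimation. Recent pgmpy versions also ship an EM-based estimator; assuming such a version is installed, the fit step could be swapped as sketched below:

from pgmpy.estimators import ExpectationMaximization  # assumes a recent pgmpy release

model_em = BayesianModel([('age', 'heartdisease'), ('sex', 'heartdisease'),
                          ('exang', 'heartdisease'), ('cp', 'heartdisease'),
                          ('heartdisease', 'restecg'), ('heartdisease', 'chol')])
model_em.fit(heartDisease, estimator=ExpectationMaximization)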

OUTPUT:

RESULT:
Thus EM for Bayesian networks was implemented in Python and executed successfully.

EX.NO: 11 BUILD SIMPLE NN MODELS
DATE:

AIM:
To build simple NN models using Python.

PROGRAM:
# Import the Python libraries required in this example:
import numpy as np
from scipy.special import expit as activation_function
from scipy.stats import truncnorm

# DEFINE THE NETWORK

# Generate random numbers within a truncated (bounded)
# normal distribution:
def truncated_normal(mean=0, sd=1, low=0, upp=10):
    return truncnorm(
        (low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)

# Create the 'Nnetwork' class and define its arguments:
# Set the number of neurons/nodes for each layer
# and initialize the weight matrices:
class Nnetwork:

    def __init__(self,
                 no_of_in_nodes,
                 no_of_out_nodes,
                 no_of_hidden_nodes,
                 learning_rate):
        self.no_of_in_nodes = no_of_in_nodes
        self.no_of_out_nodes = no_of_out_nodes
        self.no_of_hidden_nodes = no_of_hidden_nodes
        self.learning_rate = learning_rate
        self.create_weight_matrices()

    def create_weight_matrices(self):
        """A method to initialize the weight matrices of the neural network."""
        rad = 1 / np.sqrt(self.no_of_in_nodes)
        X = truncated_normal(mean=0, sd=1, low=-rad, upp=rad)
        self.weights_in_hidden = X.rvs((self.no_of_hidden_nodes,
                                        self.no_of_in_nodes))
        rad = 1 / np.sqrt(self.no_of_hidden_nodes)
        X = truncated_normal(mean=0, sd=1, low=-rad, upp=rad)
        self.weights_hidden_out = X.rvs((self.no_of_out_nodes,
                                         self.no_of_hidden_nodes))

    def train(self, input_vector, target_vector):
        pass  # More work is needed to train the network

    def run(self, input_vector):
        """
        Run the network with an input vector 'input_vector'.
        'input_vector' can be a tuple, list or ndarray.
        """
        # Turn the input vector into a column vector:
        input_vector = np.array(input_vector, ndmin=2).T
        # activation_function() implements the expit function,
        # which is an implementation of the sigmoid function:
        input_hidden = activation_function(self.weights_in_hidden @ input_vector)
        output_vector = activation_function(self.weights_hidden_out @ input_hidden)
        return output_vector

# RUN THE NETWORK AND GET A RESULT

# Initialize an instance of the class:
simple_network = Nnetwork(no_of_in_nodes=2,
                          no_of_out_nodes=2,
                          no_of_hidden_nodes=4,
                          learning_rate=0.6)

# Run simple_network on arrays, lists and tuples with shape (2,)
# and get a result:
simple_network.run([(3, 4)])
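
Outside an interactive notebook the returned output vector is not displayed automatically; a short usage example (input values illustrative):

output = simple_network.run([3, 4])
print(output)  # a 2x1 column vector of sigmoid activations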

OUTPUT:

RESULT:
Thus the simple NN model was built in Python and executed successfully.

EX.NO: 12 BUILD DEEP LEARNING NN MODELS
DATE:

AIM:
To build deep learning NN models using Python.

PROGRAM:
from sklearn.neural_network import MLPClassifier
X = [[0., 0.], [1., 1.]]
y = [0, 1]
clf = MLPClassifier(solver='lbfgs', alpha=1e-5,
hidden_layer_sizes=(5, 2), random_state=1)

clf.fit(X, y)
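
After fitting, the classifier can label new samples; per the scikit-learn user guide example this model predicts class 1 for the first point and class 0 for the second:

print(clf.predict([[2., 2.], [-1., -2.]]))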

OUTPUT:

RESULT:
Thus the deep learning NN model was built in Python and executed successfully.

