REPORT ON INTERN WORK

Under the guidance of
Dr. Radhakant Padhi, Associate Professor, Aerospace Engineering, Indian Institute of Science, Bangalore, India
Mr. Ashoka Vanjare, Project Assistant, Indian Institute of Science, Bangalore

At Indian Institute of Science, Bangalore

Submitted by
SUMAN MISHRA
Roll No: 113ID0331
Bachelor of Technology
in
Department of Industrial Design
National Institute of Technology Rourkela
Odisha, India
18 May 2015 - 17 July 2015
Preface
Acknowledgements
Abstract
INTRODUCTION
TEST FUNCTIONS
1. Sphere function: a convex, unimodal benchmark whose global minimum f(x) = 0 lies at the origin. It is defined as

$$f(x) = \sum_{i=0}^{n} x_i^2$$
[Figure: surface plot of the sphere function over X, Y ∈ [-50, 50]]
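The surface plots in this section were generated in MATLAB. A minimal sketch that reproduces a figure of this kind (the plotting code is not part of the original report; the grid range follows the figure axes):

% sphere function surface plot (illustrative sketch)
sphere_fn = @(X,Y) X.^2 + Y.^2;          % f(x) = sum of squares, n = 2
[X,Y] = meshgrid(linspace(-50,50,100));  % grid matching the figure's axis range
surf(X, Y, sphere_fn(X,Y), 'EdgeColor', 'none')
title('sphere function'), xlabel('X'), ylabel('Y')

The same meshgrid/surf pattern reproduces the remaining benchmark figures below.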
2. Rosenbrock function: a non-convex, valley-shaped benchmark; in two dimensions it is commonly written as

$$f(x, y) = (1 - x)^2 + 100\,(y - x^2)^2$$

with global minimum f(1, 1) = 0.
[Figure: surface plot of the Rosenbrock function; X range (-1.5, 1.5), Y range (0, 1)]
3. Rastrigin function: a highly multimodal benchmark, commonly defined as

$$f(x) = 10n + \sum_{i=1}^{n} \left[ x_i^2 - 10\cos(2\pi x_i) \right]$$

with global minimum f(0) = 0.
[Figure: surface plot of the Rastrigin function over X, Y ∈ [-2, 2]]
4. Griewank function:
$$f(x) = 1 + \frac{1}{4000}\sum_{i=0}^{n} x_i^2 - \prod_{i=1}^{n} \cos\!\left(\frac{x_i}{\sqrt{i}}\right)$$
[Figure: surface plot of the Griewank function]
5. Schaffer function:
$$f(x, y) = 0.5 + \frac{\sin^2\!\left(\sqrt{x^2 + y^2}\right) - 0.5}{\left(1 + 0.001\,(x^2 + y^2)\right)^2}$$
[Figure: surface plot of the Schaffer function over x1, x2 ∈ [-2, 2]]
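For reference, the Griewank and Schaffer formulas above translate directly into vectorized MATLAB handles (a sketch for the two-dimensional case):

% Griewank (n = 2) and Schaffer functions, matching the formulas above
griewank = @(X,Y) 1 + (X.^2 + Y.^2)/4000 - cos(X/sqrt(1)).*cos(Y/sqrt(2));
schaffer = @(X,Y) 0.5 + (sin(sqrt(X.^2 + Y.^2)).^2 - 0.5) ./ (1 + 0.001*(X.^2 + Y.^2)).^2;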
6. Schwefel function: a deceptive multimodal benchmark, commonly defined as

$$f(x) = 418.9829\,n - \sum_{i=1}^{n} x_i \sin\!\left(\sqrt{|x_i|}\right)$$
[Figure: surface plot of the Schwefel function over X, Y ∈ [-200, 200]]
7. Ackley's function: a multimodal benchmark with a nearly flat outer region and a deep minimum at the origin, commonly defined as

$$f(x) = -20\exp\!\left(-0.2\sqrt{\frac{1}{n}\sum_{i=1}^{n} x_i^2}\right) - \exp\!\left(\frac{1}{n}\sum_{i=1}^{n}\cos(2\pi x_i)\right) + 20 + e$$
NEURAL NETWORK FUNCTION APPROXIMATION (MULTILAYER PERCEPTRON)
Code Listing:
clear, clc, close all
%% Initializing
hidden_neurons = 4;
epochs = 300;
N = 3;
lr = 0.05;                          % learning rate (assumed; not given in the original listing)
X1 = [1,2,3];                       % input, dimension 1
X2 = [4,5,6];                       % input, dimension 2
Y_train = h(X1);                    % h(.) is the target function to approximate (defined elsewhere)
Y1_train = h(X2);
% X = linspace(0,1,N);
% [X1,X2] = meshgrid(X);
% Z = (X1.^2 + X2.^2 - 1);
% plot3(X1,X2,Z)
% train_inp = [X1(:); X2(:)];
% train_out = [Z(:); Z(:)];
% Y_train = sin(2*pi*X1).*sin(2*pi*X2);   % alternative target: f(X1)*f(X2)
% assemble and normalise the training set (zero mean, unit variance),
% following the normalisation used in the Testing section below
train_inp = [X1(:) X2(:)];
train_out = Y_train(:);
mu_inp = mean(train_inp);  sigma_inp = std(train_inp);
train_inp = (train_inp - repmat(mu_inp,size(train_inp,1),1)) ./ repmat(sigma_inp,size(train_inp,1),1);
mu_out = mean(train_out);  sigma_out = std(train_out);
train_out = (train_out - mu_out) / sigma_out;
bias = ones(size(train_inp,1),1);
train_inp = [train_inp bias];
weight_input_hidden = randn(size(train_inp,2),hidden_neurons)/10;   % random initial weights
weight_hidden_output = randn(1,hidden_neurons)/10;
earlystop = false;
%% Learning
for iter = 1:epochs
    % forward pass: tanh hidden layer, linear output
    hidden_val = tanh(train_inp*weight_input_hidden);
    pred = (weight_hidden_output*hidden_val')';
    delta = pred - train_out;                % output error
    % gradient-descent weight update (reconstructed step; the original
    % update equations were lost from this listing)
    weight_hidden_output = weight_hidden_output - lr*(delta'*hidden_val);
    weight_input_hidden = weight_input_hidden - lr*(train_inp'*((delta*weight_hidden_output).*(1 - hidden_val.^2)));
    err(iter) = mean(delta.^2);              % epoch MSE
    % -- another epoch finished
    figure(1);
    plot(err)
    xlabel('no. of iterations')
    ylabel('MSE')
    % stop if requested
    if earlystop
        fprintf('stopped at epoch: %d\n',iter);
        break
    end
    % stop if error is small
    if err(iter) < 0.001
        fprintf('converged at epoch: %d\n',iter);
        break
    end
end
%% Testing
X3 = [0.2,0.5,0.8]';
X4 = [0.3,0.6,0.7]';
train_test = [X3 X4];
mu_test = mean(train_test);  sigma_test = std(train_test);
train_test = (train_test - repmat(mu_test,size(train_test,1),1)) ./ repmat(sigma_test,size(train_test,1),1);
train_test = [train_test ones(size(train_test,1),1)];
pred = weight_hidden_output*tanh(train_test*weight_input_hidden)';
%% Finish
fprintf('state after %d epochs\n',iter);
a = (train_out*sigma_out) + mu_out;          % actual outputs (de-normalised)
b = (pred'*sigma_out) + mu_out;              % predicted outputs (de-normalised)
act_pred_err = [a b b-a]                     % display actual, predicted & error
figure, plot3(X3,X4,b,'color','red','linewidth',2)
grid on, title('Approximate result (using Neural Network)');
figure, plot3(X1,X2,Y_train,'color','green','linewidth',2)
grid on, title('Y = Original result');
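The listing relies on an externally defined target function h, which this report does not show; any smooth map serves as a stand-in when running the script, for example:

% hypothetical stand-in for the target function used by the listing
h = @(x) sin(x);   % must be defined before the script runs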
Minimum error = 0.0370.
Observation:
RADIAL BASIS FUNCTION (RBF) NETWORK
Taxonomy:
Inspiration:
Strategy:
Procedure:
Heuristics:
Here σ denotes the spread of the Gaussian radial basis functions, i.e. the spread argument passed to the network constructors in the listing below.
%clc
clear all;
close all;
epochs = 300;
n1 = [1,2,3];
x1 = f(n1);                  % f(.) is the target function (defined elsewhere)
n2 = [4,5,6];
x2 = f(n2);
xn_train = n1;               % training inputs
dn_train = x1;               % training targets
xn_test = n2;                % testing inputs
dn_test = x2;                % testing targets
%---------------------------------------------------
switch 1                     % choose the radial-basis network type (1, 2, or 3)
    case 1                   % exact RBF network
        P = xn_train;
        T = dn_train;
        spread = 50;
        net = newrbe(P,T,spread);
    case 2                   % iteratively grown RBF network
        P = xn_train;
        T = dn_train;
        goal = 1e-12;        % sum-squared-error goal
        spread = 50;
        MN = size(xn_train,2);   % maximum number of neurons
        DF = 1;                  % number of neurons to add between displays
        net = newrb(P,T,goal,spread,MN,DF);
    case 3                   % generalized regression neural network
        P = xn_train;
        T = dn_train;
        spread = 0.5;
        net = newgrnn(P,T,spread);
end
for iter = 1:epochs
    err1 = sum((dn_train - sim(net,xn_train)).^2);   % training error (SSE)
    X = sim(net,xn_test);
    err2(iter) = (sum((dn_test - X).^2))^0.5;        % testing error (root SSE)
    % note: the trained network is fixed, so err2 is constant over the iterations
    figure(1);
    plot(err2)
    xlabel('no. of iterations')
    ylabel('testing error')
end
%---------------------------------------------------
figure, plot(1:length(n2),x2,'r+:',1:length(n2),X,'bo:')
title('RBF network: testing targets (+) vs. network outputs (o)');
xlabel('sample index')
ylabel('output')
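newrbe, newrb, and newgrnn are MATLAB Neural Network Toolbox constructors for exact RBF, incrementally grown RBF, and generalized regression networks. The target f is again external; a hypothetical stand-in for experimentation:

% hypothetical stand-in target for the RBF script
f = @(n) n.^2;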
Error1 (difference between predicted and training data) = 1.2037e-24
Error2 (difference between predicted and testing data) = 42.3675
TEACHING-LEARNING BASED OPTIMIZATION (TLBO)
Inspiration:
Strategy:
Procedure:
Heuristics:
Code Listing:
clear all
clc
stu_num = 3;                    % number of students (population size)
subj_num = 3;                   % number of design variables (subjects taken by the students)
Ngen = 500;                     % number of generations
up = ones(1,subj_num)*100;      % upper bound on the variables
low = ones(1,subj_num)*(-100);  % lower bound on the variables
Nrun = 50;                      % number of independent runs
fitness = zeros(stu_num,1);
xnew = zeros(stu_num,subj_num);
fxnew = zeros(stu_num,1);
gbest_fitness = zeros(1,Ngen);
global_value = zeros(Nrun,Ngen);
t0 = cputime;
for run = 1:Nrun
    %----------------------------------------------------------
    range = repmat((up-low),stu_num,1);
    lower = repmat(low,stu_num,1);
    x = rand(stu_num,subj_num).*range + lower;  % initial random solutions
    for i = 1:stu_num
        fitness(i) = f(x(i,:));                 % f(.) is the objective function (defined elsewhere)
    end
    %------------------------------------------------------%
    for gen = 1:Ngen
        %%%%%%%%%%% teaching phase %%%%%%%%
        xmean = mean(x);                        % mean of all students
        [~,bt] = min(fitness);                  % capture the teacher via the objective values
        teacher = x(bt,:);
        TF = round(1+rand);                     % teaching factor (1 or 2)
        difference = rand*(teacher(1,:) - TF*xmean(1,:));
        %------------------------------------------------------------%
        for i = 1:stu_num
            for j = 1:subj_num                  % improve the current solutions in the teacher phase
                xnew(i,j) = x(i,j) + difference(1,j);
                if xnew(i,j) > up(1,j)          % clip values beyond the upper limit
                    xnew(i,j) = up(1,j);
                elseif xnew(i,j) < low(1,j)     % clip values beyond the lower limit
                    xnew(i,j) = low(1,j);
                end
            end
        end
        %------------------------------------------------------------%
        for i = 1:stu_num
            fxnew(i) = f(xnew(i,:));
        end
        for i = 1:stu_num                       % greedy selection: keep the better of old and new
            if fxnew(i) < fitness(i)
                fitness(i) = fxnew(i);
                x(i,:) = xnew(i,:);
            end
        end
        global_value(run,gen) = min(fitness);   % record the best value of this generation
    end
end
disp('num of iterations')
disp(Ngen)
mean_bestvalue = mean(global_value);            % mean global minimum over all runs
disp('fitness')
disp(mean_bestvalue(Ngen))
disp('best particle')
[~,bt] = min(fitness);
disp(x(bt,:))
gen = 1:Ngen;
plot(gen,mean_bestvalue(gen))
xlabel('Generations')
ylabel('Mean of global minimum')
t = cputime - t0;
disp('total time of running')
disp(t)
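The TLBO listing minimizes an external objective f; a one-line stand-in, assuming the sphere benchmark from the test-function section:

% hypothetical objective for the TLBO listing (sphere function)
f = @(x) sum(x.^2);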
Mean of best fitness = 14.1858
Mean of best fitness values = 0.0013
GENETIC ALGORITHM
Taxonomy:
Inspiration:
Strategy:
Procedure:
Heuristics:
Code Listing:
clc;
close all;
clear all;
x = randi([0 15],1,4);       % initial population of four 4-bit chromosomes
                             % (assumed initialization; lost from the original listing)
x1 = zeros(1,4);
%------------------------- Generating binary code -----------------------%
for i = 1:1:4
    x1(i) = x(i);
    for j = 1:1:4            % 4-bit binary encoding of each chromosome
        X(i,5-j) = rem(x1(i),2);
        x1(i) = floor(x1(i)/2);
    end
end
x2 = x;
x2 = x2';
for l = 1:1:12               % generations
    %--------------------- Generating Non Linear Function -------------------%
    y = h(x);                % h(.) is the fitness function (defined elsewhere)
    p = max(y);
    Y = sort(y,'descend');   % sort fitness values in descending order
    for i = 1:1:4            % rank the chromosomes by fitness
        for j = 1:1:4
            if y(j) == Y(i)
                Z(i,:) = X(j,:);
            end
        end
    end
    %------------------------- Starting Crossover ---------------------------%
    n = [1 2];
    m = ceil(0.5*4) - 1;     % single-point crossover position
    for i = 1:2:2
        CrossoverZ(n(i),1:4) = [Z(n(i),1:m) Z(n(i+1),m+1:4)];
        CrossoverZ(n(i+1),1:4) = [Z(n(i+1),1:m) Z(n(i),m+1:4)];
    end
    Crossover_Child = CrossoverZ;          % crossover offspring
    %--------------------------- Starting Mutation --------------------------%
    Mutation_Child = Crossover_Child;      % start from the crossover offspring
    o = ceil(0.5*4);                       % bit position to mutate
    for i = 1:1:2
        if Crossover_Child(i,o) == 0
            Mutation_Child(i,o) = 1;
        elseif Crossover_Child(i,o) == 1
            Mutation_Child(i,o) = 0;
        end
    end
    %---------------------- Decoding and Selection --------------------------%
    Final_data = [Z; Mutation_Child];      % pool of ranked parents and mutated offspring (6 rows)
    Final_data_int = zeros(1,6);
    for i = 1:1:6                          % binary-to-integer decoding
        for j = 1:1:4
            Final_data_int(i) = Final_data_int(i) + power(2,4-j).*Final_data(i,j);
        end
    end
    Final_data_int1 = k(Final_data_int);   % k(.) evaluates the decoded pool (defined elsewhere)
    Output = Final_data_int1;
    Output1 = sort(Output,'descend');
    for i = 1:1:6                          % keep the fittest chromosomes
        for j = 1:1:6
            if Output1(i) == Output(j)
                Final_Output(i,:) = Final_data(j,:);
            end
        end
    end
    Final_Output = Final_Output(1:4,:);    % the four survivors form the next generation
    for i = 1:1:4                          % decode the survivors back to integers
        x(i) = 0;
        for j = 1:1:4
            x(i) = x(i) + power(2,4-j)*Final_Output(i,j);
        end
    end
    X = Final_Output;                      % carry the binary population forward
    maximum(l) = Output1(1);               % best fitness value of this generation
end
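h and k are the fitness evaluations the listing assumes; for a maximization test one can supply, for example:

% hypothetical fitness functions for the GA listing
h = @(x) -(x - 7).^2 + 50;   % peak at x = 7
k = h;                       % pool evaluation uses the same fitness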
CULTURAL ALGORITHM
Code Listing:
for i = 1:2*popSize                    % tournament ranking of the combined population
    cnum = [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16];   % competitor indices
    winsingle = 0;
    for j = 1:c                        % compare each individual against the competitors
        if fit(pop(i)) < fit(pop(cnum(j)))
            winsingle = winsingle + 1; % record the number of victories
        end
    end
    winnum(i) = winsingle;
end
index = 1;
for i = 1:2*popSize                    % sort pop by number of wins (bubble sort)
    for j = (i+1):2*popSize
        if winnum(i) < winnum(j)       % number of victories in descending order
            uusee = winnum(i);
            winnum(i) = winnum(j);
            winnum(j) = uusee;
            index = j;                 % index of the individual with the i-th most wins
        end
    end
    pptv = pop(i);                     % reorder pop by win count so the individuals
    pop(i) = pop(index);               % with the most wins become the parents
    pop(index) = pptv;
end
for i = 1:2*popSize
    f(i) = fit(pop(i));                % fitness of the new parent generation
end
for i = 1:popSize                      % loop opener reconstructed: update the normative
                                       % knowledge (belief-space bounds) per individual
    if (pop(i) < normKnowledge(i,1)) || (fit(pop(i)) < fit(normKnowledge(i,1)))
        if 0.6 <= acc                  % accept the new lower bound (acc drawn elsewhere)
            normKnowledge(i,1) = pop(i);
        end
    end
    if (pop(i) > normKnowledge(i,2)) || (fit(pop(i)) < fit(normKnowledge(i,2)))
        if 0.5 <= acc                  % accept the new upper bound
            normKnowledge(i,2) = pop(i);
        end
    end
end
pop;
disp(trend)                            % best fitness per generation
disp(Fbest)                            % overall best fitness
g = 1:6;
figure(2);
plot(g,trend,'k');
title('cultural algorithm');
xlabel('no. of generations')
ylabel('best fitness')
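The fragment assumes a population pop, a fitness handle fit, and sizes popSize and c defined earlier in the program; a minimal hypothetical setup for reading and running the code:

% hypothetical setup assumed by the cultural-algorithm fragment
popSize = 8;                           % parent population size
c = 16;                                % competitors per individual in the tournament
pop = rand(1,2*popSize)*200 - 100;     % combined parent + offspring population
fit = @(x) x.^2;                       % fitness function (sphere, one variable)
acc = rand;                            % acceptance random number for the belief space
normKnowledge = [pop(1:popSize)' pop(1:popSize)'];   % [lower, upper] normative bounds
trend = zeros(1,6); Fbest = 0;         % best-fitness history and overall best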
MEMETIC ALGORITHM
Procedure Memetic Algorithm
Initialize: Generate an initial population;
while Stopping conditions are not satisfied do
Evaluate all individuals in the population.
Evolve a new population using stochastic search operators.
Select the subset of individuals that should undergo the individual improvement procedure.
for each individual in the subset do
Perform individual learning using meme(s), with a given frequency or probability, for a given period.
Proceed with Lamarckian or Baldwinian learning.
end for
end while
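In the Lamarckian case the locally improved solution replaces the individual in the population; the code listing below uses fminsearch for exactly this step. A minimal self-contained sketch with hypothetical names:

% Lamarckian learning sketch: improve selected individuals in place
Test_Function = @(x) sum(x.^2);      % hypothetical objective (sphere)
X = rand(4,8)*200 - 100;             % hypothetical population
selected_idx = [1 3];                % individuals chosen for local learning
for i = selected_idx
    [X(i,:), f_i] = fminsearch(Test_Function, X(i,:));   % local search
end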
tic;
M = 4;                     % chromosome number (value assumed; lost from the listing)
P_mutate = 0.1;            % mutation probability
P_cross = 0.95;            % crossover rate
D = 8;                     % dimension of the function
NCmax = 6;                 % iterations
Xmax = 100;                % bounds (depend on the function)
Xmin = -100;
for i = 1:M
    for j = 1:D
        X(i,j) = rand*(Xmax-Xmin) + Xmin;    % population initialization
    end
end
for i = 1:M
    fitness(i) = Test_Function(X(i,:));      % fitness calculation (Test_Function defined elsewhere)
end
for NC = 1:NCmax
    %% Hybridization (crossover)
    randrow = randperm(M);                   % random pairing of parents
    s = 1;
    for i = 1:M/2
        if rand <= P_cross
            w1 = 0.4; w2 = 0.3;              % blending weights
            X(M+s,:) = w1*X(randrow(i),:) + (1-w1)*X(randrow(M-i),:);
            X(M+s+1,:) = w2*X(randrow(M-i),:) + (1-w2)*X(randrow(i),:);
            s = s + 2;
        end
    end
    %% Variation (mutation)
    s = 1;
    new_M = size(X,1);
    for i = 1:new_M
        if rand <= P_mutate
            if rand < 0.5
                X(new_M+s,:) = X(i,:) + (Xmax-X(i,:))*(rand*(1-NC/NCmax))^2;
            else
                X(new_M+s,:) = X(i,1) + (Xmax-X(i,1))*(rand*(1-NC/NCmax))^2;
            end
            s = s + 1;
        end
    end
    %% Selection
    new_fitness(1:M) = fitness;
    for i = M+1:size(X,1)
        new_fitness(i) = Test_Function(X(i,:));
    end
    [iteration_fitness(NC),flag] = min(new_fitness);
    nextX(1,:) = X(flag,:);                  % elitism: always keep the best
    nextfitness(1) = new_fitness(flag);
    P_select = (1./new_fitness)/sum(1./new_fitness);   % computing selection probabilities
    cum_P_select = cumsum(P_select);
    for i = 2:M                              % roulette-wheel selection
        pos = find(cum_P_select >= rand);
        nextX(i,:) = X(pos(1),:);
        nextfitness(i) = new_fitness(pos(1));
    end
    X = []; fitness = [];
    X = nextX;
    fitness = nextfitness;
    nextX = []; P_select = []; cum_P_select = []; nextfitness = [];
    new_fitness = [];
    %% Local search (the memetic step)
    for i = 1:M
        [X(i,:),fitness(i)] = fminsearch('Test_Function',X(i,:));   % MATLAB's built-in local search
    end
end
toc;
plot(iteration_fitness);
title('MEMETIC ALGORITHM')
xlabel('no. of iterations')
ylabel('best fitness')
[Best_fitness,flag] = min(fitness);
disp('best fitness')
disp(Best_fitness);
Best_solution = X(flag,:);
disp('best solution')
disp(Best_solution);
Best fitness found by the two algorithms on each benchmark:

Function      Cultural algorithm    Memetic algorithm
Sphere        -                     -
Rastrigin     10.4431               71.6368
Rosenbrock    0                     3.9859
Griewank      9.6985                0.0197
Schaffer      0                     0.1384
Schwefel      415.1478              3.3203e+03
Ackley        -2.7882e+04           -