autoencoder_mpl_basic.ipynb - Colaboratory
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/
Instructions for updating:
non-resource variables are not supported in the long term
import numpy as np
import math
from os.path import join, exists, basename, split
import json
import shutil
import scipy.io
class HypImg:
    """ Container for a hyperspectral image and (optional) class labels.
    Only the label-handling tail of the constructor survives in this excerpt. """
    def __init__(self, spectralInput, wavelengths=None, bands=None, labels=None):
        # (handling of spectralInput -> numRows/numCols/numBands/spectraPrep is omitted here)
        if labels is not None:
            self.labels = labels[:, np.newaxis]
        else:
            self.labels = None
            self.labelsOnehot = None
            self.numClasses = None
        self.wavelengths = wavelengths
        self.bands = bands
class Iterator:
    """ Iterates over batches of data samples and their targets. """
    def __init__(self, dataSamples, targets, batchSize=None):
        self.dataSamples = dataSamples
        self.targets = targets
        self.numSamples = np.shape(dataSamples)[0]
        if batchSize is not None:
            self.batchSize = batchSize
        else:
            self.batchSize = self.numSamples
        self.currentBatch = np.arange(self.batchSize)

    def next_batch(self):
        """ Returns the next batch of dataSamples and targets. """
        batchData = self.dataSamples[self.currentBatch, :]
        batchTargets = self.targets[self.currentBatch, :]
        # advance the batch indices, wrapping around at the end of the data
        self.currentBatch = (self.currentBatch + self.batchSize) % self.numSamples
        return batchData, batchTargets
    def get_batch(self, idx):
        # (method name assumed; only the body survives in this excerpt)
        batchData = self.dataSamples[idx, :]
        batchTargets = self.targets[idx, :]
        return batchData, batchTargets

    def reset_batch(self):
        """ Resets the current batch to the beginning.
        """
        self.currentBatch = np.arange(self.batchSize)

    def shuffle(self):
        """ Randomly permutes all dataSamples (and corresponding targets).
        """
        idx = np.random.permutation(np.shape(self.dataSamples)[0])
        self.dataSamples = self.dataSamples[idx, :]
        self.targets = self.targets[idx, :]
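As a quick sanity check, the iterator can be exercised on a small random array (a minimal sketch; the array and batch size below are illustrative, not values from the notebook):

# minimal usage sketch of the Iterator class (illustrative values)
dummy = np.random.rand(10, 5).astype(np.float32)        # 10 samples, 5 bands
it = Iterator(dataSamples=dummy, targets=dummy, batchSize=4)
xb, yb = it.next_batch()   # first 4 samples and their targets
it.shuffle()               # permute samples and targets with the same index order
it.reset_batch()           # start again from the first batch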
def create_variable(shape, method='gaussian', wd=False):
    ...  # body lost in this excerpt: builds a tf.Variable (optionally registered for weight decay)

def layer_fullyConn(input, W, b):
    return tf.matmul(input, W) + b

def layer_conv1d(input, W, b, stride=1, padding='SAME'):
    return tf.nn.conv1d(input, W, stride=stride, padding=padding) + b

def layer_deconv1d(input, W, b, outputShape, stride=1, padding='SAME'):
    if (padding!='SAME')&(padding!='VALID'):
        raise ValueError('unknown padding type: %s. Use SAME or VALID' % padding)
    if stride < 1:
        raise ValueError('stride must be greater than 0. Stride = %d found in deconv layer.' % stride)
    return tf.nn.conv1d_transpose(input, W, outputShape, strides=stride, padding=padding) + b
def layer_activation(input, func='sigmoid'):
    if func == 'relu':
        a = tf.nn.relu(input)
    elif func == 'sigmoid':
        a = tf.nn.sigmoid(input)
    elif func == 'linear':
        a = input
    else:
        raise ValueError('unknown activation function: %s. Use relu, sigmoid or linear.' % func)
    return a

def conv_output_shape(inputShape, filterSize, padding, stride):
    # output length of a 1D conv layer (function name reconstructed; only the body survives)
    if padding=='VALID':
        outputShape = np.ceil( (inputShape - (filterSize-1)) / stride )
    elif padding=='SAME':
        outputShape = np.ceil( inputShape / stride )
    else:
        raise ValueError('unknown padding type: %s. Use SAME or VALID' % padding)
    return int(outputShape)
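As a quick check of the output-shape arithmetic above (using the reconstructed name conv_output_shape): with an input of length 100, a filter of width 9 and a stride of 2, 'SAME' padding gives ceil(100/2) = 50 and 'VALID' padding gives ceil((100-8)/2) = 46.

conv_output_shape(inputShape=100, filterSize=9, padding='SAME', stride=2)    # -> 50
conv_output_shape(inputShape=100, filterSize=9, padding='VALID', stride=2)   # -> 46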
def train_step(loss, learning_rate, decay_steps=None, piecewise_bounds=None, piecewise_values=None, method='Adam'):
    # (learning-rate schedule handling is omitted from this excerpt)
    lr = learning_rate
    if method == 'Adam':
        optimizer = tf.train.AdamOptimizer(lr)
    elif method == 'SGD':
        optimizer = tf.train.GradientDescentOptimizer(lr)
    else:
        raise ValueError('unknown optimisation method: %s. Use Adam or SGD.' % method)
    train_op = optimizer.minimize(loss)
    return train_op
def loss_function_reconstruction_1D(y_reconstructed,y_target,func='SSE'):
if func == 'SSE':
# sum of squared errors loss
loss = tf.reduce_sum( tf.square(y_target - y_reconstructed) )
return loss
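A quick numerical check of the SSE reconstruction loss (a minimal sketch using the TF1 session API imported above; the values are illustrative):

with tf.Session() as _sess:
    _y_target = tf.constant([[1.0, 2.0, 3.0]])
    _y_recon  = tf.constant([[1.5, 2.0, 2.0]])
    # (1.0-1.5)^2 + (2.0-2.0)^2 + (3.0-2.0)^2 = 0.25 + 0.0 + 1.0
    print(_sess.run(loss_function_reconstruction_1D(_y_recon, _y_target, func='SSE')))   # 1.25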
def loss_function_crossentropy_1D(y_pred, y_target, class_weights=None, num_classes=None):
    # (name reconstructed) classification loss; only the default class-weights branch survives here
    if class_weights is None:
        class_weights = tf.constant(1, shape=[num_classes], dtype=tf.dtypes.float32)
    ...
    return loss

def loss_weight_decay(wdLambda):
    ...  # body lost in this excerpt: scales the collected weight-decay terms by wdLambda

def balance_classes(y_target, num_classes):
    ...  # body lost in this excerpt: computes per-class weights from label frequencies
    return class_weights
def save_model(addr,sess,saver,current_epoch,epochs_to_save):
if current_epoch in epochs_to_save:
saver.save(sess, join(addr,"epoch_%i"%(current_epoch),"model.ckpt"))
def load_model(addr,sess):
    """Loads a model from the address of a checkpoint.
    Args:
        addr (str): Address of the directory containing the checkpoint to restore.
        sess (obj): TensorFlow session object.
    """
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(sess, join(addr, 'model.ckpt'))
def save_config(net_obj,addr):
    """Saves a network config file. Saves the variables listed in net_config within the network object.
    Args:
        net_obj (obj): Network object.
        addr (obj): Directory of where to store the config.json file.
    """
    data = {}
    for config_parameter in net_obj.net_config:
        data[config_parameter] = getattr(net_obj,config_parameter)
    with open(join(addr, 'config.json'), 'w') as outfile:
        json.dump(data, outfile)
def load_config(net_obj,addr):
    """Loads a network config file. Loads the variables in the config.json file and overwrites
    the corresponding attributes of the network object. Applies to variables in the net_config
    list in the network object.
    Args:
        net_obj (obj): Network object.
        addr (obj): Location of the config.json file.
    """
    with open(addr, 'r') as infile:
        data = json.load(infile)
    for config_parameter in net_obj.net_config:
        setattr(net_obj, config_parameter, data[config_parameter])
    # (fragment of the training routine; the enclosing function definition, session setup
    #  and batch-loop headers were lost in the export)
    if np.shape(dataTrain.dataSamples)[1] != net_obj.inputSize:
        raise Exception('the data dimensionality must match the network input size. '
                        'Data size: %d, network input size: %d'
                        % (np.shape(dataTrain.dataSamples)[1], net_obj.inputSize))
    batchSize = dataTrain.batchSize
    numSamples = dataTrain.numSamples
    ...
                # training loss
                if visualiseRateTrain > 0:
                    if epoch_i % visualiseRateTrain == 0:
                        train_error.append( net_obj.train_ops['%s_loss' % (train_op_name)].eval(
                            {net_obj.x: train_batch_x, net_obj.y_target: train_batch_y}) )
                if batch_i == numIters - 1:
                    dataTrain.reset_batch()
            ...
            # validation loss
            val_error = []
            for batch_i in range(dataVal.numSamples // dataVal.batchSize):
                val_batch_x, val_batch_y = dataVal.next_batch()
                val_error.append( net_obj.train_ops['%s_loss' % (train_op_name)].eval(
                    {net_obj.x: val_batch_x, net_obj.y_target: val_batch_y}) )
            val_error = np.array(val_error)
            print("epoch: %d, validation loss: %g" % (epoch_i, np.mean(val_error)))
            save_model(save_addr,sess,saver,epoch_i,save_epochs)
def init_weight(opts, shape, stddev=0.1, const=0.1, dtype=tf.float32):
    # (function name and default arguments reconstructed; only the body survives in this excerpt)
    if opts == 'gaussian':
        weights = tf.random_normal(shape, stddev=stddev, dtype=dtype)
    elif opts == 'truncated_normal':
        weights = tf.truncated_normal(shape, stddev=stddev)
    elif opts == 'xavier':
        h = shape[0]
        w = shape[1]
        try:
            num_in = shape[2]
        except IndexError:
            num_in = 1
        sc = math.sqrt(3.0 / (h * w * num_in))
        weights = tf.multiply(tf.random_normal(shape, dtype=dtype) * 2 - 1, sc)
    elif opts == 'xavier_improved':
        h = shape[0]
        w = shape[1]
        try:
            num_out = shape[3]
        except IndexError:
            num_out = 1
        sc = math.sqrt(2.0 / (h * w * num_out))
        weights = tf.multiply(tf.random_normal(shape, dtype=dtype), sc)
    elif opts == 'constant':
        weights = tf.constant(const, shape=shape)
    else:
        raise ValueError('Unknown weight initialization method %s' % opts)
    return weights
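For example (a sketch; the shapes are illustrative): a Gaussian-initialised fully-connected weight matrix, and the scale used by the 'xavier' option for a small conv filter.

w_fc = init_weight('gaussian', [103, 50], stddev=0.1)     # 103 -> 50 fully-connected layer
# 'xavier' scale for a filter of shape [9, 1, 16]: sqrt(3 / (9*1*16)) ~ 0.144
w_conv = init_weight('xavier', [9, 1, 16])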
class mlp_1D_network():
    """ Multi-layer perceptron (fully-connected) autoencoder for 1D spectra. """
    def __init__(self, configFile=None, inputSize=None, encoderSize=None, activationFunc=None,
                 tiedWeights=None, weightInitOpt=None, weightStd=None, skipConnect=None,
                 activationFuncFinal=None):
        # (constructor signature reconstructed; parts of the body are missing from this excerpt)
        self.inputSize = inputSize
        self.activationFunc = activationFunc
        self.tiedWeights = tiedWeights
        self.skipConnect = skipConnect
        self.weightInitOpt = weightInitOpt
        self.weightStd = weightStd
        self.encoderSize = encoderSize
        self.activationFuncFinal = activationFuncFinal
        self.net_config = ['inputSize','encoderSize','activationFunc','tiedWeights','weightInitOpt',
                           'weightStd','skipConnect','activationFuncFinal']
        # loading config file overwrites input arguments
        if configFile is not None:
            load_config(self,configFile)
        if self.inputSize is None:
            raise Exception('value must be given for inputSize (not None)')
        self.weights = { }
        self.biases = { }
        self.h = {}
        self.a = {}
        self.train_ops = {}
        self.modelsAddrs = {}
        if self.tiedWeights is None:
            self.tiedWeights = [0]*(len(self.encoderSize)-1)
        # (the encoder weight creation, the derived self.decoderSize list and the input
        #  placeholders self.x / self.y_target are omitted from this excerpt)
        # decoder weights
        for layerNum in range( len( self.decoderSize ) - 1 ):
            if self.tiedWeights[layerNum] == 0:
                self.weights['decoder_w%i' % (len( self.encoderSize ) + layerNum )] = \
                    create_variable([self.decoderSize[layerNum], self.decoderSize[layerNum+1]],
                                    self.weightInitOpt, wd=True)
            elif self.tiedWeights[layerNum] == 1:
                self.weights['decoder_w%i' % (len(self.encoderSize) + layerNum)] = \
                    tf.transpose( self.weights['encoder_w%i'%(len(self.encoderSize)-1-layerNum)] )
            else:
                raise ValueError('unknown tiedWeights value: %i. '
                                 'Must be 0 or 1 for each layer (or None).' % tiedWeights[layerNum])
        # encoder biases
        for layerNum in range( len( self.encoderSize ) - 1 ):
            self.biases['encoder_b%i'%(layerNum+1)] = \
                create_variable([self.encoderSize[layerNum+1]] , self.weightInitOpt, wd=True)

        # decoder biases
        for layerNum in range( len( self.decoderSize ) - 1 ):
            self.biases['decoder_b%i' % (len( self.encoderSize ) + layerNum )] = \
                create_variable([self.decoderSize[layerNum + 1]], self.weightInitOpt, wd=True)

        # build encoder
        self.a['a0'] = self.x
        for layerNum in range( 1 , len( self.encoderSize ) ):
            self.h['h%d' % (layerNum)] = \
                layer_fullyConn(self.a['a%d'%(layerNum-1)], self.weights['encoder_w%d'%(layerNum)],
                                self.biases['encoder_b%d'%(layerNum)])
            self.a['a%d' % (layerNum)] = layer_activation(self.h['h%d' % (layerNum)], self.activationFunc)

        # latent representation
        self.z = self.a['a%d' % (layerNum)]

        # build decoder
        for layerNum in range( 1 , len( self.decoderSize ) ):
            absLayerNum = len(self.encoderSize) + layerNum - 1
            self.h['h%d' % (absLayerNum)] = \
                layer_fullyConn(self.a['a%d'%(absLayerNum-1)], self.weights['decoder_w%d'%(absLayerNum)],
                                self.biases['decoder_b%d'%(absLayerNum)])
            if layerNum < len( self.decoderSize )-1:
                if self.skipConnect:
                    # skip connection from the mirrored encoder layer
                    self.h['h%d' % (absLayerNum)] += self.h['h%d' % (len(self.decoderSize)-1-layerNum)]
                self.a['a%d' % (absLayerNum)] = \
                    layer_activation(self.h['h%d' % (absLayerNum)], self.activationFunc)
            else:
                if self.skipConnect:
                    self.h['h%d' % (absLayerNum)] += self.a['a0']
                self.a['a%d' % (absLayerNum)] = \
                    layer_activation(self.h['h%d' % (absLayerNum)], self.activationFuncFinal)

        # reconstruction of the network input
        self.y_recon = self.a['a%d' % (absLayerNum)]
        # (fragment of the method that attaches a named training operation; its signature
        #  was lost in the export)
        # construct loss op
        self.train_ops['%s_loss'%name] = loss_function_reconstruction_1D(self.y_recon, self.y_target, loss_func)

        # construct training op
        self.train_ops['%s_train'%name] = \
            train_step(self.train_ops['%s_loss'%name]+wdLoss, learning_rate, decay_steps,
                       piecewise_bounds, piecewise_values, method)

    def add_model(self,addr,modelName):
        self.modelsAddrs[modelName] = addr
        # (the methods that map data to the latent space and back are only partially
        #  preserved; their closing statements are shown below)
        return dataZ

        # get reconstruction from a latent representation
        dataY_recon = sess.run(self.y_recon, feed_dict={self.z: dataZ})
        return dataY_recon

        # get reconstruction directly from input samples
        dataY_recon = sess.run(self.y_recon, feed_dict={self.x: dataSamples})
        return dataY_recon
class cnn_1D_network():
    """ Convolutional autoencoder for 1D spectra. """
    def __init__(self, configFile=None, inputSize=None, zDim=None, encoderNumFilters=None,
                 encoderFilterSize=None, encoderStride=None, activationFunc=None,
                 tiedWeights=None, weightInitOpt=None, weightStd=None, skipConnect=None,
                 padding=None, activationFuncFinal=None):
        # (constructor signature reconstructed; parts of the body are missing from this excerpt)
        self.inputSize = inputSize
        self.tiedWeights = tiedWeights
        self.skipConnect = skipConnect
        self.weightInitOpt = weightInitOpt
        self.weightStd = weightStd
        self.zDim = zDim
        self.padding = padding
        self.activationFunc = activationFunc
        self.encoderStride = encoderStride
        self.encoderNumFilters = encoderNumFilters
        self.encoderFilterSize = encoderFilterSize
        self.activationFuncFinal = activationFuncFinal
        self.net_config = ['inputSize','zDim','encoderNumFilters','encoderFilterSize','encoderStride',
                           'weightInitOpt','weightStd','skipConnect','padding','activationFunc',
                           'tiedWeights','activationFuncFinal']
        # loading config file overwrites input arguments
        if configFile is not None:
            load_config(self,configFile)
        if self.inputSize is None:
            raise Exception('value must be given for inputSize (not None)')
        self.weights = { }
        self.biases = { }
        self.h = {}
        self.a = {}
        self.train_ops = {}
        self.modelsAddrs = {}
        if self.tiedWeights is None:
            self.tiedWeights = [0]*(len(self.encoderNumFilters)-1)
        # (derived attributes such as self.decoderNumFilters, self.decoderFilterSize,
        #  self.decoderStride, self.encoderDataShape, self.decoderDataShape and the input
        #  placeholders self.x / self.y_target are omitted from this excerpt)
        #--
        # encoder weights
        for layerNum in range( len( self.encoderNumFilters ) - 1 ):
            self.weights['encoder_w%i'%(layerNum+1)] = \
                create_variable([self.encoderFilterSize[layerNum], self.encoderNumFilters[layerNum],
                                 self.encoderNumFilters[layerNum+1]], self.weightInitOpt, wd=True)
        self.weights['encoder_w%i' % (layerNum + 2)] = create_variable(
            [self.encoderDataShape[layerNum+1], self.zDim], self.weightInitOpt, wd=True)

        # decoder weights
        self.weights['decoder_w%i' % (layerNum + 3)] = create_variable(
            [self.zDim, self.decoderDataShape[1]], self.weightInitOpt, wd=True)
        for layerNum in range( len( self.decoderNumFilters ) - 1 ):
            if self.tiedWeights[layerNum] == 0:
                self.weights['decoder_w%i' % (len( self.encoderDataShape ) + layerNum + 1)] = \
                    create_variable([self.decoderFilterSize[layerNum], self.decoderNumFilters[layerNum+1],
                                     self.decoderNumFilters[layerNum]], self.weightInitOpt, wd=True)
            elif self.tiedWeights[layerNum] == 1:
                self.weights['decoder_w%i' % (len( self.encoderNumFilters )+layerNum+2)] = \
                    self.weights['encoder_w%i' % (len(self.encoderNumFilters)-1 - layerNum)]
            else:
                raise ValueError('unknown tiedWeights value: %i. '
                                 'Must be 0 or 1 for each layer (or None).' % tiedWeights[layerNum])
        # encoder biases
        for layerNum in range( len( self.encoderNumFilters ) - 1 ):
            self.biases['encoder_b%i'%(layerNum+1)] = \
                create_variable([self.encoderNumFilters[layerNum+1]] , self.weightInitOpt, wd=True)
        self.biases['encoder_b%i'%(layerNum+2)] = create_variable([self.zDim] , self.weightInitOpt, wd=True)

        # decoder biases
        self.biases['decoder_b%i' % (layerNum + 3)] = \
            create_variable([self.decoderDataShape[1]], self.weightInitOpt, wd=True)
        for layerNum in range( len( self.decoderNumFilters ) - 1 ):
            self.biases['decoder_b%i' % (len( self.encoderDataShape ) + layerNum + 1)] = \
                create_variable([self.decoderNumFilters[layerNum+1]], self.weightInitOpt, wd=True)

        # build encoder
        self.a['a0'] = tf.expand_dims(self.x, axis=2)   # expand to shape None x inputSize x 1
        for layerNum in range( 1 , len( self.encoderNumFilters ) ):
            self.h['h%d' % (layerNum)] = \
                layer_conv1d(self.a['a%d'%(layerNum-1)], self.weights['encoder_w%d'%(layerNum)],
                             self.biases['encoder_b%d'%(layerNum)], padding=self.padding,
                             stride=self.encoderStride[layerNum-1])
            self.a['a%d' % (layerNum)] = layer_activation(self.h['h%d' % (layerNum)], self.activationFunc)
        self.a['a%d'%(layerNum)] = tf.reshape( self.a['a%d'%(layerNum)], [-1, self.encoderDataShape[layerNum+1]] )
        self.h['h%d' % (layerNum+1)] = \
            layer_fullyConn(self.a['a%d'%(layerNum)], self.weights['encoder_w%d'%(layerNum+1)],
                            self.biases['encoder_b%d'%(layerNum+1)])
        self.a['a%d' % (layerNum+1)] = layer_activation(self.h['h%d' % (layerNum+1)], self.activationFunc)

        # latent representation
        self.z = self.a['a%d' % (layerNum+1)]   # collapse a dim

        # build decoder
        self.h['h%d' % (layerNum+2)] = \
            layer_fullyConn(self.a['a%d' % (layerNum+1)], self.weights['decoder_w%d' % (layerNum+2)],
                            self.biases['decoder_b%d' % (layerNum+2)])
        if self.skipConnect:
            self.h['h%d' % (layerNum+2)] += tf.reshape(
                self.h['h%d' % (len( self.decoderNumFilters ) - 1)] , [-1, self.encoderDataShape[layerNum+1]] )
        self.a['a%d' % (layerNum+2)] = layer_activation(self.h['h%d' % (layerNum+2)], self.activationFunc)
        self.a['a%d' % (layerNum + 2)] = tf.reshape(
            self.a['a%d' % (layerNum + 2)],
            [-1, int(self.decoderDataShape[1]/self.encoderNumFilters[-1]), self.encoderNumFilters[-1]] )
        for layerNum in range( 1 , len( self.decoderNumFilters ) ):
            absLayerNum = len( self.encoderDataShape ) + layerNum
            outputShape = [tf.shape(self.a['a%d' % (absLayerNum-1)] )[0],
                           self.decoderDataShape[layerNum+1], self.decoderNumFilters[layerNum]]
            self.h['h%d' % (absLayerNum)] = \
                layer_deconv1d(self.a['a%d'%(absLayerNum-1)], self.weights['decoder_w%d'%(absLayerNum)],
                               self.biases['decoder_b%d'%(absLayerNum)],
                               outputShape, padding=self.padding, stride=self.decoderStride[layerNum-1])
            if layerNum < len( self.decoderNumFilters )-1:
                if self.skipConnect:
                    # skip connection from the mirrored encoder conv layer
                    self.h['h%d' % (absLayerNum)] += self.h['h%d' % (len( self.decoderNumFilters )-1-layerNum)]
                self.a['a%d' % (absLayerNum)] = layer_activation(self.h['h%d' % (absLayerNum)], self.activationFunc)
            else:
                if self.skipConnect:
                    self.h['h%d' % (absLayerNum)] += self.a['a0']
                self.a['a%d' % (absLayerNum)] = layer_activation(self.h['h%d' % (absLayerNum)], self.activationFuncFinal)

        # reconstruction of the network input (collapse the channel dimension)
        self.y_recon = tf.squeeze(self.a['a%d' % (absLayerNum)], axis=2)
        # (fragment of the method that attaches a named training operation; its signature
        #  was lost in the export)
        # construct loss op
        self.train_ops['%s_loss'%name] = loss_function_reconstruction_1D(self.y_recon, self.y_target, loss_func)

        # construct training op
        self.train_ops['%s_train'%name] = \
            train_step(self.train_ops['%s_loss'%name]+wdLoss, learning_rate, decay_steps,
                       piecewise_bounds, piecewise_values, method)

    def add_model(self,addr,modelName):
        self.modelsAddrs[modelName] = addr
        # (the methods that map data to the latent space and back are only partially
        #  preserved; their closing statements are shown below)
        return dataZ

        # get reconstruction from a latent representation
        dataY_recon = sess.run(self.y_recon, feed_dict={self.z: dataZ})
        return dataY_recon

        # get reconstruction directly from input samples
        dataY_recon = sess.run(self.y_recon, feed_dict={self.x: dataSamples})
        return dataY_recon
import sys
import time

def reporthook(count, block_size, total_size):
    # progress callback for urllib.request.urlretrieve (function name reconstructed)
    global start_time
    if count == 0:
        start_time = time.time()
        return
    duration = time.time() - start_time
    progress_size = int(count * block_size)
    speed = int(progress_size / (1024 * duration))
    percent = int(count * block_size * 100 / total_size)
    sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
                     (percent, progress_size / (1024 * 1024), speed, duration))
    sys.stdout.flush()
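This hook matches the reporthook callback signature expected by urllib.request.urlretrieve; a hedged usage sketch (the URL and filename are placeholders, the notebook's actual dataset link is not shown in this excerpt):

from urllib.request import urlretrieve
# placeholder URL/filename; substitute the actual hyperspectral dataset used in the notebook
urlretrieve('http://example.com/dataset.mat', 'dataset.mat', reporthook)
mat = scipy.io.loadmat('dataset.mat')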
img.shape
print(hypData.numRows, hypData.numCols, hypData.numBands, hypData.numClasses, hypData.numSamples)
# create data iterator objects for training and validation using the pre-processed data
trainSamples = 200000
valSamples = 100
dataTrain = Iterator( dataSamples=hypData.spectraPrep[:trainSamples, :],
                      targets=hypData.spectraPrep[:trainSamples, :], batchSize=200 )
dataVal = Iterator( dataSamples=hypData.spectraPrep[trainSamples:trainSamples+valSamples, :],
                    targets=hypData.spectraPrep[trainSamples:trainSamples+valSamples, :] )
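The cell that constructs the autoencoder net used below did not survive the export; the following is a hedged sketch of a setup consistent with the surrounding cells (the encoder sizes, learning rate, add_train_op method name, and save directory are assumptions; the 10-dimensional latent layer matches the latent plots further down):

# hedged sketch: reconstruct the missing network-construction cell (values are assumptions)
net = mlp_1D_network(inputSize=hypData.numBands, encoderSize=[50, 30, 10],
                     activationFunc='relu', weightInitOpt='truncated_normal',
                     tiedWeights=None, skipConnect=False, activationFuncFinal='linear')
# the method that registers the 'csa' training op is not shown in this excerpt;
# add_train_op is an assumed name for it
net.add_train_op(name='csa', learning_rate=1e-3, method='Adam')
# directory for the checkpoints saved during training (assumed location)
save_addr = '/content/drive/My Drive/Hyperspectral image segmentation/models'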
# train the network for 100 epochs, saving the model at epoch 50 and 100
net.train(dataTrain=dataTrain, dataVal=dataVal, train_op_name='csa', n_epochs=100,
          save_addr=save_addr, save_epochs=[50, 100])
hypData.spectraPrep
tf.train.Saver
tensorflow.python.training.saver.Saver
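The plotting cells below use a latent image imgZ, a reconstruction imgY and the pre-processed input imgX; the cells that restored the trained model and computed them were lost in the export. A hedged sketch of reloading the epoch-100 checkpoint with the load_model helper defined earlier (the directory layout follows save_model above; how imgZ/imgY are assembled from the network outputs is not shown in this excerpt):

# hedged sketch: restore the checkpoint written at epoch 100 by net.train(...)
with tf.Session() as sess:
    load_model(join(save_addr, 'epoch_100'), sess)
    # the network's latent-encoding and reconstruction helpers (only their closing
    # return statements survive above) would then produce dataZ and dataY_recon,
    # which the notebook reshapes into the imgZ / imgY arrays plotted below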
import os
import matplotlib.pyplot as plt

# save a latent image (using 3 out of the 10 dimensions)
os.path.join('/content/drive/My Drive/Hyperspectral image segmentation/results', 'test_mlp_la
fig = plt.figure()
plt.plot(imgZ[576, 210, :])
plt.xlabel('latent dimension')
plt.ylabel('latent value')
plt.title('meadow spectra')
plt.savefig(os.path.join('/content/drive/My Drive/Hyperspectral image segmentation/res
# save plot comparing pre-processed 'meadow' spectra input with decoder reconstruction
fig = plt.figure()
ax = plt.subplot(111)
ax.plot(range(hypData.numBands),imgX[576, 210, :],label='pre-processed input')
ax.plot(range(hypData.numBands),imgY[576, 210, :],label='reconstruction')
plt.xlabel('band')
plt.ylabel('value')
plt.title('meadow spectra')
ax.legend()
plt.savefig(os.path.join('/content/drive/My Drive/Hyperspectral image segmentation/res
img_array.shape
(610, 340, 4)