import lif
import numpy as np
import matplotlib.pyplot as plt
# 2 input neurons
# 1 output neuron
# initialize random weights
# Adjust weights using hebbian
# https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1007692#pcbi-1007692-g002
# At the end of learning, the neuron's tuning curves are uniformly distributed
# (Fig 2Giii), and the quality of the representation becomes optimal for all
# input signals (Fig 2Aiii and 2Ciii).
# What are decoding weights?
# What are tuning curves?
# Are our inputs correlated? (for AND, OR gate)
# When does learning converge? Mainly, what does this mean: "Learning converges
# when all tuning curve maxima are aligned with the respective feedforward
# weights (Fig 3Bii; dashed lines and arrows)."
# ---------------------------------------------------------------------------
# https://www.geeksforgeeks.org/single-neuron-neural-network-python/
class SNN:
    def __init__(self):
        np.random.seed(1)  # generate the same random weights for every trial
        # Binary input/target patterns and per-neuron parameters
        self.z = np.array([1, 0, 0, 0, 1, 1, 0, 1, 0, 1])
        self.alpha = np.array([1, 1, 1])
        self.beta = np.array([2, 2, 2])
        # Weights between the 2 input neurons and the 1 output neuron
        self.weights = np.random.rand(2)

    @staticmethod
    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    def hebbian_update(self, inputs, lr=0.1):
        # Hebbian rule: grow each weight in proportion to the product of
        # pre-synaptic activity (inputs) and post-synaptic activity (zhat).
        # (The learning rate lr is an assumed placeholder value.)
        out = np.dot(self.weights, inputs)
        zhat = self.sigmoid(out)
        self.weights += lr * zhat * inputs  # delta_w_i = lr * zhat * input_i
        return zhat
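# A minimal usage sketch (an assumed driver, not from the original): feed the
# neuron the four AND-gate input patterns and apply the Hebbian update; the
# AND-gate inputs are correlated, which is one of the questions raised above.
snn = SNN()
and_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
for _ in range(10):  # a few passes over the patterns
    for x in and_inputs:
        snn.hebbian_update(x)
print("weights after Hebbian updates:", snn.weights)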
# https://praneethnamburi.com/2015/02/05/simulating-neural-spike-trains/
# fr: firing rate estimate (in Hz)
# nbins: number of 1 ms time bins per trial
# num_trials: number of independent spike trains to simulate
def poissonSpike(fr, nbins, num_trials):
    dt = 1 / 1000  # 1 ms bins
    # In each bin, a spike occurs with probability fr * dt (Poisson approximation)
    spikeMatrix = np.random.rand(num_trials, nbins) < fr * dt
    t = np.arange(nbins) * dt  # time axis in seconds
    return spikeMatrix, t
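# Quick sanity check (fr=30, nbins=1000, num_trials=20 are assumed example
# values): the fraction of bins containing a spike should be close to fr * dt,
# so dividing by dt recovers the firing rate in Hz.
sm, t = poissonSpike(fr=30, nbins=1000, num_trials=20)
print("empirical rate (Hz):", sm.mean() * 1000)  # spikes per bin / dt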
def rasterPlot(spikeMatrix):
    spikes_x = []  # bin indices of spikes (time axis)
    spikes_y = []  # trial indices of spikes
    for i in range(spikeMatrix.shape[0]):
        for j in range(spikeMatrix.shape[1]):
            if spikeMatrix[i][j]:
                spikes_y.append(i)
                spikes_x.append(j)
    # Draw each spike as a vertical tick, one row per trial
    plt.scatter(spikes_x, spikes_y, marker="|")
    plt.xlabel("time bin")
    plt.ylabel("trial")
    plt.show()

rasterPlot(sm)
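# An alternative sketch (not in the original) using matplotlib's built-in
# eventplot, which draws a raster directly from per-trial spike-index lists;
# it reuses the sm generated above.
spike_times = [np.nonzero(trial)[0] for trial in sm]
plt.eventplot(spike_times)
plt.xlabel("time bin")
plt.ylabel("trial")
plt.show()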