Python Primer
Python Primer
March 1, 2016
1 Python
1.1 Lists and Tuples
1.1.1 Indexing into list
In [1]: l = [1, 2, 3] # make a list
Out[1]: 2
Out[2]: [1, 2, 3, 4]
Out[3]: [1, 3, 4]
Out[4]: [1, 3, 3, 4]
1.1.5 Tuples
In [5]: t = (1, 3, 3, 4) # make a tuple
l == t
Out[5]: False
Out[6]: True
1
1.2 Dictionaries
In [7]: Dict = {}
Dict[1] = 2
Dict[one] = two
Dict[1] = 2
Dict
one = 1
print "\nValue at 1"
print Dict[one]
Dictionary keys
[1, 1, one]
Value at 1 :
2
Value at one
two
Value at 1
2
Delete key : 1
{1: 2, one: two}
2
2 Classes and Function
2.1 Functions
def printer(x):
    """Write the given value to stdout."""
    print(x)
def adder(x, y):
    """Return the sum of the two arguments."""
    result = x + y
    return result
def square(x):
    """Return the argument raised to the power two."""
    squared = x ** 2
    return squared
a = 2
b = 3
print("Lets print a:")
printer(a)
print("\nLets print a + b")
printer(adder(a, b))
# NOTE(review): the closing quote of this string was lost in extraction; the
# second sentence is reconstructed from the printed transcript — confirm.
print("\n So you can pass the return of a function to another function just like everywhere. \n Lets take it another step further")
printer(square(adder(a, b)))
Lets print a:
2
Lets print a + b
5
So you can pass the return of a function to another function just like everywhere.
Lets take it another step further
25
2.2 Classes
class student(object):
    """Minimal student record holding a name and an age.

    NOTE(review): the `def __init__(...)` header line was lost in extraction;
    the signature below is reconstructed from the surviving body and the
    printed results (`{age: 20, name: Amartya}` for a call without an age) —
    confirm against the original notebook.
    """

    def __init__(self, name, age=None):
        self.name = name
        if age is None:
            # Default age when the caller supplies none.
            self.age = 20
        else:
            self.age = age

    def update_name(self, name):
        """Replace the stored name."""
        self.name = name

    def update_age(self, age):
        """Replace the stored age."""
        self.age = age

    def inc_age(self):
        """Increase the stored age by one."""
        self.age = self.age + 1

    def return_info(self):
        """Return the record as a two-element list: [name, age]."""
        temp = [self.name, self.age]
        return temp
print "\nBhuvesh:"
print vars(Bhuvesh)
Amartya:
{age: 20, name: Amartya}
Bhuvesh:
{age: 21, name: Bhuvesh}
3 Exceptions
In [12]: print "Adding 2 and 3"
printer(adder(2,3))
print "\nBut say we want to practical and only add numbers , not people."
def adder(x, y):
    """Return x + y when both operands are numeric (int or float).

    Non-numeric operands trigger the error path: a message is printed and
    the function returns None (the ValueError is caught internally, so no
    exception escapes to the caller).
    """
    try:
        # BUG FIX: the original chained the != checks with `or`
        # (`type(x) != int or type(x) != float ...`), which is always true
        # because no value has two types — so every call raised, even
        # adder(2, 3). isinstance with a type tuple expresses the intent.
        if not isinstance(x, (int, float)) or not isinstance(y, (int, float)):
            raise ValueError()
        else:
            return x + y
    except ValueError:
        print("Error!! Error!! You cant add people\n")
Adding 2 and 3
5
But say we want to practical and only add numbers , not people.
None
4 Starting Numpy
In [13]: import numpy as np #Please dont forget this
Zero Array
[ 0. 0. 0. 0. 0.]
Zero Matrix:
[[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
5
Ones Array
[ 1. 1. 1. 1. 1.]
Ones Matrix:
[[ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
[ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]]
Elementwise Addition
[5 7 9]
Dot Product
32
In [19]: print "Lets square each element in the array"
print [x**2 for x in C]
print "\n Lets do some more complicated function"
def updateX(x):
    """Return (log(x + 2)) ** 2, elementwise for array input."""
    shifted = x + 2
    logged = np.log(shifted)
    return np.power(logged, 2)
6
Lets square each element in the array
[16, 25, 36]
4.1.5 Useful stuff that makes your life easier when coding.
print("Creating an array of numbers from 1 to 9")  # fixed "Createing" typo
A = np.arange(1, 10)  # upper bound is exclusive, so this stops at 9
print(A)
Summing up elements
Each column
[ 0 11 24]
Each row
[ 7 13 15]
7
In [22]: print "Mean of elements"
print "\n Each column"
print np.mean(C,axis=0)
print "\n Each row"
print np.mean(C,axis=1)
Mean of elements
Each column
[ 0. 3.66666667 8. ]
Each row
[ 2.33333333 4.33333333 5. ]
Product of elements
Each column
[ 0 0 504]
Each row
[0 0 0]
5 Finally Theano!
In [24]: import theano
import theano.tensor as T
addTh = theano.function([x,y],temp1)
theano.pp(addTh.maker.fgraph.outputs[0])
3.0
temp1 = T.le(x, y)
compTh = theano.function([x,y],temp1)
8
theano.pp(compTh.maker.fgraph.outputs[0])
print compTh(4,3)
ifelse = theano.function([x,y],res)
print ""
print theano.pp(compTh.maker.fgraph.outputs[0])
print ""
print ifelse(5,4)
10.0
uselessFunc = theano.function([x,y],d)
theano.pp(uselessFunc.maker.fgraph.outputs[0])
7.52932798092
A = np.asarray([1,2,3])
B = np.asarray([4,5,6])
9
dotaddfn = theano.function([x,y], [xdoty,xaddy])
print dotaddfn(A,B)
print "\n All element wise operations are similar to numpy"
print theano.pp(logistic.maker.fgraph.outputs[0])
logistic([[0, 1], [-1, -2]])
sigmoid(x)
0
1
3
6
10
15
21
28
36
45
10
5.3 As you might have guessed, ML is a lot about updating parameters to
achieve the lowest cost
5.4 But then we need to choose what to update it with
5.5 Gear up for some magic
5.6 Gradient Magic
# NOTE(review): extraction dropped the quotes in T.scalar(a); the pp() output
# in the transcript ("TensorConstant{2.0} * a") shows the variable is named
# 'a' — confirm against the original notebook.
a = T.scalar('a')
b = T.sqr(a)        # b = a ** 2 symbolically
c = T.grad(b, a)    # symbolic derivative db/da = 2 * a
gradfn = theano.function([a], c)
print(theano.pp(gradfn.maker.fgraph.outputs[0]))
print(gradfn(4))    # prints 8.0, matching the transcript
(TensorConstant{2.0} * a)
8.0
In [38]: B = theano.shared(np.asarray([1.,2.]))
R = T.sqr(B).sum()
A = T.grad(R, B)
In [ ]:
In [ ]:
In [ ]:
11
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
12