Introduction to TensorFlow
Babu Priyavrat
TensorFlow - Basics
What is a Tensor?
3 # a rank 0 tensor; this is a scalar with shape []
[1., 2., 3.] # a rank 1 tensor; this is a vector with shape [3]
[[1., 2., 3.], [4., 5., 6.]] # a rank 2 tensor; a matrix with shape [2, 3]
[[[1., 2., 3.]], [[7., 8., 9.]]] # a rank 3 tensor with shape [2, 1, 3]
Creating a Model
import tensorflow as tf
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(tf.float32)
# A placeholder is a promise to provide a value later.
linear_model = W * x + b
Initializing Session
# To initialize all the variables in a TensorFlow program, you must
# explicitly call a special operation:
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
Running Session
print(sess.run(linear_model, {x: [1, 2, 3, 4]}))
Output:
[ 0. 0.30000001 0.60000002 0.90000004]
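As a quick check of the rank/shape vocabulary above, a minimal sketch (assuming the same TensorFlow 1.x session style used throughout this deck) that asks TensorFlow for the rank and shape of a tensor:
import tensorflow as tf
t = tf.constant([[1., 2., 3.], [4., 5., 6.]])  # a rank 2 tensor
sess = tf.Session()
print(sess.run(tf.rank(t)))   # 2
print(sess.run(tf.shape(t)))  # [2 3]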
tf.train
Defining Optimizer and using it
# loss
loss = tf.reduce_sum(tf.square(linear_model - y))  # sum of the squares
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
Complete implementation
import tensorflow as tf
# Model parameters
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
# Model input and output
x = tf.placeholder(tf.float32)
linear_model = W * x + b
y = tf.placeholder(tf.float32)
# loss
loss = tf.reduce_sum(tf.square(linear_model - y)) # sum of the squares
# optimizer
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
# training data
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)  # reset variables to their initial (incorrect) values
for i in range(1000):
    sess.run(train, {x: x_train, y: y_train})
# evaluate training accuracy
curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})
print("W: %s b: %s loss: %s"%(curr_W, curr_b, curr_loss))
#output
W: [-0.9999969] b: [ 0.99999082] loss: 5.69997e-11
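Note that the learned parameters are, up to floating-point error, W = -1 and b = 1, which fit the training data exactly: W*x + b = -x + 1 yields [0, -1, -2, -3] for x = [1, 2, 3, 4].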
TensorBoard
4
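The original slide shows TensorBoard screenshots that did not survive extraction. As a minimal sketch (assuming TensorFlow 1.x and the linear model from the previous slide), the graph and loss can be logged and then inspected by running tensorboard --logdir=/tmp/linear_demo:
import tensorflow as tf
x = tf.placeholder(tf.float32, name='x')
y = tf.placeholder(tf.float32, name='y')
W = tf.Variable([.3], dtype=tf.float32, name='W')
b = tf.Variable([-.3], dtype=tf.float32, name='b')
linear_model = W * x + b
loss = tf.reduce_sum(tf.square(linear_model - y))
train = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
tf.summary.scalar('loss', loss)   # track the loss over time
merged = tf.summary.merge_all()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter('/tmp/linear_demo', sess.graph)  # log the graph
for i in range(1000):
    summary, _ = sess.run([merged, train], {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
    writer.add_summary(summary, i)  # log the loss at each step
writer.close()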
tf.estimator
Defining Estimator
import tensorflow as tf
import numpy as np
# Declare list of features. We only have one numeric feature.
feature_columns = [tf.feature_column.numeric_column("x", shape=[1])]
# An estimator is the front end to invoke training (fitting) and evaluation
# (inference).
estimator = tf.estimator.LinearRegressor(feature_columns=feature_columns)
# Here we use two data sets: one for training and one for evaluation
x_train = np.array([1., 2., 3., 4.])
y_train = np.array([0., -1., -2., -3.])
x_eval = np.array([2., 5., 8., 1.])
y_eval = np.array([-1.01, -4.1, -7, 0.])
input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_train}, y_train, batch_size=4, num_epochs=None, shuffle=True)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_train}, y_train, batch_size=4, num_epochs=1000, shuffle=False)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_eval}, y_eval, batch_size=4, num_epochs=1000, shuffle=False)
# Invoke 1000 training steps by calling the train method with the
# training data set.
estimator.train(input_fn=input_fn, steps=1000)
# Here we evaluate how well our model did.
train_metrics = estimator.evaluate(input_fn=train_input_fn)
eval_metrics = estimator.evaluate(input_fn=eval_input_fn)
print("train metrics: %r"% train_metrics)
print("eval metrics: %r"% eval_metrics)
A Neural Network with TensorFlow, with four neurons in the hidden layer
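The slide's network diagram is not reproduced here. As a minimal sketch of such a network (assuming TensorFlow 1.x, a single input feature, a single output, and ReLU activation, none of which are specified on the slide):
import tensorflow as tf
x = tf.placeholder(tf.float32, [None, 1])    # one input feature
y_ = tf.placeholder(tf.float32, [None, 1])   # target value
# Hidden layer: four neurons
W1 = tf.Variable(tf.random_normal([1, 4]))
b1 = tf.Variable(tf.zeros([4]))
hidden = tf.nn.relu(tf.matmul(x, W1) + b1)
# Output layer: one linear neuron
W2 = tf.Variable(tf.random_normal([4, 1]))
b2 = tf.Variable(tf.zeros([1]))
y = tf.matmul(hidden, W2) + b2
loss = tf.reduce_mean(tf.square(y - y_))
train = tf.train.GradientDescentOptimizer(0.01).minimize(loss)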
Handwriting Recognition
MNIST training set
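The slide shows sample digits from the MNIST training set. The dataset, as loaded later in this deck, consists of 28x28 grayscale images flattened to 784-element vectors; a quick way to confirm its shape:
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print(mnist.train.images.shape)  # (55000, 784): 55,000 images of 28*28 pixels
print(mnist.train.labels.shape)  # (55000, 10): one-hot digit labels
print(mnist.test.images.shape)   # (10000, 784): held-out test images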
Softmax and Cross-Entropy
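The slide's formulas did not survive extraction. Softmax turns a vector of scores into probabilities, and cross-entropy measures how far those probabilities are from the one-hot true label; a minimal NumPy sketch of both definitions:
import numpy as np
def softmax(z):
    e = np.exp(z - np.max(z))  # subtract the max for numerical stability
    return e / e.sum()
logits = np.array([2.0, 1.0, 0.1])   # raw scores for three classes
probs = softmax(logits)              # ~[0.659, 0.242, 0.099]
label = np.array([1.0, 0.0, 0.0])    # one-hot true class
cross_entropy = -np.sum(label * np.log(probs))  # ~0.417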
Cross-Entropy in TensorFlow
# y_ holds the true (one-hot) labels; y holds the model's predictions
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
# Prefer the built-in function for better numerical stability:
# tf.nn.softmax_cross_entropy_with_logits
# Usage (here y must be the raw logits, not softmax outputs):
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
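The hand-written version can produce -inf or NaN when a predicted probability underflows to zero (log(0)); the built-in op computes the softmax and the log together in a numerically stable way, which is why it is preferred in practice.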
MNIST Implementation
Reading Data Set
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
Creating Model and Training in batches
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.matmul(x, W) + b
# True labels, loss, and training step (cross-entropy as on the previous slide)
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
Testing the trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy=', sess.run(accuracy, feed_dict={x: mnist.test.images,
                                                 y_: mnist.test.labels}))
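This plain softmax-regression model typically reaches an accuracy of around 92% on the MNIST test set.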
Questions & Answers
