# MNIST multi-layer-perceptron tutorial (TensorFlow 1.x notebook export).
# NOTE(review): this file is a Jupyter notebook dump — bare expressions
# (e.g. `tf.__version__`) are REPL echoes, and the `%matplotlib inline`
# magic below is IPython-only syntax, not valid in a plain .py script.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
tf.__version__  # REPL echo of the installed TensorFlow version
# Download (if not cached) and load MNIST; labels come back one-hot encoded.
mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
type(mnist)
mnist.train.images.shape # (rows, columns): one flattened image per row
# One training image: flat 784-vector reshaped to 28x28 for display.
sample = mnist.train.images[20].reshape(28,28) # (rows, columns) 28*28=784
import matplotlib.pyplot as plt
%matplotlib inline
plt.imshow(sample,cmap='Greys')
# --- Hyperparameters ---
learning_rate = 0.001 # step size for the Adam optimizer
training_epochs = 15 # full passes over the training set
batch_size = 100 # examples per gradient step
n_classes = 10 # the number of labels (digits 0-9)
n_samples = mnist.train.num_examples
mnist.train.num_examples
n_input = 784
# image info, columns: 28*28 pixels flattened into one vector
n_hidden_1 = 256
n_hidden_2 = 256
def multilayer_perceptron(x, weights, biases):
    """Build a two-hidden-layer MLP and return its output logits.

    Parameters
    ----------
    x : tf.Tensor
        Placeholder for the input batch, shape (None, n_input).
    weights : dict
        Weight variables keyed 'h1', 'h2', 'out'.
    biases : dict
        Bias variables keyed 'b1', 'b2', 'out'.

    Returns
    -------
    tf.Tensor
        Raw (unscaled) logits of shape (None, n_classes); softmax is
        applied later inside softmax_cross_entropy_with_logits.
    """
    # First hidden layer with ReLU activation: f(x) = max(0, x @ W1 + b1)
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Second hidden layer, same pattern.
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Output layer is linear (no activation) — returns logits.
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer
# Parameters drawn from a standard normal; layer widths chain together as
# n_input -> n_hidden_1 -> n_hidden_2 -> n_classes.
weights = dict(
    h1=tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    h2=tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    out=tf.Variable(tf.random_normal([n_hidden_2, n_classes])),
)
# One bias vector per layer, sized to that layer's output width.
biases = dict(
    b1=tf.Variable(tf.random_normal([n_hidden_1])),
    b2=tf.Variable(tf.random_normal([n_hidden_2])),
    out=tf.Variable(tf.random_normal([n_classes])),
)
# --- Graph inputs and training ops ---
x = tf.placeholder('float',[None,n_input]) # image: flattened 28x28 pixels
print(x)
y = tf.placeholder('float',[None,n_classes]) # label: one-hot digit vector
print(y)
y
# Wire the MLP; `pred` holds the raw logits tensor.
pred = multilayer_perceptron(x,weights,biases)
print(pred)
pred
# Softmax cross-entropy per example, averaged over the batch.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=pred))
print(cost)
# Adam minimizes the mean cross-entropy; `optimizer` is the train op.
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
print(optimizer)
# Training the Model
# Peek at a single training example (image + one-hot label) before training.
t = mnist.train.next_batch(1)
Xsamp,ysamp = t
plt.imshow(Xsamp.reshape(28,28),cmap='Greys') # Xsamp: image
ysamp # label (REPL echo of the one-hot vector)
## Run the Session
sess = tf.InteractiveSession()
# tf.initialize_all_variables() is deprecated and was removed after TF 1.x;
# tf.global_variables_initializer() is the supported equivalent throughout
# the TF 1.x line this notebook targets.
init = tf.global_variables_initializer()
sess.run(init)
training_epochs
n_samples
batch_size
#batch_x
#batch_y
# Train for `training_epochs` (15) full passes over the training set.
for epoch in range(training_epochs):
    avg_cost = 0.0  # running mean of batch losses for this epoch
    total_batch = int(n_samples / batch_size)  # batches per epoch
    for i in range(total_batch):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # One optimization step; also fetch the batch loss for reporting.
        _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
        avg_cost += c / total_batch
    print("Epoch: {} Cost: {:.4f}".format(epoch+1, avg_cost))
print("Model has completed {} Epochs of training".format(training_epochs))
## Model Evaluations
# Per-example boolean: does the logits argmax match the label argmax?
correct_predictions = tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
print(correct_predictions[0])
correct_predictions[0]
# Cast booleans to 0.0/1.0 so the mean below yields the accuracy fraction.
correct_predictions = tf.cast(correct_predictions, 'float')
print(correct_predictions[0])
correct_predictions[0]
accuracy = tf.reduce_mean(correct_predictions)
type(accuracy)
mnist.test.labels[0]
#mnist.test.images[0]
# Evaluate on the full test set; .eval() uses the InteractiveSession
# registered above as the default session.
accuracy.eval({x:mnist.test.images, y:mnist.test.labels})