tensorboard_advanced.py

'''
Graph and Loss visualization using Tensorboard.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''

import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Parameters
learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_step = 1
logs_path = '/tmp/tensorflow_logs/example'

# Network Parameters
n_hidden_1 = 256  # 1st layer number of features
n_hidden_2 = 256  # 2nd layer number of features
n_input = 784     # MNIST data input (img shape: 28*28)
n_classes = 10    # MNIST total classes (0-9 digits)

# tf Graph Input
# mnist data image of shape 28*28=784
x = tf.placeholder(tf.float32, [None, 784], name='InputData')
# 0-9 digits recognition => 10 classes
y = tf.placeholder(tf.float32, [None, 10], name='LabelData')


# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['w1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Create a summary to visualize the first layer ReLU activation
    tf.histogram_summary("relu1", layer_1)
    # Hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['w2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Create another summary to visualize the second layer ReLU activation
    tf.histogram_summary("relu2", layer_2)
    # Output layer
    out_layer = tf.add(tf.matmul(layer_2, weights['w3']), biases['b3'])
    return out_layer


# Store layers weight & bias
weights = {
    'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1]), name='W1'),
    'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]), name='W2'),
    'w3': tf.Variable(tf.random_normal([n_hidden_2, n_classes]), name='W3')
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1]), name='b1'),
    'b2': tf.Variable(tf.random_normal([n_hidden_2]), name='b2'),
    'b3': tf.Variable(tf.random_normal([n_classes]), name='b3')
}

# Encapsulating all ops into scopes, making Tensorboard's Graph
# visualization more convenient
with tf.name_scope('Model'):
    # Build model
    pred = multilayer_perceptron(x, weights, biases)

with tf.name_scope('Loss'):
    # Softmax Cross entropy (cost function)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))

with tf.name_scope('SGD'):
    # Gradient Descent
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    # Op to calculate every variable gradient
    grads = tf.gradients(loss, tf.trainable_variables())
    grads = list(zip(grads, tf.trainable_variables()))
    # Op to update all variables according to their gradient
    apply_grads = optimizer.apply_gradients(grads_and_vars=grads)
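    # Note: applying the (gradient, variable) pairs above is equivalent to
    # calling optimizer.minimize(loss); the gradients are built explicitly
    # here only so they can also be visualized with histogram summaries below.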
with tf.name_scope('Accuracy'):
    # Accuracy
    acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    acc = tf.reduce_mean(tf.cast(acc, tf.float32))

# Initializing the variables
init = tf.initialize_all_variables()

# Create a summary to monitor cost tensor
tf.scalar_summary("loss", loss)
# Create a summary to monitor accuracy tensor
tf.scalar_summary("accuracy", acc)
# Create summaries to visualize weights
for var in tf.trainable_variables():
    tf.histogram_summary(var.name, var)
# Summarize all gradients
for grad, var in grads:
    tf.histogram_summary(var.name + '/gradient', grad)
# Merge all summaries into a single op
merged_summary_op = tf.merge_all_summaries()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # op to write logs to Tensorboard
    summary_writer = tf.train.SummaryWriter(logs_path,
                                            graph=tf.get_default_graph())
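    # Passing graph=tf.get_default_graph() also writes the graph definition,
    # so the name-scoped model appears in TensorBoard's Graph tab.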
    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop), cost op (to get loss value)
            # and summary nodes
            _, c, summary = sess.run([apply_grads, loss, merged_summary_op],
                                     feed_dict={x: batch_xs, y: batch_ys})
            # Write logs at every iteration
            summary_writer.add_summary(summary, epoch * total_batch + i)
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)

    print "Optimization Finished!"

    # Test model
    # Calculate accuracy
    print "Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels})

    print "Run the command line:\n" \
        "--> tensorboard --logdir=/tmp/tensorflow_logs " \
        "\nThen open http://0.0.0.0:6006/ in your web browser"