tensorboard_advanced.py

'''
Graph and Loss visualization using Tensorboard.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
from __future__ import print_function

import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Parameters
learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_step = 1
logs_path = '/tmp/tensorflow_logs/example'

# Network Parameters
n_hidden_1 = 256  # 1st layer number of features
n_hidden_2 = 256  # 2nd layer number of features
n_input = 784     # MNIST data input (img shape: 28*28)
n_classes = 10    # MNIST total classes (0-9 digits)

# tf Graph Input
# mnist data image of shape 28*28=784
x = tf.placeholder(tf.float32, [None, 784], name='InputData')
# 0-9 digits recognition => 10 classes
y = tf.placeholder(tf.float32, [None, 10], name='LabelData')

# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with ReLU activation
    layer_1 = tf.add(tf.matmul(x, weights['w1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Create a summary to visualize the first layer ReLU activation
    tf.histogram_summary("relu1", layer_1)
    # Hidden layer with ReLU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['w2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Create another summary to visualize the second layer ReLU activation
    tf.histogram_summary("relu2", layer_2)
    # Output layer
    out_layer = tf.add(tf.matmul(layer_2, weights['w3']), biases['b3'])
    return out_layer
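
# When multilayer_perceptron is called inside the 'Model' name scope below,
# every op it creates (including the two activation histogram summaries) is
# placed under that scope, which groups them in TensorBoard's graph view.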

# Store layers weight & bias
weights = {
    'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1]), name='W1'),
    'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]), name='W2'),
    'w3': tf.Variable(tf.random_normal([n_hidden_2, n_classes]), name='W3')
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1]), name='b1'),
    'b2': tf.Variable(tf.random_normal([n_hidden_2]), name='b2'),
    'b3': tf.Variable(tf.random_normal([n_classes]), name='b3')
}

# Encapsulating all ops into scopes, making Tensorboard's Graph
# visualization more convenient
with tf.name_scope('Model'):
    # Build model
    pred = multilayer_perceptron(x, weights, biases)

with tf.name_scope('Loss'):
    # Softmax Cross entropy (cost function)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))

with tf.name_scope('SGD'):
    # Gradient Descent
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    # Op to calculate every variable gradient
    grads = tf.gradients(loss, tf.trainable_variables())
    grads = list(zip(grads, tf.trainable_variables()))
    # Op to update all variables according to their gradient
    apply_grads = optimizer.apply_gradients(grads_and_vars=grads)
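
# The gradients are computed and applied explicitly here (instead of calling
# optimizer.minimize) so that the (gradient, variable) pairs built above stay
# available for the gradient histogram summaries created further below.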

with tf.name_scope('Accuracy'):
    # Accuracy
    acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    acc = tf.reduce_mean(tf.cast(acc, tf.float32))

# Initializing the variables
init = tf.initialize_all_variables()

# Create a summary to monitor cost tensor
tf.scalar_summary("loss", loss)
# Create a summary to monitor accuracy tensor
tf.scalar_summary("accuracy", acc)
# Create summaries to visualize weights
for var in tf.trainable_variables():
    tf.histogram_summary(var.name, var)
# Summarize all gradients
for grad, var in grads:
    tf.histogram_summary(var.name + '/gradient', grad)
# Merge all summaries into a single op
merged_summary_op = tf.merge_all_summaries()
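
# A single evaluation of merged_summary_op runs every summary registered above
# (loss, accuracy, weight, activation and gradient summaries), so the training
# loop only has to fetch one extra tensor per step.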

# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # op to write logs to Tensorboard
    summary_writer = tf.train.SummaryWriter(logs_path,
                                            graph=tf.get_default_graph())

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop), cost op (to get loss value)
            # and summary nodes
            _, c, summary = sess.run([apply_grads, loss, merged_summary_op],
                                     feed_dict={x: batch_xs, y: batch_ys})
            # Write logs at every iteration
            summary_writer.add_summary(summary, epoch * total_batch + i)
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))

    print("Optimization Finished!")

    # Test model
    # Calculate accuracy
    print("Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels}))
  117. print("Run the command line:\n" \
  118. "--> tensorboard --logdir=/tmp/tensorflow_logs " \
  119. "\nThen open http://0.0.0.0:6006/ into your web browser")