tensorboard_basic.py

'''
Graph and Loss visualization using TensorBoard.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''

from __future__ import print_function

import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
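# read_data_sets downloads MNIST to /tmp/data/ on first use; one_hot=True
# encodes each label as a 10-dimensional one-hot vector (e.g. 3 -> [0,0,0,1,0,...]).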

# Parameters
learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_step = 1
logs_path = '/tmp/tensorflow_logs/example'
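# TensorBoard treats each subdirectory of --logdir as a separate run, so
# pointing logs_path at a fresh subdirectory per experiment makes runs
# comparable side by side in the UI.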

# tf Graph Input
# mnist data image of shape 28*28=784
x = tf.placeholder(tf.float32, [None, 784], name='InputData')
# 0-9 digits recognition => 10 classes
y = tf.placeholder(tf.float32, [None, 10], name='LabelData')
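# The explicit name= arguments are what label these nodes in TensorBoard's
# graph view; without them TensorFlow falls back to auto-generated names
# such as 'Placeholder_1'.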

# Set model weights
W = tf.Variable(tf.zeros([784, 10]), name='Weights')
b = tf.Variable(tf.zeros([10]), name='Bias')
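# Zero initialization is fine here: the model is a single linear layer, so
# there is no symmetry to break. A network with hidden layers would need
# random initialization instead.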

# Construct the model and encapsulate all ops into scopes, making
# TensorBoard's graph visualization more convenient
with tf.name_scope('Model'):
    # Model
    pred = tf.nn.softmax(tf.matmul(x, W) + b)  # Softmax
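    # Softmax turns the logits z = xW + b into a probability distribution
    # over the 10 classes: softmax(z)_i = exp(z_i) / sum_j exp(z_j).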

with tf.name_scope('Loss'):
    # Minimize error using cross entropy
    cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))
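    # Cross entropy is H(y, pred) = -sum_i y_i * log(pred_i); with one-hot
    # labels this reduces to -log of the probability assigned to the true
    # class. reduction_indices is the older TF name for what is now `axis`.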

with tf.name_scope('SGD'):
    # Gradient Descent
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
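    # minimize() is shorthand for compute_gradients() followed by
    # apply_gradients(); creating it inside this name_scope groups the
    # gradient ops under the 'SGD' node in the graph view.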

with tf.name_scope('Accuracy'):
    # Accuracy
    acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    acc = tf.reduce_mean(tf.cast(acc, tf.float32))
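    # tf.equal compares predicted vs. true class indices elementwise; the
    # mean of the booleans cast to float is the fraction classified correctly.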

# Initializing the variables
init = tf.initialize_all_variables()
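# Note: this script targets the pre-1.0 TensorFlow API. Under TF >= 1.0,
# tf.initialize_all_variables() is replaced by tf.global_variables_initializer().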

# Create a summary to monitor cost tensor
tf.scalar_summary("loss", cost)
# Create a summary to monitor accuracy tensor
tf.scalar_summary("accuracy", acc)
# Merge all summaries into a single op
merged_summary_op = tf.merge_all_summaries()
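# Under TF >= 1.0 these calls are tf.summary.scalar() and tf.summary.merge_all().
# Optionally, a histogram summary could also track how the weights evolve;
# uncommenting the line below would add it to the merged op, e.g.:
# tf.histogram_summary("weights", W)  # tf.summary.histogram under TF >= 1.0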

# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # op to write logs to TensorBoard
    summary_writer = tf.train.SummaryWriter(logs_path,
                                            graph=tf.get_default_graph())
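    # (tf.summary.FileWriter under TF >= 1.0.) Passing graph= writes the
    # graph definition to the log directory, which is what populates
    # TensorBoard's Graphs tab.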

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop), cost op (to get loss value)
            # and summary nodes
            _, c, summary = sess.run([optimizer, cost, merged_summary_op],
                                     feed_dict={x: batch_xs, y: batch_ys})
            # Write logs at every iteration
            summary_writer.add_summary(summary, epoch * total_batch + i)
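            # The second argument is the global step; it becomes the x-axis
            # of the scalar charts. Logging every batch is fine at this scale,
            # though larger jobs typically log every N steps instead.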
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if (epoch + 1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))

    print("Optimization Finished!")

    # Test model
    # Calculate accuracy
    print("Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels}))
  77. print("Run the command line:\n" \
  78. "--> tensorboard --logdir=/tmp/tensorflow_logs " \
  79. "\nThen open http://0.0.0.0:6006/ into your web browser")