tensorboard_basic.py

'''
Graph and Loss visualization using Tensorboard.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''

from __future__ import print_function

import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Parameters
learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_step = 1
logs_path = '/tmp/tensorflow_logs'

# tf Graph Input
# mnist data image of shape 28*28=784
x = tf.placeholder(tf.float32, [None, 784], name='InputData')
# 0-9 digits recognition => 10 classes
y = tf.placeholder(tf.float32, [None, 10], name='LabelData')

# Set model weights
W = tf.Variable(tf.zeros([784, 10]), name='Weights')
b = tf.Variable(tf.zeros([10]), name='Bias')
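
# Note (added): zero-initializing W and b is fine here because the model is
# a single affine layer followed by softmax, which is a convex problem;
# deeper networks would normally use a random initializer such as
# tf.random_normal instead.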

# Construct the model and encapsulate all ops in scopes, making
# TensorBoard's graph visualization more convenient
with tf.name_scope('Model'):
    # Model
    pred = tf.nn.softmax(tf.matmul(x, W) + b)  # Softmax

with tf.name_scope('Loss'):
    # Minimize error using cross entropy
    cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), axis=1))
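
# Note (added sketch): the hand-rolled cross-entropy above can produce NaNs
# when `pred` underflows to zero. A numerically safer TF 1.x alternative is
# to compute the loss from the raw logits instead, e.g.:
#   logits = tf.matmul(x, W) + b
#   cost = tf.reduce_mean(
#       tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))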

with tf.name_scope('SGD'):
    # Gradient Descent
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
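
# Note (added sketch): to visualize gradients as well, minimize() can be
# split into its two phases so histogram summaries can be attached, e.g.:
#   opt = tf.train.GradientDescentOptimizer(learning_rate)
#   grads_and_vars = opt.compute_gradients(cost)
#   optimizer = opt.apply_gradients(grads_and_vars)
#   for grad, var in grads_and_vars:
#       tf.summary.histogram(var.name + '/gradient', grad)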

with tf.name_scope('Accuracy'):
    # Accuracy
    acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    acc = tf.reduce_mean(tf.cast(acc, tf.float32))

# Initializing the variables
# (tf.initialize_all_variables was deprecated; this is the TF 1.x call)
init = tf.global_variables_initializer()

# Create a summary to monitor cost tensor
# (tf.scalar_summary / tf.merge_all_summaries were renamed to tf.summary.*
# in TF 1.x)
tf.summary.scalar("loss", cost)
# Create a summary to monitor accuracy tensor
tf.summary.scalar("accuracy", acc)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()
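
# Optional (added sketch): histogram summaries track how parameters evolve
# during training. These calls would have to come before
# tf.summary.merge_all() to be included in the merged op:
#   tf.summary.histogram("weights", W)
#   tf.summary.histogram("bias", b)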

# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # op to write logs to Tensorboard; passing the graph here is what makes
    # it appear in TensorBoard's Graph tab
    summary_writer = tf.summary.FileWriter(logs_path,
                                           graph=tf.get_default_graph())

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop), cost op (to get loss value)
            # and summary nodes
            _, c, summary = sess.run([optimizer, cost, merged_summary_op],
                                     feed_dict={x: batch_xs, y: batch_ys})
            # Write logs at every iteration
            summary_writer.add_summary(summary, epoch * total_batch + i)
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if (epoch + 1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))

    print("Optimization Finished!")

    # Test model
    # Calculate accuracy
    print("Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels}))

    print("Run the command line:\n"
          "--> tensorboard --logdir=/tmp/tensorflow_logs "
          "\nThen open http://0.0.0.0:6006/ in your web browser")