'''
Graph and Loss visualization using TensorBoard.
This example uses the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)

Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''

from __future__ import print_function

import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Parameters
learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_epoch = 1
logs_path = '/tmp/tensorflow_logs/example/'

# tf Graph Input
# mnist data image of shape 28*28=784
x = tf.placeholder(tf.float32, [None, 784], name='InputData')
# 0-9 digits recognition => 10 classes
y = tf.placeholder(tf.float32, [None, 10], name='LabelData')

# Set model weights
W = tf.Variable(tf.zeros([784, 10]), name='Weights')
b = tf.Variable(tf.zeros([10]), name='Bias')

# Construct the model and encapsulate all ops into scopes, making
# TensorBoard's Graph visualization more convenient
with tf.name_scope('Model'):
    # Model
    pred = tf.nn.softmax(tf.matmul(x, W) + b)  # Softmax

with tf.name_scope('Loss'):
    # Minimize error using cross entropy
    cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), axis=1))
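
# Note (not in the original): tf.log(pred) yields NaN whenever a softmax
# output underflows to exactly 0. A common guard is to clip the
# probabilities before taking the log, e.g.:
#
#   cost = tf.reduce_mean(
#       -tf.reduce_sum(y * tf.log(tf.clip_by_value(pred, 1e-10, 1.0)), axis=1))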

with tf.name_scope('SGD'):
    # Gradient Descent
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

with tf.name_scope('Accuracy'):
    # Accuracy
    acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    acc = tf.reduce_mean(tf.cast(acc, tf.float32))
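
# Optional (not in the original): TensorBoard can also track how the weight
# and bias distributions evolve during training via histogram summaries;
# tf.summary.merge_all() below would pick these up automatically, e.g.:
#
#   tf.summary.histogram("weights", W)
#   tf.summary.histogram("biases", b)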

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Create a summary to monitor cost tensor
tf.summary.scalar("loss", cost)
# Create a summary to monitor accuracy tensor
tf.summary.scalar("accuracy", acc)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()

# Start training
with tf.Session() as sess:

    # Run the initializer
    sess.run(init)

    # Op to write logs to TensorBoard
    summary_writer = tf.summary.FileWriter(logs_path,
                                           graph=tf.get_default_graph())
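    # A common variation (not in the original): write each run to its own
    # subdirectory so TensorBoard can overlay several runs for comparison, e.g.:
    #
    #   summary_writer = tf.summary.FileWriter(logs_path + 'run_01/',
    #                                          graph=tf.get_default_graph())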

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop), cost op (to get loss value)
            # and summary nodes
            _, c, summary = sess.run([optimizer, cost, merged_summary_op],
                                     feed_dict={x: batch_xs, y: batch_ys})
            # Write logs at every iteration
            summary_writer.add_summary(summary, epoch * total_batch + i)
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if (epoch+1) % display_epoch == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))

    print("Optimization Finished!")

    # Test model
    # Calculate accuracy
    print("Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels}))

    print("Run the command line:\n"
          "--> tensorboard --logdir=/tmp/tensorflow_logs "
          "\nThen open http://0.0.0.0:6006/ in your web browser")