evaluate.py 6.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199
  1. # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ==============================================================================
  15. """Evaluate the model.
  16. This script should be run concurrently with training so that summaries show up
  17. in TensorBoard.
  18. """
  19. from __future__ import absolute_import
  20. from __future__ import division
  21. from __future__ import print_function
  22. import math
  23. import os.path
  24. import time
  25. import numpy as np
  26. import tensorflow as tf
  27. from im2txt import configuration
  28. from im2txt import show_and_tell_model
FLAGS = tf.flags.FLAGS

# Command-line flags controlling input data, checkpoint discovery, and the
# evaluation schedule. All three string flags are required (validated in main).
tf.flags.DEFINE_string("input_file_pattern", "",
                       "File pattern of sharded TFRecord input files.")
tf.flags.DEFINE_string("checkpoint_dir", "",
                       "Directory containing model checkpoints.")
tf.flags.DEFINE_string("eval_dir", "", "Directory to write event logs.")
tf.flags.DEFINE_integer("eval_interval_secs", 600,
                        "Interval between evaluation runs.")
tf.flags.DEFINE_integer("num_eval_examples", 10132,
                        "Number of examples for evaluation.")
tf.flags.DEFINE_integer("min_global_step", 5000,
                        "Minimum global step to run evaluation.")

tf.logging.set_verbosity(tf.logging.INFO)
  42. def evaluate_model(sess, model, global_step, summary_writer, summary_op):
  43. """Computes perplexity-per-word over the evaluation dataset.
  44. Summaries and perplexity-per-word are written out to the eval directory.
  45. Args:
  46. sess: Session object.
  47. model: Instance of ShowAndTellModel; the model to evaluate.
  48. global_step: Integer; global step of the model checkpoint.
  49. summary_writer: Instance of FileWriter.
  50. summary_op: Op for generating model summaries.
  51. """
  52. # Log model summaries on a single batch.
  53. summary_str = sess.run(summary_op)
  54. summary_writer.add_summary(summary_str, global_step)
  55. # Compute perplexity over the entire dataset.
  56. num_eval_batches = int(
  57. math.ceil(FLAGS.num_eval_examples / model.config.batch_size))
  58. start_time = time.time()
  59. sum_losses = 0.
  60. sum_weights = 0.
  61. for i in xrange(num_eval_batches):
  62. cross_entropy_losses, weights = sess.run([
  63. model.target_cross_entropy_losses,
  64. model.target_cross_entropy_loss_weights
  65. ])
  66. sum_losses += np.sum(cross_entropy_losses * weights)
  67. sum_weights += np.sum(weights)
  68. if not i % 100:
  69. tf.logging.info("Computed losses for %d of %d batches.", i + 1,
  70. num_eval_batches)
  71. eval_time = time.time() - start_time
  72. perplexity = math.exp(sum_losses / sum_weights)
  73. tf.logging.info("Perplexity = %f (%.2g sec)", perplexity, eval_time)
  74. # Log perplexity to the FileWriter.
  75. summary = tf.Summary()
  76. value = summary.value.add()
  77. value.simple_value = perplexity
  78. value.tag = "Perplexity"
  79. summary_writer.add_summary(summary, global_step)
  80. # Write the Events file to the eval directory.
  81. summary_writer.flush()
  82. tf.logging.info("Finished processing evaluation at global step %d.",
  83. global_step)
  84. def run_once(model, saver, summary_writer, summary_op):
  85. """Evaluates the latest model checkpoint.
  86. Args:
  87. model: Instance of ShowAndTellModel; the model to evaluate.
  88. saver: Instance of tf.train.Saver for restoring model Variables.
  89. summary_writer: Instance of FileWriter.
  90. summary_op: Op for generating model summaries.
  91. """
  92. model_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
  93. if not model_path:
  94. tf.logging.info("Skipping evaluation. No checkpoint found in: %s",
  95. FLAGS.checkpoint_dir)
  96. return
  97. with tf.Session() as sess:
  98. # Load model from checkpoint.
  99. tf.logging.info("Loading model from checkpoint: %s", model_path)
  100. saver.restore(sess, model_path)
  101. global_step = tf.train.global_step(sess, model.global_step.name)
  102. tf.logging.info("Successfully loaded %s at global step = %d.",
  103. os.path.basename(model_path), global_step)
  104. if global_step < FLAGS.min_global_step:
  105. tf.logging.info("Skipping evaluation. Global step = %d < %d", global_step,
  106. FLAGS.min_global_step)
  107. return
  108. # Start the queue runners.
  109. coord = tf.train.Coordinator()
  110. threads = tf.train.start_queue_runners(coord=coord)
  111. # Run evaluation on the latest checkpoint.
  112. try:
  113. evaluate_model(
  114. sess=sess,
  115. model=model,
  116. global_step=global_step,
  117. summary_writer=summary_writer,
  118. summary_op=summary_op)
  119. except Exception, e: # pylint: disable=broad-except
  120. tf.logging.error("Evaluation failed.")
  121. coord.request_stop(e)
  122. coord.request_stop()
  123. coord.join(threads, stop_grace_period_secs=10)
  124. def run():
  125. """Runs evaluation in a loop, and logs summaries to TensorBoard."""
  126. # Create the evaluation directory if it doesn't exist.
  127. eval_dir = FLAGS.eval_dir
  128. if not tf.gfile.IsDirectory(eval_dir):
  129. tf.logging.info("Creating eval directory: %s", eval_dir)
  130. tf.gfile.MakeDirs(eval_dir)
  131. g = tf.Graph()
  132. with g.as_default():
  133. # Build the model for evaluation.
  134. model_config = configuration.ModelConfig()
  135. model_config.input_file_pattern = FLAGS.input_file_pattern
  136. model = show_and_tell_model.ShowAndTellModel(model_config, mode="eval")
  137. model.build()
  138. # Create the Saver to restore model Variables.
  139. saver = tf.train.Saver()
  140. # Create the summary operation and the summary writer.
  141. summary_op = tf.summary.merge_all()
  142. summary_writer = tf.summary.FileWriter(eval_dir)
  143. g.finalize()
  144. # Run a new evaluation run every eval_interval_secs.
  145. while True:
  146. start = time.time()
  147. tf.logging.info("Starting evaluation at " + time.strftime(
  148. "%Y-%m-%d-%H:%M:%S", time.localtime()))
  149. run_once(model, saver, summary_writer, summary_op)
  150. time_to_next_eval = start + FLAGS.eval_interval_secs - time.time()
  151. if time_to_next_eval > 0:
  152. time.sleep(time_to_next_eval)
  153. def main(unused_argv):
  154. assert FLAGS.input_file_pattern, "--input_file_pattern is required"
  155. assert FLAGS.checkpoint_dir, "--checkpoint_dir is required"
  156. assert FLAGS.eval_dir, "--eval_dir is required"
  157. run()
  158. if __name__ == "__main__":
  159. tf.app.run()