track_perplexity.py

# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tracks training progress via per-word perplexity.

This script should be run concurrently with training so that summaries show up
in TensorBoard.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import os.path
import time

import numpy as np
import tensorflow as tf

from skip_thoughts import configuration
from skip_thoughts import skip_thoughts_model

FLAGS = tf.flags.FLAGS

tf.flags.DEFINE_string("input_file_pattern", None,
                       "File pattern of sharded TFRecord input files.")
tf.flags.DEFINE_string("checkpoint_dir", None,
                       "Directory containing model checkpoints.")
tf.flags.DEFINE_string("eval_dir", None, "Directory to write event logs to.")

tf.flags.DEFINE_integer("eval_interval_secs", 600,
                        "Interval between evaluation runs.")
tf.flags.DEFINE_integer("num_eval_examples", 50000,
                        "Number of examples for evaluation.")
tf.flags.DEFINE_integer("min_global_step", 100,
                        "Minimum global step to run evaluation.")

tf.logging.set_verbosity(tf.logging.INFO)
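
# A typical invocation, run alongside the training job so TensorBoard picks up
# the eval summaries (the paths below are placeholders, not values this script
# assumes):
#
#   python track_perplexity.py \
#     --input_file_pattern="/tmp/skip_thoughts/validation-?????-of-00001" \
#     --checkpoint_dir="/tmp/skip_thoughts/train" \
#     --eval_dir="/tmp/skip_thoughts/eval"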


def evaluate_model(sess, losses, weights, num_batches, global_step,
                   summary_writer, summary_op):
  """Computes perplexity-per-word over the evaluation dataset.

  Summaries and perplexity-per-word are written out to the eval directory.

  Args:
    sess: Session object.
    losses: A Tensor of any shape; the target cross entropy losses for the
      current batch.
    weights: A Tensor of weights corresponding to losses.
    num_batches: Integer; the number of evaluation batches.
    global_step: Integer; global step of the model checkpoint.
    summary_writer: Instance of FileWriter.
    summary_op: Op for generating model summaries.
  """
  # Log model summaries on a single batch.
  summary_str = sess.run(summary_op)
  summary_writer.add_summary(summary_str, global_step)

  start_time = time.time()
  sum_losses = 0.0
  sum_weights = 0.0
  for i in range(num_batches):
    batch_losses, batch_weights = sess.run([losses, weights])
    sum_losses += np.sum(batch_losses * batch_weights)
    sum_weights += np.sum(batch_weights)
    if not i % 100:
      tf.logging.info("Computed losses for %d of %d batches.", i + 1,
                      num_batches)
  eval_time = time.time() - start_time

  perplexity = math.exp(sum_losses / sum_weights)
  tf.logging.info("Perplexity = %f (%.2f sec)", perplexity, eval_time)
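
  # For intuition about the arithmetic above (illustrative numbers only): if
  # the summed weighted loss is 5.0 nats over a total weight of 2.0 words,
  # the mean loss is 2.5 nats per word and perplexity is exp(2.5) ~= 12.18.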

  # Log perplexity to the FileWriter.
  summary = tf.Summary()
  value = summary.value.add()
  value.simple_value = perplexity
  value.tag = "perplexity"
  summary_writer.add_summary(summary, global_step)

  # Write the Events file to the eval directory.
  summary_writer.flush()
  tf.logging.info("Finished processing evaluation at global step %d.",
                  global_step)


def run_once(model, losses, weights, saver, summary_writer, summary_op):
  """Evaluates the latest model checkpoint.

  Args:
    model: Instance of SkipThoughtsModel; the model to evaluate.
    losses: Tensor; the target cross entropy losses for the current batch.
    weights: A Tensor of weights corresponding to losses.
    saver: Instance of tf.train.Saver for restoring model Variables.
    summary_writer: Instance of FileWriter.
    summary_op: Op for generating model summaries.
  """
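  # tf.train.latest_checkpoint reads the "checkpoint" state file in the given
  # directory and returns the path of the most recent checkpoint, or None if
  # training has not saved one yet.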
  model_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
  if not model_path:
    tf.logging.info("Skipping evaluation. No checkpoint found in: %s",
                    FLAGS.checkpoint_dir)
    return

  with tf.Session() as sess:
    # Load model from checkpoint.
    tf.logging.info("Loading model from checkpoint: %s", model_path)
    saver.restore(sess, model_path)
    global_step = tf.train.global_step(sess, model.global_step.name)
    tf.logging.info("Successfully loaded %s at global step = %d.",
                    os.path.basename(model_path), global_step)
    if global_step < FLAGS.min_global_step:
      tf.logging.info("Skipping evaluation. Global step = %d < %d", global_step,
                      FLAGS.min_global_step)
      return

    # Start the queue runners.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    num_eval_batches = int(
        math.ceil(FLAGS.num_eval_examples / model.config.batch_size))
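    # Note: "/" is true division because of the __future__ import, so with the
    # default num_eval_examples=50000 and, say, a batch size of 128, this is
    # int(ceil(390.625)) = 391 batches.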

    # Run evaluation on the latest checkpoint.
    try:
      evaluate_model(sess, losses, weights, num_eval_batches, global_step,
                     summary_writer, summary_op)
    except tf.errors.InvalidArgumentError:
      tf.logging.error(
          "Evaluation raised InvalidArgumentError (e.g. due to NaNs).")
    finally:
      coord.request_stop()
      coord.join(threads, stop_grace_period_secs=10)


def main(unused_argv):
  if not FLAGS.input_file_pattern:
    raise ValueError("--input_file_pattern is required.")
  if not FLAGS.checkpoint_dir:
    raise ValueError("--checkpoint_dir is required.")
  if not FLAGS.eval_dir:
    raise ValueError("--eval_dir is required.")

  # Create the evaluation directory if it doesn't exist.
  eval_dir = FLAGS.eval_dir
  if not tf.gfile.IsDirectory(eval_dir):
    tf.logging.info("Creating eval directory: %s", eval_dir)
    tf.gfile.MakeDirs(eval_dir)

  g = tf.Graph()
  with g.as_default():
    # Build the model for evaluation.
    model_config = configuration.model_config(
        input_file_pattern=FLAGS.input_file_pattern,
        input_queue_capacity=FLAGS.num_eval_examples,
        shuffle_input_data=False)
    model = skip_thoughts_model.SkipThoughtsModel(model_config, mode="eval")
    model.build()
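
    # target_cross_entropy_losses is a list of loss Tensors (one per decoder
    # in the skip-thoughts setup); concatenating yields one flat vector of
    # per-word losses, with weights aligned element-for-element.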
    losses = tf.concat(model.target_cross_entropy_losses, 0)
    weights = tf.concat(model.target_cross_entropy_loss_weights, 0)

    # Create the Saver to restore model Variables.
    saver = tf.train.Saver()

    # Create the summary operation and the summary writer.
    summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(eval_dir)

    g.finalize()
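
    # To watch the perplexity curve, point TensorBoard at a directory that
    # contains both the train and eval dirs (placeholder path), e.g.:
    #   tensorboard --logdir=/tmp/skip_thoughts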

    # Run a new evaluation run every eval_interval_secs.
    while True:
      start = time.time()
      tf.logging.info("Starting evaluation at " + time.strftime(
          "%Y-%m-%d-%H:%M:%S", time.localtime()))
      run_once(model, losses, weights, saver, summary_writer, summary_op)
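      # Sleep only for the remainder of the interval: e.g. if the eval itself
      # took 200s against the default 600s interval, wait another 400s.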
      time_to_next_eval = start + FLAGS.eval_interval_secs - time.time()
      if time_to_next_eval > 0:
        time.sleep(time_to_next_eval)


if __name__ == "__main__":
  tf.app.run()