trainer.py

# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A program to train a TensorFlow neural net parser from a CoNLL file."""

import base64
import os
import os.path
import random
import time

import tensorflow as tf
from tensorflow.python.framework import errors
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging

from google.protobuf import text_format

from syntaxnet.ops import gen_parser_ops
from syntaxnet import task_spec_pb2
from syntaxnet import sentence_pb2
from syntaxnet.util import check

from dragnn.protos import spec_pb2
from dragnn.python.sentence_io import ConllSentenceReader
from dragnn.python import evaluation
from dragnn.python import graph_builder
from dragnn.python import lexicon
from dragnn.python import spec_builder
from dragnn.python import trainer_lib

# Side-effect imports: loading these modules registers the DRAGNN and
# SyntaxNet C++ ops with TensorFlow.
import dragnn.python.load_dragnn_cc_impl
import syntaxnet.load_parser_ops

flags = tf.app.flags
FLAGS = flags.FLAGS

flags.DEFINE_string('tf_master', '',
                    'TensorFlow execution engine to connect to.')
flags.DEFINE_string('dragnn_spec', '', 'Path to the spec defining the model.')
flags.DEFINE_string('resource_path', '', 'Path to constructed resources.')
flags.DEFINE_string('hyperparams',
                    'adam_beta1:0.9 adam_beta2:0.9 adam_eps:0.00001 '
                    'decay_steps:128000 dropout_rate:0.8 gradient_clip_norm:1 '
                    'learning_method:"adam" learning_rate:0.0005 seed:1 '
                    'use_moving_average:true',
                    'Hyperparameters of the model to train, either in '
                    'ProtoBuf text format or base64-encoded ProtoBuf text '
                    'format.')
flags.DEFINE_string('tensorboard_dir', '',
                    'Directory for TensorBoard logs output.')
flags.DEFINE_string('checkpoint_filename', '',
                    'Filename to save the best checkpoint to.')
flags.DEFINE_string('training_corpus_path', '', 'Path to training data.')
flags.DEFINE_string('tune_corpus_path', '', 'Path to tuning set data.')
flags.DEFINE_bool('compute_lexicon', False,
                  'Whether to compute the lexicon from the training corpus.')
flags.DEFINE_bool('projectivize_training_set', True,
                  'Whether to projectivize the training data.')
flags.DEFINE_integer('batch_size', 4, 'Batch size.')
flags.DEFINE_integer('report_every', 200,
                     'Report cost and training accuracy every this many '
                     'steps.')
flags.DEFINE_integer('job_id', 0,
                     'The trainer will clear checkpoints if the saved job id '
                     'is less than the id in this flag. If you want training '
                     'to start over, increment this id.')
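
# Example invocation (the paths below are illustrative, not prescribed by
# this file):
#
#   python trainer.py \
#     --compute_lexicon \
#     --dragnn_spec=/path/to/master_spec.textproto \
#     --resource_path=/tmp/resources \
#     --training_corpus_path=/path/to/train.conll \
#     --tune_corpus_path=/path/to/tune.conll \
#     --tensorboard_dir=/tmp/tensorboard \
#     --checkpoint_filename=/tmp/checkpoints/model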


def main(unused_argv):
  logging.set_verbosity(logging.INFO)
  check.IsTrue(FLAGS.checkpoint_filename)
  check.IsTrue(FLAGS.tensorboard_dir)
  check.IsTrue(FLAGS.resource_path)

  if not gfile.IsDirectory(FLAGS.resource_path):
    gfile.MakeDirs(FLAGS.resource_path)

  training_corpus_path = gfile.Glob(FLAGS.training_corpus_path)[0]
  tune_corpus_path = gfile.Glob(FLAGS.tune_corpus_path)[0]

  # SummaryWriter for TensorBoard.
  tf.logging.info('TensorBoard directory: "%s"', FLAGS.tensorboard_dir)
  tf.logging.info('Deleting prior data if it exists...')

  stats_file = '%s.stats' % FLAGS.checkpoint_filename
  try:
    stats = gfile.GFile(stats_file, 'r').readlines()[0].split(',')
    stats = [int(x) for x in stats]
  except errors.OpError:
    stats = [-1, 0, 0]
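
  # 'stats' holds three integers: [job_id, tagger_steps, parser_steps],
  # recording which job wrote the checkpoint and how many steps each
  # component has already trained. It drives the restore logic below.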
  tf.logging.info('Read ckpt stats: %s', str(stats))
  do_restore = True
  if stats[0] < FLAGS.job_id:
    do_restore = False
    tf.logging.info('Deleting last job: %d', stats[0])
    try:
      gfile.DeleteRecursively(FLAGS.tensorboard_dir)
      gfile.Remove(FLAGS.checkpoint_filename)
    except errors.OpError as err:
      tf.logging.error('Unable to delete prior files: %s', err)
    stats = [FLAGS.job_id, 0, 0]

  tf.logging.info('Creating the directory again...')
  gfile.MakeDirs(FLAGS.tensorboard_dir)
  tf.logging.info('Created! Instantiating SummaryWriter...')
  summary_writer = trainer_lib.get_summary_writer(FLAGS.tensorboard_dir)
  tf.logging.info('Creating TensorFlow checkpoint dir...')
  gfile.MakeDirs(os.path.dirname(FLAGS.checkpoint_filename))

  # Constructs lexical resources for SyntaxNet in the given resource path,
  # from the training data.
  if FLAGS.compute_lexicon:
    logging.info('Computing lexicon...')
    lexicon.build_lexicon(
        FLAGS.resource_path, training_corpus_path, morph_to_pos=True)

  tf.logging.info('Loading MasterSpec...')
  master_spec = spec_pb2.MasterSpec()
  with gfile.FastGFile(FLAGS.dragnn_spec, 'r') as fin:
    text_format.Parse(fin.read(), master_spec)
  spec_builder.complete_master_spec(master_spec, None, FLAGS.resource_path)
  logging.info('Constructed master spec: %s', str(master_spec))

  # Build the TensorFlow graph.
  tf.logging.info('Building Graph...')
  hyperparam_config = spec_pb2.GridPoint()
  try:
    text_format.Parse(FLAGS.hyperparams, hyperparam_config)
  except text_format.ParseError:
    # Fall back to interpreting the flag as base64-encoded ProtoBuf text.
    text_format.Parse(base64.b64decode(FLAGS.hyperparams), hyperparam_config)
  g = tf.Graph()
  with g.as_default():
    builder = graph_builder.MasterBuilder(master_spec, hyperparam_config)
    component_targets = [
        spec_pb2.TrainTarget(
            name=component.name,
            max_index=idx + 1,
            unroll_using_oracle=[False] * idx + [True])
        for idx, component in enumerate(master_spec.component)
        if 'shift-only' not in component.transition_system.registered_name
    ]
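
    # Each TrainTarget above trains the pipeline up to one component, with
    # only that final component unrolled using its gold oracle. Components
    # whose transition system is 'shift-only' make no decisions of their
    # own, so they get no training target.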
    trainers = [
        builder.add_training_from_config(target)
        for target in component_targets
    ]
    annotator = builder.add_annotation()
    builder.add_saver()

  # Read in serialized protos from training data.
  training_set = ConllSentenceReader(
      training_corpus_path,
      projectivize=FLAGS.projectivize_training_set,
      morph_to_pos=True).corpus()
  tune_set = ConllSentenceReader(
      tune_corpus_path, projectivize=False, morph_to_pos=True).corpus()

  # Ready to train!
  logging.info('Training on %d sentences.', len(training_set))
  logging.info('Tuning on %d sentences.', len(tune_set))

  pretrain_steps = [10000, 0]
  tagger_steps = 100000
  train_steps = [tagger_steps, 8 * tagger_steps]
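
  # Schedule (one entry per training target): the first target gets 10,000
  # solo pretraining steps, the second none; the main phase then allots
  # 100k and 800k steps respectively.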

  with tf.Session(FLAGS.tf_master, graph=g) as sess:
    # Make sure to re-initialize all underlying state.
    sess.run(tf.global_variables_initializer())

    if do_restore:
      tf.logging.info('Restoring from checkpoint...')
      builder.saver.restore(sess, FLAGS.checkpoint_filename)

      prev_tagger_steps = stats[1]
      prev_parser_steps = stats[2]
      tf.logging.info('adjusting schedule from steps: %d, %d',
                      prev_tagger_steps, prev_parser_steps)
      pretrain_steps[0] = max(pretrain_steps[0] - prev_tagger_steps, 0)
      tf.logging.info('new pretrain steps: %d', pretrain_steps[0])

    trainer_lib.run_training(
        sess, trainers, annotator, evaluation.parser_summaries, pretrain_steps,
        train_steps, training_set, tune_set, tune_set, FLAGS.batch_size,
        summary_writer, FLAGS.report_every, builder.saver,
        FLAGS.checkpoint_filename, stats)


if __name__ == '__main__':
  tf.app.run()