lexicon_builder_test.py

# coding=utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
  16. """Tests for lexicon_builder."""
  17. # disable=no-name-in-module,unused-import,g-bad-import-order,maybe-no-member
  18. import os.path
  19. import tensorflow as tf
  20. import syntaxnet.load_parser_ops
  21. from tensorflow.python.framework import test_util
  22. from tensorflow.python.platform import googletest
  23. from tensorflow.python.platform import tf_logging as logging
  24. from syntaxnet import sentence_pb2
  25. from syntaxnet import task_spec_pb2
  26. from syntaxnet.ops import gen_parser_ops
  27. FLAGS = tf.app.flags.FLAGS
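
# Two short Hindi sentences in 10-column CoNLL format. Only the ID, FORM,
# CPOSTAG and POSTAG columns are populated; the remaining columns are '_'
# placeholders. Columns are space-separated here and converted to tabs by
# the tests before the corpus file is written.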
CONLL_DOC1 = u'''1 बात _ n NN _ _ _ _ _
2 गलत _ adj JJ _ _ _ _ _
3 हो _ v VM _ _ _ _ _
4 तो _ avy CC _ _ _ _ _
5 गुस्सा _ n NN _ _ _ _ _
6 सेलेब्रिटिज _ n NN _ _ _ _ _
7 को _ psp PSP _ _ _ _ _
8 भी _ avy RP _ _ _ _ _
9 आना _ v VM _ _ _ _ _
10 लाजमी _ adj JJ _ _ _ _ _
11 है _ v VM _ _ _ _ _
12 । _ punc SYM _ _ _ _ _'''

CONLL_DOC2 = u'''1 लेकिन _ avy CC _ _ _ _ _
2 अभिनेत्री _ n NN _ _ _ _ _
3 के _ psp PSP _ _ _ _ _
4 इस _ pn DEM _ _ _ _ _
5 कदम _ n NN _ _ _ _ _
6 से _ psp PSP _ _ _ _ _
7 वहां _ pn PRP _ _ _ _ _
8 रंग _ n NN _ _ _ _ _
9 में _ psp PSP _ _ _ _ _
10 भंग _ adj JJ _ _ _ _ _
11 पड़ _ v VM _ _ _ _ _
12 गया _ v VAUX _ _ _ _ _
13 । _ punc SYM _ _ _ _ _'''

TAGS = ['NN', 'JJ', 'VM', 'CC', 'PSP', 'RP', 'JJ', 'SYM', 'DEM', 'PRP', 'VAUX']
CATEGORIES = ['n', 'adj', 'v', 'avy', 'n', 'psp', 'punc', 'pn']
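
# The same two sentences as plain tokenized text, one sentence per line.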
TOKENIZED_DOCS = u'''बात गलत हो तो गुस्सा सेलेब्रिटिज को भी आना लाजमी है ।
लेकिन अभिनेत्री के इस कदम से वहां रंग में भंग पड़ गया ।
'''

COMMENTS = u'# Line with fake comments.'
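

# End-to-end test of the lexicon_builder op: write a small corpus to disk,
# read it back through the document_source op, build the lexicon term maps,
# and sanity-check the generated tag-to-category table.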
class LexiconBuilderTest(test_util.TensorFlowTestCase):

  def setUp(self):
    if not hasattr(FLAGS, 'test_srcdir'):
      FLAGS.test_srcdir = ''
    if not hasattr(FLAGS, 'test_tmpdir'):
      FLAGS.test_tmpdir = tf.test.get_temp_dir()
    self.corpus_file = os.path.join(FLAGS.test_tmpdir, 'documents.conll')
    self.context_file = os.path.join(FLAGS.test_tmpdir, 'context.pbtxt')
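
  # Adds an input named |name| to |context|, with a single part whose
  # file pattern is |file_pattern| and the given record format.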
  def AddInput(self, name, file_pattern, record_format, context):
    inp = context.input.add()
    inp.name = name
    inp.record_format.append(record_format)
    inp.part.add().file_pattern = file_pattern
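
  # Writes a text-format TaskSpec to self.context_file, declaring the corpus
  # as the 'documents' input plus the lexicon resources the builder op will
  # write. Each entry in the resulting context.pbtxt looks roughly like:
  #
  #   input {
  #     name: "documents"
  #     record_format: "conll-sentence"
  #     part {
  #       file_pattern: "/tmp/.../documents.conll"
  #     }
  #   }
  #
  # (the exact field order is illustrative, not guaranteed).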
  def WriteContext(self, corpus_format):
    context = task_spec_pb2.TaskSpec()
    self.AddInput('documents', self.corpus_file, corpus_format, context)
    for name in ('word-map', 'lcword-map', 'tag-map',
                 'category-map', 'label-map', 'prefix-table',
                 'suffix-table', 'tag-to-category'):
      self.AddInput(name, os.path.join(FLAGS.test_tmpdir, name), '', context)
    logging.info('Writing context to: %s', self.context_file)
    with open(self.context_file, 'w') as f:
      f.write(str(context))
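
  # Runs the document_source op once and parses the result into a Sentence
  # proto; returns doc=None once the source is exhausted.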
  def ReadNextDocument(self, sess, doc_source):
    doc_str, last = sess.run(doc_source)
    if doc_str:
      doc = sentence_pb2.Sentence()
      doc.ParseFromString(doc_str[0])
    else:
      doc = None
    return doc, last
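
  # Reads the two documents back through document_source and checks the
  # token count and a sample word from each, then verifies end-of-corpus.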
  def ValidateDocuments(self):
    doc_source = gen_parser_ops.document_source(self.context_file, batch_size=1)
    with self.test_session() as sess:
      logging.info('Reading document1')
      doc, last = self.ReadNextDocument(sess, doc_source)
      self.assertEqual(len(doc.token), 12)
      self.assertEqual(u'लाजमी', doc.token[9].word)
      self.assertFalse(last)
      logging.info('Reading document2')
      doc, last = self.ReadNextDocument(sess, doc_source)
      self.assertEqual(len(doc.token), 13)
      self.assertEqual(u'भंग', doc.token[9].word)
      self.assertFalse(last)
      logging.info('Hitting end of the dataset')
      doc, last = self.ReadNextDocument(sess, doc_source)
      self.assertIsNone(doc)
      self.assertTrue(last)
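
  # The tag-to-category file is one tab-separated (tag, category) pair per
  # line; every entry should use a tag and category seen in the corpus.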
  def ValidateTagToCategoryMap(self):
    with open(os.path.join(FLAGS.test_tmpdir, 'tag-to-category'), 'r') as f:
      entries = [line.strip().split('\t') for line in f.readlines()]
    for tag, category in entries:
      self.assertIn(tag, TAGS)
      self.assertIn(category, CATEGORIES)
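
  # Runs the lexicon_builder op, which reads the 'documents' input and
  # writes the term maps and tables declared in the task context.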
  def BuildLexicon(self):
    with self.test_session():
      gen_parser_ops.lexicon_builder(task_context=self.context_file).run()
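
  # The corpus constants use spaces between columns; the CoNLL reader
  # expects tabs, hence the replace() before writing.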
  def testCoNLLFormat(self):
    self.WriteContext('conll-sentence')
    logging.info('Writing conll file to: %s', self.corpus_file)
    with open(self.corpus_file, 'w') as f:
      f.write((CONLL_DOC1 + u'\n\n' + CONLL_DOC2 + u'\n')
              .replace(' ', '\t').encode('utf-8'))
    self.ValidateDocuments()
    self.BuildLexicon()
    self.ValidateTagToCategoryMap()
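
  # The reader should skip extra leading, trailing, and intervening blank
  # lines as well as comment lines.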
  def testCoNLLFormatExtraNewlinesAndComments(self):
    self.WriteContext('conll-sentence')
    with open(self.corpus_file, 'w') as f:
      f.write((u'\n\n\n' + CONLL_DOC1 + u'\n\n\n' + COMMENTS +
               u'\n\n' + CONLL_DOC2).replace(' ', '\t').encode('utf-8'))
    self.ValidateDocuments()
    self.BuildLexicon()
    self.ValidateTagToCategoryMap()
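
  # Tokenized text carries no tag or category columns, so the two
  # tokenized-text tests below skip the tag-to-category check.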
  def testTokenizedTextFormat(self):
    self.WriteContext('tokenized-text')
    with open(self.corpus_file, 'w') as f:
      f.write(TOKENIZED_DOCS.encode('utf-8'))
    self.ValidateDocuments()
    self.BuildLexicon()

  def testTokenizedTextFormatExtraNewlines(self):
    self.WriteContext('tokenized-text')
    with open(self.corpus_file, 'w') as f:
      f.write((u'\n\n\n' + TOKENIZED_DOCS + u'\n\n\n').encode('utf-8'))
    self.ValidateDocuments()
    self.BuildLexicon()


if __name__ == '__main__':
  googletest.main()