# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import re
import unicodedata

import six


def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
  """Checks whether the casing config is consistent with the checkpoint name."""

  # The casing has to be passed in by the user and there is no explicit check
  # as to whether it matches the checkpoint. The casing information probably
  # should have been stored in the bert_config.json file, but it's not, so
  # we have to heuristically detect it to validate.

  if not init_checkpoint:
    return

  m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
  if m is None:
    return

  model_name = m.group(1)

  lower_models = [
      "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
      "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
  ]

  cased_models = [
      "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
      "multi_cased_L-12_H-768_A-12"
  ]

  is_bad_config = False
  if model_name in lower_models and not do_lower_case:
    is_bad_config = True
    actual_flag = "False"
    case_name = "lowercased"
    opposite_flag = "True"

  if model_name in cased_models and do_lower_case:
    is_bad_config = True
    actual_flag = "True"
    case_name = "cased"
    opposite_flag = "False"

  if is_bad_config:
    raise ValueError(
        "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
        "However, `%s` seems to be a %s model, so you "
        "should pass in `--do_lower_case=%s` so that the fine-tuning matches "
        "how the model was pre-trained. If this error is wrong, please "
        "just comment out this check." % (actual_flag, init_checkpoint,
                                          model_name, case_name, opposite_flag))
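

# Illustrative sketch (not part of the original BERT release): how the casing
# check above behaves. The checkpoint paths below are hypothetical; the
# function only inspects the directory name preceding "bert_model.ckpt".
def _demo_validate_case_matches_checkpoint():
  """Shows the casing/checkpoint consistency check raising on a mismatch."""
  # Consistent config: an uncased model with do_lower_case=True passes.
  validate_case_matches_checkpoint(
      do_lower_case=True,
      init_checkpoint="uncased_L-12_H-768_A-12/bert_model.ckpt")
  # Inconsistent config: an uncased model with do_lower_case=False raises.
  try:
    validate_case_matches_checkpoint(
        do_lower_case=False,
        init_checkpoint="uncased_L-12_H-768_A-12/bert_model.ckpt")
  except ValueError as e:
    print("Caught expected casing mismatch:", e)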


def convert_to_unicode(text):
  """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
  if six.PY3:
    if isinstance(text, str):
      return text
    elif isinstance(text, bytes):
      return text.decode("utf-8", "ignore")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  elif six.PY2:
    if isinstance(text, str):
      return text.decode("utf-8", "ignore")
    elif isinstance(text, unicode):
      return text
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  else:
    raise ValueError("Not running on Python2 or Python 3?")


def printable_text(text):
  """Returns text encoded in a way suitable for print or `tf.logging`."""

  # These functions want `str` for both Python2 and Python3, but in one case
  # it's a Unicode string and in the other it's a byte string.
  if six.PY3:
    if isinstance(text, str):
      return text
    elif isinstance(text, bytes):
      return text.decode("utf-8", "ignore")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  elif six.PY2:
    if isinstance(text, str):
      return text
    elif isinstance(text, unicode):
      return text.encode("utf-8")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  else:
    raise ValueError("Not running on Python2 or Python 3?")
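

# Illustrative sketch (not part of the original BERT release): on Python 3,
# both helpers above accept native `str` or utf-8 encoded `bytes` and hand
# back a `str`; Python 2 gets the analogous unicode/str behaviour.
def _demo_text_helpers():
  """Shows convert_to_unicode and printable_text on str and bytes input."""
  assert convert_to_unicode(b"caf\xc3\xa9") == u"caf\u00e9"
  assert convert_to_unicode(u"caf\u00e9") == u"caf\u00e9"
  assert printable_text(b"hello") == "hello"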


def load_vocab(vocab_file):
  """Loads a vocabulary file into a dictionary."""
  vocab = collections.OrderedDict()
  index = 0
  with open(vocab_file, "r") as reader:
    while True:
      token = convert_to_unicode(reader.readline())
      if not token:
        break
      token = token.strip()
      vocab[token] = index
      index += 1
  return vocab


def convert_by_vocab(vocab, items):
  """Converts a sequence of [tokens|ids] using the vocab."""
  output = []
  for item in items:
    output.append(vocab[item])
  return output


def convert_tokens_to_ids(vocab, tokens):
  return convert_by_vocab(vocab, tokens)


def convert_ids_to_tokens(inv_vocab, ids):
  return convert_by_vocab(inv_vocab, ids)


def whitespace_tokenize(text):
  """Runs basic whitespace cleaning and splitting on a piece of text."""
  text = text.strip()
  if not text:
    return []
  tokens = text.split()
  return tokens
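

# Illustrative sketch (not part of the original BERT release): the vocab file
# read by load_vocab is plain text with one token per line; a token's id is
# its zero-based line number. The temp file and tokens below are toy examples.
def _demo_vocab_round_trip():
  """Writes a tiny vocab file, loads it, and converts tokens <-> ids."""
  import tempfile

  with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as f:
    f.write("[PAD]\n[UNK]\n[CLS]\n[SEP]\nhello\n##ing\n")
    vocab_path = f.name

  vocab = load_vocab(vocab_path)  # OrderedDict: token -> id
  inv_vocab = {v: k for k, v in vocab.items()}

  tokens = whitespace_tokenize("  hello   ##ing ")  # -> ["hello", "##ing"]
  ids = convert_tokens_to_ids(vocab, tokens)        # -> [4, 5]
  assert convert_ids_to_tokens(inv_vocab, ids) == tokens
  return ids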


class FullTokenizer(object):
  """Runs end-to-end tokenization."""

  def __init__(self, vocab_file, do_lower_case=True):
    self.vocab = load_vocab(vocab_file)
    self.inv_vocab = {v: k for k, v in self.vocab.items()}
    self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
    self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

  def tokenize(self, text):
    split_tokens = []
    for token in self.basic_tokenizer.tokenize(text):
      for sub_token in self.wordpiece_tokenizer.tokenize(token):
        split_tokens.append(sub_token)
    return split_tokens

  def convert_tokens_to_ids(self, tokens):
    return convert_by_vocab(self.vocab, tokens)

  def convert_ids_to_tokens(self, ids):
    return convert_by_vocab(self.inv_vocab, ids)

  @staticmethod
  def convert_tokens_to_string(tokens, clean_up_tokenization_spaces=True):
    """Converts a sequence of tokens (strings) into a single string."""

    def clean_up_tokenization(out_string):
      """Cleans up simple English tokenization artifacts such as spaces
      before punctuation and abbreviated forms.
      """
      out_string = (
          out_string.replace(" .", ".")
          .replace(" ?", "?")
          .replace(" !", "!")
          .replace(" ,", ",")
          .replace(" ' ", "'")
          .replace(" n't", "n't")
          .replace(" 'm", "'m")
          .replace(" 's", "'s")
          .replace(" 've", "'ve")
          .replace(" 're", "'re")
      )
      return out_string

    text = " ".join(tokens).replace(" ##", "").strip()
    if clean_up_tokenization_spaces:
      clean_text = clean_up_tokenization(text)
      return clean_text
    else:
      return text

  def vocab_size(self):
    return len(self.vocab)
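

# Illustrative sketch (not part of the original BERT release): end-to-end use
# of FullTokenizer. A real run would point vocab_file at the vocab.txt that
# ships with a BERT checkpoint; the tiny inline vocab here is hypothetical.
def _demo_full_tokenizer():
  """Tokenizes a sentence, maps it to ids, and detokenizes it again."""
  import tempfile

  toy_vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "john", "##anson", "was",
               "a", "puppet", "##eer", "."]
  with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as f:
    f.write("\n".join(toy_vocab) + "\n")
    vocab_path = f.name

  tokenizer = FullTokenizer(vocab_file=vocab_path, do_lower_case=True)
  tokens = tokenizer.tokenize(u"Johanson was a puppeteer.")
  # BasicTokenizer lowercases and splits punctuation; WordpieceTokenizer then
  # splits into pieces: ["john", "##anson", "was", "a", "puppet", "##eer", "."]
  ids = tokenizer.convert_tokens_to_ids(tokens)
  text = FullTokenizer.convert_tokens_to_string(tokens)
  # -> "johanson was a puppeteer."  (subword markers and extra spaces removed)
  return tokens, ids, text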


class BasicTokenizer(object):
  """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

  def __init__(self, do_lower_case=True):
    """Constructs a BasicTokenizer.

    Args:
      do_lower_case: Whether to lower case the input.
    """
    self.do_lower_case = do_lower_case

  def tokenize(self, text):
    """Tokenizes a piece of text."""
    text = convert_to_unicode(text)
    text = self._clean_text(text)

    # This was added on November 1st, 2018 for the multilingual and Chinese
    # models. This is also applied to the English models now, but it doesn't
    # matter since the English models were not trained on any Chinese data
    # and generally don't have any Chinese data in them (there are Chinese
    # characters in the vocabulary because Wikipedia does have some Chinese
    # words in the English Wikipedia).
    text = self._tokenize_chinese_chars(text)

    orig_tokens = whitespace_tokenize(text)
    split_tokens = []
    for token in orig_tokens:
      if self.do_lower_case:
        token = token.lower()
        token = self._run_strip_accents(token)
      split_tokens.extend(self._run_split_on_punc(token))

    output_tokens = whitespace_tokenize(" ".join(split_tokens))
    return output_tokens

  def _run_strip_accents(self, text):
    """Strips accents from a piece of text."""
    text = unicodedata.normalize("NFD", text)
    output = []
    for char in text:
      cat = unicodedata.category(char)
      if cat == "Mn":
        continue
      output.append(char)
    return "".join(output)

  def _run_split_on_punc(self, text):
    """Splits punctuation on a piece of text."""
    chars = list(text)
    i = 0
    start_new_word = True
    output = []
    while i < len(chars):
      char = chars[i]
      if _is_punctuation(char):
        output.append([char])
        start_new_word = True
      else:
        if start_new_word:
          output.append([])
        start_new_word = False
        output[-1].append(char)
      i += 1

    return ["".join(x) for x in output]

  def _tokenize_chinese_chars(self, text):
    """Adds whitespace around any CJK character."""
    output = []
    for char in text:
      cp = ord(char)
      if self._is_chinese_char(cp):
        output.append(" ")
        output.append(char)
        output.append(" ")
      else:
        output.append(char)
    return "".join(output)

  def _is_chinese_char(self, cp):
    """Checks whether CP is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean
    # characters, despite its name. The modern Korean Hangul alphabet is a
    # different block, as are Japanese Hiragana and Katakana. Those alphabets
    # are used to write space-separated words, so they are not treated
    # specially and are handled like all of the other languages.
    if ((cp >= 0x4E00 and cp <= 0x9FFF) or
        (cp >= 0x3400 and cp <= 0x4DBF) or
        (cp >= 0x20000 and cp <= 0x2A6DF) or
        (cp >= 0x2A700 and cp <= 0x2B73F) or
        (cp >= 0x2B740 and cp <= 0x2B81F) or
        (cp >= 0x2B820 and cp <= 0x2CEAF) or
        (cp >= 0xF900 and cp <= 0xFAFF) or
        (cp >= 0x2F800 and cp <= 0x2FA1F)):
      return True

    return False

  def _clean_text(self, text):
    """Performs invalid character removal and whitespace cleanup on text."""
    output = []
    for char in text:
      cp = ord(char)
      if cp == 0 or cp == 0xfffd or _is_control(char):
        continue
      if _is_whitespace(char):
        output.append(" ")
      else:
        output.append(char)
    return "".join(output)
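

# Illustrative sketch (not part of the original BERT release): what the
# individual BasicTokenizer steps do. The example strings are arbitrary.
def _demo_basic_tokenizer():
  """Shows lowercasing, accent stripping, punctuation and CJK splitting."""
  tokenizer = BasicTokenizer(do_lower_case=True)

  # Accents are stripped via NFD normalization; punctuation is split off.
  assert tokenizer.tokenize(u"H\u00e9llo, world!") == [
      "hello", ",", "world", "!"]

  # Every CJK ideograph becomes its own token, even without spaces.
  assert tokenizer.tokenize(u"ab\u4e2d\u6587cd") == [
      u"ab", u"\u4e2d", u"\u6587", u"cd"]

  # With do_lower_case=False, case and accents are preserved.
  assert BasicTokenizer(do_lower_case=False).tokenize(u"H\u00e9llo") == [
      u"H\u00e9llo"]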


class WordpieceTokenizer(object):
  """Runs WordPiece tokenization."""

  def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
    self.vocab = vocab
    self.unk_token = unk_token
    self.max_input_chars_per_word = max_input_chars_per_word

  def tokenize(self, text):
    """Tokenizes a piece of text into its word pieces.

    This uses a greedy longest-match-first algorithm to perform tokenization
    using the given vocabulary.

    For example:
      input = "unaffable"
      output = ["un", "##aff", "##able"]

    Args:
      text: A single token or whitespace separated tokens. This should have
        already been passed through `BasicTokenizer`.

    Returns:
      A list of wordpiece tokens.
    """

    text = convert_to_unicode(text)

    output_tokens = []
    for token in whitespace_tokenize(text):
      chars = list(token)
      if len(chars) > self.max_input_chars_per_word:
        output_tokens.append(self.unk_token)
        continue

      is_bad = False
      start = 0
      sub_tokens = []
      while start < len(chars):
        end = len(chars)
        cur_substr = None
        while start < end:
          substr = "".join(chars[start:end])
          if start > 0:
            substr = "##" + substr
          if substr in self.vocab:
            cur_substr = substr
            break
          end -= 1
        if cur_substr is None:
          is_bad = True
          break
        sub_tokens.append(cur_substr)
        start = end

      if is_bad:
        output_tokens.append(self.unk_token)
      else:
        output_tokens.extend(sub_tokens)
    return output_tokens
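

# Illustrative sketch (not part of the original BERT release): the greedy
# longest-match-first behaviour of WordpieceTokenizer on a toy vocabulary.
def _demo_wordpiece_tokenizer():
  """Shows greedy longest-match-first segmentation and the [UNK] fallback."""
  toy_vocab = {"[UNK]": 0, "un": 1, "##aff": 2, "##able": 3, "runn": 4,
               "##ing": 5}
  wp = WordpieceTokenizer(vocab=toy_vocab)

  # The docstring example: "unaffable" -> ["un", "##aff", "##able"].
  assert wp.tokenize("unaffable") == ["un", "##aff", "##able"]

  # A word that cannot be fully segmented maps to the unknown token.
  assert wp.tokenize("xyz") == ["[UNK]"]

  # Whitespace-separated input is segmented word by word.
  assert wp.tokenize("running xyz") == ["runn", "##ing", "[UNK]"]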


def _is_whitespace(char):
  """Checks whether `char` is a whitespace character."""
  # \t, \n, and \r are technically control characters but we treat them
  # as whitespace since they are generally considered as such.
  if char == " " or char == "\t" or char == "\n" or char == "\r":
    return True
  cat = unicodedata.category(char)
  if cat == "Zs":
    return True
  return False


def _is_control(char):
  """Checks whether `char` is a control character."""
  # These are technically control characters but we count them as whitespace
  # characters.
  if char == "\t" or char == "\n" or char == "\r":
    return False
  cat = unicodedata.category(char)
  if cat in ("Cc", "Cf"):
    return True
  return False


def _is_punctuation(char):
  """Checks whether `char` is a punctuation character."""
  cp = ord(char)
  # We treat all non-letter/number ASCII as punctuation.
  # Characters such as "^", "$", and "`" are not in the Unicode
  # Punctuation class but we treat them as punctuation anyways, for
  # consistency.
  if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
      (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
    return True
  cat = unicodedata.category(char)
  if cat.startswith("P"):
    return True
  return False
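

# Illustrative sketch (not part of the original BERT release): how the
# character predicates above classify a few sample characters.
def _demo_char_predicates():
  """Shows the whitespace / control / punctuation character checks."""
  # "$" has Unicode category Sc (currency), but the ASCII range check above
  # still classifies it as punctuation.
  assert _is_punctuation(u"$") and _is_punctuation(u",")
  # Tab is treated as whitespace, not as a control character.
  assert _is_whitespace(u"\t") and not _is_control(u"\t")
  # U+00A0 (no-break space) has category Zs, so it also counts as whitespace.
  assert _is_whitespace(u"\u00a0")
  # A zero-width non-joiner (category Cf) counts as a control character.
  assert _is_control(u"\u200c")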