# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural-network utility ops: embedding lookup, dropout, and an LSTM cell.

Author: aneelakantan (Arvind Neelakantan)
"""
import tensorflow as tf
  18. def get_embedding(word, utility, params):
  19. return tf.nn.embedding_lookup(params["word"], word)
  20. def apply_dropout(x, dropout_rate, mode):
  21. if (dropout_rate > 0.0):
  22. if (mode == "train"):
  23. x = tf.nn.dropout(x, dropout_rate)
  24. else:
  25. x = x
  26. return x
  27. def LSTMCell(x, mprev, cprev, key, params):
  28. """Create an LSTM cell.
  29. Implements the equations in pg.2 from
  30. "Long Short-Term Memory Based Recurrent Neural Network Architectures
  31. For Large Vocabulary Speech Recognition",
  32. Hasim Sak, Andrew Senior, Francoise Beaufays.
  33. Args:
  34. w: A dictionary of the weights and optional biases as returned
  35. by LSTMParametersSplit().
  36. x: Inputs to this cell.
  37. mprev: m_{t-1}, the recurrent activations (same as the output)
  38. from the previous cell.
  39. cprev: c_{t-1}, the cell activations from the previous cell.
  40. keep_prob: Keep probability on the input and the outputs of a cell.
  41. Returns:
  42. m: Outputs of this cell.
  43. c: Cell Activations.
  44. """
  45. i = tf.matmul(x, params[key + "_ix"]) + tf.matmul(mprev, params[key + "_im"])
  46. i = tf.nn.bias_add(i, params[key + "_i"])
  47. f = tf.matmul(x, params[key + "_fx"]) + tf.matmul(mprev, params[key + "_fm"])
  48. f = tf.nn.bias_add(f, params[key + "_f"])
  49. c = tf.matmul(x, params[key + "_cx"]) + tf.matmul(mprev, params[key + "_cm"])
  50. c = tf.nn.bias_add(c, params[key + "_c"])
  51. o = tf.matmul(x, params[key + "_ox"]) + tf.matmul(mprev, params[key + "_om"])
  52. o = tf.nn.bias_add(o, params[key + "_o"])
  53. i = tf.sigmoid(i, name="i_gate")
  54. f = tf.sigmoid(f, name="f_gate")
  55. o = tf.sigmoid(o, name="o_gate")
  56. c = f * cprev + i * tf.tanh(c)
  57. m = o * c
  58. return m, c