
fixed a bug in sampled_loss(), made it compatible with 0.12.0

Arvind Agarwal, 8 years ago
parent
commit a38bf8d764
1 file changed, 7 additions and 7 deletions

+ 7 - 7
tutorials/rnn/translate/seq2seq_model.py

@@ -100,7 +100,7 @@ class Seq2SeqModel(object):
       b = tf.get_variable("proj_b", [self.target_vocab_size], dtype=dtype)
       output_projection = (w, b)
 
-      def sampled_loss(labels, inputs):
+      def sampled_loss(inputs, labels):
         labels = tf.reshape(labels, [-1, 1])
         # We need to compute the sampled_softmax_loss using 32bit floats to
         # avoid numerical instabilities.
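
Context for the swap: in 0.12, the bucketed loss helper (tf.nn.seq2seq.sequence_loss_by_example, used by model_with_buckets) calls a custom softmax_loss_function positionally as loss(logits, targets), and tf.nn.sampled_softmax_loss itself takes (weights, biases, inputs, labels, ...); TF 1.0 later flipped both orders, which is why the arguments change places here. A minimal sketch of the 0.12-style wrapper (the vocab_size, size, and num_samples values are illustrative, not the tutorial's flags):

import tensorflow as tf  # assumes TensorFlow 0.12.x

vocab_size, size, num_samples = 40000, 1024, 512  # illustrative values

# Transposed projection weights, mirroring the tutorial's proj_w / proj_b.
w_t = tf.get_variable("proj_w", [vocab_size, size])
b = tf.get_variable("proj_b", [vocab_size])

def sampled_loss(inputs, labels):
  # sampled_softmax_loss expects labels of shape [batch_size, 1].
  labels = tf.reshape(labels, [-1, 1])
  # 0.12 order: (weights, biases, inputs, labels, ...); TF 1.0 later
  # swapped inputs and labels, hence the signature change in this commit.
  return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels,
                                    num_samples, vocab_size)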
@@ -120,17 +120,17 @@ class Seq2SeqModel(object):
 
     # Create the internal multi-layer cell for our RNN.
     def single_cell():
-      return tf.contrib.rnn.GRUCell(size)
+      return tf.nn.rnn_cell.GRUCell(size)
     if use_lstm:
       def single_cell():
-        return tf.contrib.rnn.BasicLSTMCell(size)
+        return tf.nn.rnn_cell.BasicLSTMCell(size)
     cell = single_cell()
     if num_layers > 1:
-      cell = tf.contrib.rnn.MultiRNNCell([single_cell() for _ in range(num_layers)])
+      cell = tf.nn.rnn_cell.MultiRNNCell([single_cell() for _ in range(num_layers)])
 
     # The seq2seq function: we use embedding for the input and attention.
     def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
-      return tf.contrib.legacy_seq2seq.embedding_attention_seq2seq(
+      return tf.nn.seq2seq.embedding_attention_seq2seq(
           encoder_inputs,
           decoder_inputs,
           cell,
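
For reference, a minimal sketch of the renamed pieces in this hunk under 0.12, where the cell classes live in tf.nn.rnn_cell and the attention model in tf.nn.seq2seq (both moved under tf.contrib in 1.x). The sizes, vocabularies, and placeholder setup are illustrative stand-ins for the tutorial's configuration:

import tensorflow as tf  # assumes TensorFlow 0.12.x

size, num_layers, bucket_len = 1024, 2, 10   # illustrative values
source_vocab = target_vocab = 40000          # illustrative vocabularies

# Cell classes live in tf.nn.rnn_cell under 0.12 (tf.contrib.rnn in 1.x).
cell = tf.nn.rnn_cell.MultiRNNCell(
    [tf.nn.rnn_cell.GRUCell(size) for _ in range(num_layers)])

# One int32 placeholder of token ids per time step, as in the tutorial.
encoder_inputs = [tf.placeholder(tf.int32, [None], name="enc%d" % i)
                  for i in range(bucket_len)]
decoder_inputs = [tf.placeholder(tf.int32, [None], name="dec%d" % i)
                  for i in range(bucket_len)]

# The seq2seq library is tf.nn.seq2seq under 0.12
# (renamed tf.contrib.legacy_seq2seq in 1.x).
outputs, state = tf.nn.seq2seq.embedding_attention_seq2seq(
    encoder_inputs, decoder_inputs, cell,
    num_encoder_symbols=source_vocab,
    num_decoder_symbols=target_vocab,
    embedding_size=size,
    feed_previous=False)  # True feeds each decoder output back in (decoding)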
@@ -160,7 +160,7 @@ class Seq2SeqModel(object):
 
     # Training outputs and losses.
     if forward_only:
-      self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
+      self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(
           self.encoder_inputs, self.decoder_inputs, targets,
           self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True),
           softmax_loss_function=softmax_loss_function)
@@ -172,7 +172,7 @@ class Seq2SeqModel(object):
               for output in self.outputs[b]
           ]
     else:
-      self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
+      self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(
           self.encoder_inputs, self.decoder_inputs, targets,
           self.target_weights, buckets,
           lambda x, y: seq2seq_f(x, y, False),
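
Finally, a self-contained sketch of the 0.12 bucketed graph builder used in both branches above; the buckets, sizes, and the tiny seq2seq_f are illustrative, and a custom softmax_loss_function such as sampled_loss could be passed in as the tutorial does:

import tensorflow as tf  # assumes TensorFlow 0.12.x

buckets = [(5, 10), (10, 15)]  # illustrative (encoder, decoder) lengths
max_enc, max_dec = buckets[-1]

encoder_inputs = [tf.placeholder(tf.int32, [None]) for _ in range(max_enc)]
decoder_inputs = [tf.placeholder(tf.int32, [None]) for _ in range(max_dec + 1)]
target_weights = [tf.placeholder(tf.float32, [None]) for _ in range(max_dec + 1)]
# Targets are the decoder inputs shifted left by one, as in the tutorial.
targets = [decoder_inputs[i + 1] for i in range(len(decoder_inputs) - 1)]

def seq2seq_f(enc, dec, do_decode):
  cell = tf.nn.rnn_cell.GRUCell(256)  # illustrative single small cell
  return tf.nn.seq2seq.embedding_attention_seq2seq(
      enc, dec, cell, num_encoder_symbols=10000, num_decoder_symbols=10000,
      embedding_size=256, feed_previous=do_decode)

# 0.12 name of the bucketed graph builder; pass softmax_loss_function=...
# to plug in a sampled loss as the tutorial does.
outputs, losses = tf.nn.seq2seq.model_with_buckets(
    encoder_inputs, decoder_inputs, targets, target_weights, buckets,
    lambda x, y: seq2seq_f(x, y, False))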