
Merge https://github.com/tensorflow/models

Ivan Bogatyy, 8 years ago
parent
commit
296d4f65ee
4 changed files with 7 additions and 6 deletions
  1. inception/inception/slim/README.md (+1 -1)
  2. textsum/seq2seq_attention_model.py (+3 -2)
  3. tutorials/rnn/ptb/ptb_word_lm.py (+1 -1)
  4. tutorials/rnn/translate/seq2seq_model.py (+2 -2)

+ 1 - 1
inception/inception/slim/README.md

@@ -319,7 +319,7 @@ their use, consider the following example.
 def MyNewOp(inputs):
   varA = ...
   varB = ...
-  outputs = tf.mul(varA, inputs) + varB
+  outputs = tf.multiply(varA, inputs) + varB
   return outputs
 
 ```
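For context: TensorFlow 1.0 renamed several element-wise math ops, including `tf.mul` → `tf.multiply`, which is all this hunk tracks. A minimal standalone sketch of the updated op, assuming TensorFlow 1.x (the tensor values are illustrative):

```python
import tensorflow as tf  # assumes TensorFlow 1.x, where tf.mul was removed

varA = tf.constant([2.0, 3.0])
inputs = tf.constant([4.0, 5.0])
varB = tf.constant(1.0)
outputs = tf.multiply(varA, inputs) + varB  # element-wise product; replaces tf.mul

with tf.Session() as sess:
    print(sess.run(outputs))  # [ 9. 16.]
```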

+ 3 - 2
textsum/seq2seq_attention_model.py

@@ -227,8 +227,9 @@ class Seq2SeqAttentionModel(object):
         def sampled_loss_func(inputs, labels):
           with tf.device('/cpu:0'):  # Try gpu.
             labels = tf.reshape(labels, [-1, 1])
-            return tf.nn.sampled_softmax_loss(w_t, v, inputs, labels,
-                                              hps.num_softmax_samples, vsize)
+            return tf.nn.sampled_softmax_loss(
+                weights=w_t, biases=v, labels=labels, inputs=inputs,
+                num_sampled=hps.num_softmax_samples, num_classes=vsize)
 
         if hps.num_softmax_samples != 0 and hps.mode == 'train':
           self._loss = seq2seq_lib.sampled_sequence_loss(
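In TensorFlow 1.0 the positional argument order of `tf.nn.sampled_softmax_loss` changed (`labels` now precedes `inputs`), which is why this hunk switches to explicit keyword arguments. A self-contained sketch of the keyword form, assuming TensorFlow 1.x; the shapes and `num_sampled` value are hypothetical:

```python
import tensorflow as tf  # assumes TensorFlow 1.x

vsize, hidden, batch = 10000, 128, 32          # hypothetical sizes
w_t = tf.get_variable('w_t', [vsize, hidden])  # transposed output projection
v = tf.get_variable('v', [vsize])              # output bias
inputs = tf.random_normal([batch, hidden])     # stand-in for decoder outputs
labels = tf.random_uniform([batch, 1], maxval=vsize, dtype=tf.int32)

# Keyword arguments keep the call correct regardless of positional order.
loss = tf.nn.sampled_softmax_loss(
    weights=w_t, biases=v, labels=labels, inputs=inputs,
    num_sampled=64, num_classes=vsize)
```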

+ 1 - 1
tutorials/rnn/ptb/ptb_word_lm.py

@@ -110,7 +110,7 @@ class PTBModel(object):
     # different than reported in the paper.
     def lstm_cell():
       return tf.contrib.rnn.BasicLSTMCell(
-          size, forget_bias=0.0, state_is_tuple=True, reuse=tf.get_variable_scope().reuse)
+          size, forget_bias=0.0, state_is_tuple=True)
     attn_cell = lstm_cell
     if is_training and config.keep_prob < 1:
       def attn_cell():
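The `reuse` constructor argument of `BasicLSTMCell` is not accepted by every TensorFlow 1.x release, so this hunk drops it and leaves variable reuse to the enclosing variable scope. A minimal sketch of the resulting pattern, assuming TensorFlow 1.x with `tf.contrib` (hidden size and depth are illustrative):

```python
import tensorflow as tf  # assumes TensorFlow 1.x with tf.contrib

size, num_layers = 200, 2  # hypothetical hidden size and layer count

def lstm_cell():
    # No reuse= argument: reuse is governed by the surrounding
    # tf.variable_scope rather than by the cell constructor.
    return tf.contrib.rnn.BasicLSTMCell(size, forget_bias=0.0, state_is_tuple=True)

cell = tf.contrib.rnn.MultiRNNCell(
    [lstm_cell() for _ in range(num_layers)], state_is_tuple=True)
```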

+ 2 - 2
tutorials/rnn/translate/seq2seq_model.py

@@ -100,13 +100,13 @@ class Seq2SeqModel(object):
       b = tf.get_variable("proj_b", [self.target_vocab_size], dtype=dtype)
       output_projection = (w, b)
 
-      def sampled_loss(labels, inputs):
+      def sampled_loss(labels, logits):
         labels = tf.reshape(labels, [-1, 1])
         # We need to compute the sampled_softmax_loss using 32bit floats to
         # avoid numerical instabilities.
         local_w_t = tf.cast(w_t, tf.float32)
         local_b = tf.cast(b, tf.float32)
-        local_inputs = tf.cast(inputs, tf.float32)
+        local_inputs = tf.cast(logits, tf.float32)
         return tf.cast(
             tf.nn.sampled_softmax_loss(
                 weights=local_w_t,
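This final hunk renames the second parameter from `inputs` to `logits`, presumably so the callback matches the `(labels, logits)` signature the TF 1.x seq2seq library expects for a custom `softmax_loss_function`; the float32 casts guard against numerical instability when the model otherwise runs in a lower-precision dtype. A self-contained sketch of the whole pattern, assuming TensorFlow 1.x, with hypothetical sizes:

```python
import tensorflow as tf  # assumes TensorFlow 1.x

target_vocab_size, size = 40000, 512  # hypothetical vocab and hidden sizes
w = tf.get_variable("proj_w", [size, target_vocab_size], dtype=tf.float16)
w_t = tf.transpose(w)
b = tf.get_variable("proj_b", [target_vocab_size], dtype=tf.float16)

def sampled_loss(labels, logits):
    labels = tf.reshape(labels, [-1, 1])
    # Compute the sampled softmax in float32 to avoid numerical
    # instabilities, then cast back to the model's dtype.
    local_w_t = tf.cast(w_t, tf.float32)
    local_b = tf.cast(b, tf.float32)
    local_inputs = tf.cast(logits, tf.float32)
    return tf.cast(
        tf.nn.sampled_softmax_loss(
            weights=local_w_t, biases=local_b, labels=labels,
            inputs=local_inputs, num_sampled=512,
            num_classes=target_vocab_size),
        tf.float16)
```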