
Updated calls to '..._cross_entropy_with_logits' to match the internal version

Neal Wu 8 years ago
parent commit cc1fb66858

+ 2 - 2
tutorials/embedding/word2vec.py

@@ -263,9 +263,9 @@ class Word2Vec(object):
     # cross-entropy(logits, labels)
     opts = self._options
     true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
-        true_logits, tf.ones_like(true_logits))
+        labels=tf.ones_like(true_logits), logits=true_logits)
     sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
-        sampled_logits, tf.zeros_like(sampled_logits))
+        labels=tf.zeros_like(sampled_logits), logits=sampled_logits)
 
     # NCE-loss is the sum of the true and noise (sampled words)
     # contributions, averaged over the batch.
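
For reference, a minimal sketch of the updated keyword-argument call, assuming TensorFlow 1.x. The toy true_logits tensor below is hypothetical; in word2vec.py it comes from the model's forward pass:

    import tensorflow as tf

    # Hypothetical stand-in for the logits of the true (positive) examples.
    true_logits = tf.constant([2.0, -1.0, 0.5])

    # TensorFlow 1.0 requires labels and logits as keyword arguments;
    # the old positional order (logits first) no longer works.
    true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(true_logits), logits=true_logits)

    with tf.Session() as sess:
        print(sess.run(true_xent))  # per-example sigmoid cross-entropy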

+ 1 - 1
tutorials/image/cifar10/cifar10.py

@@ -286,7 +286,7 @@ def loss(logits, labels):
   # Calculate the average cross entropy loss across the batch.
   labels = tf.cast(labels, tf.int64)
   cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
-      logits=logits, labels=labels, name='cross_entropy_per_example')
+      labels=labels, logits=logits, name='cross_entropy_per_example')
   cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
   tf.add_to_collection('losses', cross_entropy_mean)
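
Similarly, a minimal sketch of the sparse softmax call with keyword arguments, assuming TensorFlow 1.x; the toy logits/labels batch below is hypothetical:

    import tensorflow as tf

    # Hypothetical toy batch: 4 examples, 10 classes.
    logits = tf.random_normal([4, 10])
    labels = tf.constant([3, 1, 7, 0], dtype=tf.int64)  # integer class ids, not one-hot

    # Keyword arguments, matching the updated call in cifar10.py.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')

    with tf.Session() as sess:
        print(sess.run(cross_entropy_mean))  # scalar average loss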