
Merge pull request #864 from tensorflow/fix-xent

Fix *_cross_entropy_with_logits calls
Martin Wicke committed 8 years ago
commit fc1c9b1e66
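Background on these changes (read this as context rather than part of the patch): TensorFlow 1.0 added a leading _sentinel parameter to the *_cross_entropy_with_logits functions so they can only be called with keyword arguments, which is why every call site below switches from positional to named logits/labels arguments. A minimal sketch of the old versus new calling convention, assuming a TF 1.x runtime:

import tensorflow as tf  # assumes TensorFlow 1.x

logits = tf.constant([[2.0, 1.0, 0.1]])
labels = tf.constant([0])  # sparse integer class id

# Pre-1.0 positional style; the _sentinel guard rejects this in 1.x:
#   tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels)

# 1.x keyword-only style, as used throughout this commit:
xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=labels, name='cross_entropy_per_example')

with tf.Session() as sess:
    print(sess.run(xent))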

+ 1 - 1
differential_privacy/multiple_teachers/deep_cnn.py

@@ -341,7 +341,7 @@ def loss_fun(logits, labels):
   # Calculate the cross entropy between labels and predictions
   labels = tf.cast(labels, tf.int64)
   cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
-      logits, labels, name='cross_entropy_per_example')
+      logits=logits, labels=labels, name='cross_entropy_per_example')
 
   # Calculate the average cross entropy loss across the batch.
   cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')

+ 2 - 2
inception/inception/slim/losses.py

@@ -163,8 +163,8 @@ def cross_entropy_loss(logits, one_hot_labels, label_smoothing=0,
       smooth_positives = 1.0 - label_smoothing
       smooth_negatives = label_smoothing / num_classes
       one_hot_labels = one_hot_labels * smooth_positives + smooth_negatives
-    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
-                                                            one_hot_labels,
+    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
+                                                            labels=one_hot_labels,
                                                             name='xentropy')
     weight = tf.convert_to_tensor(weight,
                                   dtype=logits.dtype.base_dtype,

+ 1 - 1
street/python/vgsl_model.py

@@ -454,7 +454,7 @@ class VGSLImageModel(object):
         self.labels = tf.slice(self.labels, [0, 0], [-1, 1])
         self.labels = tf.reshape(self.labels, [-1])
       cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
-          logits, self.labels, name='xent')
+          logits=logits, labels=self.labels, name='xent')
     else:
       # TODO(rays) Labels need an extra dimension for logistic, so different
       # padding functions are needed, as well as a different loss function.

+ 1 - 1
transformer/cluttered_mnist.py

@@ -123,7 +123,7 @@ y_logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
 
 # %% Define loss/eval/training functions
 cross_entropy = tf.reduce_mean(
-    tf.nn.softmax_cross_entropy_with_logits(y_logits, y))
+    tf.nn.softmax_cross_entropy_with_logits(logits=y_logits, labels=y))
 opt = tf.train.AdamOptimizer()
 optimizer = opt.minimize(cross_entropy)
 grads = opt.compute_gradients(cross_entropy, [b_fc_loc2])

+ 1 - 1
tutorials/image/cifar10/cifar10.py

@@ -286,7 +286,7 @@ def loss(logits, labels):
   # Calculate the average cross entropy loss across the batch.
   labels = tf.cast(labels, tf.int64)
   cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
-      logits, labels, name='cross_entropy_per_example')
+      logits=logits, labels=labels, name='cross_entropy_per_example')
   cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
   tf.add_to_collection('losses', cross_entropy_mean)
 

+ 1 - 1
tutorials/image/mnist/convolutional.py

@@ -228,7 +228,7 @@ def main(_):
   # Training computation: logits + cross-entropy loss.
   logits = model(train_data_node, True)
   loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
-      logits, train_labels_node))
+      labels=train_labels_node, logits=logits))
 
   # L2 regularization for the fully connected parameters.
   regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
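The hunks above touch both the sparse and the dense variants of the op. As a reminder (background, not part of the commit): sparse_softmax_cross_entropy_with_logits takes integer class ids, while softmax_cross_entropy_with_logits takes a full per-class distribution (one-hot, or smoothed as in inception/slim/losses.py), and both are keyword-only in TF 1.x. A small self-contained comparison, assuming TF 1.x:

import numpy as np
import tensorflow as tf  # assumes TensorFlow 1.x

logits = tf.constant([[2.0, 1.0, 0.1],
                      [0.5, 2.5, 0.3]])

# Sparse variant: one integer class id per example.
sparse_labels = tf.constant([0, 1])
sparse_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=sparse_labels)

# Dense variant: one probability distribution per example.
dense_labels = tf.constant([[1.0, 0.0, 0.0],
                            [0.0, 1.0, 0.0]])
dense_xent = tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=dense_labels)

with tf.Session() as sess:
    s, d = sess.run([sparse_xent, dense_xent])
    # For one-hot labels the two variants agree per example.
    assert np.allclose(s, d)
    print(s, d)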