Merge pull request #785 from mrphoenix13/master

tf.contrib.deprecated elimination bugfix for tf version 12
Olivia committed 8 years ago
1125de19d5
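
The patch below swaps the `tf.contrib.deprecated.*` summary calls back to the pre-1.0 top-level names so the tutorials run on TF 0.12, where the `tf.contrib.deprecated` aliases do not exist. As a minimal sketch, one could also guard against the rename instead of hard-coding either spelling; the shim below is illustrative and not part of this commit, and it assumes the 0.12-era graph-mode API:

```python
import tensorflow as tf

# Hypothetical compatibility shim (not from this PR): prefer the long-term
# tf.summary.scalar (added in 0.12), fall back to the pre-1.0 top-level name
# this PR restores, and finally to the transitional tf.contrib.deprecated
# alias this PR removes.
if hasattr(tf, 'summary') and hasattr(tf.summary, 'scalar'):
    scalar_summary = tf.summary.scalar
elif hasattr(tf, 'scalar_summary'):
    scalar_summary = tf.scalar_summary
else:
    scalar_summary = tf.contrib.deprecated.scalar_summary

loss = tf.constant(0.5)
summary_op = scalar_summary('loss', loss)  # builds a Summary op in the graph
```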

+ 1 - 1
tutorials/embedding/word2vec.py

@@ -365,7 +365,7 @@ class Word2Vec(object):
       self._word2id[w] = i
     true_logits, sampled_logits = self.forward(examples, labels)
     loss = self.nce_loss(true_logits, sampled_logits)
-    tf.contrib.deprecated.scalar_summary("NCE loss", loss)
+    tf.scalar_summary("NCE loss", loss)
     self._loss = loss
     self.optimize(loss)
 
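For context, a summary op like the one restored above only produces data once it is evaluated in a session and handed to a writer. A hedged sketch under the pre-1.0 API this fix targets; the log directory and step value are illustrative:

```python
import tensorflow as tf

loss = tf.constant(1.25)
summary_op = tf.scalar_summary("NCE loss", loss)  # same call as in the patch
# tf.train.SummaryWriter was renamed tf.summary.FileWriter in later releases.
writer = tf.train.SummaryWriter("/tmp/word2vec_logs")

with tf.Session() as sess:
    # Evaluating the op yields a serialized Summary proto for TensorBoard.
    writer.add_summary(sess.run(summary_op), global_step=0)
    writer.close()
```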

+ 7 - 7
tutorials/image/cifar10/cifar10.py

@@ -90,8 +90,8 @@ def _activation_summary(x):
   # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
   # session. This helps the clarity of presentation on tensorboard.
   tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
-  tf.contrib.deprecated.histogram_summary(tensor_name + '/activations', x)
-  tf.contrib.deprecated.scalar_summary(tensor_name + '/sparsity',
-                                       tf.nn.zero_fraction(x))
+  tf.histogram_summary(tensor_name + '/activations', x)
+  tf.scalar_summary(tensor_name + '/sparsity',
+                    tf.nn.zero_fraction(x))
 
 
@@ -316,8 +316,8 @@ def _add_loss_summaries(total_loss):
   for l in losses + [total_loss]:
     # Name each loss as '(raw)' and name the moving average version of the loss
     # as the original loss name.
-    tf.contrib.deprecated.scalar_summary(l.op.name + ' (raw)', l)
-    tf.contrib.deprecated.scalar_summary(l.op.name, loss_averages.average(l))
+    tf.scalar_summary(l.op.name + ' (raw)', l)
+    tf.scalar_summary(l.op.name, loss_averages.average(l))
 
   return loss_averages_op
 
@@ -345,7 +345,7 @@ def train(total_loss, global_step):
                                   decay_steps,
                                   LEARNING_RATE_DECAY_FACTOR,
                                   staircase=True)
-  tf.contrib.deprecated.scalar_summary('learning_rate', lr)
+  tf.scalar_summary('learning_rate', lr)
 
   # Generate moving averages of all losses and associated summaries.
   loss_averages_op = _add_loss_summaries(total_loss)
@@ -360,12 +360,12 @@ def train(total_loss, global_step):
 
   # Add histograms for trainable variables.
   for var in tf.trainable_variables():
-    tf.contrib.deprecated.histogram_summary(var.op.name, var)
+    tf.histogram_summary(var.op.name, var)
 
   # Add histograms for gradients.
   for grad, var in grads:
     if grad is not None:
-      tf.contrib.deprecated.histogram_summary(var.op.name + '/gradients', grad)
+      tf.histogram_summary(var.op.name + '/gradients', grad)
 
   # Track the moving averages of all trainable variables.
   variable_averages = tf.train.ExponentialMovingAverage(

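The `_activation_summary` hunk above keeps relying on the tower-stripping regex from its surrounding context, so per-GPU copies of a tensor share one TensorBoard tag. A self-contained check of that substitution (`TOWER_NAME` is 'tower' in this tutorial):

```python
import re

TOWER_NAME = 'tower'  # as defined in cifar10.py

# 'tower_0/conv1/activations' and 'tower_1/conv1/activations' both collapse
# to 'conv1/activations', so summaries from all GPUs land on one tag.
print(re.sub('%s_[0-9]*/' % TOWER_NAME, '', 'tower_0/conv1/activations'))
# -> conv1/activations
```
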
+ 1 - 1
tutorials/image/cifar10/cifar10_input.py

@@ -132,7 +132,7 @@ def _generate_image_and_label_batch(image, label, min_queue_examples,
         capacity=min_queue_examples + 3 * batch_size)
 
   # Display the training images in the visualizer.
-  tf.contrib.deprecated.image_summary('images', images)
+  tf.image_summary('images', images)
 
   return images, tf.reshape(label_batch, [batch_size])
 
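The restored `tf.image_summary` expects a 4-D batch of images. A minimal sketch, assuming the pre-1.0 API and illustrative shapes (the tutorial's distorted CIFAR-10 crops are 24x24x3):

```python
import tensorflow as tf

images = tf.random_uniform([16, 24, 24, 3])  # [batch, height, width, channels]
# max_images caps how many images from the batch TensorBoard will display.
tf.image_summary('images', images, max_images=10)
```
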

+ 5 - 5
tutorials/image/cifar10/cifar10_multi_gpu_train.py

@@ -93,7 +93,7 @@ def tower_loss(scope):
     # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
     # session. This helps the clarity of presentation on tensorboard.
     loss_name = re.sub('%s_[0-9]*/' % cifar10.TOWER_NAME, '', l.op.name)
-    tf.contrib.deprecated.scalar_summary(loss_name, l)
+    tf.scalar_summary(loss_name, l)
 
   return total_loss
 
@@ -187,13 +187,13 @@ def train():
     grads = average_gradients(tower_grads)
 
     # Add a summary to track the learning rate.
-    summaries.append(tf.contrib.deprecated.scalar_summary('learning_rate', lr))
+    summaries.append(tf.scalar_summary('learning_rate', lr))
 
     # Add histograms for gradients.
     for grad, var in grads:
       if grad is not None:
         summaries.append(
-            tf.contrib.deprecated.histogram_summary(var.op.name + '/gradients',
-                                                    grad))
+            tf.histogram_summary(var.op.name + '/gradients',
+                                 grad))
 
     # Apply the gradients to adjust the shared variables.
@@ -202,7 +202,7 @@ def train():
     # Add histograms for trainable variables.
     for var in tf.trainable_variables():
       summaries.append(
-          tf.contrib.deprecated.histogram_summary(var.op.name, var))
+          tf.histogram_summary(var.op.name, var))
 
     # Track the moving averages of all trainable variables.
     variable_averages = tf.train.ExponentialMovingAverage(
@@ -216,7 +216,7 @@ def train():
     saver = tf.train.Saver(tf.global_variables())
 
     # Build the summary operation from the last tower summaries.
-    summary_op = tf.contrib.deprecated.merge_summary(summaries)
+    summary_op = tf.merge_summary(summaries)
 
     # Build an initialization operation to run below.
     init = tf.global_variables_initializer()

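The multi-GPU script collects its summary ops in a plain Python list rather than a graph collection, which is why the hunk above merges them explicitly. A hedged sketch of that pattern with the pre-1.0 names used in this patch; the variable is illustrative:

```python
import tensorflow as tf

w = tf.Variable(tf.zeros([10]), name='w')
lr = tf.constant(0.1)

summaries = [tf.scalar_summary('learning_rate', lr)]
for var in tf.trainable_variables():
    summaries.append(tf.histogram_summary(var.op.name, var))

# One op that serializes every collected summary per session run.
summary_op = tf.merge_summary(summaries)
```
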
+ 3 - 3
tutorials/rnn/ptb/ptb_word_lm.py

@@ -331,14 +331,14 @@ def main(_):
       train_input = PTBInput(config=config, data=train_data, name="TrainInput")
       with tf.variable_scope("Model", reuse=None, initializer=initializer):
         m = PTBModel(is_training=True, config=config, input_=train_input)
-      tf.contrib.deprecated.scalar_summary("Training Loss", m.cost)
-      tf.contrib.deprecated.scalar_summary("Learning Rate", m.lr)
+      tf.scalar_summary("Training Loss", m.cost)
+      tf.scalar_summary("Learning Rate", m.lr)
 
     with tf.name_scope("Valid"):
       valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
       with tf.variable_scope("Model", reuse=True, initializer=initializer):
         mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
-      tf.contrib.deprecated.scalar_summary("Validation Loss", mvalid.cost)
+      tf.scalar_summary("Validation Loss", mvalid.cost)
 
     with tf.name_scope("Test"):
       test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
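
The PTB hunk builds the validation model inside `tf.variable_scope("Model", reuse=True)` so it shares the training model's weights while logging its own summaries. A minimal illustration of that sharing pattern (names are illustrative, not from the tutorial):

```python
import tensorflow as tf

with tf.variable_scope("Model", reuse=None):
    w_train = tf.get_variable("w", shape=[2],
                              initializer=tf.constant_initializer(0.0))

with tf.variable_scope("Model", reuse=True):
    w_valid = tf.get_variable("w")  # looks up, rather than creates, "Model/w"

assert w_train is w_valid  # both handles point at the same variable
```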