
Additional upgrades to 1.0 and code fixes

Neal Wu, 8 years ago
parent
Commit
546fd48ecb

+ 5 - 5
slim/train_image_classifier.py

@@ -394,9 +394,9 @@ def main(_):
 
   tf.logging.set_verbosity(tf.logging.INFO)
   with tf.Graph().as_default():
-    ######################
-    # Config model_deploy#
-    ######################
+    #######################
+    # Config model_deploy #
+    #######################
     deploy_config = model_deploy.DeploymentConfig(
         num_clones=FLAGS.num_clones,
         clone_on_cpu=FLAGS.clone_on_cpu,
@@ -414,9 +414,9 @@ def main(_):
     dataset = dataset_factory.get_dataset(
         FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
 
-    ####################
+    ######################
     # Select the network #
-    ####################
+    ######################
     network_fn = nets_factory.get_network_fn(
         FLAGS.model_name,
         num_classes=(dataset.num_classes - FLAGS.labels_offset),
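
For reference, the nets_factory call in this hunk returns a model-construction function. A minimal sketch of how that return value is typically applied, assuming the slim repo's `from nets import nets_factory` import and TF 1.0; the `images` tensor and the concrete model name/class count below are illustrative placeholders, not part of this commit:

import tensorflow as tf
from nets import nets_factory  # as imported in slim/train_image_classifier.py

images = tf.placeholder(tf.float32, [None, 299, 299, 3])  # placeholder image batch
network_fn = nets_factory.get_network_fn(
    'inception_v3',     # stands in for FLAGS.model_name
    num_classes=1001,   # stands in for dataset.num_classes - FLAGS.labels_offset
    is_training=True)
logits, end_points = network_fn(images)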

+ 3 - 3
textsum/seq2seq_attention_model.py

@@ -158,11 +158,11 @@ class Seq2SeqAttentionModel(object):
       for layer_i in xrange(hps.enc_layers):
         with tf.variable_scope('encoder%d'%layer_i), tf.device(
             self._next_device()):
-          cell_fw = tf.nn.rnn_cell.LSTMCell(
+          cell_fw = tf.contrib.rnn.LSTMCell(
               hps.num_hidden,
               initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=123),
               state_is_tuple=False)
-          cell_bw = tf.nn.rnn_cell.LSTMCell(
+          cell_bw = tf.contrib.rnn.LSTMCell(
               hps.num_hidden,
               initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=113),
               state_is_tuple=False)
@@ -188,7 +188,7 @@ class Seq2SeqAttentionModel(object):
           loop_function = _extract_argmax_and_embed(
               embedding, (w, v), update_embedding=False)
 
-        cell = tf.nn.rnn_cell.LSTMCell(
+        cell = tf.contrib.rnn.LSTMCell(
             hps.num_hidden,
             initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=113),
             state_is_tuple=False)
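
The three changes above are the TF 1.0 rename of the RNN cell classes from tf.nn.rnn_cell to tf.contrib.rnn. A minimal standalone sketch of the pattern, with a placeholder hidden size rather than the textsum hps values:

import tensorflow as tf

num_hidden = 256  # placeholder for hps.num_hidden
# Pre-1.0 spelling was tf.nn.rnn_cell.LSTMCell(...); under 1.0 it becomes:
cell = tf.contrib.rnn.LSTMCell(
    num_hidden,
    initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=123),
    state_is_tuple=False)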

+ 0 - 20
tutorials/rnn/linear.py

@@ -1,20 +0,0 @@
-# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Import linear python op for backward compatibility."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-raise ImportError("This module is deprecated.  Use tf.contrib.layers.linear.")
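
The deleted stub's only content was an ImportError pointing at tf.contrib.layers.linear. A hedged sketch of that replacement under TF 1.0; the input shape and output size are illustrative:

import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 128])
# Affine layer (x * W + b) with no activation, the role the old
# tutorials/rnn linear helper played.
outputs = tf.contrib.layers.linear(inputs, num_outputs=64)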

+ 0 - 21
tutorials/rnn/rnn.py

@@ -1,21 +0,0 @@
-# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Import rnn python ops for backward compatibility."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-raise ImportError("This module is deprecated.  Use tf.nn.rnn_* instead.")
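
As above, this stub only raised an ImportError pointing at the tf.nn.rnn_* functions. A minimal sketch of the usual replacement call, tf.nn.dynamic_rnn, under TF 1.0; tensor shapes and the cell size are placeholders:

import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 20, 128])  # [batch, time, depth]
cell = tf.contrib.rnn.BasicLSTMCell(64)
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)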

+ 0 - 21
tutorials/rnn/rnn_cell.py

@@ -1,21 +0,0 @@
-# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Import rnn_cell python ops for backward compatibility."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-raise ImportError("This module is deprecated.  Use tf.contrib.rnn instead.")
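
The rnn_cell stub points at tf.contrib.rnn, the same module used in the textsum change above. A small sketch of building and stacking cells there under TF 1.0; unit counts and layer depth are illustrative:

import tensorflow as tf

cells = [tf.contrib.rnn.GRUCell(64) for _ in range(2)]
stacked_cell = tf.contrib.rnn.MultiRNNCell(cells)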

+ 0 - 22
tutorials/rnn/seq2seq.py

@@ -1,22 +0,0 @@
-# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Import seq2seq python ops for backward compatibility."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-raise ImportError(
-    "This module is deprecated. Use tf.contrib.legacy_seq2seq instead.")