
Upgrade to TF 1.0

Neal Wu committed 8 years ago · commit dc97aa0ffb
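
Five files under domain_adaptation/domain_separation change, and the edits are three mechanical TF 1.0 migrations: tf.concat calls gain explicit axis=/values= keywords (the argument order changed in 1.0), tf.initialize_all_variables() becomes tf.global_variables_initializer(), and tf.contrib.deprecated.scalar_summary becomes tf.summary.scalar.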

+ 6 - 4
domain_adaptation/domain_separation/dsn.py

@@ -282,15 +282,17 @@ def add_autoencoders(source_data, source_shared, target_data, target_shared,

  # Add summaries
  source_reconstructions = tf.concat(
-      map(normalize_images, [
+      axis=2,
+      values=map(normalize_images, [
          source_data, source_recons, source_shared_recons,
          source_private_recons
-      ]), 2)
+      ]))
  target_reconstructions = tf.concat(
-      map(normalize_images, [
+      axis=2,
+      values=map(normalize_images, [
          target_data, target_recons, target_shared_recons,
          target_private_recons
-      ]), 2)
+      ]))
  tf.summary.image(
      'Source Images:Recons:RGB',
      source_reconstructions[:, :, :, :3],
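
TF 1.0 flipped the argument order of tf.concat from (concat_dim, values) to (values, axis), and the keyword spelling used in this hunk is unambiguous under either signature. A minimal sketch of the change, with illustrative tensor names:

import tensorflow as tf

a = tf.zeros([4, 28, 28, 3])
b = tf.ones([4, 28, 28, 3])

# Pre-1.0 spelling:           tf.concat(2, [a, b])
# TF 1.0 positional spelling: tf.concat([a, b], 2)
# Keyword spelling from this commit, valid before and after the flip:
merged = tf.concat(axis=2, values=[a, b])  # shape (4, 28, 56, 3)

One caveat: values=map(normalize_images, [...]) assumes Python 2, where map returns a list; under Python 3 the map object would need a list(...) wrapper before tf.concat could consume it.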

+ 1 - 1
domain_adaptation/domain_separation/dsn_test.py

@@ -26,7 +26,7 @@ class HelperFunctionsTest(tf.test.TestCase):
    with self.test_session() as sess:
      # Test for when global_step < domain_separation_startpoint
      step = tf.contrib.slim.get_or_create_global_step()
-      sess.run(tf.initialize_all_variables())  # global_step = 0
+      sess.run(tf.global_variables_initializer())  # global_step = 0
      params = {'domain_separation_startpoint': 2}
      weight = dsn.dsn_loss_coefficient(params)
      weight_np = sess.run(weight)
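
tf.initialize_all_variables() was deprecated in TF 1.0 in favor of tf.global_variables_initializer(); both return an op that runs the initializer of every global variable. A hedged sketch of the updated test pattern (the test class and method names are illustrative):

import tensorflow as tf

class InitTest(tf.test.TestCase):

  def testGlobalStepStartsAtZero(self):
    with self.test_session() as sess:
      step = tf.contrib.slim.get_or_create_global_step()
      sess.run(tf.global_variables_initializer())  # was tf.initialize_all_variables()
      self.assertEqual(0, sess.run(step))

if __name__ == '__main__':
  tf.test.main()

The same one-line substitution appears in models_test.py below.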

+ 7 - 7
domain_adaptation/domain_separation/losses.py

@@ -100,7 +100,7 @@ def mmd_loss(source_samples, target_samples, weight, scope=None):
    tag = 'MMD Loss'
    if scope:
      tag = scope + tag
-    tf.contrib.deprecated.scalar_summary(tag, loss_value)
+    tf.summary.scalar(tag, loss_value)
    tf.losses.add_loss(loss_value)

  return loss_value
@@ -135,7 +135,7 @@ def correlation_loss(source_samples, target_samples, weight, scope=None):
    tag = 'Correlation Loss'
    if scope:
      tag = scope + tag
-    tf.contrib.deprecated.scalar_summary(tag, corr_loss)
+    tf.summary.scalar(tag, corr_loss)
    tf.losses.add_loss(corr_loss)

  return corr_loss
@@ -155,11 +155,11 @@ def dann_loss(source_samples, target_samples, weight, scope=None):
   """
   """
   with tf.variable_scope('dann'):
   with tf.variable_scope('dann'):
     batch_size = tf.shape(source_samples)[0]
     batch_size = tf.shape(source_samples)[0]
-    samples = tf.concat([source_samples, target_samples], 0)
+    samples = tf.concat(axis=0, values=[source_samples, target_samples])
    samples = slim.flatten(samples)

    domain_selection_mask = tf.concat(
-        [tf.zeros((batch_size, 1)), tf.ones((batch_size, 1))], 0)
+        axis=0, values=[tf.zeros((batch_size, 1)), tf.ones((batch_size, 1))])

    # Perform the gradient reversal and be careful with the shape.
    grl = grl_ops.gradient_reversal(samples)
@@ -184,9 +184,9 @@ def dann_loss(source_samples, target_samples, weight, scope=None):
      tag_loss = scope + tag_loss
      tag_accuracy = scope + tag_accuracy

-    tf.contrib.deprecated.scalar_summary(
+    tf.summary.scalar(
        tag_loss, domain_loss, name='domain_loss_summary')
-    tf.contrib.deprecated.scalar_summary(
+    tf.summary.scalar(
        tag_accuracy, domain_accuracy, name='domain_accuracy_summary')

  return domain_loss
@@ -216,7 +216,7 @@ def difference_loss(private_samples, shared_samples, weight=1.0, name=''):
  cost = tf.reduce_mean(tf.square(correlation_matrix)) * weight
  cost = tf.where(cost > 0, cost, 0, name='value')

-  tf.contrib.deprecated.scalar_summary('losses/Difference Loss {}'.format(name),
+  tf.summary.scalar('losses/Difference Loss {}'.format(name),
                                       cost)
  assert_op = tf.Assert(tf.is_finite(cost), [cost])
  with tf.control_dependencies([assert_op]):
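
The summary API left tf.contrib.deprecated in 1.0: scalar_summary(tags, values) becomes tf.summary.scalar(name, tensor). Two hedged caveats about the calls above, since exact behavior varies across 1.x point releases: tf.summary.scalar sanitizes tag names, so tags with spaces like 'MMD Loss' are likely rewritten to underscores with a logged warning; and because the first parameter of tf.summary.scalar is itself called name, the two dann_loss call sites that also pass name='...' as a keyword would raise a TypeError ('got multiple values for argument') when run. A minimal sketch of the migrated call:

import tensorflow as tf

loss_value = tf.constant(0.5)
# Pre-1.0: tf.contrib.deprecated.scalar_summary('MMD Loss', loss_value)
tf.summary.scalar('MMD_Loss', loss_value)  # underscore sidesteps tag sanitization
merged = tf.summary.merge_all()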

+ 1 - 1
domain_adaptation/domain_separation/models_test.py

@@ -115,7 +115,7 @@ class DecoderTest(tf.test.TestCase):
          width=width,
          channels=channels,
          batch_norm_params=batch_norm_params)
-      sess.run(tf.initialize_all_variables())
+      sess.run(tf.global_variables_initializer())
      output_np = sess.run(output)
    self.assertEqual(output_np.shape, (32, height, width, channels))
    self.assertTrue(np.any(output_np))

+ 5 - 5
domain_adaptation/domain_separation/utils.py

@@ -75,15 +75,15 @@ def reshape_feature_maps(features_tensor):
      num_filters)
  num_filters_sqrt = int(num_filters_sqrt)
  conv_summary = tf.unstack(features_tensor, axis=3)
-  conv_one_row = tf.concat(conv_summary[0:num_filters_sqrt], 2)
+  conv_one_row = tf.concat(axis=2, values=conv_summary[0:num_filters_sqrt])
  ind = 1
  conv_final = conv_one_row
  for ind in range(1, num_filters_sqrt):
-    conv_one_row = tf.concat(conv_summary[
-        ind * num_filters_sqrt + 0:ind * num_filters_sqrt + num_filters_sqrt],
-                             2)
+    conv_one_row = tf.concat(axis=2,
+                             values=conv_summary[
+        ind * num_filters_sqrt + 0:ind * num_filters_sqrt + num_filters_sqrt])
    conv_final = tf.concat(
-        [tf.squeeze(conv_final), tf.squeeze(conv_one_row)], 1)
+        axis=1, values=[tf.squeeze(conv_final), tf.squeeze(conv_one_row)])
    conv_final = tf.expand_dims(conv_final, -1)
  return conv_final
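
Besides the keyword-form tf.concat calls, this function tiles num_filters feature maps into a square grid for visualization: unstack along the channel axis, join num_filters_sqrt maps side by side into a row (axis=2), then stack the rows vertically (axis=1). A simplified sketch of the same tiling, assuming a square filter count and omitting the squeeze/expand_dims bookkeeping:

import tensorflow as tf

features = tf.zeros([8, 5, 5, 16])   # batch, height, width, 16 filters
maps = tf.unstack(features, axis=3)  # 16 tensors of shape (8, 5, 5)
rows = [tf.concat(axis=2, values=maps[i * 4:(i + 1) * 4]) for i in range(4)]
grid = tf.concat(axis=1, values=rows)  # shape (8, 20, 20): a 4x4 grid of maps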