
Merge pull request #924 from h4ck3rm1k3/master

untie
Neal Wu, 8 years ago
Commit 5d758ef0f5

+ 1 - 1
differential_privacy/dp_sgd/dp_optimizer/dp_pca.py

@@ -27,7 +27,7 @@ def ComputeDPPrincipalProjection(data, projection_dims,
  Args:
    data: the input data, each row is a data vector.
    projection_dims: the projection dimension.
-    sanitizer: the sanitizer used for acheiving privacy.
+    sanitizer: the sanitizer used for achieving privacy.
    eps_delta: (eps, delta) pair.
    sigma: if not None, use noise sigma; otherwise compute it using
      eps_delta pair.

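For orientation, the docstring above lists the function's parameters; a hypothetical call might look like the following (the sanitizer object, the eps/delta values, and the result name are illustrative, not taken from the repo):

    # data: 2-D array, one data vector per row.
    projection = ComputeDPPrincipalProjection(
        data,
        projection_dims=10,
        sanitizer=sanitizer,   # privacy sanitizer instance (assumed to exist)
        eps_delta=(1.0, 1e-5),
        sigma=None)            # None: derive the noise sigma from eps_delta
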
+ 1 - 1
differential_privacy/multiple_teachers/analysis.py

@@ -287,7 +287,7 @@ def main(unused_argv):
  if min(eps_list_nm) == eps_list_nm[-1]:
    print "Warning: May not have used enough values of l"

-  # Data indpendent bound, as mechanism is
+  # Data independent bound, as mechanism is
  # 2*noise_eps DP.
  data_ind_log_mgf = np.array([0.0 for _ in l_list])
  data_ind_log_mgf += num_examples * np.array(

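As a side note on the comment being fixed: because the underlying mechanism is 2*noise_eps differentially private, its log moment generating function admits a bound that does not depend on the data. A minimal sketch of the generic pure-DP version of that bound (the script may use a sharper per-query bound):

    \[
    |X| \le \varepsilon \quad\Longrightarrow\quad
    \log \mathbb{E}\!\left[e^{\lambda X}\right] \le \lambda \varepsilon,
    \qquad \varepsilon = 2 \cdot \texttt{noise\_eps},
    \]

so each of the num_examples queries contributes at most lambda * 2 * noise_eps to data_ind_log_mgf, regardless of the input data.
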
+ 2 - 2
differential_privacy/multiple_teachers/deep_cnn.py

@@ -84,7 +84,7 @@ def inference(images, dropout=False):
  """Build the CNN model.
  Args:
    images: Images returned from distorted_inputs() or inputs().
-    dropout: Boolean controling whether to use dropout or not
+    dropout: Boolean controlling whether to use dropout or not
  Returns:
    Logits
  """
@@ -194,7 +194,7 @@ def inference_deeper(images, dropout=False):
  """Build a deeper CNN model.
  Args:
    images: Images returned from distorted_inputs() or inputs().
-    dropout: Boolean controling whether to use dropout or not
+    dropout: Boolean controlling whether to use dropout or not
  Returns:
    Logits
  """

+ 1 - 1
differential_privacy/privacy_accountant/tf/accountant.py

@@ -152,7 +152,7 @@ class MomentsAccountant(object):
  We further assume that at each step, the mechanism operates on a random
  sample with sampling probability q = batch_size / total_examples. Then
    E[exp(L X)] = E[(Pr[M(D)==x / Pr[M(D')==x])^L]
-  By distinguishign two cases of wether D < D' or D' < D, we have
+  By distinguishing two cases of whether D < D' or D' < D, we have
  that
    E[exp(L X)] <= max (I1, I2)
  where

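For readability, the moment bound quoted in this docstring can be written (with lambda in place of L, and brackets balanced) as

    \[
    \mathbb{E}\!\left[e^{\lambda X}\right]
    = \mathbb{E}_{x \sim M(D)}\!\left[\left(\frac{\Pr[M(D) = x]}{\Pr[M(D') = x]}\right)^{\lambda}\right]
    \le \max(I_1, I_2),
    \]

where I_1 and I_2 are the two integrals obtained from the cases the docstring writes as D < D' and D' < D, i.e. depending on which of the two adjacent databases contains the extra example.
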
+ 1 - 1
im2txt/im2txt/data/build_mscoco_data.py

@@ -424,7 +424,7 @@ def _load_and_process_metadata(captions_file, image_dir):
        (len(id_to_filename), captions_file))

  # Process the captions and combine the data into a list of ImageMetadata.
-  print("Proccessing captions.")
+  print("Processing captions.")
  image_metadata = []
  num_captions = 0
  for image_id, base_filename in id_to_filename:

+ 1 - 1
inception/inception/inception_distributed_train.py

@@ -89,7 +89,7 @@ RMSPROP_EPSILON = 1.0              # Epsilon term for RMSProp.

def train(target, dataset, cluster_spec):
  """Train Inception on a dataset for a number of steps."""
-  # Number of workers and parameter servers are infered from the workers and ps
+  # Number of workers and parameter servers are inferred from the workers and ps
  # hosts string.
  num_workers = len(cluster_spec.as_dict()['worker'])
  num_parameter_servers = len(cluster_spec.as_dict()['ps'])

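To make the corrected comment concrete, here is a minimal TF 1.x sketch of how those two counts fall out of a cluster spec (the host names below are made up):

    import tensorflow as tf

    # Hypothetical cluster: two parameter servers, three workers.
    cluster_spec = tf.train.ClusterSpec({
        'ps': ['ps0.example.com:2222', 'ps1.example.com:2222'],
        'worker': ['worker0.example.com:2222',
                   'worker1.example.com:2222',
                   'worker2.example.com:2222'],
    })

    # as_dict() maps each job name to its list of task addresses.
    num_workers = len(cluster_spec.as_dict()['worker'])        # 3
    num_parameter_servers = len(cluster_spec.as_dict()['ps'])  # 2
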
+ 1 - 1
inception/inception/inception_eval.py

@@ -77,7 +77,7 @@ def _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op):
      #   /my-favorite-path/imagenet_train/model.ckpt-0,
      # extract global_step from it.
      global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
-      print('Succesfully loaded model from %s at step=%s.' %
+      print('Successfully loaded model from %s at step=%s.' %
            (ckpt.model_checkpoint_path, global_step))
    else:
      print('No checkpoint file found')

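As a quick worked example of the path parsing shown above (using the sample path from the comment itself):

    path = '/my-favorite-path/imagenet_train/model.ckpt-0'
    # split('/')[-1] -> 'model.ckpt-0'; split('-')[-1] -> '0'
    global_step = path.split('/')[-1].split('-')[-1]
    print('Successfully loaded model from %s at step=%s.' % (path, global_step))
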
+ 1 - 1
inception/inception/inception_train.py

@@ -290,7 +290,7 @@ def train(dataset):
    variable_averages = tf.train.ExponentialMovingAverage(
        inception.MOVING_AVERAGE_DECAY, global_step)

-    # Another possiblility is to use tf.slim.get_variables().
+    # Another possibility is to use tf.slim.get_variables().
    variables_to_average = (tf.trainable_variables() +
                            tf.moving_average_variables())
    variables_averages_op = variable_averages.apply(variables_to_average)

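For context on the block above, the moving averages tracked during training are typically restored in place of the raw weights at eval time. A minimal TF 1.x sketch, with an illustrative decay value and a locally defined global_step:

    import tensorflow as tf

    global_step = tf.Variable(0, trainable=False, name='global_step')

    # Training side: keep shadow (moving-average) copies of the variables.
    variable_averages = tf.train.ExponentialMovingAverage(0.9999, global_step)
    w = tf.Variable(tf.zeros([10]), name='w')
    variables_averages_op = variable_averages.apply([w])

    # Eval side: a Saver built from variables_to_restore() loads the shadow
    # values into the original variable names.
    saver = tf.train.Saver(variable_averages.variables_to_restore())
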
+ 1 - 1
inception/inception/slim/ops.py

@@ -15,7 +15,7 @@
"""Contains convenience wrappers for typical Neural Network TensorFlow layers.

   Additionally it maintains a collection with update_ops that need to be
-   updated after the ops have been computed, for exmaple to update moving means
+   updated after the ops have been computed, for example to update moving means
   and moving variances of batch_norm.

   Ops that have different behavior during training or eval have an is_training

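As a rough illustration of the update_ops pattern this docstring describes, here is a generic TF 1.x sketch (it uses tf.layers and the standard UPDATE_OPS graph collection for brevity; the slim wrappers in this file maintain their own collection, which may be keyed differently):

    import tensorflow as tf

    x = tf.placeholder(tf.float32, [None, 8])
    net = tf.layers.dense(x, 4)
    # batch_normalization registers its moving mean/variance updates in the
    # UPDATE_OPS collection instead of running them automatically.
    net = tf.layers.batch_normalization(net, training=True)
    total_loss = tf.reduce_mean(tf.square(net))

    # Group the pending updates with the training step so both run together.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tf.train.GradientDescentOptimizer(0.1).minimize(total_loss)
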
+ 1 - 1
namignizer/data_utils.py

@@ -58,7 +58,7 @@ def _letter_to_number(letter):
def namignizer_iterator(names, counts, batch_size, num_steps, epoch_size):
    """Takes a list of names and counts like those output from read_names, and
    makes an iterator yielding a batch_size by num_steps array of random names
-    separated by an end of name token. The names are choosen randomly according
+    separated by an end of name token. The names are chosen randomly according
    to their counts. The batch may end mid-name

    Args:

+ 1 - 1
namignizer/names.py

@@ -14,7 +14,7 @@
"""A library showing off sequence recognition and generation with the simple
example of names.

-We use recurrent neural nets to learn complex functions able to recogize and
+We use recurrent neural nets to learn complex functions able to recognize and
generate sequences of a given form. This can be used for natural language
syntax recognition, dynamically generating maps or puzzles and of course
baby name generation.

+ 1 - 1
neural_programmer/data_utils.py

@@ -223,7 +223,7 @@ def list_join(a):


def group_by_max(table, number):
-  #computes the most frequently occuring entry in a column
+  #computes the most frequently occurring entry in a column
  answer = []
  for i in range(len(table)):
    temp = []

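The fixed comment says group_by_max finds the most frequently occurring entry in a column; a small stand-alone illustration of that idea (not the repo's implementation):

    from collections import Counter

    def most_frequent_per_column(table):
        # table: list of rows, each row a list of column values.
        return [Counter(col).most_common(1)[0][0] for col in zip(*table)]

    # Column 0 -> 1, column 1 -> 'a'
    print(most_frequent_per_column([[1, 'a'], [1, 'b'], [2, 'a']]))
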
+ 1 - 1
neural_programmer/model.py

@@ -135,7 +135,7 @@ class Graph():
    #Attention on quetsion to decide the question number to passed to comparison ops
    def compute_ans(op_embedding, comparison):
      op_embedding = tf.expand_dims(op_embedding, 0)
-      #dot product of operation embedding with hidden state to the left of the number occurence
+      #dot product of operation embedding with hidden state to the left of the number occurrence
      first = tf.transpose(
          tf.matmul(op_embedding,
                    tf.transpose(

+ 1 - 1
slim/deployment/model_deploy.py

@@ -304,7 +304,7 @@ def optimize_clones(clones, optimizer,
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
-  # Sum the gradients accross clones.
+  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars


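For intuition about the corrected comment, "sum the gradients across clones" means combining each variable's per-clone gradients into a single (gradient, variable) pair. A hypothetical sketch of that step (the real _sum_clones_gradients in model_deploy.py may differ in its details):

    import tensorflow as tf

    def sum_clone_gradients(clone_grads):
        # clone_grads: one [(grad, var), ...] list per clone, with variables
        # appearing in the same order for every clone.
        summed = []
        for grads_and_var in zip(*clone_grads):
            grads = [g for g, _ in grads_and_var if g is not None]
            var = grads_and_var[0][1]
            summed.append((tf.add_n(grads) if grads else None, var))
        return summed
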
+ 1 - 1
slim/nets/inception_resnet_v2.py

@@ -191,7 +191,7 @@ def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
        end_points['Mixed_6a'] = net
        net = slim.repeat(net, 20, block17, scale=0.10)

-        # Auxillary tower
+        # Auxiliary tower
        with tf.variable_scope('AuxLogits'):
          aux = slim.avg_pool2d(net, 5, stride=3, padding='VALID',
                                scope='Conv2d_1a_3x3')

+ 1 - 1
slim/nets/inception_v4.py

@@ -269,7 +269,7 @@ def inception_v4(inputs, num_classes=1001, is_training=True,
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.
-    create_aux_logits: Whether to include the auxilliary logits.
+    create_aux_logits: Whether to include the auxiliary logits.

  Returns:
    logits: the logits outputs of the model.