Merge pull request #63 from vra/master

Fix format and some typos
Aymeric Damien 8 years ago
Parent
Current commit
cbebfd6ad2

+ 1 - 1
examples/1_Introduction/helloworld.py

@@ -9,7 +9,7 @@ from __future__ import print_function
 
 import tensorflow as tf
 
-#Simple hello world using TensorFlow
+# Simple hello world using TensorFlow
 
 # Create a Constant op
 # The op is added as a node to the default graph.
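The hunk's trailing context stops before the op itself; for reference, a minimal sketch of the Constant op these comments describe (reconstructed here, not quoted from the commit):

    import tensorflow as tf

    # The Constant op is added as a node to the default graph
    hello = tf.constant('Hello, TensorFlow!')

    # Launch a session and run the op to fetch its value
    with tf.Session() as sess:
        print(sess.run(hello))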

+ 2 - 2
examples/2_BasicModels/linear_regression.py

@@ -52,7 +52,7 @@ with tf.Session() as sess:
         for (x, y) in zip(train_X, train_Y):
             sess.run(optimizer, feed_dict={X: x, Y: y})
 
-        #Display logs per epoch step
+        # Display logs per epoch step
         if (epoch+1) % display_step == 0:
             c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})
             print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
@@ -62,7 +62,7 @@ with tf.Session() as sess:
     training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
     print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
 
-    #Graphic display
+    # Graphic display
     plt.plot(train_X, train_Y, 'ro', label='Original data')
     plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
     plt.legend()

+ 1 - 1
examples/2_BasicModels/logistic_regression.py

@@ -11,7 +11,7 @@ from __future__ import print_function
 
 import tensorflow as tf
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
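The one_hot=True flag in this context line makes read_data_sets return each label as a 10-way one-hot vector instead of a digit index; a minimal illustration in plain numpy:

    import numpy as np

    label = 3
    one_hot = np.eye(10)[label]  # [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]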
 
 

+ 1 - 1
examples/2_BasicModels/nearest_neighbor.py

@@ -12,7 +12,7 @@ from __future__ import print_function
 import numpy as np
 import tensorflow as tf
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 

+ 1 - 1
examples/3_NeuralNetworks/autoencoder.py

@@ -15,7 +15,7 @@ import tensorflow as tf
 import numpy as np
 import matplotlib.pyplot as plt
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 

+ 4 - 4
examples/3_NeuralNetworks/bidirectional_rnn.py

@@ -1,5 +1,5 @@
 '''
-A Bidirectional Reccurent Neural Network (LSTM) implementation example using TensorFlow library.
+A Bidirectional Recurrent Neural Network (LSTM) implementation example using TensorFlow library.
 This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
 Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
 
@@ -13,12 +13,12 @@ import tensorflow as tf
 from tensorflow.python.ops import rnn, rnn_cell
 import numpy as np
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 
 '''
-To classify images using a bidirectional reccurent neural network, we consider
+To classify images using a bidirectional recurrent neural network, we consider
 every image row as a sequence of pixels. Because MNIST image shape is 28*28px,
 we will then handle 28 sequences of 28 steps for every sample.
 '''
@@ -41,7 +41,7 @@ y = tf.placeholder("float", [None, n_classes])
 
 # Define weights
 weights = {
-    # Hidden layer weights => 2*n_hidden because of foward + backward cells
+    # Hidden layer weights => 2*n_hidden because of forward + backward cells
     'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))
 }
 biases = {
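The corrected comment's 2*n_hidden reflects that each output of a bidirectional RNN is the forward and backward cell outputs concatenated; a minimal sketch, assuming the deprecated rnn/rnn_cell API imported above and x already split into a list of time steps:

    lstm_fw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)  # forward cell
    lstm_bw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)  # backward cell

    # each output has size 2*n_hidden: [forward, backward] concatenated
    outputs, _, _ = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                          dtype=tf.float32)
    pred = tf.matmul(outputs[-1], weights['out']) + biases['out']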

+ 1 - 1
examples/3_NeuralNetworks/convolutional_network.py

@@ -11,7 +11,7 @@ from __future__ import print_function
 
 import tensorflow as tf
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 

+ 3 - 3
examples/3_NeuralNetworks/dynamic_rnn.py

@@ -1,5 +1,5 @@
 '''
-A Dynamic Reccurent Neural Network (LSTM) implementation example using
+A Dynamic Recurrent Neural Network (LSTM) implementation example using
 TensorFlow library. This example is using a toy dataset to classify linear
 sequences. The generated sequences have variable length.
 
@@ -26,7 +26,7 @@ class ToySequenceData(object):
 
     NOTICE:
     We have to pad each sequence to reach 'max_seq_len' for TensorFlow
-    consistency (we cannot feed a numpy array with unconsistent
+    consistency (we cannot feed a numpy array with inconsistent
     dimensions). The dynamic calculation will then be perform thanks to
     'seqlen' attribute that records every actual sequence length.
     """
@@ -130,7 +130,7 @@ def dynamicRNN(x, seqlen, weights, biases):
                                 sequence_length=seqlen)
 
     # When performing dynamic calculation, we must retrieve the last
-    # dynamically computed output, i.e, if a sequence length is 10, we need
+    # dynamically computed output, i.e., if a sequence length is 10, we need
     # to retrieve the 10th output.
     # However TensorFlow doesn't support advanced indexing yet, so we build
     # a custom op that for each sample in batch size, get its length and
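The "custom op" mentioned here can be built from reshape + gather: flatten the batch of outputs and pick, for each sample, the row of its last valid step. A minimal sketch, assuming outputs of shape [batch_size, max_seq_len, n_hidden] and a seqlen vector of true lengths:

    import tensorflow as tf

    max_seq_len, n_hidden = 20, 64
    outputs = tf.placeholder(tf.float32, [None, max_seq_len, n_hidden])
    seqlen = tf.placeholder(tf.int32, [None])

    # position of each sample's last valid step in the flattened outputs
    batch_size = tf.shape(outputs)[0]
    index = tf.range(0, batch_size) * max_seq_len + (seqlen - 1)

    # [batch_size*max_seq_len, n_hidden] -> one row per sample
    last_outputs = tf.gather(tf.reshape(outputs, [-1, n_hidden]), index)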

+ 1 - 1
examples/3_NeuralNetworks/multilayer_perceptron.py

@@ -9,7 +9,7 @@ Project: https://github.com/aymericdamien/TensorFlow-Examples/
 
 from __future__ import print_function
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 

+ 2 - 2
examples/3_NeuralNetworks/recurrent_network.py

@@ -1,5 +1,5 @@
 '''
-A Reccurent Neural Network (LSTM) implementation example using TensorFlow library.
+A Recurrent Neural Network (LSTM) implementation example using TensorFlow library.
 This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
 Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
 
@@ -18,7 +18,7 @@ from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 
 '''
-To classify images using a reccurent neural network, we consider every image
+To classify images using a recurrent neural network, we consider every image
 row as a sequence of pixels. Because MNIST image shape is 28*28px, we will then
 handle 28 sequences of 28 steps for every sample.
 '''
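Treating each image row as one time step is just a reshape of the flat 784-pixel batch into 28 steps of 28 features; a minimal sketch with a random stand-in batch:

    import numpy as np

    n_steps, n_input = 28, 28
    batch_x = np.random.rand(128, 784).astype('float32')  # stand-in MNIST batch
    batch_x = batch_x.reshape((-1, n_steps, n_input))      # [batch, 28 steps, 28 pixels]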

+ 1 - 1
examples/4_Utils/save_restore_model.py

@@ -9,7 +9,7 @@ Project: https://github.com/aymericdamien/TensorFlow-Examples/
 
 from __future__ import print_function
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 

+ 2 - 2
examples/4_Utils/tensorboard_advanced.py

@@ -11,7 +11,7 @@ from __future__ import print_function
 
 import tensorflow as tf
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 
@@ -64,7 +64,7 @@ biases = {
 }
 
 # Encapsulating all ops into scopes, making Tensorboard's Graph
-# visualization more convenient
+# Visualization more convenient
 with tf.name_scope('Model'):
     # Build model
     pred = multilayer_perceptron(x, weights, biases)
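Wrapping ops in tf.name_scope is what makes TensorBoard group them into collapsible graph nodes; a minimal sketch of the pattern (the log directory and loss op are assumptions, and summary-API names shifted across early TF versions):

    with tf.name_scope('Loss'):
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))

    # write the scoped graph so TensorBoard can render it
    writer = tf.summary.FileWriter('/tmp/tensorflow_logs',
                                   graph=tf.get_default_graph())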

+ 1 - 1
examples/4_Utils/tensorboard_basic.py

@@ -11,7 +11,7 @@ from __future__ import print_function
 
 import tensorflow as tf
 
-# Import MINST data
+# Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 

+ 11 - 11
examples/5_MultiGPU/multigpu_basics.py

@@ -18,10 +18,10 @@ import numpy as np
 import tensorflow as tf
 import datetime
 
-#Processing Units logs
+# Processing Units logs
 log_device_placement = True
 
-#num of multiplications to perform
+# Num of multiplications to perform
 n = 10
 
 '''
@@ -30,11 +30,11 @@ Results on 8 cores with 2 GTX-980:
  * Single GPU computation time: 0:00:11.277449
  * Multi GPU computation time: 0:00:07.131701
 '''
-#Create random large matrix
+# Create random large matrix
 A = np.random.rand(1e4, 1e4).astype('float32')
 B = np.random.rand(1e4, 1e4).astype('float32')
 
-# Creates a graph to store results
+# Create a graph to store results
 c1 = []
 c2 = []
 
@@ -50,7 +50,7 @@ Single GPU computing
 with tf.device('/gpu:0'):
     a = tf.constant(A)
     b = tf.constant(B)
-    #compute A^n and B^n and store results in c1
+    # Compute A^n and B^n and store results in c1
     c1.append(matpow(a, n))
     c1.append(matpow(b, n))
 
@@ -59,7 +59,7 @@ with tf.device('/cpu:0'):
 
 t1_1 = datetime.datetime.now()
 with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
-    # Runs the op.
+    # Run the op.
     sess.run(sum)
 t2_1 = datetime.datetime.now()
 
@@ -67,15 +67,15 @@ t2_1 = datetime.datetime.now()
 '''
 Multi GPU computing
 '''
-#GPU:0 computes A^n
+# GPU:0 computes A^n
 with tf.device('/gpu:0'):
-    #compute A^n and store result in c2
+    # Compute A^n and store result in c2
     a = tf.constant(A)
     c2.append(matpow(a, n))
 
-#GPU:1 computes B^n
+# GPU:1 computes B^n
 with tf.device('/gpu:1'):
-    #compute B^n and store result in c2
+    # Compute B^n and store result in c2
     b = tf.constant(B)
     c2.append(matpow(b, n))
 
@@ -84,7 +84,7 @@ with tf.device('/cpu:0'):
 
 t1_2 = datetime.datetime.now()
 with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
-    # Runs the op.
+    # Run the op.
     sess.run(sum)
 t2_2 = datetime.datetime.now()
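The hunks above call matpow() and sess.run(sum) without showing their definitions; a minimal sketch consistent with that usage (reconstructed, not quoted from the commit):

    def matpow(M, n):
        # naive recursive matrix power: M^n via repeated tf.matmul
        if n < 1:
            return M
        return tf.matmul(M, matpow(M, n - 1))

    with tf.device('/cpu:0'):
        # addition of the partial results is placed on the CPU
        sum = tf.add_n(c1)  # note: shadows Python's built-in sum(), as in the script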