Browse Source

update comments

Aymeric Damien 9 years ago
parent
commit
d5e3592ce1
3 changed files with 78 additions and 22 deletions
  1. linear_regression.py (+24 -7)
  2. logistic_regression.py (+33 -9)
  3. nearest_neighbor.py (+21 -6)

+ 24 - 7
linear_regression.py

@@ -1,3 +1,10 @@
+'''
+A linear regression learning algorithm example using TensorFlow library.
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+
 import tensorflow as tf
 import numpy
 import matplotlib.pyplot as plt
@@ -11,29 +18,38 @@ display_step = 50
 # Training Data
 train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1])
 train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3])
+n_samples = train_X.shape[0]
+
+# tf Graph Input
+X = tf.placeholder("float")
+Y = tf.placeholder("float")
 
 # Create Model
+
+# Set model weights
 W = tf.Variable(rng.randn(), name="weight")
 b = tf.Variable(rng.randn(), name="bias")
 
-X = tf.placeholder("float")
-Y = tf.placeholder("float")
-n_samples = train_X.shape[0]
-
-activation = tf.add(tf.mul(X, W), b) #linear
-cost = tf.reduce_sum(tf.pow(activation-Y, 2))/(2*n_samples) #L2
+# Construct a linear model
+activation = tf.add(tf.mul(X, W), b)
 
-optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
+# Minimize the squared errors
+cost = tf.reduce_sum(tf.pow(activation-Y, 2))/(2*n_samples) #L2 loss
+optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) #Gradient descent
 
+# Initializing the variables
 init = tf.initialize_all_variables()
 
+# Launch the graph
 with tf.Session() as sess:
     sess.run(init)
 
+    # Fit all training data
     for epoch in range(training_epochs):
         for (x, y) in zip(train_X, train_Y):
             sess.run(optimizer, feed_dict={X: x, Y: y})
 
+        #Display logs per epoch step
         if epoch % display_step == 0:
             print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(sess.run(cost, feed_dict={X: train_X, Y:train_Y})), \
                 "W=", sess.run(W), "b=", sess.run(b)
@@ -41,6 +57,7 @@ with tf.Session() as sess:
     print "Optimization Finished!"
     print "cost=", sess.run(cost, feed_dict={X: train_X, Y: train_Y}), "W=", sess.run(W), "b=", sess.run(b)
 
+    #Graphic display
     plt.plot(train_X, train_Y, 'ro', label='Original data')
     plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
     plt.legend()
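
Note: this commit targets the TensorFlow 0.x API; tf.mul was later renamed to
tf.multiply, tf.initialize_all_variables became tf.global_variables_initializer,
and the Session/placeholder workflow was removed in TensorFlow 2.x. As a minimal
sketch, the same per-sample gradient-descent fit in TF2 eager style might look
like this (the TF2 translation is an editor's assumption, not part of this commit):

    import numpy as np
    import tensorflow as tf  # assumes TensorFlow 2.x

    train_X = np.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182,
                          7.59, 2.167, 7.042, 10.791, 5.313, 7.997, 5.654,
                          9.27, 3.1], dtype=np.float32)
    train_Y = np.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596,
                          2.53, 1.221, 2.827, 3.465, 1.65, 2.904, 2.42,
                          2.94, 1.3], dtype=np.float32)

    # Model weights, randomly initialized as in the original
    W = tf.Variable(tf.random.normal([]), name="weight")
    b = tf.Variable(tf.random.normal([]), name="bias")
    optimizer = tf.optimizers.SGD(learning_rate=0.01)

    for epoch in range(1000):
        for x, y in zip(train_X, train_Y):
            with tf.GradientTape() as tape:
                pred = W * x + b                # linear model
                cost = tf.square(pred - y) / 2  # per-sample L2 loss
            grads = tape.gradient(cost, [W, b])
            optimizer.apply_gradients(zip(grads, [W, b]))
        # Display logs per epoch step, as in the original
        if epoch % 50 == 0:
            print("Epoch:", epoch + 1, "W=", W.numpy(), "b=", b.numpy())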

+ 33 - 9
logistic_regression.py

@@ -1,3 +1,11 @@
+'''
+A logistic regression learning algorithm example using TensorFlow library.
+This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+
 # Import MINST data
 import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
@@ -10,33 +18,49 @@ training_epochs = 25
 batch_size = 100
 display_step = 1
 
+# tf Graph Input
+x = tf.placeholder("float", [None, 784]) # mnist data image of shape 28*28=784
+y = tf.placeholder("float", [None, 10]) # 0-9 digits recognition => 10 classes
+
 # Create model
-x = tf.placeholder("float", [None, 784])
-y = tf.placeholder("float", [None,10])
-W = tf.Variable(tf.zeros([784,10]))
+
+# Set model weights
+W = tf.Variable(tf.zeros([784, 10]))
 b = tf.Variable(tf.zeros([10]))
 
-activation = tf.nn.softmax(tf.matmul(x,W) + b) #softmax
-cost = -tf.reduce_sum(y*tf.log(activation)) #cross entropy
-optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
+# Construct model
+activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
 
-# Train
+# Minimize error using cross entropy
+cost = -tf.reduce_sum(y*tf.log(activation)) # Cross entropy
+optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # Gradient Descent
+
+# Initializing the variables
 init = tf.initialize_all_variables()
+
+# Launch the graph
 with tf.Session() as sess:
     sess.run(init)
+
+    # Training cycle
     for epoch in range(training_epochs):
         avg_cost = 0.
         total_batch = int(mnist.train.num_examples/batch_size)
+        # Loop over all batches
         for i in range(total_batch):
             batch_xs, batch_ys = mnist.train.next_batch(batch_size)
+            # Fit training using batch data
             sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
+            # Compute average loss
             avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
+        # Display logs per epoch step
         if epoch % display_step == 0:
             print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
 
     print "Optimization Finished!"
 
-    # Test trained model
-    correct_prediction = tf.equal(tf.argmax(activation,1), tf.argmax(y,1))
+    # Test model
+    correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
+    # Calculate accuracy
     accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
     print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})

+ 21 - 6
nearest_neighbor.py

@@ -1,6 +1,11 @@
 '''
-Nearest Neighbor classification on MNIST with TensorFlow
+A nearest neighbor learning algorithm example using TensorFlow library.
+This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
 '''
+
 import numpy as np
 import tensorflow as tf
 
@@ -8,30 +13,40 @@ import tensorflow as tf
 import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 
-#In this example, we limit mnist data
+# In this example, we limit mnist data
 Xtr, Ytr = mnist.train.next_batch(5000) #5000 for training (nn candidates)
 Xte, Yte = mnist.test.next_batch(200) #200 for testing
 
-#Reshape images to 1D
+# Reshape images to 1D
 Xtr = np.reshape(Xtr, newshape=(-1, 28*28))
 Xte = np.reshape(Xte, newshape=(-1, 28*28))
 
+# tf Graph Input
 xtr = tf.placeholder("float", [None, 784])
 xte = tf.placeholder("float", [784])
 
-#Calculation of L1 Distance
+# Nearest Neighbor calculation using L1 Distance
+# Calculate L1 Distance
 distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.neg(xte))), reduction_indices=1)
-#Predict: Get min distance index (Nearest neighbor)
+# Predict: Get min distance index (Nearest neighbor)
 pred = tf.arg_min(distance, 0)
 
 accuracy = 0.
+
+# Initializing the variables
 init = tf.initialize_all_variables()
+
+# Launch the graph
 with tf.Session() as sess:
     sess.run(init)
+
+    # loop over test data
     for i in range(len(Xte)):
+        # Get nearest neighbor
         nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i,:]})
-        #Get nn class label and compare it to its true label
+        # Get nearest neighbor class label and compare it to its true label
         print "Test", i, "Prediction:", np.argmax(Ytr[nn_index]), "True Class:", np.argmax(Yte[i])
+        # Calculate accuracy
         if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):
             accuracy += 1./len(Xte)
     print "Done!"