Browse source

Examples major update

aymericdamien 9 years ago
Parent commit 998ba46963
44 changed files with 2662 additions and 3615 deletions
  1. +17 -18    README.md
  2. +0 -0      examples/1_Introduction/basic_operations.py
  3. +2 -1      examples/1 - Introduction/helloworld.py
  4. +0 -144    examples/2 - Basic Classifiers/input_data.py
  5. +27 -20    examples/2 - Basic Classifiers/linear_regression.py
  6. +18 -17    examples/2 - Basic Classifiers/logistic_regression.py
  7. +7 -10     examples/2 - Basic Classifiers/nearest_neighbor.py
  8. +0 -133    examples/3 - Neural Networks/alexnet.py
  9. +0 -163    examples/3 - Neural Networks/bidirectional_rnn.py
  10. +0 -112   examples/3 - Neural Networks/convolutional_network.py
  11. +0 -144   examples/3 - Neural Networks/input_data.py
  12. +21 -39   examples/3 - Neural Networks/autoencoder.py
  13. +116 -0   examples/3_NeuralNetworks/bidirectional_rnn.py
  14. +132 -0   examples/3_NeuralNetworks/convolutional_network.py
  15. +25 -16   examples/3 - Neural Networks/multilayer_perceptron.py
  16. +39 -40   examples/3 - Neural Networks/recurrent_network.py
  17. +137 -0   examples/4_Utils/save_restore_model.py
  18. +1 -0     examples/4_Utils/tensorboard_advanced.py
  19. +93 -0    examples/4_Utils/tensorboard_basic.py
  20. +0 -78    examples/5 - User Interface/graph_visualization.py
  21. +0 -86    examples/5 - User Interface/loss_visualization.py
  22. +0 -0     examples/5_MultiGPU/multigpu_basics.py
  23. +2 -2     notebooks/1 - Introduction/basic_operations.ipynb
  24. +2 -2     notebooks/1 - Introduction/helloworld.ipynb
  25. +0 -144   notebooks/2 - Basic Classifiers/input_data.py
  26. +0 -253   notebooks/2 - Basic Classifiers/linear_regression.ipynb
  27. +202 -0   notebooks/2_BasicModels/linear_regression.ipynb
  28. +55 -111  notebooks/2 - Basic Classifiers/logistic_regression.ipynb
  29. +20 -78   notebooks/2 - Basic Classifiers/nearest_neighbor.ipynb
  30. +0 -348   notebooks/3 - Neural Networks/alexnet.ipynb
  31. +0 -350   notebooks/3 - Neural Networks/bidirectional_rnn.ipynb
  32. +0 -324   notebooks/3 - Neural Networks/convolutional_network.ipynb
  33. +0 -144   notebooks/3 - Neural Networks/input_data.py
  34. +0 -299   notebooks/3 - Neural Networks/reccurent_network.ipynb
  35. +226 -0   notebooks/3_Neural Networks/autoencoder.ipynb
  36. +293 -0   notebooks/3_Neural Networks/bidirectional_rnn.ipynb
  37. +387 -0   notebooks/3_Neural Networks/convolutional_network.ipynb
  38. +66 -115  notebooks/3 - Neural Networks/multilayer_perceptron.ipynb
  39. +289 -0   notebooks/3_Neural Networks/recurrent_network.ipynb
  40. +271 -0   notebooks/4_Utils/save_restore_model.ipynb
  41. +212 -0   notebooks/4_Utils/tensorboard_basic.ipynb
  42. +0 -226   notebooks/5 - User Interface/graph_visualization.ipynb
  43. +0 -196   notebooks/5 - User Interface/loss_visualization.ipynb
  44. +2 -2     notebooks/4 - Multi GPU/multigpu_basics.ipynb

+ 17 - 18
README.md

@@ -4,31 +4,30 @@ Code examples for some popular machine learning algorithms, using TensorFlow lib
 ## Tutorial index
 
 #### 1 - Introduction
-- Hello World ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/1%20-%20Introduction/helloworld.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/1%20-%20Introduction/helloworld.py))
-- Basic Operations ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/1%20-%20Introduction/basic_operations.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/1%20-%20Introduction/basic_operations.py))
+- Hello World ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/1_Introduction/helloworld.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/1_Introduction/helloworld.py))
+- Basic Operations ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/1_Introduction/basic_operations.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/1_Introduction/basic_operations.py))
 
 #### 2 - Basic Classifiers
-- Nearest Neighbor ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2%20-%20Basic%20Classifiers/nearest_neighbor.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2%20-%20Basic%20Classifiers/nearest_neighbor.py))
-- Linear Regression ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2%20-%20Basic%20Classifiers/linear_regression.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2%20-%20Basic%20Classifiers/linear_regression.py))
-- Logistic Regression ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2%20-%20Basic%20Classifiers/logistic_regression.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2%20-%20Basic%20Classifiers/logistic_regression.py))
+- Nearest Neighbor ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2_BasicModels/nearest_neighbor.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/nearest_neighbor.py))
+- Linear Regression ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2_BasicModels/linear_regression.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/linear_regression.py))
+- Logistic Regression ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2_BasicModels/logistic_regression.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/logistic_regression.py))
 
 #### 3 - Neural Networks
-- Multilayer Perceptron ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3%20-%20Neural%20Networks/multilayer_perceptron.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3%20-%20Neural%20Networks/multilayer_perceptron.py))
-- Convolutional Neural Network ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3%20-%20Neural%20Networks/convolutional_network.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3%20-%20Neural%20Networks/convolutional_network.py))
-- AlexNet ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3%20-%20Neural%20Networks/alexnet.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3%20-%20Neural%20Networks/alexnet.py))
-- Recurrent Neural Network (LSTM) ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3%20-%20Neural%20Networks/reccurent_network.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3%20-%20Neural%20Networks/recurrent_network.py))
-- Bidirectional Recurrent Neural Network (LSTM) ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3%20-%20Neural%20Networks/bidirectional_rnn.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3%20-%20Neural%20Networks/bidirectional_rnn.py))
-- AutoEncoder ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3%20-%20Neural%20Networks/autoencoder.py))
+- Multilayer Perceptron ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/multilayer_perceptron.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/multilayer_perceptron.py))
+- Convolutional Neural Network ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/convolutional_network.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/convolutional_network.py))
+- AlexNet ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/alexnet.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/alexnet.py))
+- Recurrent Neural Network (LSTM) ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/reccurent_network.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/recurrent_network.py))
+- Bidirectional Recurrent Neural Network (LSTM) ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/bidirectional_rnn.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/bidirectional_rnn.py))
+- AutoEncoder ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/autoencoder.ipynb)) / ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/autoencoder.py))
 
-#### 4 - Multi GPU
-- Basic Operations on multi-GPU ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/4%20-%20Multi%20GPU/multigpu_basics.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/4%20-%20Multi%20GPU/multigpu_basics.py))
+#### 4 - Utils
+- Save and Restore a model ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/4_Utils/save_restore_model.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/4_Utils/save_restore_model.py))
+- Tensorboard - Graph and loss visualization ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/4_Utils/tensorboard_basic.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/4_Utils/tensorboard_basic.py))
 
-#### 5 - User Interface (Tensorboard)
-- Graph Visualization ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/5%20-%20User%20Interface/graph_visualization.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/5%20-%20User%20Interface/graph_visualization.py))
-- Loss Visualization ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/5%20-%20User%20Interface/loss_visualization.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/5%20-%20User%20Interface/loss_visualization.py))
+#### 5 - Multi GPU
+- Basic Operations on multi-GPU ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/5_MultiGPU/multigpu_basics.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/5_MultiGPU/multigpu_basics.py))
 
-
-## More Examples
+## Going further - More Examples
 The following examples are coming from [TFLearn](https://github.com/tflearn/tflearn), a library that provides a simplified interface for TensorFlow. You can have a look, there are many [examples](https://github.com/tflearn/tflearn/tree/master/examples) and [pre-built operations and layers](http://tflearn.org/doc_index/#api).
 
 #### Basics

examples/1 - Introduction/basic_operations.py → examples/1_Introduction/basic_operations.py


+ 2 - 1
examples/1 - Introduction/helloworld.py

@@ -19,4 +19,5 @@ hello = tf.constant('Hello, TensorFlow!')
 # Start tf session
 sess = tf.Session()
 
-print sess.run(hello)
+# Run the op
+print sess.run(hello)
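
For orientation, the executable core of the file after this change, reconstructed from the hunk context above (TensorFlow 0.x / Python 2, matching the rest of the repository at this point):

    import tensorflow as tf

    # Create a constant op; it is added as a node to the default graph
    hello = tf.constant('Hello, TensorFlow!')

    # Start tf session
    sess = tf.Session()

    # Run the op
    print sess.run(hello)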

+ 0 - 144
examples/2 - Basic Classifiers/input_data.py

@@ -1,144 +0,0 @@
-"""Functions for downloading and reading MNIST data."""
-from __future__ import print_function
-import gzip
-import os
-import urllib
-import numpy
-SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
-def maybe_download(filename, work_directory):
-  """Download the data from Yann's website, unless it's already here."""
-  if not os.path.exists(work_directory):
-    os.mkdir(work_directory)
-  filepath = os.path.join(work_directory, filename)
-  if not os.path.exists(filepath):
-    filepath, _ = urllib.urlretrieve(SOURCE_URL + filename, filepath)
-    statinfo = os.stat(filepath)
-    print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
-  return filepath
-def _read32(bytestream):
-  dt = numpy.dtype(numpy.uint32).newbyteorder('>')
-  return numpy.frombuffer(bytestream.read(4), dtype=dt)
-def extract_images(filename):
-  """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
-  print('Extracting', filename)
-  with gzip.open(filename) as bytestream:
-    magic = _read32(bytestream)
-    if magic != 2051:
-      raise ValueError(
-          'Invalid magic number %d in MNIST image file: %s' %
-          (magic, filename))
-    num_images = _read32(bytestream)
-    rows = _read32(bytestream)
-    cols = _read32(bytestream)
-    buf = bytestream.read(rows * cols * num_images)
-    data = numpy.frombuffer(buf, dtype=numpy.uint8)
-    data = data.reshape(num_images, rows, cols, 1)
-    return data
-def dense_to_one_hot(labels_dense, num_classes=10):
-  """Convert class labels from scalars to one-hot vectors."""
-  num_labels = labels_dense.shape[0]
-  index_offset = numpy.arange(num_labels) * num_classes
-  labels_one_hot = numpy.zeros((num_labels, num_classes))
-  labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
-  return labels_one_hot
-def extract_labels(filename, one_hot=False):
-  """Extract the labels into a 1D uint8 numpy array [index]."""
-  print('Extracting', filename)
-  with gzip.open(filename) as bytestream:
-    magic = _read32(bytestream)
-    if magic != 2049:
-      raise ValueError(
-          'Invalid magic number %d in MNIST label file: %s' %
-          (magic, filename))
-    num_items = _read32(bytestream)
-    buf = bytestream.read(num_items)
-    labels = numpy.frombuffer(buf, dtype=numpy.uint8)
-    if one_hot:
-      return dense_to_one_hot(labels)
-    return labels
-class DataSet(object):
-  def __init__(self, images, labels, fake_data=False):
-    if fake_data:
-      self._num_examples = 10000
-    else:
-      assert images.shape[0] == labels.shape[0], (
-          "images.shape: %s labels.shape: %s" % (images.shape,
-                                                 labels.shape))
-      self._num_examples = images.shape[0]
-      # Convert shape from [num examples, rows, columns, depth]
-      # to [num examples, rows*columns] (assuming depth == 1)
-      assert images.shape[3] == 1
-      images = images.reshape(images.shape[0],
-                              images.shape[1] * images.shape[2])
-      # Convert from [0, 255] -> [0.0, 1.0].
-      images = images.astype(numpy.float32)
-      images = numpy.multiply(images, 1.0 / 255.0)
-    self._images = images
-    self._labels = labels
-    self._epochs_completed = 0
-    self._index_in_epoch = 0
-  @property
-  def images(self):
-    return self._images
-  @property
-  def labels(self):
-    return self._labels
-  @property
-  def num_examples(self):
-    return self._num_examples
-  @property
-  def epochs_completed(self):
-    return self._epochs_completed
-  def next_batch(self, batch_size, fake_data=False):
-    """Return the next `batch_size` examples from this data set."""
-    if fake_data:
-      fake_image = [1.0 for _ in xrange(784)]
-      fake_label = 0
-      return [fake_image for _ in xrange(batch_size)], [
-          fake_label for _ in xrange(batch_size)]
-    start = self._index_in_epoch
-    self._index_in_epoch += batch_size
-    if self._index_in_epoch > self._num_examples:
-      # Finished epoch
-      self._epochs_completed += 1
-      # Shuffle the data
-      perm = numpy.arange(self._num_examples)
-      numpy.random.shuffle(perm)
-      self._images = self._images[perm]
-      self._labels = self._labels[perm]
-      # Start next epoch
-      start = 0
-      self._index_in_epoch = batch_size
-      assert batch_size <= self._num_examples
-    end = self._index_in_epoch
-    return self._images[start:end], self._labels[start:end]
-def read_data_sets(train_dir, fake_data=False, one_hot=False):
-  class DataSets(object):
-    pass
-  data_sets = DataSets()
-  if fake_data:
-    data_sets.train = DataSet([], [], fake_data=True)
-    data_sets.validation = DataSet([], [], fake_data=True)
-    data_sets.test = DataSet([], [], fake_data=True)
-    return data_sets
-  TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
-  TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
-  TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
-  TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
-  VALIDATION_SIZE = 5000
-  local_file = maybe_download(TRAIN_IMAGES, train_dir)
-  train_images = extract_images(local_file)
-  local_file = maybe_download(TRAIN_LABELS, train_dir)
-  train_labels = extract_labels(local_file, one_hot=one_hot)
-  local_file = maybe_download(TEST_IMAGES, train_dir)
-  test_images = extract_images(local_file)
-  local_file = maybe_download(TEST_LABELS, train_dir)
-  test_labels = extract_labels(local_file, one_hot=one_hot)
-  validation_images = train_images[:VALIDATION_SIZE]
-  validation_labels = train_labels[:VALIDATION_SIZE]
-  train_images = train_images[VALIDATION_SIZE:]
-  train_labels = train_labels[VALIDATION_SIZE:]
-  data_sets.train = DataSet(train_images, train_labels)
-  data_sets.validation = DataSet(validation_images, validation_labels)
-  data_sets.test = DataSet(test_images, test_labels)
-  return data_sets
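
This local copy of the MNIST helpers is deleted because the examples now import the loader bundled with TensorFlow, as the diffs to logistic_regression.py and nearest_neighbor.py below show. A minimal sketch of the replacement usage:

    # MNIST loader shipped with TensorFlow, replacing the local input_data.py
    from tensorflow.examples.tutorials.mnist import input_data

    # Downloads the archive files on first use, then reads them
    mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

    # Same next_batch interface as the deleted DataSet class
    batch_xs, batch_ys = mnist.train.next_batch(100)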

+ 27 - 20
examples/2 - Basic Classifiers/linear_regression.py

@@ -12,30 +12,31 @@ rng = numpy.random
 
 # Parameters
 learning_rate = 0.01
-training_epochs = 2000
+training_epochs = 1000
 display_step = 50
 
 # Training Data
-train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1])
-train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3])
+train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
+                         7.042,10.791,5.313,7.997,5.654,9.27,3.1])
+train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
+                         2.827,3.465,1.65,2.904,2.42,2.94,1.3])
 n_samples = train_X.shape[0]
 
 # tf Graph Input
 X = tf.placeholder("float")
 Y = tf.placeholder("float")
 
-# Create Model
-
 # Set model weights
 W = tf.Variable(rng.randn(), name="weight")
 b = tf.Variable(rng.randn(), name="bias")
 
 # Construct a linear model
-activation = tf.add(tf.mul(X, W), b)
+pred = tf.add(tf.mul(X, W), b)
 
-# Minimize the squared errors
-cost = tf.reduce_sum(tf.pow(activation-Y, 2))/(2*n_samples) #L2 loss
-optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) #Gradient descent
+# Mean squared error
+cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
+# Gradient descent
+optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
 
 # Initializing the variables
 init = tf.initialize_all_variables()
@@ -50,28 +51,34 @@ with tf.Session() as sess:
             sess.run(optimizer, feed_dict={X: x, Y: y})
 
         #Display logs per epoch step
-        if epoch % display_step == 0:
-            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(sess.run(cost, feed_dict={X: train_X, Y:train_Y})), \
+        if (epoch+1) % display_step == 0:
+            c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})
+            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
                 "W=", sess.run(W), "b=", sess.run(b)
 
     print "Optimization Finished!"
     training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
     print "Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n'
 
+    #Graphic display
+    plt.plot(train_X, train_Y, 'ro', label='Original data')
+    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
+    plt.legend()
+    plt.show()
 
     # Testing example, as requested (Issue #2)
-    test_X = numpy.asarray([6.83,4.668,8.9,7.91,5.7,8.7,3.1,2.1])
-    test_Y = numpy.asarray([1.84,2.273,3.2,2.831,2.92,3.24,1.35,1.03])
+    test_X = numpy.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1])
+    test_Y = numpy.asarray([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03])
 
-    print "Testing... (L2 loss Comparison)"
-    testing_cost = sess.run(tf.reduce_sum(tf.pow(activation-Y, 2))/(2*test_X.shape[0]),
-                            feed_dict={X: test_X, Y: test_Y}) #same function as cost above
+    print "Testing... (Mean square loss Comparison)"
+    testing_cost = sess.run(
+        tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]),
+        feed_dict={X: test_X, Y: test_Y})  # same function as cost above
     print "Testing cost=", testing_cost
-    print "Absolute l2 loss difference:", abs(training_cost - testing_cost)
+    print "Absolute mean square loss difference:", abs(
+        training_cost - testing_cost)
 
-    #Graphic display
-    plt.plot(train_X, train_Y, 'ro', label='Original data')
     plt.plot(test_X, test_Y, 'bo', label='Testing data')
     plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
     plt.legend()
-    plt.show()
+    plt.show()
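
As an aside (not part of the commit), one-variable linear regression with the mean squared error above has a closed-form solution, which makes a handy sanity check on the W and b that gradient descent converges to. A NumPy sketch on the same training data:

    import numpy as np

    train_X = np.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59,
                          2.167, 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
    train_Y = np.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596,
                          2.53, 1.221, 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])

    # Ordinary least squares: W = cov(X, Y) / var(X), b = mean(Y) - W * mean(X)
    dX = train_X - train_X.mean()
    dY = train_Y - train_Y.mean()
    W = np.sum(dX * dY) / np.sum(dX ** 2)
    b = train_Y.mean() - W * train_X.mean()
    print("closed-form W=%.4f b=%.4f" % (W, b))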

+ 18 - 17
examples/2 - Basic Classifiers/logistic_regression.py

@@ -1,17 +1,18 @@
 '''
 A logistic regression learning algorithm example using TensorFlow library.
-This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
+This example is using the MNIST database of handwritten digits
+(http://yann.lecun.com/exdb/mnist/)
 
 Author: Aymeric Damien
 Project: https://github.com/aymericdamien/TensorFlow-Examples/
 '''
 
+import tensorflow as tf
+
 # Import MINST data
-import input_data
+from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 
-import tensorflow as tf
-
 # Parameters
 learning_rate = 0.01
 training_epochs = 25
@@ -19,21 +20,20 @@ batch_size = 100
 display_step = 1
 
 # tf Graph Input
-x = tf.placeholder("float", [None, 784]) # mnist data image of shape 28*28=784
-y = tf.placeholder("float", [None, 10]) # 0-9 digits recognition => 10 classes
-
-# Create model
+x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784
+y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes
 
 # Set model weights
 W = tf.Variable(tf.zeros([784, 10]))
 b = tf.Variable(tf.zeros([10]))
 
 # Construct model
-activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
+pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
 
 # Minimize error using cross entropy
-cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(activation), reduction_indices=1)) # Cross entropy
-optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # Gradient Descent
+cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
+# Gradient Descent
+optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
 
 # Initializing the variables
 init = tf.initialize_all_variables()
@@ -49,18 +49,19 @@ with tf.Session() as sess:
         # Loop over all batches
         for i in range(total_batch):
             batch_xs, batch_ys = mnist.train.next_batch(batch_size)
-            # Fit training using batch data
-            sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
+            # Run optimization op (backprop) and cost op (to get loss value)
+            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs,
+                                                          y: batch_ys})
             # Compute average loss
-            avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
+            avg_cost += c / total_batch
         # Display logs per epoch step
-        if epoch % display_step == 0:
+        if (epoch+1) % display_step == 0:
             print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
 
     print "Optimization Finished!"
 
     # Test model
-    correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
+    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
     # Calculate accuracy
-    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
+    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
     print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})

+ 7 - 10
examples/2 - Basic Classifiers/nearest_neighbor.py

@@ -1,6 +1,7 @@
 '''
 A nearest neighbor learning algorithm example using TensorFlow library.
-This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
+This example is using the MNIST database of handwritten digits
+(http://yann.lecun.com/exdb/mnist/)
 
 Author: Aymeric Damien
 Project: https://github.com/aymericdamien/TensorFlow-Examples/
@@ -10,17 +11,13 @@ import numpy as np
 import tensorflow as tf
 
 # Import MINST data
-import input_data
+from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 
 # In this example, we limit mnist data
 Xtr, Ytr = mnist.train.next_batch(5000) #5000 for training (nn candidates)
 Xte, Yte = mnist.test.next_batch(200) #200 for testing
 
-# Reshape images to 1D
-Xtr = np.reshape(Xtr, newshape=(-1, 28*28))
-Xte = np.reshape(Xte, newshape=(-1, 28*28))
-
 # tf Graph Input
 xtr = tf.placeholder("float", [None, 784])
 xte = tf.placeholder("float", [784])
@@ -28,7 +25,7 @@ xte = tf.placeholder("float", [784])
 # Nearest Neighbor calculation using L1 Distance
 # Calculate L1 Distance
 distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.neg(xte))), reduction_indices=1)
-# Predict: Get min distance index (Nearest neighbor)
+# Prediction: Get min distance index (Nearest neighbor)
 pred = tf.arg_min(distance, 0)
 
 accuracy = 0.
@@ -43,12 +40,12 @@ with tf.Session() as sess:
     # loop over test data
     for i in range(len(Xte)):
         # Get nearest neighbor
-        nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i,:]})
+        nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]})
         # Get nearest neighbor class label and compare it to its true label
-        print "Test", i, "Prediction:", np.argmax(Ytr[nn_index]), "True Class:", np.argmax(Yte[i])
+        print "Test", i, "Prediction:", np.argmax(Ytr[nn_index]), \
+            "True Class:", np.argmax(Yte[i])
         # Calculate accuracy
         if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):
             accuracy += 1./len(Xte)
     print "Done!"
     print "Accuracy:", accuracy
-
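
The graph here is small enough to restate in NumPy, which makes clear what pred returns: the index of the single training image with the smallest L1 distance to the test image. A sketch:

    import numpy as np

    def nearest_neighbor(Xtr, xte):
        # L1 distance from one test vector to every training vector...
        distance = np.sum(np.abs(Xtr - xte), axis=1)
        # ...and the index of the closest one, as in tf.arg_min(distance, 0)
        return np.argmin(distance)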

+ 0 - 133
examples/3 - Neural Networks/alexnet.py

@@ -1,133 +0,0 @@
-'''
-AlexNet implementation example using TensorFlow library.
-This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
-AlexNet Paper (http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf)
-
-Author: Aymeric Damien
-Project: https://github.com/aymericdamien/TensorFlow-Examples/
-'''
-
-# Import MINST data
-import input_data
-mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
-
-import tensorflow as tf
-
-# Parameters
-learning_rate = 0.001
-training_iters = 200000
-batch_size = 64
-display_step = 20
-
-# Network Parameters
-n_input = 784 # MNIST data input (img shape: 28*28)
-n_classes = 10 # MNIST total classes (0-9 digits)
-dropout = 0.8 # Dropout, probability to keep units
-
-# tf Graph input
-x = tf.placeholder(tf.float32, [None, n_input])
-y = tf.placeholder(tf.float32, [None, n_classes])
-keep_prob = tf.placeholder(tf.float32) # dropout (keep probability)
-
-# Create AlexNet model
-def conv2d(name, l_input, w, b):
-    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1], padding='SAME'),b), name=name)
-
-def max_pool(name, l_input, k):
-    return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME', name=name)
-
-def norm(name, l_input, lsize=4):
-    return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=name)
-
-def alex_net(_X, _weights, _biases, _dropout):
-    # Reshape input picture
-    _X = tf.reshape(_X, shape=[-1, 28, 28, 1])
-
-    # Convolution Layer
-    conv1 = conv2d('conv1', _X, _weights['wc1'], _biases['bc1'])
-    # Max Pooling (down-sampling)
-    pool1 = max_pool('pool1', conv1, k=2)
-    # Apply Normalization
-    norm1 = norm('norm1', pool1, lsize=4)
-    # Apply Dropout
-    norm1 = tf.nn.dropout(norm1, _dropout)
-
-    # Convolution Layer
-    conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'])
-    # Max Pooling (down-sampling)
-    pool2 = max_pool('pool2', conv2, k=2)
-    # Apply Normalization
-    norm2 = norm('norm2', pool2, lsize=4)
-    # Apply Dropout
-    norm2 = tf.nn.dropout(norm2, _dropout)
-
-    # Convolution Layer
-    conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'])
-    # Max Pooling (down-sampling)
-    pool3 = max_pool('pool3', conv3, k=2)
-    # Apply Normalization
-    norm3 = norm('norm3', pool3, lsize=4)
-    # Apply Dropout
-    norm3 = tf.nn.dropout(norm3, _dropout)
-
-    # Fully connected layer
-    dense1 = tf.reshape(norm3, [-1, _weights['wd1'].get_shape().as_list()[0]]) # Reshape conv3 output to fit dense layer input
-    dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'], name='fc1') # Relu activation
-
-    dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc2') # Relu activation
-
-    # Output, class prediction
-    out = tf.matmul(dense2, _weights['out']) + _biases['out']
-    return out
-
-# Store layers weight & bias
-weights = {
-    'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64])),
-    'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128])),
-    'wc3': tf.Variable(tf.random_normal([3, 3, 128, 256])),
-    'wd1': tf.Variable(tf.random_normal([4*4*256, 1024])),
-    'wd2': tf.Variable(tf.random_normal([1024, 1024])),
-    'out': tf.Variable(tf.random_normal([1024, 10]))
-}
-biases = {
-    'bc1': tf.Variable(tf.random_normal([64])),
-    'bc2': tf.Variable(tf.random_normal([128])),
-    'bc3': tf.Variable(tf.random_normal([256])),
-    'bd1': tf.Variable(tf.random_normal([1024])),
-    'bd2': tf.Variable(tf.random_normal([1024])),
-    'out': tf.Variable(tf.random_normal([n_classes]))
-}
-
-# Construct model
-pred = alex_net(x, weights, biases, keep_prob)
-
-# Define loss and optimizer
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
-optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
-
-# Evaluate model
-correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
-accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
-
-# Initializing the variables
-init = tf.initialize_all_variables()
-
-# Launch the graph
-with tf.Session() as sess:
-    sess.run(init)
-    step = 1
-    # Keep training until reach max iterations
-    while step * batch_size < training_iters:
-        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
-        # Fit training using batch data
-        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
-        if step % display_step == 0:
-            # Calculate batch accuracy
-            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
-            # Calculate batch loss
-            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
-            print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc)
-        step += 1
-    print "Optimization Finished!"
-    # Calculate accuracy for 256 mnist test images
-    print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.})

+ 0 - 163
examples/3 - Neural Networks/bidirectional_rnn.py

@@ -1,163 +0,0 @@
-'''
-A Bidirectional Reccurent Neural Network (LSTM) implementation example using TensorFlow library.
-This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
-Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
-
-Author: Aymeric Damien
-Project: https://github.com/aymericdamien/TensorFlow-Examples/
-'''
-
-# Import MINST data
-import input_data
-mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
-
-import tensorflow as tf
-from tensorflow.python.ops.constant_op import constant
-from tensorflow.models.rnn import rnn, rnn_cell
-import numpy as np
-
-'''
-To classify images using a bidirectional reccurent neural network, we consider every image row as a sequence of pixels.
-Because MNIST image shape is 28*28px, we will then handle 28 sequences of 28 steps for every sample.
-'''
-
-# Parameters
-learning_rate = 0.001
-training_iters = 100000
-batch_size = 128
-display_step = 10
-
-# Network Parameters
-n_input = 28 # MNIST data input (img shape: 28*28)
-n_steps = 28 # timesteps
-n_hidden = 128 # hidden layer num of features
-n_classes = 10 # MNIST total classes (0-9 digits)
-
-# tf Graph input
-x = tf.placeholder("float", [None, n_steps, n_input])
-# Tensorflow LSTM cell requires 2x n_hidden length (state & cell)
-istate_fw = tf.placeholder("float", [None, 2*n_hidden])
-istate_bw = tf.placeholder("float", [None, 2*n_hidden])
-y = tf.placeholder("float", [None, n_classes])
-
-# Define weights
-weights = {
-    # Hidden layer weights => 2*n_hidden because of foward + backward cells
-    'hidden': tf.Variable(tf.random_normal([n_input, 2*n_hidden])),
-    'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))
-}
-biases = {
-    'hidden': tf.Variable(tf.random_normal([2*n_hidden])),
-    'out': tf.Variable(tf.random_normal([n_classes]))
-}
-
-def BiRNN(_X, _istate_fw, _istate_bw, _weights, _biases, _batch_size, _seq_len):
-
-    # BiRNN requires to supply sequence_length as [batch_size, int64]
-    # Note: Tensorflow 0.6.0 requires BiRNN sequence_length parameter to be set
-    # For a better implementation with latest version of tensorflow, check below
-    _seq_len = tf.fill([_batch_size], constant(_seq_len, dtype=tf.int64))
-
-    # input shape: (batch_size, n_steps, n_input)
-    _X = tf.transpose(_X, [1, 0, 2])  # permute n_steps and batch_size
-    # Reshape to prepare input to hidden activation
-    _X = tf.reshape(_X, [-1, n_input]) # (n_steps*batch_size, n_input)
-    # Linear activation
-    _X = tf.matmul(_X, _weights['hidden']) + _biases['hidden']
-
-    # Define lstm cells with tensorflow
-    # Forward direction cell
-    lstm_fw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
-    # Backward direction cell
-    lstm_bw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
-    # Split data because rnn cell needs a list of inputs for the RNN inner loop
-    _X = tf.split(0, n_steps, _X) # n_steps * (batch_size, n_hidden)
-
-    # Get lstm cell output
-    outputs = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, _X,
-                                            initial_state_fw=_istate_fw,
-                                            initial_state_bw=_istate_bw,
-                                            sequence_length=_seq_len)
-
-    # Linear activation
-    # Get inner loop last output
-    return tf.matmul(outputs[-1], _weights['out']) + _biases['out']
-
-pred = BiRNN(x, istate_fw, istate_bw, weights, biases, batch_size, n_steps)
-
-
-# NOTE: The following code is working with current master version of tensorflow
-#       BiRNN sequence_length parameter isn't required, so we don't define it
-#
-# def BiRNN(_X, _istate_fw, _istate_bw, _weights, _biases):
-#
-#     # input shape: (batch_size, n_steps, n_input)
-#     _X = tf.transpose(_X, [1, 0, 2])  # permute n_steps and batch_size
-#     # Reshape to prepare input to hidden activation
-#     _X = tf.reshape(_X, [-1, n_input]) # (n_steps*batch_size, n_input)
-#     # Linear activation
-#     _X = tf.matmul(_X, _weights['hidden']) + _biases['hidden']
-#
-#     # Define lstm cells with tensorflow
-#     # Forward direction cell
-#     lstm_fw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
-#     # Backward direction cell
-#     lstm_bw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
-#     # Split data because rnn cell needs a list of inputs for the RNN inner loop
-#     _X = tf.split(0, n_steps, _X) # n_steps * (batch_size, n_hidden)
-#
-#     # Get lstm cell output
-#     outputs = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, _X,
-#                                             initial_state_fw=_istate_fw,
-#                                             initial_state_bw=_istate_bw)
-#
-#     # Linear activation
-#     # Get inner loop last output
-#     return tf.matmul(outputs[-1], _weights['out']) + _biases['out']
-#
-# pred = BiRNN(x, istate_fw, istate_bw, weights, biases)
-
-# Define loss and optimizer
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y)) # Softmax loss
-optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer
-
-# Evaluate model
-correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
-accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
-
-# Initializing the variables
-init = tf.initialize_all_variables()
-
-# Launch the graph
-with tf.Session() as sess:
-    sess.run(init)
-    step = 1
-    # Keep training until reach max iterations
-    while step * batch_size < training_iters:
-        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
-        # Reshape data to get 28 seq of 28 elements
-        batch_xs = batch_xs.reshape((batch_size, n_steps, n_input))
-        # Fit training using batch data
-        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys,
-                                       istate_fw: np.zeros((batch_size, 2*n_hidden)),
-                                       istate_bw: np.zeros((batch_size, 2*n_hidden))})
-        if step % display_step == 0:
-            # Calculate batch accuracy
-            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys,
-                                                istate_fw: np.zeros((batch_size, 2*n_hidden)),
-                                                istate_bw: np.zeros((batch_size, 2*n_hidden))})
-            # Calculate batch loss
-            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys,
-                                             istate_fw: np.zeros((batch_size, 2*n_hidden)),
-                                             istate_bw: np.zeros((batch_size, 2*n_hidden))})
-            print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + \
-                  ", Training Accuracy= " + "{:.5f}".format(acc)
-        step += 1
-    print "Optimization Finished!"
-    # Calculate accuracy for 128 mnist test images
-    test_len = 128
-    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
-    test_label = mnist.test.labels[:test_len]
-    print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: test_data, y: test_label,
-                                                             istate_fw: np.zeros((test_len, 2*n_hidden)),
-                                                             istate_bw: np.zeros((test_len, 2*n_hidden))})

+ 0 - 112
examples/3 - Neural Networks/convolutional_network.py

@@ -1,112 +0,0 @@
-'''
-A Convolutional Network implementation example using TensorFlow library.
-This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
-
-Author: Aymeric Damien
-Project: https://github.com/aymericdamien/TensorFlow-Examples/
-'''
-
-# Import MINST data
-import input_data
-mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
-
-import tensorflow as tf
-
-# Parameters
-learning_rate = 0.001
-training_iters = 100000
-batch_size = 128
-display_step = 10
-
-# Network Parameters
-n_input = 784 # MNIST data input (img shape: 28*28)
-n_classes = 10 # MNIST total classes (0-9 digits)
-dropout = 0.75 # Dropout, probability to keep units
-
-# tf Graph input
-x = tf.placeholder(tf.float32, [None, n_input])
-y = tf.placeholder(tf.float32, [None, n_classes])
-keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
-
-# Create model
-def conv2d(img, w, b):
-    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(img, w, strides=[1, 1, 1, 1], padding='SAME'),b))
-
-def max_pool(img, k):
-    return tf.nn.max_pool(img, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')
-
-def conv_net(_X, _weights, _biases, _dropout):
-    # Reshape input picture
-    _X = tf.reshape(_X, shape=[-1, 28, 28, 1])
-
-    # Convolution Layer
-    conv1 = conv2d(_X, _weights['wc1'], _biases['bc1'])
-    # Max Pooling (down-sampling)
-    conv1 = max_pool(conv1, k=2)
-    # Apply Dropout
-    conv1 = tf.nn.dropout(conv1, _dropout)
-
-    # Convolution Layer
-    conv2 = conv2d(conv1, _weights['wc2'], _biases['bc2'])
-    # Max Pooling (down-sampling)
-    conv2 = max_pool(conv2, k=2)
-    # Apply Dropout
-    conv2 = tf.nn.dropout(conv2, _dropout)
-
-    # Fully connected layer
-    dense1 = tf.reshape(conv2, [-1, _weights['wd1'].get_shape().as_list()[0]]) # Reshape conv2 output to fit dense layer input
-    dense1 = tf.nn.relu(tf.add(tf.matmul(dense1, _weights['wd1']), _biases['bd1'])) # Relu activation
-    dense1 = tf.nn.dropout(dense1, _dropout) # Apply Dropout
-
-    # Output, class prediction
-    out = tf.add(tf.matmul(dense1, _weights['out']), _biases['out'])
-    return out
-
-# Store layers weight & bias
-weights = {
-    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])), # 5x5 conv, 1 input, 32 outputs
-    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])), # 5x5 conv, 32 inputs, 64 outputs
-    'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])), # fully connected, 7*7*64 inputs, 1024 outputs
-    'out': tf.Variable(tf.random_normal([1024, n_classes])) # 1024 inputs, 10 outputs (class prediction)
-}
-
-biases = {
-    'bc1': tf.Variable(tf.random_normal([32])),
-    'bc2': tf.Variable(tf.random_normal([64])),
-    'bd1': tf.Variable(tf.random_normal([1024])),
-    'out': tf.Variable(tf.random_normal([n_classes]))
-}
-
-# Construct model
-pred = conv_net(x, weights, biases, keep_prob)
-
-# Define loss and optimizer
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
-optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
-
-# Evaluate model
-correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
-accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
-
-# Initializing the variables
-init = tf.initialize_all_variables()
-
-# Launch the graph
-with tf.Session() as sess:
-    sess.run(init)
-    step = 1
-    # Keep training until reach max iterations
-    while step * batch_size < training_iters:
-        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
-        # Fit training using batch data
-        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
-        if step % display_step == 0:
-            # Calculate batch accuracy
-            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
-            # Calculate batch loss
-            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
-            print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc)
-        step += 1
-    print "Optimization Finished!"
-    # Calculate accuracy for 256 mnist test images
-    print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.})

+ 0 - 144
examples/3 - Neural Networks/input_data.py

@@ -1,144 +0,0 @@
-"""Functions for downloading and reading MNIST data."""
-from __future__ import print_function
-import gzip
-import os
-import urllib
-import numpy
-SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
-def maybe_download(filename, work_directory):
-  """Download the data from Yann's website, unless it's already here."""
-  if not os.path.exists(work_directory):
-    os.mkdir(work_directory)
-  filepath = os.path.join(work_directory, filename)
-  if not os.path.exists(filepath):
-    filepath, _ = urllib.urlretrieve(SOURCE_URL + filename, filepath)
-    statinfo = os.stat(filepath)
-    print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
-  return filepath
-def _read32(bytestream):
-  dt = numpy.dtype(numpy.uint32).newbyteorder('>')
-  return numpy.frombuffer(bytestream.read(4), dtype=dt)
-def extract_images(filename):
-  """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
-  print('Extracting', filename)
-  with gzip.open(filename) as bytestream:
-    magic = _read32(bytestream)
-    if magic != 2051:
-      raise ValueError(
-          'Invalid magic number %d in MNIST image file: %s' %
-          (magic, filename))
-    num_images = _read32(bytestream)
-    rows = _read32(bytestream)
-    cols = _read32(bytestream)
-    buf = bytestream.read(rows * cols * num_images)
-    data = numpy.frombuffer(buf, dtype=numpy.uint8)
-    data = data.reshape(num_images, rows, cols, 1)
-    return data
-def dense_to_one_hot(labels_dense, num_classes=10):
-  """Convert class labels from scalars to one-hot vectors."""
-  num_labels = labels_dense.shape[0]
-  index_offset = numpy.arange(num_labels) * num_classes
-  labels_one_hot = numpy.zeros((num_labels, num_classes))
-  labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
-  return labels_one_hot
-def extract_labels(filename, one_hot=False):
-  """Extract the labels into a 1D uint8 numpy array [index]."""
-  print('Extracting', filename)
-  with gzip.open(filename) as bytestream:
-    magic = _read32(bytestream)
-    if magic != 2049:
-      raise ValueError(
-          'Invalid magic number %d in MNIST label file: %s' %
-          (magic, filename))
-    num_items = _read32(bytestream)
-    buf = bytestream.read(num_items)
-    labels = numpy.frombuffer(buf, dtype=numpy.uint8)
-    if one_hot:
-      return dense_to_one_hot(labels)
-    return labels
-class DataSet(object):
-  def __init__(self, images, labels, fake_data=False):
-    if fake_data:
-      self._num_examples = 10000
-    else:
-      assert images.shape[0] == labels.shape[0], (
-          "images.shape: %s labels.shape: %s" % (images.shape,
-                                                 labels.shape))
-      self._num_examples = images.shape[0]
-      # Convert shape from [num examples, rows, columns, depth]
-      # to [num examples, rows*columns] (assuming depth == 1)
-      assert images.shape[3] == 1
-      images = images.reshape(images.shape[0],
-                              images.shape[1] * images.shape[2])
-      # Convert from [0, 255] -> [0.0, 1.0].
-      images = images.astype(numpy.float32)
-      images = numpy.multiply(images, 1.0 / 255.0)
-    self._images = images
-    self._labels = labels
-    self._epochs_completed = 0
-    self._index_in_epoch = 0
-  @property
-  def images(self):
-    return self._images
-  @property
-  def labels(self):
-    return self._labels
-  @property
-  def num_examples(self):
-    return self._num_examples
-  @property
-  def epochs_completed(self):
-    return self._epochs_completed
-  def next_batch(self, batch_size, fake_data=False):
-    """Return the next `batch_size` examples from this data set."""
-    if fake_data:
-      fake_image = [1.0 for _ in xrange(784)]
-      fake_label = 0
-      return [fake_image for _ in xrange(batch_size)], [
-          fake_label for _ in xrange(batch_size)]
-    start = self._index_in_epoch
-    self._index_in_epoch += batch_size
-    if self._index_in_epoch > self._num_examples:
-      # Finished epoch
-      self._epochs_completed += 1
-      # Shuffle the data
-      perm = numpy.arange(self._num_examples)
-      numpy.random.shuffle(perm)
-      self._images = self._images[perm]
-      self._labels = self._labels[perm]
-      # Start next epoch
-      start = 0
-      self._index_in_epoch = batch_size
-      assert batch_size <= self._num_examples
-    end = self._index_in_epoch
-    return self._images[start:end], self._labels[start:end]
-def read_data_sets(train_dir, fake_data=False, one_hot=False):
-  class DataSets(object):
-    pass
-  data_sets = DataSets()
-  if fake_data:
-    data_sets.train = DataSet([], [], fake_data=True)
-    data_sets.validation = DataSet([], [], fake_data=True)
-    data_sets.test = DataSet([], [], fake_data=True)
-    return data_sets
-  TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
-  TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
-  TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
-  TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
-  VALIDATION_SIZE = 5000
-  local_file = maybe_download(TRAIN_IMAGES, train_dir)
-  train_images = extract_images(local_file)
-  local_file = maybe_download(TRAIN_LABELS, train_dir)
-  train_labels = extract_labels(local_file, one_hot=one_hot)
-  local_file = maybe_download(TEST_IMAGES, train_dir)
-  test_images = extract_images(local_file)
-  local_file = maybe_download(TEST_LABELS, train_dir)
-  test_labels = extract_labels(local_file, one_hot=one_hot)
-  validation_images = train_images[:VALIDATION_SIZE]
-  validation_labels = train_labels[:VALIDATION_SIZE]
-  train_images = train_images[VALIDATION_SIZE:]
-  train_labels = train_labels[VALIDATION_SIZE:]
-  data_sets.train = DataSet(train_images, train_labels)
-  data_sets.validation = DataSet(validation_images, validation_labels)
-  data_sets.test = DataSet(test_images, test_labels)
-  return data_sets

+ 21 - 39
examples/3 - Neural Networks/autoencoder.py

@@ -16,7 +16,7 @@ import numpy as np
 import matplotlib.pyplot as plt
 
 # Import MINST data
-import input_data
+from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 
 # Parameters
@@ -47,28 +47,37 @@ biases = {
     'decoder_b2': tf.Variable(tf.random_normal([n_input])),
 }
 
+
 # Building the encoder
 def encoder(x):
     # Encoder Hidden layer with sigmoid activation #1
-    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))
+    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
+                                   biases['encoder_b1']))
     # Decoder Hidden layer with sigmoid activation #2
-    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']), biases['encoder_b2']))
+    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
+                                   biases['encoder_b2']))
     return layer_2
 
+
 # Building the decoder
 def decoder(x):
     # Encoder Hidden layer with sigmoid activation #1
-    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))
+    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
+                                   biases['decoder_b1']))
     # Decoder Hidden layer with sigmoid activation #2
-    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']), biases['decoder_b2']))
+    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
+                                   biases['decoder_b2']))
     return layer_2
 
 # Construct model
 encoder_op = encoder(X)
 decoder_op = decoder(encoder_op)
 
+# Prediction
 y_pred = decoder_op
+# Targets (Labels) are the input data.
 y_true = X
+
 # Define loss and optimizer, minimize the squared error
 cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
 optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)
@@ -85,16 +94,18 @@ with tf.Session() as sess:
         # Loop over all batches
         for i in range(total_batch):
             batch_xs, batch_ys = mnist.train.next_batch(batch_size)
-            # Fit training using batch data
-            _, cost_value = sess.run([optimizer, cost], feed_dict={X: batch_xs})
+            # Run optimization op (backprop) and cost op (to get loss value)
+            _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
         # Display logs per epoch step
         if epoch % display_step == 0:
-            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(cost_value))
+            print("Epoch:", '%04d' % (epoch+1),
+                  "cost=", "{:.9f}".format(c))
 
     print("Optimization Finished!")
 
-    #Applying encode and decode over test set
-    encode_decode = sess.run(y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
+    # Applying encode and decode over test set
+    encode_decode = sess.run(
+        y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
     # Compare original images with their reconstructions
     f, a = plt.subplots(2, 10, figsize=(10, 2))
     for i in range(examples_to_show):
@@ -103,32 +114,3 @@ with tf.Session() as sess:
     f.show()
     plt.draw()
     plt.waitforbuttonpress()
-
-# # Regression, with mean square error
-# net = tflearn.regression(decoder, optimizer='adam', learning_rate=0.001,
-#                          loss='mean_square', metric=None)
-#
-# # Training the auto encoder
-# model = tflearn.DNN(net, tensorboard_verbose=0)
-# model.fit(X, X, n_epoch=10, validation_set=(testX, testX),
-#           run_id="auto_encoder", batch_size=256)
-#
-# # Encoding X[0] for test
-# print("\nTest encoding of X[0]:")
-# # New model, re-using the same session, for weights sharing
-# encoding_model = tflearn.DNN(encoder, session=model.session)
-# print(encoding_model.predict([X[0]]))
-#
-# # Testing the image reconstruction on new data (test set)
-# print("\nVisualizing results after being encoded and decoded:")
-# testX = tflearn.data_utils.shuffle(testX)[0]
-# # Applying encode and decode over test set
-# encode_decode = model.predict(testX)
-# # Compare original images with their reconstructions
-# f, a = plt.subplots(2, 10, figsize=(10, 2))
-# for i in range(10):
-#     a[0][i].imshow(np.reshape(testX[i], (28, 28)))
-#     a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
-# f.show()
-# plt.draw()
-# plt.waitforbuttonpress()
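
A note on the optimizer the autoencoder keeps using: tf.train.RMSPropOptimizer scales each parameter's step by a running average of its squared gradients. A NumPy sketch of one common formulation of that update (the lr, decay and epsilon values here are illustrative, not taken from this commit):

    import numpy as np

    def rmsprop_step(param, grad, ms, lr=0.01, decay=0.9, epsilon=1e-10):
        # Running average of squared gradients...
        ms = decay * ms + (1 - decay) * grad ** 2
        # ...yields a per-parameter step size
        param = param - lr * grad / (np.sqrt(ms) + epsilon)
        return param, ms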

+ 116 - 0
examples/3_NeuralNetworks/bidirectional_rnn.py

@@ -0,0 +1,116 @@
+'''
+A Bidirectional Recurrent Neural Network (LSTM) implementation example using TensorFlow library.
+This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
+Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+
+import tensorflow as tf
+from tensorflow.models.rnn import rnn, rnn_cell
+import numpy as np
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+'''
+To classify images using a bidirectional recurrent neural network, we consider
+every image row as a sequence of pixels. Because MNIST image shape is 28*28px,
+we will then handle 28 sequences of 28 steps for every sample.
+'''
+
+# Parameters
+learning_rate = 0.001
+training_iters = 100000
+batch_size = 128
+display_step = 10
+
+# Network Parameters
+n_input = 28 # MNIST data input (img shape: 28*28)
+n_steps = 28 # timesteps
+n_hidden = 128 # hidden layer num of features
+n_classes = 10 # MNIST total classes (0-9 digits)
+
+# tf Graph input
+x = tf.placeholder("float", [None, n_steps, n_input])
+y = tf.placeholder("float", [None, n_classes])
+
+# Define weights
+weights = {
+    # Hidden layer weights => 2*n_hidden because of forward + backward cells
+    'hidden': tf.Variable(tf.random_normal([n_input, 2*n_hidden])),
+    'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))
+}
+biases = {
+    'hidden': tf.Variable(tf.random_normal([2*n_hidden])),
+    'out': tf.Variable(tf.random_normal([n_classes]))
+}
+
+
+def BiRNN(x, weights, biases):
+
+    # Prepare data shape to match `bidirectional_rnn` function requirements
+    # Current data input shape: (batch_size, n_steps, n_input)
+    # Permuting batch_size and n_steps
+    x = tf.transpose(x, [1, 0, 2])
+    # Reshape to (n_steps*batch_size, n_input)
+    x = tf.reshape(x, [-1, n_input])
+    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
+    x = tf.split(0, n_steps, x)
+
+    # Define lstm cells with tensorflow
+    # Forward direction cell
+    lstm_fw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
+    # Backward direction cell
+    lstm_bw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
+
+    # Get lstm cell output
+    outputs = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
+                                    dtype=tf.float32)
+
+    # Linear activation, using rnn inner loop last output
+    return tf.matmul(outputs[-1], weights['out']) + biases['out']
+
+pred = BiRNN(x, weights, biases)
+
+# Define loss and optimizer
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
+
+# Evaluate model
+correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
+accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
+
+# Initializing the variables
+init = tf.initialize_all_variables()
+
+# Launch the graph
+with tf.Session() as sess:
+    sess.run(init)
+    step = 1
+    # Keep training until reach max iterations
+    while step * batch_size < training_iters:
+        batch_x, batch_y = mnist.train.next_batch(batch_size)
+        # Reshape data to get 28 seq of 28 elements
+        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
+        # Run optimization op (backprop)
+        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
+        if step % display_step == 0:
+            # Calculate batch accuracy
+            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
+            # Calculate batch loss
+            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
+            print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
+                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
+                  "{:.5f}".format(acc)
+        step += 1
+    print "Optimization Finished!"
+
+    # Calculate accuracy for 128 mnist test images
+    test_len = 128
+    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
+    test_label = mnist.test.labels[:test_len]
+    print "Testing Accuracy:", \
+        sess.run(accuracy, feed_dict={x: test_data, y: test_label})

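A quick way to sanity-check the transpose/reshape/split pipeline in BiRNN is to replay it in NumPy. The sketch below uses illustrative names (`batch`, `seqs`) and confirms that a (batch_size, n_steps, n_input) batch ends up as a list of n_steps arrays of shape (batch_size, n_input). Note, in passing, that the 'hidden' entries of `weights` and `biases` above are defined but never used by BiRNN, which only consumes the 'out' pair.

    import numpy as np

    batch_size, n_steps, n_input = 4, 28, 28
    batch = np.zeros((batch_size, n_steps, n_input), dtype=np.float32)

    # Mirror the graph ops: transpose -> reshape -> split
    seqs = batch.transpose(1, 0, 2)         # (n_steps, batch_size, n_input)
    seqs = seqs.reshape(-1, n_input)        # (n_steps*batch_size, n_input)
    seqs = np.split(seqs, n_steps, axis=0)  # list of n_steps arrays

    assert len(seqs) == n_steps
    assert seqs[0].shape == (batch_size, n_input)
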
+ 132 - 0
examples/3_NeuralNetworks/convolutional_network.py

@@ -0,0 +1,132 @@
+'''
+A Convolutional Network implementation example using TensorFlow library.
+This example is using the MNIST database of handwritten digits
+(http://yann.lecun.com/exdb/mnist/)
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+
+import tensorflow as tf
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+# Parameters
+learning_rate = 0.001
+training_iters = 200000
+batch_size = 128
+display_step = 10
+
+# Network Parameters
+n_input = 784 # MNIST data input (img shape: 28*28)
+n_classes = 10 # MNIST total classes (0-9 digits)
+dropout = 0.75 # Dropout, probability to keep units
+
+# tf Graph input
+x = tf.placeholder(tf.float32, [None, n_input])
+y = tf.placeholder(tf.float32, [None, n_classes])
+keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)
+
+
+# Create some wrappers for simplicity
+def conv2d(x, W, b, strides=1):
+    # Conv2D wrapper, with bias and relu activation
+    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
+    x = tf.nn.bias_add(x, b)
+    return tf.nn.relu(x)
+
+
+def maxpool2d(x, k=2):
+    # MaxPool2D wrapper
+    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
+                          padding='SAME')
+
+
+# Create model
+def conv_net(x, weights, biases, dropout):
+    # Reshape input picture
+    x = tf.reshape(x, shape=[-1, 28, 28, 1])
+
+    # Convolution Layer
+    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
+    # Max Pooling (down-sampling)
+    conv1 = maxpool2d(conv1, k=2)
+
+    # Convolution Layer
+    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
+    # Max Pooling (down-sampling)
+    conv2 = maxpool2d(conv2, k=2)
+
+    # Fully connected layer
+    # Reshape conv2 output to fit fully connected layer input
+    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
+    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
+    fc1 = tf.nn.relu(fc1)
+    # Apply Dropout
+    fc1 = tf.nn.dropout(fc1, dropout)
+
+    # Output, class prediction
+    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
+    return out
+
+# Store layers weight & bias
+weights = {
+    # 5x5 conv, 1 input, 32 outputs
+    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
+    # 5x5 conv, 32 inputs, 64 outputs
+    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
+    # fully connected, 7*7*64 inputs, 1024 outputs
+    'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),
+    # 1024 inputs, 10 outputs (class prediction)
+    'out': tf.Variable(tf.random_normal([1024, n_classes]))
+}
+
+biases = {
+    'bc1': tf.Variable(tf.random_normal([32])),
+    'bc2': tf.Variable(tf.random_normal([64])),
+    'bd1': tf.Variable(tf.random_normal([1024])),
+    'out': tf.Variable(tf.random_normal([n_classes]))
+}
+
+# Construct model
+pred = conv_net(x, weights, biases, keep_prob)
+
+# Define loss and optimizer
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
+
+# Evaluate model
+correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
+accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
+
+# Initializing the variables
+init = tf.initialize_all_variables()
+
+# Launch the graph
+with tf.Session() as sess:
+    sess.run(init)
+    step = 1
+    # Keep training until reach max iterations
+    while step * batch_size < training_iters:
+        batch_x, batch_y = mnist.train.next_batch(batch_size)
+        # Run optimization op (backprop)
+        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
+                                       keep_prob: dropout})
+        if step % display_step == 0:
+            # Calculate batch loss and accuracy
+            loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
+                                                              y: batch_y,
+                                                              keep_prob: 1.})
+            print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
+                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
+                  "{:.5f}".format(acc)
+        step += 1
+    print "Optimization Finished!"
+
+    # Calculate accuracy for 256 mnist test images
+    print "Testing Accuracy:", \
+        sess.run(accuracy, feed_dict={x: mnist.test.images[:256],
+                                      y: mnist.test.labels[:256],
+                                      keep_prob: 1.})

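The 7*7*64 input size of 'wd1' is worth spelling out: with SAME padding and stride k, each max-pool maps a spatial side s to ceil(s/k), so the two k=2 pools take 28 -> 14 -> 7, while conv2 emits 64 channels. A minimal sketch of that bookkeeping:

    def pooled_side(side, k=2, n_pools=2):
        # SAME padding with stride k: output side = ceil(side / k)
        for _ in range(n_pools):
            side = -(-side // k)  # ceiling division
        return side

    side = pooled_side(28)                  # 28 -> 14 -> 7
    assert side * side * 64 == 7 * 7 * 64   # matches weights['wd1']
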
+ 25 - 16
examples/3 - Neural Networks/multilayer_perceptron.py

@@ -1,13 +1,14 @@
 '''
 A Multilayer Perceptron implementation example using TensorFlow library.
-This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
+This example is using the MNIST database of handwritten digits
+(http://yann.lecun.com/exdb/mnist/)
 
 Author: Aymeric Damien
 Project: https://github.com/aymericdamien/TensorFlow-Examples/
 '''
 
 # Import MNIST data
-import input_data
+from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 
 import tensorflow as tf
@@ -19,8 +20,8 @@ batch_size = 100
 display_step = 1
 
 # Network Parameters
-n_hidden_1 = 256 # 1st layer num features
-n_hidden_2 = 256 # 2nd layer num features
+n_hidden_1 = 256 # 1st layer number of features
+n_hidden_2 = 256 # 2nd layer number of features
 n_input = 784 # MNIST data input (img shape: 28*28)
 n_classes = 10 # MNIST total classes (0-9 digits)
 
@@ -28,11 +29,18 @@ n_classes = 10 # MNIST total classes (0-9 digits)
 x = tf.placeholder("float", [None, n_input])
 y = tf.placeholder("float", [None, n_classes])
 
+
 # Create model
-def multilayer_perceptron(_X, _weights, _biases):
-    layer_1 = tf.nn.relu(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1'])) #Hidden layer with RELU activation
-    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, _weights['h2']), _biases['b2'])) #Hidden layer with RELU activation
-    return tf.matmul(layer_2, _weights['out']) + _biases['out']
+def multilayer_perceptron(x, weights, biases):
+    # Hidden layer with RELU activation
+    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
+    layer_1 = tf.nn.relu(layer_1)
+    # Hidden layer with RELU activation
+    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
+    layer_2 = tf.nn.relu(layer_2)
+    # Output layer with linear activation
+    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
+    return out_layer
 
 # Store layers weight & bias
 weights = {
@@ -50,8 +58,8 @@ biases = {
 pred = multilayer_perceptron(x, weights, biases)
 
 # Define loss and optimizer
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y)) # Softmax loss
-optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
 
 # Initializing the variables
 init = tf.initialize_all_variables()
@@ -66,15 +74,16 @@ with tf.Session() as sess:
         total_batch = int(mnist.train.num_examples/batch_size)
         # Loop over all batches
         for i in range(total_batch):
-            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
-            # Fit training using batch data
-            sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
+            batch_x, batch_y = mnist.train.next_batch(batch_size)
+            # Run optimization op (backprop) and cost op (to get loss value)
+            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
+                                                          y: batch_y})
             # Compute average loss
-            avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
+            avg_cost += c / total_batch
         # Display logs per epoch step
         if epoch % display_step == 0:
-            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
-
+            print "Epoch:", '%04d' % (epoch+1), "cost=", \
+                "{:.9f}".format(avg_cost)
     print "Optimization Finished!"
 
     # Test model

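The refactored loop above fetches the optimizer and the cost in a single sess.run call, so each batch costs one graph execution instead of two, and `avg_cost += c / total_batch` builds the mean of the per-batch losses incrementally. A tiny sketch of that identity, with illustrative loss values:

    batch_costs = [0.9, 0.7, 0.6, 0.4]  # illustrative per-batch losses
    total_batch = len(batch_costs)

    avg_cost = 0.
    for c in batch_costs:
        avg_cost += c / total_batch     # incremental mean, as in the loop above

    assert abs(avg_cost - sum(batch_costs) / total_batch) < 1e-12
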
+ 39 - 40
examples/3 - Neural Networks/recurrent_network.py

@@ -7,17 +7,18 @@ Author: Aymeric Damien
 Project: https://github.com/aymericdamien/TensorFlow-Examples/
 '''
 
-# Import MINST data
-import input_data
-mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
-
 import tensorflow as tf
 from tensorflow.models.rnn import rnn, rnn_cell
 import numpy as np
 
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
 '''
-To classify images using a reccurent neural network, we consider every image row as a sequence of pixels.
-Because MNIST image shape is 28*28px, we will then handle 28 sequences of 28 steps for every sample.
+To classify images using a recurrent neural network, we consider every image
+row as a sequence of pixels. Because MNIST image shape is 28*28px, we will then
+handle 28 sequences of 28 steps for every sample.
 '''
 
 # Parameters
@@ -34,13 +35,11 @@ n_classes = 10 # MNIST total classes (0-9 digits)
 
 # tf Graph input
 x = tf.placeholder("float", [None, n_steps, n_input])
-# Tensorflow LSTM cell requires 2x n_hidden length (state & cell)
-istate = tf.placeholder("float", [None, 2*n_hidden])
 y = tf.placeholder("float", [None, n_classes])
 
 # Define weights
 weights = {
-    'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])), # Hidden layer weights
+    'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])),
     'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
 }
 biases = {
@@ -48,32 +47,33 @@ biases = {
     'out': tf.Variable(tf.random_normal([n_classes]))
 }
 
-def RNN(_X, _istate, _weights, _biases):
 
-    # input shape: (batch_size, n_steps, n_input)
-    _X = tf.transpose(_X, [1, 0, 2])  # permute n_steps and batch_size
-    # Reshape to prepare input to hidden activation
-    _X = tf.reshape(_X, [-1, n_input]) # (n_steps*batch_size, n_input)
-    # Linear activation
-    _X = tf.matmul(_X, _weights['hidden']) + _biases['hidden']
+def RNN(x, weights, biases):
+
+    # Prepare data shape to match `rnn` function requirements
+    # Current data input shape: (batch_size, n_steps, n_input)
+    # Permuting batch_size and n_steps
+    x = tf.transpose(x, [1, 0, 2])
+    # Reshaping to (n_steps*batch_size, n_input)
+    x = tf.reshape(x, [-1, n_input])
+    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
+    # This input shape is required by `rnn` function
+    x = tf.split(0, n_steps, x)
 
     # Define a lstm cell with tensorflow
     lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
-    # Split data because rnn cell needs a list of inputs for the RNN inner loop
-    _X = tf.split(0, n_steps, _X) # n_steps * (batch_size, n_hidden)
 
     # Get lstm cell output
-    outputs, states = rnn.rnn(lstm_cell, _X, initial_state=_istate)
+    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
 
-    # Linear activation
-    # Get inner loop last output
-    return tf.matmul(outputs[-1], _weights['out']) + _biases['out']
+    # Linear activation, using rnn inner loop last output
+    return tf.matmul(outputs[-1], weights['out']) + biases['out']
 
-pred = RNN(x, istate, weights, biases)
+pred = RNN(x, weights, biases)
 
 # Define loss and optimizer
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y)) # Softmax loss
-optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
 
 # Evaluate model
 correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
@@ -88,26 +88,25 @@ with tf.Session() as sess:
     step = 1
     # Keep training until reach max iterations
     while step * batch_size < training_iters:
-        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
+        batch_x, batch_y = mnist.train.next_batch(batch_size)
         # Reshape data to get 28 seq of 28 elements
-        batch_xs = batch_xs.reshape((batch_size, n_steps, n_input))
-        # Fit training using batch data
-        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys,
-                                       istate: np.zeros((batch_size, 2*n_hidden))})
+        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
+        # Run optimization op (backprop)
+        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
         if step % display_step == 0:
             # Calculate batch accuracy
-            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys,
-                                                istate: np.zeros((batch_size, 2*n_hidden))})
+            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
             # Calculate batch loss
-            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys,
-                                             istate: np.zeros((batch_size, 2*n_hidden))})
-            print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + \
-                  ", Training Accuracy= " + "{:.5f}".format(acc)
+            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
+            print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
+                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
+                  "{:.5f}".format(acc)
         step += 1
     print "Optimization Finished!"
-    # Calculate accuracy for 256 mnist test images
-    test_len = 256
+
+    # Calculate accuracy for 128 mnist test images
+    test_len = 128
     test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
     test_label = mnist.test.labels[:test_len]
-    print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: test_data, y: test_label,
-                                                             istate: np.zeros((test_len, 2*n_hidden))})
+    print "Testing Accuracy:", \
+        sess.run(accuracy, feed_dict={x: test_data, y: test_label})

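The main API change in this diff: instead of the caller allocating a zero state and feeding it through the istate placeholder on every sess.run, the cell now builds its own initial state, and dtype tells it what kind of zeros to allocate. Side by side, as used in this commit's rnn module:

    # Before: caller feeds an explicit zero state on every step
    #   istate = tf.placeholder("float", [None, 2*n_hidden])
    #   outputs, states = rnn.rnn(lstm_cell, x, initial_state=istate)
    #   ... feed_dict={..., istate: np.zeros((batch_size, 2*n_hidden))}

    # After: the cell creates a zero initial state internally; dtype is
    # required so it knows which zeros to build
    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
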
+ 137 - 0
examples/4_Utils/save_restore_model.py

@@ -0,0 +1,137 @@
+'''
+Save and Restore a model using TensorFlow.
+This example is using the MNIST database of handwritten digits
+(http://yann.lecun.com/exdb/mnist/)
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+import tensorflow as tf
+
+# Parameters
+learning_rate = 0.001
+batch_size = 100
+display_step = 1
+model_path = "/tmp/model.ckpt"
+
+# Network Parameters
+n_hidden_1 = 256 # 1st layer number of features
+n_hidden_2 = 256 # 2nd layer number of features
+n_input = 784 # MNIST data input (img shape: 28*28)
+n_classes = 10 # MNIST total classes (0-9 digits)
+
+# tf Graph input
+x = tf.placeholder("float", [None, n_input])
+y = tf.placeholder("float", [None, n_classes])
+
+
+# Create model
+def multilayer_perceptron(x, weights, biases):
+    # Hidden layer with RELU activation
+    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
+    layer_1 = tf.nn.relu(layer_1)
+    # Hidden layer with RELU activation
+    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
+    layer_2 = tf.nn.relu(layer_2)
+    # Output layer with linear activation
+    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
+    return out_layer
+
+# Store layers weight & bias
+weights = {
+    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
+    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
+    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
+}
+biases = {
+    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
+    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
+    'out': tf.Variable(tf.random_normal([n_classes]))
+}
+
+# Construct model
+pred = multilayer_perceptron(x, weights, biases)
+
+# Define loss and optimizer
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
+
+# Initializing the variables
+init = tf.initialize_all_variables()
+
+# 'Saver' op to save and restore all the variables
+saver = tf.train.Saver()
+
+# Running first session
+print "Starting 1st session..."
+with tf.Session() as sess:
+    # Initialize variables
+    sess.run(init)
+
+    # Training cycle
+    for epoch in range(3):
+        avg_cost = 0.
+        total_batch = int(mnist.train.num_examples/batch_size)
+        # Loop over all batches
+        for i in range(total_batch):
+            batch_x, batch_y = mnist.train.next_batch(batch_size)
+            # Run optimization op (backprop) and cost op (to get loss value)
+            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
+                                                          y: batch_y})
+            # Compute average loss
+            avg_cost += c / total_batch
+        # Display logs per epoch step
+        if epoch % display_step == 0:
+            print "Epoch:", '%04d' % (epoch+1), "cost=", \
+                "{:.9f}".format(avg_cost)
+    print "First Optimization Finished!"
+
+    # Test model
+    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
+    # Calculate accuracy
+    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
+    print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
+
+    # Save model weights to disk
+    save_path = saver.save(sess, model_path)
+    print "Model saved in file: %s" % save_path
+
+# Running a new session
+print "Starting 2nd session..."
+with tf.Session() as sess:
+    # Initialize variables
+    sess.run(init)
+
+    # Restore model weights from previously saved model
+    saver.restore(sess, model_path)
+    print "Model restored from file: %s" % model_path
+
+    # Resume training
+    for epoch in range(7):
+        avg_cost = 0.
+        total_batch = int(mnist.train.num_examples / batch_size)
+        # Loop over all batches
+        for i in range(total_batch):
+            batch_x, batch_y = mnist.train.next_batch(batch_size)
+            # Run optimization op (backprop) and cost op (to get loss value)
+            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
+                                                          y: batch_y})
+            # Compute average loss
+            avg_cost += c / total_batch
+        # Display logs per epoch step
+        if epoch % display_step == 0:
+            print "Epoch:", '%04d' % (epoch + 1), "cost=", \
+                "{:.9f}".format(avg_cost)
+    print "Second Optimization Finished!"
+
+    # Test model
+    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
+    # Calculate accuracy
+    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
+    print "Accuracy:", accuracy.eval(
+        {x: mnist.test.images, y: mnist.test.labels})

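A common extension of the script above, assuming the tf.train.Saver API of this TensorFlow generation (Saver accepts max_to_keep, and save takes a global_step that is appended to the filename, e.g. /tmp/model.ckpt-2), is to keep numbered checkpoints during training rather than one file. A hedged sketch reusing init and model_path from the script:

    # Sketch only: numbered checkpoints, pruned to the 5 most recent
    saver = tf.train.Saver(max_to_keep=5)
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(3):
            # ... run the batch loop from the script above ...
            saver.save(sess, model_path, global_step=epoch)
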
+ 1 - 0
examples/4_Utils/tensorboard_advanced.py

@@ -0,0 +1 @@
+# TODO

+ 93 - 0
examples/4_Utils/tensorboard_basic.py

@@ -0,0 +1,93 @@
+'''
+Graph and Loss visualization using TensorBoard.
+This example is using the MNIST database of handwritten digits
+(http://yann.lecun.com/exdb/mnist/)
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+
+import tensorflow as tf
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+# Parameters
+learning_rate = 0.01
+training_epochs = 25
+batch_size = 100
+display_step = 1
+logs_path = '/tmp/tensorflow_logs'
+
+# tf Graph Input
+# mnist data image of shape 28*28=784
+x = tf.placeholder(tf.float32, [None, 784], name='InputData')
+# 0-9 digits recognition => 10 classes
+y = tf.placeholder(tf.float32, [None, 10], name='LabelData')
+
+# Set model weights
+W = tf.Variable(tf.zeros([784, 10]), name='Weights')
+b = tf.Variable(tf.zeros([10]), name='Bias')
+
+# Construct the model and encapsulate all ops into scopes, making
+# TensorBoard's graph visualization more convenient
+with tf.name_scope('Model'):
+    # Model
+    pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
+with tf.name_scope('Loss'):
+    # Minimize error using cross entropy
+    cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
+with tf.name_scope('SGD'):
+    # Gradient Descent
+    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
+with tf.name_scope('Accuracy'):
+    # Accuracy
+    acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
+    acc = tf.reduce_mean(tf.cast(acc, tf.float32))
+
+# Initializing the variables
+init = tf.initialize_all_variables()
+
+# Create a summary to monitor cost tensor
+tf.scalar_summary("loss", cost)
+# Create a summary to monitor accuracy tensor
+tf.scalar_summary("accuracy", acc)
+# Merge all summaries into a single op
+merged_summary_op = tf.merge_all_summaries()
+
+# Launch the graph
+with tf.Session() as sess:
+    sess.run(init)
+
+    # op to write logs to TensorBoard
+    summary_writer = tf.train.SummaryWriter(logs_path)
+
+    # Training cycle
+    for epoch in range(training_epochs):
+        avg_cost = 0.
+        total_batch = int(mnist.train.num_examples/batch_size)
+        # Loop over all batches
+        for i in range(total_batch):
+            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
+            # Run optimization op (backprop), cost op (to get loss value)
+            # and summary nodes
+            _, c, summary = sess.run([optimizer, cost, merged_summary_op],
+                                     feed_dict={x: batch_xs, y: batch_ys})
+            # Write logs at every iteration
+            summary_writer.add_summary(summary, epoch * total_batch + i)
+            # Compute average loss
+            avg_cost += c / total_batch
+        # Display logs per epoch step
+        if (epoch+1) % display_step == 0:
+            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
+
+    print "Optimization Finished!"
+
+    # Test model
+    # Calculate accuracy
+    print "Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels})
+
+    print "Run the command line:\n" \
+          "--> tensorboard --logdir=/tmp/tensorflow_logs " \
+          "\nThen open http://0.0.0.0:6006/ in your web browser"

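As written, the SummaryWriter above only ever receives scalar summaries, so TensorBoard's Graph tab stays empty and the name_scope blocks go unseen. The deleted graph_visualization.py below passed the graph at construction time; the same idea here would be (the keyword is graph_def in older releases and graph in later ones, so treat the exact name as version-dependent):

    # Sketch: export the graph so the Model/Loss/SGD/Accuracy scopes
    # show up in TensorBoard's Graph tab
    summary_writer = tf.train.SummaryWriter(logs_path,
                                            graph_def=sess.graph_def)
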
+ 0 - 78
examples/5 - User Interface/graph_visualization.py

@@ -1,78 +0,0 @@
-'''
-Graph Visualization with TensorFlow.
-This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
-
-Author: Aymeric Damien
-Project: https://github.com/aymericdamien/TensorFlow-Examples/
-'''
-
-import tensorflow as tf
-import numpy
-
-# Import MINST data
-import input_data
-mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
-
-# Use Logistic Regression from our previous example
-
-# Parameters
-learning_rate = 0.01
-training_epochs = 10
-batch_size = 100
-display_step = 1
-
-# tf Graph Input
-x = tf.placeholder("float", [None, 784], name='x') # mnist data image of shape 28*28=784
-y = tf.placeholder("float", [None, 10], name='y') # 0-9 digits recognition => 10 classes
-
-# Create model
-
-# Set model weights
-W = tf.Variable(tf.zeros([784, 10]), name="weights")
-b = tf.Variable(tf.zeros([10]), name="bias")
-
-# Construct model
-activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
-
-# Minimize error using cross entropy
-cost = -tf.reduce_sum(y*tf.log(activation)) # Cross entropy
-optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # Gradient Descent
-
-# Initializing the variables
-init = tf.initialize_all_variables()
-
-# Launch the graph
-with tf.Session() as sess:
-    sess.run(init)
-
-    # Set logs writer into folder /tmp/tensorflow_logs
-    summary_writer = tf.train.SummaryWriter('/tmp/tensorflow_logs', graph_def=sess.graph_def)
-
-    # Training cycle
-    for epoch in range(training_epochs):
-        avg_cost = 0.
-        total_batch = int(mnist.train.num_examples/batch_size)
-        # Loop over all batches
-        for i in range(total_batch):
-            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
-            # Fit training using batch data
-            sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
-            # Compute average loss
-            avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
-        # Display logs per epoch step
-        if epoch % display_step == 0:
-            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
-
-    print "Optimization Finished!"
-
-    # Test model
-    correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
-    # Calculate accuracy
-    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
-    print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
-
-'''
-Run the command line: tensorboard --logdir=/tmp/tensorflow_logs
-Open http://localhost:6006/ into your web browser
-'''
-

+ 0 - 86
examples/5 - User Interface/loss_visualization.py

@@ -1,86 +0,0 @@
-'''
-Loss Visualization with TensorFlow.
-This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
-
-Author: Aymeric Damien
-Project: https://github.com/aymericdamien/TensorFlow-Examples/
-'''
-
-import tensorflow as tf
-import numpy
-
-# Import MINST data
-import input_data
-mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
-
-# Use Logistic Regression from our previous example
-
-# Parameters
-learning_rate = 0.01
-training_epochs = 10
-batch_size = 100
-display_step = 1
-
-# tf Graph Input
-x = tf.placeholder("float", [None, 784], name='x') # mnist data image of shape 28*28=784
-y = tf.placeholder("float", [None, 10], name='y') # 0-9 digits recognition => 10 classes
-
-# Create model
-
-# Set model weights
-W = tf.Variable(tf.zeros([784, 10]), name="weights")
-b = tf.Variable(tf.zeros([10]), name="bias")
-
-# Construct model
-activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
-
-# Minimize error using cross entropy
-cost = -tf.reduce_sum(y*tf.log(activation)) # Cross entropy
-optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # Gradient Descent
-
-# Initializing the variables
-init = tf.initialize_all_variables()
-
-# Create a summary to monitor cost function
-tf.scalar_summary("loss", cost)
-
-# Merge all summaries to a single operator
-merged_summary_op = tf.merge_all_summaries()
-
-# Launch the graph
-with tf.Session() as sess:
-    sess.run(init)
-
-    # Set logs writer into folder /tmp/tensorflow_logs
-    summary_writer = tf.train.SummaryWriter('/tmp/tensorflow_logs', graph_def=sess.graph_def)
-
-    # Training cycle
-    for epoch in range(training_epochs):
-        avg_cost = 0.
-        total_batch = int(mnist.train.num_examples/batch_size)
-        # Loop over all batches
-        for i in range(total_batch):
-            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
-            # Fit training using batch data
-            sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
-            # Compute average loss
-            avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
-            # Write logs at every iteration
-            summary_str = sess.run(merged_summary_op, feed_dict={x: batch_xs, y: batch_ys})
-            summary_writer.add_summary(summary_str, epoch*total_batch + i)
-        # Display logs per epoch step
-        if epoch % display_step == 0:
-            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
-
-    print "Optimization Finished!"
-
-    # Test model
-    correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
-    # Calculate accuracy
-    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
-    print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
-
-'''
-Run the command line: tensorboard --logdir=/tmp/tensorflow_logs
-Open http://localhost:6006/ into your web browser
-'''

examples/4 - Multi GPU/multigpu_basics.py → examples/5_MultiGPU/multigpu_basics.py


+ 2 - 2
notebooks/1 - Introduction/basic_operations.ipynb

@@ -205,7 +205,7 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 2
+    "version": 2.0
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
@@ -217,4 +217,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}

+ 2 - 2
notebooks/1 - Introduction/helloworld.ipynb

@@ -72,7 +72,7 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 2
+    "version": 2.0
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
@@ -84,4 +84,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}

+ 0 - 144
notebooks/2 - Basic Classifiers/input_data.py

@@ -1,144 +0,0 @@
-"""Functions for downloading and reading MNIST data."""
-from __future__ import print_function
-import gzip
-import os
-import urllib
-import numpy
-SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
-def maybe_download(filename, work_directory):
-  """Download the data from Yann's website, unless it's already here."""
-  if not os.path.exists(work_directory):
-    os.mkdir(work_directory)
-  filepath = os.path.join(work_directory, filename)
-  if not os.path.exists(filepath):
-    filepath, _ = urllib.urlretrieve(SOURCE_URL + filename, filepath)
-    statinfo = os.stat(filepath)
-    print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
-  return filepath
-def _read32(bytestream):
-  dt = numpy.dtype(numpy.uint32).newbyteorder('>')
-  return numpy.frombuffer(bytestream.read(4), dtype=dt)
-def extract_images(filename):
-  """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
-  print('Extracting', filename)
-  with gzip.open(filename) as bytestream:
-    magic = _read32(bytestream)
-    if magic != 2051:
-      raise ValueError(
-          'Invalid magic number %d in MNIST image file: %s' %
-          (magic, filename))
-    num_images = _read32(bytestream)
-    rows = _read32(bytestream)
-    cols = _read32(bytestream)
-    buf = bytestream.read(rows * cols * num_images)
-    data = numpy.frombuffer(buf, dtype=numpy.uint8)
-    data = data.reshape(num_images, rows, cols, 1)
-    return data
-def dense_to_one_hot(labels_dense, num_classes=10):
-  """Convert class labels from scalars to one-hot vectors."""
-  num_labels = labels_dense.shape[0]
-  index_offset = numpy.arange(num_labels) * num_classes
-  labels_one_hot = numpy.zeros((num_labels, num_classes))
-  labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
-  return labels_one_hot
-def extract_labels(filename, one_hot=False):
-  """Extract the labels into a 1D uint8 numpy array [index]."""
-  print('Extracting', filename)
-  with gzip.open(filename) as bytestream:
-    magic = _read32(bytestream)
-    if magic != 2049:
-      raise ValueError(
-          'Invalid magic number %d in MNIST label file: %s' %
-          (magic, filename))
-    num_items = _read32(bytestream)
-    buf = bytestream.read(num_items)
-    labels = numpy.frombuffer(buf, dtype=numpy.uint8)
-    if one_hot:
-      return dense_to_one_hot(labels)
-    return labels
-class DataSet(object):
-  def __init__(self, images, labels, fake_data=False):
-    if fake_data:
-      self._num_examples = 10000
-    else:
-      assert images.shape[0] == labels.shape[0], (
-          "images.shape: %s labels.shape: %s" % (images.shape,
-                                                 labels.shape))
-      self._num_examples = images.shape[0]
-      # Convert shape from [num examples, rows, columns, depth]
-      # to [num examples, rows*columns] (assuming depth == 1)
-      assert images.shape[3] == 1
-      images = images.reshape(images.shape[0],
-                              images.shape[1] * images.shape[2])
-      # Convert from [0, 255] -> [0.0, 1.0].
-      images = images.astype(numpy.float32)
-      images = numpy.multiply(images, 1.0 / 255.0)
-    self._images = images
-    self._labels = labels
-    self._epochs_completed = 0
-    self._index_in_epoch = 0
-  @property
-  def images(self):
-    return self._images
-  @property
-  def labels(self):
-    return self._labels
-  @property
-  def num_examples(self):
-    return self._num_examples
-  @property
-  def epochs_completed(self):
-    return self._epochs_completed
-  def next_batch(self, batch_size, fake_data=False):
-    """Return the next `batch_size` examples from this data set."""
-    if fake_data:
-      fake_image = [1.0 for _ in xrange(784)]
-      fake_label = 0
-      return [fake_image for _ in xrange(batch_size)], [
-          fake_label for _ in xrange(batch_size)]
-    start = self._index_in_epoch
-    self._index_in_epoch += batch_size
-    if self._index_in_epoch > self._num_examples:
-      # Finished epoch
-      self._epochs_completed += 1
-      # Shuffle the data
-      perm = numpy.arange(self._num_examples)
-      numpy.random.shuffle(perm)
-      self._images = self._images[perm]
-      self._labels = self._labels[perm]
-      # Start next epoch
-      start = 0
-      self._index_in_epoch = batch_size
-      assert batch_size <= self._num_examples
-    end = self._index_in_epoch
-    return self._images[start:end], self._labels[start:end]
-def read_data_sets(train_dir, fake_data=False, one_hot=False):
-  class DataSets(object):
-    pass
-  data_sets = DataSets()
-  if fake_data:
-    data_sets.train = DataSet([], [], fake_data=True)
-    data_sets.validation = DataSet([], [], fake_data=True)
-    data_sets.test = DataSet([], [], fake_data=True)
-    return data_sets
-  TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
-  TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
-  TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
-  TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
-  VALIDATION_SIZE = 5000
-  local_file = maybe_download(TRAIN_IMAGES, train_dir)
-  train_images = extract_images(local_file)
-  local_file = maybe_download(TRAIN_LABELS, train_dir)
-  train_labels = extract_labels(local_file, one_hot=one_hot)
-  local_file = maybe_download(TEST_IMAGES, train_dir)
-  test_images = extract_images(local_file)
-  local_file = maybe_download(TEST_LABELS, train_dir)
-  test_labels = extract_labels(local_file, one_hot=one_hot)
-  validation_images = train_images[:VALIDATION_SIZE]
-  validation_labels = train_labels[:VALIDATION_SIZE]
-  train_images = train_images[VALIDATION_SIZE:]
-  train_labels = train_labels[VALIDATION_SIZE:]
-  data_sets.train = DataSet(train_images, train_labels)
-  data_sets.validation = DataSet(validation_images, validation_labels)
-  data_sets.test = DataSet(test_images, test_labels)
-  return data_sets

File diff suppressed because it is too large
+ 0 - 253
notebooks/2 - Basic Classifiers/linear_regression.ipynb


+ 202 - 0
notebooks/2_BasicModels/linear_regression.ipynb

@@ -0,0 +1,202 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# A linear regression learning algorithm example using TensorFlow library.\n",
+    "\n",
+    "# Author: Aymeric Damien\n",
+    "# Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "import tensorflow as tf\n",
+    "import numpy\n",
+    "import matplotlib.pyplot as plt\n",
+    "rng = numpy.random"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Parameters\n",
+    "learning_rate = 0.01\n",
+    "training_epochs = 1000\n",
+    "display_step = 50"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Training Data\n",
+    "train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,\n",
+    "                         7.042,10.791,5.313,7.997,5.654,9.27,3.1])\n",
+    "train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,\n",
+    "                         2.827,3.465,1.65,2.904,2.42,2.94,1.3])\n",
+    "n_samples = train_X.shape[0]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# tf Graph Input\n",
+    "X = tf.placeholder(\"float\")\n",
+    "Y = tf.placeholder(\"float\")\n",
+    "\n",
+    "# Set model weights\n",
+    "W = tf.Variable(rng.randn(), name=\"weight\")\n",
+    "b = tf.Variable(rng.randn(), name=\"bias\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Construct a linear model\n",
+    "pred = tf.add(tf.mul(X, W), b)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Mean squared error\n",
+    "cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)\n",
+    "# Gradient descent\n",
+    "optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Initializing the variables\n",
+    "init = tf.initialize_all_variables()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch: 0050 cost= 0.207037717 W= 0.451217 b= -0.649001\n",
+      "Epoch: 0100 cost= 0.192011863 W= 0.439226 b= -0.562735\n",
+      "Epoch: 0150 cost= 0.178721249 W= 0.427948 b= -0.481599\n",
+      "Epoch: 0200 cost= 0.166965470 W= 0.41734 b= -0.40529\n",
+      "Epoch: 0250 cost= 0.156567261 W= 0.407363 b= -0.333518\n",
+      "Epoch: 0300 cost= 0.147369981 W= 0.39798 b= -0.266015\n",
+      "Epoch: 0350 cost= 0.139234960 W= 0.389155 b= -0.202527\n",
+      "Epoch: 0400 cost= 0.132039562 W= 0.380854 b= -0.142815\n",
+      "Epoch: 0450 cost= 0.125675321 W= 0.373048 b= -0.0866538\n",
+      "Epoch: 0500 cost= 0.120046206 W= 0.365705 b= -0.0338331\n",
+      "Epoch: 0550 cost= 0.115067400 W= 0.3588 b= 0.0158462\n",
+      "Epoch: 0600 cost= 0.110663772 W= 0.352305 b= 0.0625707\n",
+      "Epoch: 0650 cost= 0.106768914 W= 0.346196 b= 0.106516\n",
+      "Epoch: 0700 cost= 0.103324078 W= 0.340451 b= 0.147848\n",
+      "Epoch: 0750 cost= 0.100277305 W= 0.335047 b= 0.186722\n",
+      "Epoch: 0800 cost= 0.097582638 W= 0.329965 b= 0.223284\n",
+      "Epoch: 0850 cost= 0.095199391 W= 0.325184 b= 0.257671\n",
+      "Epoch: 0900 cost= 0.093091547 W= 0.320689 b= 0.290013\n",
+      "Epoch: 0950 cost= 0.091227390 W= 0.31646 b= 0.320432\n",
+      "Epoch: 1000 cost= 0.089578770 W= 0.312484 b= 0.349041\n",
+      "Optimization Finished!\n",
+      "Training cost= 0.0895788 W= 0.312484 b= 0.349041 \n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Launch the graph\n",
+    "with tf.Session() as sess:\n",
+    "    sess.run(init)\n",
+    "\n",
+    "    # Fit all training data\n",
+    "    for epoch in range(training_epochs):\n",
+    "        for (x, y) in zip(train_X, train_Y):\n",
+    "            sess.run(optimizer, feed_dict={X: x, Y: y})\n",
+    "\n",
+    "        #Display logs per epoch step\n",
+    "        if (epoch+1) % display_step == 0:\n",
+    "            c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})\n",
+    "            print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(c), \\\n",
+    "                \"W=\", sess.run(W), \"b=\", sess.run(b)\n",
+    "\n",
+    "    print \"Optimization Finished!\"\n",
+    "    training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})\n",
+    "    print \"Training cost=\", training_cost, \"W=\", sess.run(W), \"b=\", sess.run(b), '\\n'\n",
+    "\n",
+    "    #Graphic display\n",
+    "    plt.plot(train_X, train_Y, 'ro', label='Original data')\n",
+    "    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')\n",
+    "    plt.legend()\n",
+    "    plt.show()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2.0
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.11"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

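A handy cross-check for this notebook is the closed-form least-squares fit, the optimum that the per-sample gradient descent loop approaches as training_epochs grows (the printed W= 0.312, b= 0.349 is still drifting toward it at epoch 1000). A sketch with numpy.polyfit on the same data:

    import numpy

    train_X = numpy.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182,
                             7.59, 2.167, 7.042, 10.791, 5.313, 7.997, 5.654,
                             9.27, 3.1])
    train_Y = numpy.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366,
                             2.596, 2.53, 1.221, 2.827, 3.465, 1.65, 2.904,
                             2.42, 2.94, 1.3])

    # Degree-1 polyfit returns (slope, intercept) = the least-squares W, b
    W_ls, b_ls = numpy.polyfit(train_X, train_Y, 1)
    print "Least-squares W=", W_ls, "b=", b_ls
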
+ 55 - 111
notebooks/2 - Basic Classifiers/logistic_regression.ipynb

@@ -18,7 +18,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 2,
    "metadata": {
     "collapsed": false
    },
@@ -35,25 +35,16 @@
     }
    ],
    "source": [
+    "import tensorflow as tf\n",
+    "\n",
     "# Import MINST data\n",
-    "import input_data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
     "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "import tensorflow as tf"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 3,
    "metadata": {
     "collapsed": true
    },
@@ -63,79 +54,31 @@
     "learning_rate = 0.01\n",
     "training_epochs = 25\n",
     "batch_size = 100\n",
-    "display_step = 1"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 8,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
+    "display_step = 1\n",
+    "\n",
     "# tf Graph Input\n",
-    "x = tf.placeholder(\"float\", [None, 784]) # mnist data image of shape 28*28=784\n",
-    "y = tf.placeholder(\"float\", [None, 10]) # 0-9 digits recognition => 10 classes"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Create model\n",
+    "x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784\n",
+    "y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes\n",
     "\n",
     "# Set model weights\n",
     "W = tf.Variable(tf.zeros([784, 10]))\n",
-    "b = tf.Variable(tf.zeros([10]))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 10,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
+    "b = tf.Variable(tf.zeros([10]))\n",
+    "\n",
     "# Construct model\n",
-    "activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 11,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
+    "pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax\n",
+    "\n",
     "# Minimize error using cross entropy\n",
-    "# Cross entropy\n",
-    "cost = -tf.reduce_sum(y*tf.log(activation)) \n",
+    "cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))\n",
     "# Gradient Descent\n",
-    "optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 12,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
+    "optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n",
+    "\n",
     "# Initializing the variables\n",
     "init = tf.initialize_all_variables()"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": 4,
    "metadata": {
     "collapsed": false
    },
@@ -144,33 +87,33 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Epoch: 0001 cost= 29.860479714\n",
-      "Epoch: 0002 cost= 22.080549484\n",
-      "Epoch: 0003 cost= 21.237104595\n",
-      "Epoch: 0004 cost= 20.460196280\n",
-      "Epoch: 0005 cost= 20.185128237\n",
-      "Epoch: 0006 cost= 19.940297202\n",
-      "Epoch: 0007 cost= 19.645111119\n",
-      "Epoch: 0008 cost= 19.507218031\n",
-      "Epoch: 0009 cost= 19.389794492\n",
-      "Epoch: 0010 cost= 19.177005816\n",
-      "Epoch: 0011 cost= 19.082493615\n",
-      "Epoch: 0012 cost= 19.072873598\n",
-      "Epoch: 0013 cost= 18.938005402\n",
-      "Epoch: 0014 cost= 18.891806430\n",
-      "Epoch: 0015 cost= 18.839480221\n",
-      "Epoch: 0016 cost= 18.769349510\n",
-      "Epoch: 0017 cost= 18.590865587\n",
-      "Epoch: 0018 cost= 18.623413677\n",
-      "Epoch: 0019 cost= 18.546149085\n",
-      "Epoch: 0020 cost= 18.432274895\n",
-      "Epoch: 0021 cost= 18.358189004\n",
-      "Epoch: 0022 cost= 18.380014628\n",
-      "Epoch: 0023 cost= 18.499993471\n",
-      "Epoch: 0024 cost= 18.386477311\n",
-      "Epoch: 0025 cost= 18.258080609\n",
+      "Epoch: 0001 cost= 1.182138961\n",
+      "Epoch: 0002 cost= 0.664670898\n",
+      "Epoch: 0003 cost= 0.552613988\n",
+      "Epoch: 0004 cost= 0.498497931\n",
+      "Epoch: 0005 cost= 0.465418769\n",
+      "Epoch: 0006 cost= 0.442546219\n",
+      "Epoch: 0007 cost= 0.425473814\n",
+      "Epoch: 0008 cost= 0.412171735\n",
+      "Epoch: 0009 cost= 0.401359516\n",
+      "Epoch: 0010 cost= 0.392401536\n",
+      "Epoch: 0011 cost= 0.384750201\n",
+      "Epoch: 0012 cost= 0.378185581\n",
+      "Epoch: 0013 cost= 0.372401533\n",
+      "Epoch: 0014 cost= 0.367302442\n",
+      "Epoch: 0015 cost= 0.362702316\n",
+      "Epoch: 0016 cost= 0.358568827\n",
+      "Epoch: 0017 cost= 0.354882155\n",
+      "Epoch: 0018 cost= 0.351430912\n",
+      "Epoch: 0019 cost= 0.348316068\n",
+      "Epoch: 0020 cost= 0.345392556\n",
+      "Epoch: 0021 cost= 0.342737278\n",
+      "Epoch: 0022 cost= 0.340264994\n",
+      "Epoch: 0023 cost= 0.337890242\n",
+      "Epoch: 0024 cost= 0.335708558\n",
+      "Epoch: 0025 cost= 0.333686476\n",
       "Optimization Finished!\n",
-      "Accuracy: 0.9048\n"
+      "Accuracy: 0.889667\n"
      ]
     }
    ],
@@ -187,42 +130,43 @@
     "        for i in range(total_batch):\n",
     "            batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n",
     "            # Fit training using batch data\n",
-    "            sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})\n",
+    "            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs,\n",
+    "                                                          y: batch_ys})\n",
     "            # Compute average loss\n",
-    "            avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch\n",
+    "            avg_cost += c / total_batch\n",
     "        # Display logs per epoch step\n",
-    "        if epoch % display_step == 0:\n",
+    "        if (epoch+1) % display_step == 0:\n",
     "            print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(avg_cost)\n",
     "\n",
     "    print \"Optimization Finished!\"\n",
     "\n",
     "    # Test model\n",
-    "    correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))\n",
-    "    # Calculate accuracy\n",
-    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n",
-    "    print \"Accuracy:\", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})"
+    "    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n",
+    "    # Calculate accuracy for 3000 examples\n",
+    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
+    "    print \"Accuracy:\", accuracy.eval({x: mnist.test.images[:3000], y: mnist.test.labels[:3000]})"
    ]
   }
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "IPython (Python 2.7)",
+   "display_name": "Python 2",
    "language": "python",
    "name": "python2"
   },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 2
+    "version": 2.0
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython2",
-   "version": "2.7.8"
+   "version": "2.7.11"
   }
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}

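One caveat with the hand-rolled cross entropy in this notebook: tf.log(pred) turns into NaN as soon as the softmax saturates a probability to exactly 0. A common safeguard, sketched here with tf.clip_by_value as a drop-in for the cost line, keeps the loss finite:

    # Sketch: clip probabilities away from 0 before the log to avoid NaNs
    cost = tf.reduce_mean(-tf.reduce_sum(
        y * tf.log(tf.clip_by_value(pred, 1e-10, 1.0)),
        reduction_indices=1))
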
+ 20 - 78
notebooks/2 - Basic Classifiers/nearest_neighbor.ipynb

@@ -9,7 +9,7 @@
    "outputs": [],
    "source": [
     "# A nearest neighbor learning algorithm example using TensorFlow library.\n",
-    "# This example is using the MNIST database of handwritten digits \n",
+    "# This example is using the MNIST database of handwritten digits\n",
     "# (http://yann.lecun.com/exdb/mnist/)\n",
     "\n",
     "# Author: Aymeric Damien\n",
@@ -20,18 +20,6 @@
    "cell_type": "code",
    "execution_count": 2,
    "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "import tensorflow as tf"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {
     "collapsed": false
    },
    "outputs": [
@@ -47,14 +35,17 @@
     }
    ],
    "source": [
+    "import numpy as np\n",
+    "import tensorflow as tf\n",
+    "\n",
     "# Import MINST data\n",
-    "import input_data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
     "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 3,
    "metadata": {
     "collapsed": true
    },
@@ -62,67 +53,27 @@
    "source": [
     "# In this example, we limit mnist data\n",
     "Xtr, Ytr = mnist.train.next_batch(5000) #5000 for training (nn candidates)\n",
-    "Xte, Yte = mnist.test.next_batch(200) #200 for testing"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Reshape images to 1D\n",
-    "Xtr = np.reshape(Xtr, newshape=(-1, 28*28))\n",
-    "Xte = np.reshape(Xte, newshape=(-1, 28*28))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
+    "Xte, Yte = mnist.test.next_batch(200) #200 for testing\n",
+    "\n",
     "# tf Graph Input\n",
     "xtr = tf.placeholder(\"float\", [None, 784])\n",
-    "xte = tf.placeholder(\"float\", [784])"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 8,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
+    "xte = tf.placeholder(\"float\", [784])\n",
+    "\n",
     "# Nearest Neighbor calculation using L1 Distance\n",
     "# Calculate L1 Distance\n",
     "distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.neg(xte))), reduction_indices=1)\n",
-    "# Predict: Get min distance index (Nearest neighbor)\n",
+    "# Prediction: Get min distance index (Nearest neighbor)\n",
     "pred = tf.arg_min(distance, 0)\n",
     "\n",
-    "accuracy = 0."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
+    "accuracy = 0.\n",
+    "\n",
     "# Initializing the variables\n",
     "init = tf.initialize_all_variables()"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": 4,
    "metadata": {
     "collapsed": false
    },
@@ -344,46 +295,37 @@
     "    # loop over test data\n",
     "    for i in range(len(Xte)):\n",
     "        # Get nearest neighbor\n",
-    "        nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i,:]})\n",
+    "        nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]})\n",
     "        # Get nearest neighbor class label and compare it to its true label\n",
     "        print \"Test\", i, \"Prediction:\", np.argmax(Ytr[nn_index]), \\\n",
-    "              \"True Class:\", np.argmax(Yte[i])\n",
+    "            \"True Class:\", np.argmax(Yte[i])\n",
     "        # Calculate accuracy\n",
     "        if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):\n",
     "            accuracy += 1./len(Xte)\n",
     "    print \"Done!\"\n",
     "    print \"Accuracy:\", accuracy"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "IPython (Python 2.7)",
+   "display_name": "Python 2",
    "language": "python",
    "name": "python2"
   },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 2
+    "version": 2.0
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython2",
-   "version": "2.7.8"
+   "version": "2.7.11"
   }
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}

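The L1 nearest-neighbor op in this notebook has a one-line NumPy counterpart, convenient for checking predictions outside the graph. A minimal sketch, with Xtr, Ytr, Xte as defined above:

    import numpy as np

    def nn_index_l1(Xtr, x):
        # Index of the training image closest to x in L1 distance,
        # mirroring tf.arg_min(tf.reduce_sum(tf.abs(xtr - xte), 1), 0)
        return np.argmin(np.abs(Xtr - x).sum(axis=1))

    # Predicted class for test image i:
    #   np.argmax(Ytr[nn_index_l1(Xtr, Xte[i])])
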
+ 0 - 348
notebooks/3 - Neural Networks/alexnet.ipynb

@@ -1,348 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# AlexNet implementation example using TensorFlow library.\n",
-    "# This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)\n",
-    "# AlexNet Paper (http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf)\n",
-    "\n",
-    "# Author: Aymeric Damien\n",
-    "# Project: https://github.com/aymericdamien/TensorFlow-Examples/"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
-     ]
-    }
-   ],
-   "source": [
-    "# Import MNIST data\n",
-    "import input_data\n",
-    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "import tensorflow as tf"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 17,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Parameters\n",
-    "learning_rate = 0.001\n",
-    "training_iters = 300000\n",
-    "batch_size = 64\n",
-    "display_step = 100"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Network Parameters\n",
-    "n_input = 784 # MNIST data input (img shape: 28*28)\n",
-    "n_classes = 10 # MNIST total classes (0-9 digits)\n",
-    "dropout = 0.8 # Dropout, probability to keep units"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# tf Graph input\n",
-    "x = tf.placeholder(tf.float32, [None, n_input])\n",
-    "y = tf.placeholder(tf.float32, [None, n_classes])\n",
-    "keep_prob = tf.placeholder(tf.float32) # dropout (keep probability)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Create AlexNet model\n",
-    "def conv2d(name, l_input, w, b):\n",
-    "    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1], \n",
-    "                                                  padding='SAME'), b), name=name)\n",
-    "\n",
-    "def max_pool(name, l_input, k):\n",
-    "    return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, k, k, 1], \n",
-    "                          padding='SAME', name=name)\n",
-    "\n",
-    "def norm(name, l_input, lsize=4):\n",
-    "    return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=name)\n",
-    "\n",
-    "def alex_net(_X, _weights, _biases, _dropout):\n",
-    "    # Reshape input picture\n",
-    "    _X = tf.reshape(_X, shape=[-1, 28, 28, 1])\n",
-    "\n",
-    "    # Convolution Layer\n",
-    "    conv1 = conv2d('conv1', _X, _weights['wc1'], _biases['bc1'])\n",
-    "    # Max Pooling (down-sampling)\n",
-    "    pool1 = max_pool('pool1', conv1, k=2)\n",
-    "    # Apply Normalization\n",
-    "    norm1 = norm('norm1', pool1, lsize=4)\n",
-    "    # Apply Dropout\n",
-    "    norm1 = tf.nn.dropout(norm1, _dropout)\n",
-    "\n",
-    "    # Convolution Layer\n",
-    "    conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'])\n",
-    "    # Max Pooling (down-sampling)\n",
-    "    pool2 = max_pool('pool2', conv2, k=2)\n",
-    "    # Apply Normalization\n",
-    "    norm2 = norm('norm2', pool2, lsize=4)\n",
-    "    # Apply Dropout\n",
-    "    norm2 = tf.nn.dropout(norm2, _dropout)\n",
-    "\n",
-    "    # Convolution Layer\n",
-    "    conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'])\n",
-    "    # Max Pooling (down-sampling)\n",
-    "    pool3 = max_pool('pool3', conv3, k=2)\n",
-    "    # Apply Normalization\n",
-    "    norm3 = norm('norm3', pool3, lsize=4)\n",
-    "    # Apply Dropout\n",
-    "    norm3 = tf.nn.dropout(norm3, _dropout)\n",
-    "\n",
-    "    # Fully connected layer\n",
-    "    # Reshape conv3 output to fit dense layer input\n",
-    "    dense1 = tf.reshape(norm3, [-1, _weights['wd1'].get_shape().as_list()[0]]) \n",
-    "    # Relu activation\n",
-    "    dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'], name='fc1')\n",
-    "    \n",
-    "    # Relu activation\n",
-    "    dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc2') \n",
-    "\n",
-    "    # Output, class prediction\n",
-    "    out = tf.matmul(dense2, _weights['out']) + _biases['out']\n",
-    "    return out"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 8,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Store layers weight & bias\n",
-    "weights = {\n",
-    "    'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64])),\n",
-    "    'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128])),\n",
-    "    'wc3': tf.Variable(tf.random_normal([3, 3, 128, 256])),\n",
-    "    'wd1': tf.Variable(tf.random_normal([4*4*256, 1024])),\n",
-    "    'wd2': tf.Variable(tf.random_normal([1024, 1024])),\n",
-    "    'out': tf.Variable(tf.random_normal([1024, 10]))\n",
-    "}\n",
-    "biases = {\n",
-    "    'bc1': tf.Variable(tf.random_normal([64])),\n",
-    "    'bc2': tf.Variable(tf.random_normal([128])),\n",
-    "    'bc3': tf.Variable(tf.random_normal([256])),\n",
-    "    'bd1': tf.Variable(tf.random_normal([1024])),\n",
-    "    'bd2': tf.Variable(tf.random_normal([1024])),\n",
-    "    'out': tf.Variable(tf.random_normal([n_classes]))\n",
-    "}"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Construct model\n",
-    "pred = alex_net(x, weights, biases, keep_prob)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 10,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Define loss and optimizer\n",
-    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\n",
-    "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 11,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Evaluate model\n",
-    "correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\n",
-    "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 12,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Initializing the variables\n",
-    "init = tf.initialize_all_variables()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 18,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Iter 6400, Minibatch Loss= 29666.185547, Training Accuracy= 0.59375\n",
-      "Iter 12800, Minibatch Loss= 22125.562500, Training Accuracy= 0.60938\n",
-      "Iter 19200, Minibatch Loss= 22631.134766, Training Accuracy= 0.59375\n",
-      "Iter 25600, Minibatch Loss= 18498.414062, Training Accuracy= 0.62500\n",
-      "Iter 32000, Minibatch Loss= 11318.283203, Training Accuracy= 0.70312\n",
-      "Iter 38400, Minibatch Loss= 12076.280273, Training Accuracy= 0.70312\n",
-      "Iter 44800, Minibatch Loss= 8195.520508, Training Accuracy= 0.82812\n",
-      "Iter 51200, Minibatch Loss= 5176.181641, Training Accuracy= 0.84375\n",
-      "Iter 57600, Minibatch Loss= 8951.896484, Training Accuracy= 0.81250\n",
-      "Iter 64000, Minibatch Loss= 10096.946289, Training Accuracy= 0.78125\n",
-      "Iter 70400, Minibatch Loss= 11466.641602, Training Accuracy= 0.68750\n",
-      "Iter 76800, Minibatch Loss= 7469.824219, Training Accuracy= 0.78125\n",
-      "Iter 83200, Minibatch Loss= 4147.449219, Training Accuracy= 0.89062\n",
-      "Iter 89600, Minibatch Loss= 5904.782227, Training Accuracy= 0.82812\n",
-      "Iter 96000, Minibatch Loss= 718.493713, Training Accuracy= 0.93750\n",
-      "Iter 102400, Minibatch Loss= 2184.151367, Training Accuracy= 0.93750\n",
-      "Iter 108800, Minibatch Loss= 2354.463135, Training Accuracy= 0.89062\n",
-      "Iter 115200, Minibatch Loss= 8612.959961, Training Accuracy= 0.81250\n",
-      "Iter 121600, Minibatch Loss= 2225.773926, Training Accuracy= 0.84375\n",
-      "Iter 128000, Minibatch Loss= 160.583618, Training Accuracy= 0.96875\n",
-      "Iter 134400, Minibatch Loss= 1524.846069, Training Accuracy= 0.93750\n",
-      "Iter 140800, Minibatch Loss= 3501.871094, Training Accuracy= 0.89062\n",
-      "Iter 147200, Minibatch Loss= 661.977051, Training Accuracy= 0.96875\n",
-      "Iter 153600, Minibatch Loss= 367.857788, Training Accuracy= 0.98438\n",
-      "Iter 160000, Minibatch Loss= 1735.458740, Training Accuracy= 0.90625\n",
-      "Iter 166400, Minibatch Loss= 209.320374, Training Accuracy= 0.95312\n",
-      "Iter 172800, Minibatch Loss= 1788.553955, Training Accuracy= 0.90625\n",
-      "Iter 179200, Minibatch Loss= 912.995544, Training Accuracy= 0.93750\n",
-      "Iter 185600, Minibatch Loss= 2534.074463, Training Accuracy= 0.87500\n",
-      "Iter 192000, Minibatch Loss= 73.052612, Training Accuracy= 0.96875\n",
-      "Iter 198400, Minibatch Loss= 1609.606323, Training Accuracy= 0.93750\n",
-      "Iter 204800, Minibatch Loss= 1823.219727, Training Accuracy= 0.96875\n",
-      "Iter 211200, Minibatch Loss= 578.051086, Training Accuracy= 0.96875\n",
-      "Iter 217600, Minibatch Loss= 1532.326172, Training Accuracy= 0.89062\n",
-      "Iter 224000, Minibatch Loss= 769.775269, Training Accuracy= 0.95312\n",
-      "Iter 230400, Minibatch Loss= 2614.737793, Training Accuracy= 0.92188\n",
-      "Iter 236800, Minibatch Loss= 938.664368, Training Accuracy= 0.95312\n",
-      "Iter 243200, Minibatch Loss= 1520.495605, Training Accuracy= 0.93750\n",
-      "Iter 249600, Minibatch Loss= 657.419739, Training Accuracy= 0.95312\n",
-      "Iter 256000, Minibatch Loss= 522.802124, Training Accuracy= 0.90625\n",
-      "Iter 262400, Minibatch Loss= 211.188477, Training Accuracy= 0.96875\n",
-      "Iter 268800, Minibatch Loss= 520.451172, Training Accuracy= 0.92188\n",
-      "Iter 275200, Minibatch Loss= 1418.759155, Training Accuracy= 0.89062\n",
-      "Iter 281600, Minibatch Loss= 241.748596, Training Accuracy= 0.96875\n",
-      "Iter 288000, Minibatch Loss= 0.000000, Training Accuracy= 1.00000\n",
-      "Iter 294400, Minibatch Loss= 1535.772827, Training Accuracy= 0.92188\n",
-      "Optimization Finished!\n",
-      "Testing Accuracy: 0.980469\n"
-     ]
-    }
-   ],
-   "source": [
-    "# Launch the graph\n",
-    "with tf.Session() as sess:\n",
-    "    sess.run(init)\n",
-    "    step = 1\n",
-    "    # Keep training until reach max iterations\n",
-    "    while step * batch_size < training_iters:\n",
-    "        batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n",
-    "        # Fit training using batch data\n",
-    "        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})\n",
-    "        if step % display_step == 0:\n",
-    "            # Calculate batch accuracy\n",
-    "            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})\n",
-    "            # Calculate batch loss\n",
-    "            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})\n",
-    "            print \"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" \\\n",
-    "                  + \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \"{:.5f}\".format(acc)\n",
-    "        step += 1\n",
-    "    print \"Optimization Finished!\"\n",
-    "    # Calculate accuracy for 256 mnist test images\n",
-    "    print \"Testing Accuracy:\", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], \n",
-    "                                                             y: mnist.test.labels[:256], \n",
-    "                                                             keep_prob: 1.})"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "IPython (Python 2.7)",
-   "language": "python",
-   "name": "python2"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 2
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.8"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
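
The deleted AlexNet notebook's dense-layer weight 'wd1' has shape [4*4*256, 1024] because three stride-2 'SAME' max-pools each ceil-divide the 28x28 spatial size. A minimal sketch of that arithmetic:

    import math

    size = 28
    for _ in range(3):                     # three max_pool(..., k=2) layers
        size = int(math.ceil(size / 2.0))  # SAME padding: 28 -> 14 -> 7 -> 4
    print(size * size * 256)               # 4096 = 4*4*256, first dim of 'wd1'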

+ 0 - 350
notebooks/3 - Neural Networks/bidirectional_rnn.ipynb

@@ -1,350 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "'''\n",
-    "A Bidirectional Recurrent Neural Network (LSTM) implementation example using TensorFlow library.\n",
-    "This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)\n",
-    "Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf\n",
-    "\n",
-    "Author: Aymeric Damien\n",
-    "Project: https://github.com/aymericdamien/TensorFlow-Examples/\n",
-    "'''"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
-     ]
-    }
-   ],
-   "source": [
-    "# Import MNIST data\n",
-    "import input_data\n",
-    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n",
-    "\n",
-    "import tensorflow as tf\n",
-    "from tensorflow.python.ops.constant_op import constant\n",
-    "from tensorflow.models.rnn import rnn, rnn_cell\n",
-    "import numpy as np"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "\n",
-    "'''\n",
-    "To classify images using a bidirectional recurrent neural network, we consider every image row as a sequence of pixels.\n",
-    "Because the MNIST image shape is 28*28 px, we will then handle sequences of 28 timesteps, with 28 inputs per step, for every sample.\n",
-    "'''\n",
-    "\n",
-    "# Parameters\n",
-    "learning_rate = 0.001\n",
-    "training_iters = 100000\n",
-    "batch_size = 128\n",
-    "display_step = 10\n",
-    "\n",
-    "# Network Parameters\n",
-    "n_input = 28 # MNIST data input (img shape: 28*28)\n",
-    "n_steps = 28 # timesteps\n",
-    "n_hidden = 128 # hidden layer num of features\n",
-    "n_classes = 10 # MNIST total classes (0-9 digits)\n",
-    "\n",
-    "# tf Graph input\n",
-    "x = tf.placeholder(\"float\", [None, n_steps, n_input])\n",
-    "# Tensorflow LSTM cell requires 2x n_hidden length (state & cell)\n",
-    "istate_fw = tf.placeholder(\"float\", [None, 2*n_hidden])\n",
-    "istate_bw = tf.placeholder(\"float\", [None, 2*n_hidden])\n",
-    "y = tf.placeholder(\"float\", [None, n_classes])\n",
-    "\n",
-    "# Define weights\n",
-    "weights = {\n",
-    "    # Hidden layer weights => 2*n_hidden because of forward + backward cells\n",
-    "    'hidden': tf.Variable(tf.random_normal([n_input, 2*n_hidden])),\n",
-    "    'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))\n",
-    "}\n",
-    "biases = {\n",
-    "    'hidden': tf.Variable(tf.random_normal([2*n_hidden])),\n",
-    "    'out': tf.Variable(tf.random_normal([n_classes]))\n",
-    "}"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "def BiRNN(_X, _istate_fw, _istate_bw, _weights, _biases, _batch_size, _seq_len):\n",
-    "\n",
-    "    # BiRNN requires sequence_length to be supplied as an int64 vector of shape [batch_size]\n",
-    "    # Note: TensorFlow 0.6.0 requires the BiRNN sequence_length parameter to be set\n",
-    "    # For a better implementation with the latest version of TensorFlow, see below\n",
-    "    _seq_len = tf.fill([_batch_size], constant(_seq_len, dtype=tf.int64))\n",
-    "\n",
-    "    # input shape: (batch_size, n_steps, n_input)\n",
-    "    _X = tf.transpose(_X, [1, 0, 2])  # permute n_steps and batch_size\n",
-    "    # Reshape to prepare input to hidden activation\n",
-    "    _X = tf.reshape(_X, [-1, n_input]) # (n_steps*batch_size, n_input)\n",
-    "    # Linear activation\n",
-    "    _X = tf.matmul(_X, _weights['hidden']) + _biases['hidden']\n",
-    "\n",
-    "    # Define lstm cells with tensorflow\n",
-    "    # Forward direction cell\n",
-    "    lstm_fw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)\n",
-    "    # Backward direction cell\n",
-    "    lstm_bw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)\n",
-    "    # Split data because rnn cell needs a list of inputs for the RNN inner loop\n",
-    "    _X = tf.split(0, n_steps, _X) # n_steps * (batch_size, n_hidden)\n",
-    "\n",
-    "    # Get lstm cell output\n",
-    "    outputs = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, _X,\n",
-    "                                            initial_state_fw=_istate_fw,\n",
-    "                                            initial_state_bw=_istate_bw,\n",
-    "                                            sequence_length=_seq_len)\n",
-    "\n",
-    "    # Linear activation\n",
-    "    # Get inner loop last output\n",
-    "    return tf.matmul(outputs[-1], _weights['out']) + _biases['out']\n",
-    "\n",
-    "pred = BiRNN(x, istate_fw, istate_bw, weights, biases, batch_size, n_steps)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# NOTE: The following code works with the current master version of TensorFlow,\n",
-    "#       where the BiRNN sequence_length parameter isn't required, so we don't define it\n",
-    "#\n",
-    "# def BiRNN(_X, _istate_fw, _istate_bw, _weights, _biases):\n",
-    "#\n",
-    "#     # input shape: (batch_size, n_steps, n_input)\n",
-    "#     _X = tf.transpose(_X, [1, 0, 2])  # permute n_steps and batch_size\n",
-    "#     # Reshape to prepare input to hidden activation\n",
-    "#     _X = tf.reshape(_X, [-1, n_input]) # (n_steps*batch_size, n_input)\n",
-    "#     # Linear activation\n",
-    "#     _X = tf.matmul(_X, _weights['hidden']) + _biases['hidden']\n",
-    "#\n",
-    "#     # Define lstm cells with tensorflow\n",
-    "#     # Forward direction cell\n",
-    "#     lstm_fw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)\n",
-    "#     # Backward direction cell\n",
-    "#     lstm_bw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)\n",
-    "#     # Split data because rnn cell needs a list of inputs for the RNN inner loop\n",
-    "#     _X = tf.split(0, n_steps, _X) # n_steps * (batch_size, n_hidden)\n",
-    "#\n",
-    "#     # Get lstm cell output\n",
-    "#     outputs = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, _X,\n",
-    "#                                             initial_state_fw=_istate_fw,\n",
-    "#                                             initial_state_bw=_istate_bw)\n",
-    "#\n",
-    "#     # Linear activation\n",
-    "#     # Get inner loop last output\n",
-    "#     return tf.matmul(outputs[-1], _weights['out']) + _biases['out']\n",
-    "#\n",
-    "# pred = BiRNN(x, istate_fw, istate_bw, weights, biases)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Define loss and optimizer\n",
-    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y)) # Softmax loss\n",
-    "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer\n",
-    "\n",
-    "# Evaluate model\n",
-    "correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\n",
-    "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
-    "\n",
-    "# Initializing the variables\n",
-    "init = tf.initialize_all_variables()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Iter 1280, Minibatch Loss= 4.548751, Training Accuracy= 0.25781\n",
-      "Iter 2560, Minibatch Loss= 1.881705, Training Accuracy= 0.36719\n",
-      "Iter 3840, Minibatch Loss= 1.791362, Training Accuracy= 0.34375\n",
-      "Iter 5120, Minibatch Loss= 1.186327, Training Accuracy= 0.63281\n",
-      "Iter 6400, Minibatch Loss= 0.933242, Training Accuracy= 0.66406\n",
-      "Iter 7680, Minibatch Loss= 1.210745, Training Accuracy= 0.59375\n",
-      "Iter 8960, Minibatch Loss= 0.893051, Training Accuracy= 0.63281\n",
-      "Iter 10240, Minibatch Loss= 0.752483, Training Accuracy= 0.77344\n",
-      "Iter 11520, Minibatch Loss= 0.599419, Training Accuracy= 0.77344\n",
-      "Iter 12800, Minibatch Loss= 0.931269, Training Accuracy= 0.67969\n",
-      "Iter 14080, Minibatch Loss= 0.521487, Training Accuracy= 0.82031\n",
-      "Iter 15360, Minibatch Loss= 0.593033, Training Accuracy= 0.78906\n",
-      "Iter 16640, Minibatch Loss= 0.554892, Training Accuracy= 0.78906\n",
-      "Iter 17920, Minibatch Loss= 0.495159, Training Accuracy= 0.86719\n",
-      "Iter 19200, Minibatch Loss= 0.477557, Training Accuracy= 0.82812\n",
-      "Iter 20480, Minibatch Loss= 0.345205, Training Accuracy= 0.89844\n",
-      "Iter 21760, Minibatch Loss= 0.764044, Training Accuracy= 0.76562\n",
-      "Iter 23040, Minibatch Loss= 0.360194, Training Accuracy= 0.86719\n",
-      "Iter 24320, Minibatch Loss= 0.563836, Training Accuracy= 0.79688\n",
-      "Iter 25600, Minibatch Loss= 0.619804, Training Accuracy= 0.78906\n",
-      "Iter 26880, Minibatch Loss= 0.489240, Training Accuracy= 0.81250\n",
-      "Iter 28160, Minibatch Loss= 0.386111, Training Accuracy= 0.89844\n",
-      "Iter 29440, Minibatch Loss= 0.443906, Training Accuracy= 0.88281\n",
-      "Iter 30720, Minibatch Loss= 0.363123, Training Accuracy= 0.86719\n",
-      "Iter 32000, Minibatch Loss= 0.447942, Training Accuracy= 0.85938\n",
-      "Iter 33280, Minibatch Loss= 0.375448, Training Accuracy= 0.88281\n",
-      "Iter 34560, Minibatch Loss= 0.605834, Training Accuracy= 0.81250\n",
-      "Iter 35840, Minibatch Loss= 0.235447, Training Accuracy= 0.90625\n",
-      "Iter 37120, Minibatch Loss= 0.485220, Training Accuracy= 0.86719\n",
-      "Iter 38400, Minibatch Loss= 0.327258, Training Accuracy= 0.92969\n",
-      "Iter 39680, Minibatch Loss= 0.216945, Training Accuracy= 0.91406\n",
-      "Iter 40960, Minibatch Loss= 0.554652, Training Accuracy= 0.82812\n",
-      "Iter 42240, Minibatch Loss= 0.409230, Training Accuracy= 0.87500\n",
-      "Iter 43520, Minibatch Loss= 0.204563, Training Accuracy= 0.92188\n",
-      "Iter 44800, Minibatch Loss= 0.359138, Training Accuracy= 0.87500\n",
-      "Iter 46080, Minibatch Loss= 0.306512, Training Accuracy= 0.89844\n",
-      "Iter 47360, Minibatch Loss= 0.356531, Training Accuracy= 0.86719\n",
-      "Iter 48640, Minibatch Loss= 0.319080, Training Accuracy= 0.87500\n",
-      "Iter 49920, Minibatch Loss= 0.326718, Training Accuracy= 0.89844\n",
-      "Iter 51200, Minibatch Loss= 0.346867, Training Accuracy= 0.88281\n",
-      "Iter 52480, Minibatch Loss= 0.248568, Training Accuracy= 0.92969\n",
-      "Iter 53760, Minibatch Loss= 0.127805, Training Accuracy= 0.94531\n",
-      "Iter 55040, Minibatch Loss= 0.386457, Training Accuracy= 0.88281\n",
-      "Iter 56320, Minibatch Loss= 0.384653, Training Accuracy= 0.84375\n",
-      "Iter 57600, Minibatch Loss= 0.384377, Training Accuracy= 0.85938\n",
-      "Iter 58880, Minibatch Loss= 0.378528, Training Accuracy= 0.83594\n",
-      "Iter 60160, Minibatch Loss= 0.183152, Training Accuracy= 0.94531\n",
-      "Iter 61440, Minibatch Loss= 0.211561, Training Accuracy= 0.92969\n",
-      "Iter 62720, Minibatch Loss= 0.194529, Training Accuracy= 0.94531\n",
-      "Iter 64000, Minibatch Loss= 0.175247, Training Accuracy= 0.93750\n",
-      "Iter 65280, Minibatch Loss= 0.270519, Training Accuracy= 0.89844\n",
-      "Iter 66560, Minibatch Loss= 0.225893, Training Accuracy= 0.94531\n",
-      "Iter 67840, Minibatch Loss= 0.391300, Training Accuracy= 0.91406\n",
-      "Iter 69120, Minibatch Loss= 0.259621, Training Accuracy= 0.87500\n",
-      "Iter 70400, Minibatch Loss= 0.255645, Training Accuracy= 0.92969\n",
-      "Iter 71680, Minibatch Loss= 0.217164, Training Accuracy= 0.91406\n",
-      "Iter 72960, Minibatch Loss= 0.235931, Training Accuracy= 0.92188\n",
-      "Iter 74240, Minibatch Loss= 0.193127, Training Accuracy= 0.92188\n",
-      "Iter 75520, Minibatch Loss= 0.246558, Training Accuracy= 0.92969\n",
-      "Iter 76800, Minibatch Loss= 0.167383, Training Accuracy= 0.92969\n",
-      "Iter 78080, Minibatch Loss= 0.130506, Training Accuracy= 0.96875\n",
-      "Iter 79360, Minibatch Loss= 0.168879, Training Accuracy= 0.96875\n",
-      "Iter 80640, Minibatch Loss= 0.245589, Training Accuracy= 0.93750\n",
-      "Iter 81920, Minibatch Loss= 0.136840, Training Accuracy= 0.94531\n",
-      "Iter 83200, Minibatch Loss= 0.133286, Training Accuracy= 0.96875\n",
-      "Iter 84480, Minibatch Loss= 0.221121, Training Accuracy= 0.95312\n",
-      "Iter 85760, Minibatch Loss= 0.257268, Training Accuracy= 0.91406\n",
-      "Iter 87040, Minibatch Loss= 0.227299, Training Accuracy= 0.92969\n",
-      "Iter 88320, Minibatch Loss= 0.170016, Training Accuracy= 0.96094\n",
-      "Iter 89600, Minibatch Loss= 0.350118, Training Accuracy= 0.89844\n",
-      "Iter 90880, Minibatch Loss= 0.149303, Training Accuracy= 0.95312\n",
-      "Iter 92160, Minibatch Loss= 0.200295, Training Accuracy= 0.94531\n",
-      "Iter 93440, Minibatch Loss= 0.274823, Training Accuracy= 0.89844\n",
-      "Iter 94720, Minibatch Loss= 0.162888, Training Accuracy= 0.96875\n",
-      "Iter 96000, Minibatch Loss= 0.164938, Training Accuracy= 0.93750\n",
-      "Iter 97280, Minibatch Loss= 0.257220, Training Accuracy= 0.92969\n",
-      "Iter 98560, Minibatch Loss= 0.208767, Training Accuracy= 0.92188\n",
-      "Iter 99840, Minibatch Loss= 0.101323, Training Accuracy= 0.97656\n",
-      "Optimization Finished!\n",
-      "Testing Accuracy: 0.945312\n"
-     ]
-    }
-   ],
-   "source": [
-    "# Launch the graph\n",
-    "with tf.Session() as sess:\n",
-    "    sess.run(init)\n",
-    "    step = 1\n",
-    "    # Keep training until reach max iterations\n",
-    "    while step * batch_size < training_iters:\n",
-    "        batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n",
-    "        # Reshape data to get 28 seq of 28 elements\n",
-    "        batch_xs = batch_xs.reshape((batch_size, n_steps, n_input))\n",
-    "        # Fit training using batch data\n",
-    "        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys,\n",
-    "                                       istate_fw: np.zeros((batch_size, 2*n_hidden)),\n",
-    "                                       istate_bw: np.zeros((batch_size, 2*n_hidden))})\n",
-    "        if step % display_step == 0:\n",
-    "            # Calculate batch accuracy\n",
-    "            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys,\n",
-    "                                                istate_fw: np.zeros((batch_size, 2*n_hidden)),\n",
-    "                                                istate_bw: np.zeros((batch_size, 2*n_hidden))})\n",
-    "            # Calculate batch loss\n",
-    "            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys,\n",
-    "                                             istate_fw: np.zeros((batch_size, 2*n_hidden)),\n",
-    "                                             istate_bw: np.zeros((batch_size, 2*n_hidden))})\n",
-    "            print \"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" + \"{:.6f}\".format(loss) + \\\n",
-    "                  \", Training Accuracy= \" + \"{:.5f}\".format(acc)\n",
-    "        step += 1\n",
-    "    print \"Optimization Finished!\"\n",
-    "    # Calculate accuracy for 128 mnist test images\n",
-    "    test_len = 128\n",
-    "    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))\n",
-    "    test_label = mnist.test.labels[:test_len]\n",
-    "    print \"Testing Accuracy:\", sess.run(accuracy, feed_dict={x: test_data, y: test_label,\n",
-    "                                                             istate_fw: np.zeros((test_len, 2*n_hidden)),\n",
-    "                                                             istate_bw: np.zeros((test_len, 2*n_hidden))})"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "IPython (Python 2.7)",
-   "language": "python",
-   "name": "python2"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 2
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.8"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
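
The transpose/reshape/split pipeline in the deleted bidirectional-RNN notebook only rearranges axes so that the cells see one timestep at a time. A NumPy sketch of the shape bookkeeping under the notebook's parameters (the linear 'hidden' layer, which maps n_input to 2*n_hidden before the split, is left out for brevity):

    import numpy as np

    batch_size, n_steps, n_input = 128, 28, 28
    X = np.zeros((batch_size, n_steps, n_input))

    X = np.transpose(X, (1, 0, 2))        # (n_steps, batch_size, n_input)
    X = np.reshape(X, (-1, n_input))      # (n_steps*batch_size, n_input)
    steps = np.split(X, n_steps, axis=0)  # n_steps chunks of (batch_size, n_input)
    print(len(steps), steps[0].shape)     # 28 (128, 28)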

+ 0 - 324
notebooks/3 - Neural Networks/convolutional_network.ipynb

@@ -1,324 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# A Convolutional Network implementation example using TensorFlow library.\n",
-    "# This example is using the MNIST database of handwritten digits\n",
-    "# (http://yann.lecun.com/exdb/mnist/)\n",
-    "\n",
-    "# Author: Aymeric Damien\n",
-    "# Project: https://github.com/aymericdamien/TensorFlow-Examples/"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
-     ]
-    }
-   ],
-   "source": [
-    "# Import MNIST data\n",
-    "import input_data\n",
-    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "import tensorflow as tf"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 18,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Parameters\n",
-    "learning_rate = 0.001\n",
-    "training_iters = 100000\n",
-    "batch_size = 128\n",
-    "display_step = 20"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Network Parameters\n",
-    "n_input = 784 # MNIST data input (img shape: 28*28)\n",
-    "n_classes = 10 # MNIST total classes (0-9 digits)\n",
-    "dropout = 0.75 # Dropout, probability to keep units"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# tf Graph input\n",
-    "x = tf.placeholder(tf.float32, [None, n_input])\n",
-    "y = tf.placeholder(tf.float32, [None, n_classes])\n",
-    "keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 8,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Create model\n",
-    "def conv2d(img, w, b):\n",
-    "    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(img, w, strides=[1, 1, 1, 1], \n",
-    "                                                  padding='SAME'), b))\n",
-    "\n",
-    "def max_pool(img, k):\n",
-    "    return tf.nn.max_pool(img, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')\n",
-    "\n",
-    "def conv_net(_X, _weights, _biases, _dropout):\n",
-    "    # Reshape input picture\n",
-    "    _X = tf.reshape(_X, shape=[-1, 28, 28, 1])\n",
-    "\n",
-    "    # Convolution Layer\n",
-    "    conv1 = conv2d(_X, _weights['wc1'], _biases['bc1'])\n",
-    "    # Max Pooling (down-sampling)\n",
-    "    conv1 = max_pool(conv1, k=2)\n",
-    "    # Apply Dropout\n",
-    "    conv1 = tf.nn.dropout(conv1, _dropout)\n",
-    "\n",
-    "    # Convolution Layer\n",
-    "    conv2 = conv2d(conv1, _weights['wc2'], _biases['bc2'])\n",
-    "    # Max Pooling (down-sampling)\n",
-    "    conv2 = max_pool(conv2, k=2)\n",
-    "    # Apply Dropout\n",
-    "    conv2 = tf.nn.dropout(conv2, _dropout)\n",
-    "\n",
-    "    # Fully connected layer\n",
-    "    # Reshape conv2 output to fit dense layer input\n",
-    "    dense1 = tf.reshape(conv2, [-1, _weights['wd1'].get_shape().as_list()[0]]) \n",
-    "    # Relu activation\n",
-    "    dense1 = tf.nn.relu(tf.add(tf.matmul(dense1, _weights['wd1']), _biases['bd1']))\n",
-    "    # Apply Dropout\n",
-    "    dense1 = tf.nn.dropout(dense1, _dropout)\n",
-    "\n",
-    "    # Output, class prediction\n",
-    "    out = tf.add(tf.matmul(dense1, _weights['out']), _biases['out'])\n",
-    "    return out"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Store layers weight & bias\n",
-    "weights = {\n",
-    "    # 5x5 conv, 1 input, 32 outputs\n",
-    "    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])), \n",
-    "    # 5x5 conv, 32 inputs, 64 outputs\n",
-    "    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])), \n",
-    "    # fully connected, 7*7*64 inputs, 1024 outputs\n",
-    "    'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])), \n",
-    "    # 1024 inputs, 10 outputs (class prediction)\n",
-    "    'out': tf.Variable(tf.random_normal([1024, n_classes])) \n",
-    "}\n",
-    "\n",
-    "biases = {\n",
-    "    'bc1': tf.Variable(tf.random_normal([32])),\n",
-    "    'bc2': tf.Variable(tf.random_normal([64])),\n",
-    "    'bd1': tf.Variable(tf.random_normal([1024])),\n",
-    "    'out': tf.Variable(tf.random_normal([n_classes]))\n",
-    "}"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 10,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Construct model\n",
-    "pred = conv_net(x, weights, biases, keep_prob)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 11,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Define loss and optimizer\n",
-    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\n",
-    "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 12,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Evaluate model\n",
-    "correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\n",
-    "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 13,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# Initializing the variables\n",
-    "init = tf.initialize_all_variables()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 19,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Iter 2560, Minibatch Loss= 26046.011719, Training Accuracy= 0.21094\n",
-      "Iter 5120, Minibatch Loss= 10456.769531, Training Accuracy= 0.52344\n",
-      "Iter 7680, Minibatch Loss= 6273.207520, Training Accuracy= 0.71875\n",
-      "Iter 10240, Minibatch Loss= 6276.231445, Training Accuracy= 0.64062\n",
-      "Iter 12800, Minibatch Loss= 4188.221680, Training Accuracy= 0.77344\n",
-      "Iter 15360, Minibatch Loss= 2717.077637, Training Accuracy= 0.80469\n",
-      "Iter 17920, Minibatch Loss= 4057.120361, Training Accuracy= 0.81250\n",
-      "Iter 20480, Minibatch Loss= 1696.550415, Training Accuracy= 0.87500\n",
-      "Iter 23040, Minibatch Loss= 2525.317627, Training Accuracy= 0.85938\n",
-      "Iter 25600, Minibatch Loss= 2341.906738, Training Accuracy= 0.87500\n",
-      "Iter 28160, Minibatch Loss= 4200.535156, Training Accuracy= 0.79688\n",
-      "Iter 30720, Minibatch Loss= 1888.964355, Training Accuracy= 0.89062\n",
-      "Iter 33280, Minibatch Loss= 2167.645996, Training Accuracy= 0.84375\n",
-      "Iter 35840, Minibatch Loss= 1932.107544, Training Accuracy= 0.89844\n",
-      "Iter 38400, Minibatch Loss= 1562.430054, Training Accuracy= 0.90625\n",
-      "Iter 40960, Minibatch Loss= 1676.755249, Training Accuracy= 0.84375\n",
-      "Iter 43520, Minibatch Loss= 1003.626099, Training Accuracy= 0.93750\n",
-      "Iter 46080, Minibatch Loss= 1176.615479, Training Accuracy= 0.86719\n",
-      "Iter 48640, Minibatch Loss= 1260.592651, Training Accuracy= 0.88281\n",
-      "Iter 51200, Minibatch Loss= 1399.667969, Training Accuracy= 0.86719\n",
-      "Iter 53760, Minibatch Loss= 1259.961426, Training Accuracy= 0.89844\n",
-      "Iter 56320, Minibatch Loss= 1415.800781, Training Accuracy= 0.89062\n",
-      "Iter 58880, Minibatch Loss= 1835.365967, Training Accuracy= 0.85156\n",
-      "Iter 61440, Minibatch Loss= 1395.168823, Training Accuracy= 0.90625\n",
-      "Iter 64000, Minibatch Loss= 973.283569, Training Accuracy= 0.88281\n",
-      "Iter 66560, Minibatch Loss= 818.093811, Training Accuracy= 0.92969\n",
-      "Iter 69120, Minibatch Loss= 1178.744263, Training Accuracy= 0.92188\n",
-      "Iter 71680, Minibatch Loss= 845.889709, Training Accuracy= 0.89844\n",
-      "Iter 74240, Minibatch Loss= 1259.505615, Training Accuracy= 0.90625\n",
-      "Iter 76800, Minibatch Loss= 738.037109, Training Accuracy= 0.89844\n",
-      "Iter 79360, Minibatch Loss= 862.499146, Training Accuracy= 0.93750\n",
-      "Iter 81920, Minibatch Loss= 739.704041, Training Accuracy= 0.90625\n",
-      "Iter 84480, Minibatch Loss= 652.880310, Training Accuracy= 0.95312\n",
-      "Iter 87040, Minibatch Loss= 635.464600, Training Accuracy= 0.92969\n",
-      "Iter 89600, Minibatch Loss= 933.166626, Training Accuracy= 0.90625\n",
-      "Iter 92160, Minibatch Loss= 213.874893, Training Accuracy= 0.96094\n",
-      "Iter 94720, Minibatch Loss= 609.575684, Training Accuracy= 0.91406\n",
-      "Iter 97280, Minibatch Loss= 560.208008, Training Accuracy= 0.93750\n",
-      "Iter 99840, Minibatch Loss= 963.577148, Training Accuracy= 0.90625\n",
-      "Optimization Finished!\n",
-      "Testing Accuracy: 0.960938\n"
-     ]
-    }
-   ],
-   "source": [
-    "# Launch the graph\n",
-    "with tf.Session() as sess:\n",
-    "    sess.run(init)\n",
-    "    step = 1\n",
-    "    # Keep training until reach max iterations\n",
-    "    while step * batch_size < training_iters:\n",
-    "        batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n",
-    "        # Fit training using batch data\n",
-    "        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})\n",
-    "        if step % display_step == 0:\n",
-    "            # Calculate batch accuracy\n",
-    "            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})\n",
-    "            # Calculate batch loss\n",
-    "            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})\n",
-    "            print \"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" + \\\n",
-    "                  \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \"{:.5f}\".format(acc)\n",
-    "        step += 1\n",
-    "    print \"Optimization Finished!\"\n",
-    "    # Calculate accuracy for 256 mnist test images\n",
-    "    print \"Testing Accuracy:\", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], \n",
-    "                                                             y: mnist.test.labels[:256], \n",
-    "                                                             keep_prob: 1.})"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "IPython (Python 2.7)",
-   "language": "python",
-   "name": "python2"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 2
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.8"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
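
In the deleted convolutional-network notebook, evaluation feeds keep_prob: 1. so dropout is disabled while loss and accuracy are measured, and the tf.equal/tf.argmax pair computes plain top-1 accuracy. An equivalent NumPy sketch (logits and one-hot labels are hypothetical stand-ins for the notebook's tensors):

    import numpy as np

    def top1_accuracy(logits, labels_one_hot):
        # Compare predicted class ids with true class ids, then average
        correct = np.argmax(logits, axis=1) == np.argmax(labels_one_hot, axis=1)
        return float(np.mean(correct))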

+ 0 - 144
notebooks/3 - Neural Networks/input_data.py

@@ -1,144 +0,0 @@
-"""Functions for downloading and reading MNIST data."""
-from __future__ import print_function
-import gzip
-import os
-import urllib
-import numpy
-SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
-def maybe_download(filename, work_directory):
-  """Download the data from Yann's website, unless it's already here."""
-  if not os.path.exists(work_directory):
-    os.mkdir(work_directory)
-  filepath = os.path.join(work_directory, filename)
-  if not os.path.exists(filepath):
-    filepath, _ = urllib.urlretrieve(SOURCE_URL + filename, filepath)
-    statinfo = os.stat(filepath)
-    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
-  return filepath
-def _read32(bytestream):
-  dt = numpy.dtype(numpy.uint32).newbyteorder('>')
-  return numpy.frombuffer(bytestream.read(4), dtype=dt)
-def extract_images(filename):
-  """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
-  print('Extracting', filename)
-  with gzip.open(filename) as bytestream:
-    magic = _read32(bytestream)
-    if magic != 2051:
-      raise ValueError(
-          'Invalid magic number %d in MNIST image file: %s' %
-          (magic, filename))
-    num_images = _read32(bytestream)
-    rows = _read32(bytestream)
-    cols = _read32(bytestream)
-    buf = bytestream.read(rows * cols * num_images)
-    data = numpy.frombuffer(buf, dtype=numpy.uint8)
-    data = data.reshape(num_images, rows, cols, 1)
-    return data
-def dense_to_one_hot(labels_dense, num_classes=10):
-  """Convert class labels from scalars to one-hot vectors."""
-  num_labels = labels_dense.shape[0]
-  index_offset = numpy.arange(num_labels) * num_classes
-  labels_one_hot = numpy.zeros((num_labels, num_classes))
-  labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
-  return labels_one_hot
-def extract_labels(filename, one_hot=False):
-  """Extract the labels into a 1D uint8 numpy array [index]."""
-  print('Extracting', filename)
-  with gzip.open(filename) as bytestream:
-    magic = _read32(bytestream)
-    if magic != 2049:
-      raise ValueError(
-          'Invalid magic number %d in MNIST label file: %s' %
-          (magic, filename))
-    num_items = _read32(bytestream)
-    buf = bytestream.read(num_items)
-    labels = numpy.frombuffer(buf, dtype=numpy.uint8)
-    if one_hot:
-      return dense_to_one_hot(labels)
-    return labels
-class DataSet(object):
-  def __init__(self, images, labels, fake_data=False):
-    if fake_data:
-      self._num_examples = 10000
-    else:
-      assert images.shape[0] == labels.shape[0], (
-          "images.shape: %s labels.shape: %s" % (images.shape,
-                                                 labels.shape))
-      self._num_examples = images.shape[0]
-      # Convert shape from [num examples, rows, columns, depth]
-      # to [num examples, rows*columns] (assuming depth == 1)
-      assert images.shape[3] == 1
-      images = images.reshape(images.shape[0],
-                              images.shape[1] * images.shape[2])
-      # Convert from [0, 255] -> [0.0, 1.0].
-      images = images.astype(numpy.float32)
-      images = numpy.multiply(images, 1.0 / 255.0)
-    self._images = images
-    self._labels = labels
-    self._epochs_completed = 0
-    self._index_in_epoch = 0
-  @property
-  def images(self):
-    return self._images
-  @property
-  def labels(self):
-    return self._labels
-  @property
-  def num_examples(self):
-    return self._num_examples
-  @property
-  def epochs_completed(self):
-    return self._epochs_completed
-  def next_batch(self, batch_size, fake_data=False):
-    """Return the next `batch_size` examples from this data set."""
-    if fake_data:
-      fake_image = [1.0 for _ in xrange(784)]
-      fake_label = 0
-      return [fake_image for _ in xrange(batch_size)], [
-          fake_label for _ in xrange(batch_size)]
-    start = self._index_in_epoch
-    self._index_in_epoch += batch_size
-    if self._index_in_epoch > self._num_examples:
-      # Finished epoch
-      self._epochs_completed += 1
-      # Shuffle the data
-      perm = numpy.arange(self._num_examples)
-      numpy.random.shuffle(perm)
-      self._images = self._images[perm]
-      self._labels = self._labels[perm]
-      # Start next epoch
-      start = 0
-      self._index_in_epoch = batch_size
-      assert batch_size <= self._num_examples
-    end = self._index_in_epoch
-    return self._images[start:end], self._labels[start:end]
-def read_data_sets(train_dir, fake_data=False, one_hot=False):
-  class DataSets(object):
-    pass
-  data_sets = DataSets()
-  if fake_data:
-    data_sets.train = DataSet([], [], fake_data=True)
-    data_sets.validation = DataSet([], [], fake_data=True)
-    data_sets.test = DataSet([], [], fake_data=True)
-    return data_sets
-  TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
-  TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
-  TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
-  TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
-  VALIDATION_SIZE = 5000
-  local_file = maybe_download(TRAIN_IMAGES, train_dir)
-  train_images = extract_images(local_file)
-  local_file = maybe_download(TRAIN_LABELS, train_dir)
-  train_labels = extract_labels(local_file, one_hot=one_hot)
-  local_file = maybe_download(TEST_IMAGES, train_dir)
-  test_images = extract_images(local_file)
-  local_file = maybe_download(TEST_LABELS, train_dir)
-  test_labels = extract_labels(local_file, one_hot=one_hot)
-  validation_images = train_images[:VALIDATION_SIZE]
-  validation_labels = train_labels[:VALIDATION_SIZE]
-  train_images = train_images[VALIDATION_SIZE:]
-  train_labels = train_labels[VALIDATION_SIZE:]
-  data_sets.train = DataSet(train_images, train_labels)
-  data_sets.validation = DataSet(validation_images, validation_labels)
-  data_sets.test = DataSet(test_images, test_labels)
-  return data_sets
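
The deleted input_data.py builds one-hot labels with a flat-indexing trick: each row's offset into the flattened array, plus its class id, selects exactly one cell per row. A self-contained sketch of the same helper:

    import numpy as np

    def dense_to_one_hot(labels_dense, num_classes=10):
        num_labels = labels_dense.shape[0]
        # Row offsets into the flattened (num_labels * num_classes) array
        index_offset = np.arange(num_labels) * num_classes
        labels_one_hot = np.zeros((num_labels, num_classes))
        # One write per row: offset + label lands on that row's label column
        labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
        return labels_one_hot

    print(dense_to_one_hot(np.array([2, 0, 1]), num_classes=3))
    # a single 1 per row, in columns 2, 0 and 1 respectively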

+ 0 - 299
notebooks/3 - Neural Networks/reccurent_network.ipynb

@@ -1,299 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "'''\n",
-    "A Recurrent Neural Network (LSTM) implementation example using TensorFlow library.\n",
-    "This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)\n",
-    "Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf\n",
-    "\n",
-    "Author: Aymeric Damien\n",
-    "Project: https://github.com/aymericdamien/TensorFlow-Examples/\n",
-    "'''"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
-     ]
-    }
-   ],
-   "source": [
-    "# Import MNIST data\n",
-    "import input_data\n",
-    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n",
-    "\n",
-    "import tensorflow as tf\n",
-    "from tensorflow.models.rnn import rnn, rnn_cell\n",
-    "import numpy as np"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "'''\n",
-    "To classify images using a recurrent neural network, we consider every image row as a sequence of pixels.\n",
-    "Because the MNIST image shape is 28*28 px, we will then handle sequences of 28 timesteps, with 28 inputs per step, for every sample.\n",
-    "'''\n",
-    "\n",
-    "# Parameters\n",
-    "learning_rate = 0.001\n",
-    "training_iters = 100000\n",
-    "batch_size = 128\n",
-    "display_step = 10\n",
-    "\n",
-    "# Network Parameters\n",
-    "n_input = 28 # MNIST data input (img shape: 28*28)\n",
-    "n_steps = 28 # timesteps\n",
-    "n_hidden = 128 # hidden layer num of features\n",
-    "n_classes = 10 # MNIST total classes (0-9 digits)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "# tf Graph input\n",
-    "x = tf.placeholder(\"float\", [None, n_steps, n_input])\n",
-    "istate = tf.placeholder(\"float\", [None, 2*n_hidden]) #state & cell => 2x n_hidden\n",
-    "y = tf.placeholder(\"float\", [None, n_classes])\n",
-    "\n",
-    "# Define weights\n",
-    "weights = {\n",
-    "    'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])), # Hidden layer weights\n",
-    "    'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))\n",
-    "}\n",
-    "biases = {\n",
-    "    'hidden': tf.Variable(tf.random_normal([n_hidden])),\n",
-    "    'out': tf.Variable(tf.random_normal([n_classes]))\n",
-    "}"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "def RNN(_X, _istate, _weights, _biases):\n",
-    "\n",
-    "    # input shape: (batch_size, n_steps, n_input)\n",
-    "    _X = tf.transpose(_X, [1, 0, 2])  # permute n_steps and batch_size\n",
-    "    # Reshape to prepare input to hidden activation\n",
-    "    _X = tf.reshape(_X, [-1, n_input]) # (n_steps*batch_size, n_input)\n",
-    "    # Linear activation\n",
-    "    _X = tf.matmul(_X, _weights['hidden']) + _biases['hidden']\n",
-    "\n",
-    "    # Define a lstm cell with tensorflow\n",
-    "    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)\n",
-    "    # Split data because rnn cell needs a list of inputs for the RNN inner loop\n",
-    "    _X = tf.split(0, n_steps, _X) # n_steps * (batch_size, n_hidden)\n",
-    "\n",
-    "    # Get lstm cell output\n",
-    "    outputs, states = rnn.rnn(lstm_cell, _X, initial_state=_istate)\n",
-    "\n",
-    "    # Linear activation\n",
-    "    # Get inner loop last output\n",
-    "    return tf.matmul(outputs[-1], _weights['out']) + _biases['out']"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "pred = RNN(x, istate, weights, biases)\n",
-    "\n",
-    "# Define loss and optimizer\n",
-    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y)) # Softmax loss\n",
-    "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer\n",
-    "\n",
-    "# Evaluate model\n",
-    "correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\n",
-    "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Iter 1280, Minibatch Loss= 1.888242, Training Accuracy= 0.39844\n",
-      "Iter 2560, Minibatch Loss= 1.519879, Training Accuracy= 0.47656\n",
-      "Iter 3840, Minibatch Loss= 1.238005, Training Accuracy= 0.63281\n",
-      "Iter 5120, Minibatch Loss= 0.933760, Training Accuracy= 0.71875\n",
-      "Iter 6400, Minibatch Loss= 0.832130, Training Accuracy= 0.73438\n",
-      "Iter 7680, Minibatch Loss= 0.979760, Training Accuracy= 0.70312\n",
-      "Iter 8960, Minibatch Loss= 0.821921, Training Accuracy= 0.71875\n",
-      "Iter 10240, Minibatch Loss= 0.710566, Training Accuracy= 0.79688\n",
-      "Iter 11520, Minibatch Loss= 0.578501, Training Accuracy= 0.82812\n",
-      "Iter 12800, Minibatch Loss= 0.765049, Training Accuracy= 0.75000\n",
-      "Iter 14080, Minibatch Loss= 0.582995, Training Accuracy= 0.78125\n",
-      "Iter 15360, Minibatch Loss= 0.575092, Training Accuracy= 0.79688\n",
-      "Iter 16640, Minibatch Loss= 0.701214, Training Accuracy= 0.75781\n",
-      "Iter 17920, Minibatch Loss= 0.561972, Training Accuracy= 0.78125\n",
-      "Iter 19200, Minibatch Loss= 0.394480, Training Accuracy= 0.85938\n",
-      "Iter 20480, Minibatch Loss= 0.356244, Training Accuracy= 0.91406\n",
-      "Iter 21760, Minibatch Loss= 0.632163, Training Accuracy= 0.78125\n",
-      "Iter 23040, Minibatch Loss= 0.269334, Training Accuracy= 0.90625\n",
-      "Iter 24320, Minibatch Loss= 0.485007, Training Accuracy= 0.86719\n",
-      "Iter 25600, Minibatch Loss= 0.569704, Training Accuracy= 0.78906\n",
-      "Iter 26880, Minibatch Loss= 0.267697, Training Accuracy= 0.92188\n",
-      "Iter 28160, Minibatch Loss= 0.381177, Training Accuracy= 0.90625\n",
-      "Iter 29440, Minibatch Loss= 0.350800, Training Accuracy= 0.87500\n",
-      "Iter 30720, Minibatch Loss= 0.356782, Training Accuracy= 0.90625\n",
-      "Iter 32000, Minibatch Loss= 0.322511, Training Accuracy= 0.89062\n",
-      "Iter 33280, Minibatch Loss= 0.309195, Training Accuracy= 0.90625\n",
-      "Iter 34560, Minibatch Loss= 0.535408, Training Accuracy= 0.83594\n",
-      "Iter 35840, Minibatch Loss= 0.281643, Training Accuracy= 0.92969\n",
-      "Iter 37120, Minibatch Loss= 0.290962, Training Accuracy= 0.89844\n",
-      "Iter 38400, Minibatch Loss= 0.204718, Training Accuracy= 0.93750\n",
-      "Iter 39680, Minibatch Loss= 0.205882, Training Accuracy= 0.92969\n",
-      "Iter 40960, Minibatch Loss= 0.481441, Training Accuracy= 0.84375\n",
-      "Iter 42240, Minibatch Loss= 0.348245, Training Accuracy= 0.89844\n",
-      "Iter 43520, Minibatch Loss= 0.274692, Training Accuracy= 0.90625\n",
-      "Iter 44800, Minibatch Loss= 0.171815, Training Accuracy= 0.94531\n",
-      "Iter 46080, Minibatch Loss= 0.171035, Training Accuracy= 0.93750\n",
-      "Iter 47360, Minibatch Loss= 0.235800, Training Accuracy= 0.89844\n",
-      "Iter 48640, Minibatch Loss= 0.235974, Training Accuracy= 0.93750\n",
-      "Iter 49920, Minibatch Loss= 0.207323, Training Accuracy= 0.92188\n",
-      "Iter 51200, Minibatch Loss= 0.212989, Training Accuracy= 0.91406\n",
-      "Iter 52480, Minibatch Loss= 0.151774, Training Accuracy= 0.95312\n",
-      "Iter 53760, Minibatch Loss= 0.090070, Training Accuracy= 0.96875\n",
-      "Iter 55040, Minibatch Loss= 0.264714, Training Accuracy= 0.92969\n",
-      "Iter 56320, Minibatch Loss= 0.235086, Training Accuracy= 0.92969\n",
-      "Iter 57600, Minibatch Loss= 0.160302, Training Accuracy= 0.95312\n",
-      "Iter 58880, Minibatch Loss= 0.106515, Training Accuracy= 0.96875\n",
-      "Iter 60160, Minibatch Loss= 0.236039, Training Accuracy= 0.94531\n",
-      "Iter 61440, Minibatch Loss= 0.279540, Training Accuracy= 0.90625\n",
-      "Iter 62720, Minibatch Loss= 0.173585, Training Accuracy= 0.93750\n",
-      "Iter 64000, Minibatch Loss= 0.191009, Training Accuracy= 0.92188\n",
-      "Iter 65280, Minibatch Loss= 0.210331, Training Accuracy= 0.89844\n",
-      "Iter 66560, Minibatch Loss= 0.223444, Training Accuracy= 0.94531\n",
-      "Iter 67840, Minibatch Loss= 0.278210, Training Accuracy= 0.91406\n",
-      "Iter 69120, Minibatch Loss= 0.174290, Training Accuracy= 0.95312\n",
-      "Iter 70400, Minibatch Loss= 0.188701, Training Accuracy= 0.94531\n",
-      "Iter 71680, Minibatch Loss= 0.210277, Training Accuracy= 0.94531\n",
-      "Iter 72960, Minibatch Loss= 0.249951, Training Accuracy= 0.95312\n",
-      "Iter 74240, Minibatch Loss= 0.209853, Training Accuracy= 0.92188\n",
-      "Iter 75520, Minibatch Loss= 0.049742, Training Accuracy= 0.99219\n",
-      "Iter 76800, Minibatch Loss= 0.250095, Training Accuracy= 0.92969\n",
-      "Iter 78080, Minibatch Loss= 0.133853, Training Accuracy= 0.95312\n",
-      "Iter 79360, Minibatch Loss= 0.110206, Training Accuracy= 0.97656\n",
-      "Iter 80640, Minibatch Loss= 0.141906, Training Accuracy= 0.93750\n",
-      "Iter 81920, Minibatch Loss= 0.126872, Training Accuracy= 0.94531\n",
-      "Iter 83200, Minibatch Loss= 0.138925, Training Accuracy= 0.95312\n",
-      "Iter 84480, Minibatch Loss= 0.128652, Training Accuracy= 0.96094\n",
-      "Iter 85760, Minibatch Loss= 0.099837, Training Accuracy= 0.96094\n",
-      "Iter 87040, Minibatch Loss= 0.119000, Training Accuracy= 0.95312\n",
-      "Iter 88320, Minibatch Loss= 0.179807, Training Accuracy= 0.95312\n",
-      "Iter 89600, Minibatch Loss= 0.141792, Training Accuracy= 0.96094\n",
-      "Iter 90880, Minibatch Loss= 0.142424, Training Accuracy= 0.96094\n",
-      "Iter 92160, Minibatch Loss= 0.159564, Training Accuracy= 0.96094\n",
-      "Iter 93440, Minibatch Loss= 0.111984, Training Accuracy= 0.95312\n",
-      "Iter 94720, Minibatch Loss= 0.238978, Training Accuracy= 0.92969\n",
-      "Iter 96000, Minibatch Loss= 0.068002, Training Accuracy= 0.97656\n",
-      "Iter 97280, Minibatch Loss= 0.191819, Training Accuracy= 0.94531\n",
-      "Iter 98560, Minibatch Loss= 0.081197, Training Accuracy= 0.99219\n",
-      "Iter 99840, Minibatch Loss= 0.206797, Training Accuracy= 0.95312\n",
-      "Optimization Finished!\n",
-      "Testing Accuracy: 0.941406\n"
-     ]
-    }
-   ],
-   "source": [
-    "# Initializing the variables\n",
-    "init = tf.initialize_all_variables()\n",
-    "\n",
-    "# Launch the graph\n",
-    "with tf.Session() as sess:\n",
-    "    sess.run(init)\n",
-    "    step = 1\n",
-    "    # Keep training until reach max iterations\n",
-    "    while step * batch_size < training_iters:\n",
-    "        batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n",
-    "        # Reshape data to get 28 seq of 28 elements\n",
-    "        batch_xs = batch_xs.reshape((batch_size, n_steps, n_input))\n",
-    "        # Fit training using batch data\n",
-    "        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys,\n",
-    "                                       istate: np.zeros((batch_size, 2*n_hidden))})\n",
-    "        if step % display_step == 0:\n",
-    "            # Calculate batch accuracy\n",
-    "            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys,\n",
-    "                                                istate: np.zeros((batch_size, 2*n_hidden))})\n",
-    "            # Calculate batch loss\n",
-    "            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys,\n",
-    "                                             istate: np.zeros((batch_size, 2*n_hidden))})\n",
-    "            print \"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" + \"{:.6f}\".format(loss) + \\\n",
-    "                  \", Training Accuracy= \" + \"{:.5f}\".format(acc)\n",
-    "        step += 1\n",
-    "    print \"Optimization Finished!\"\n",
-    "    # Calculate accuracy for 256 mnist test images\n",
-    "    test_len = 256\n",
-    "    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))\n",
-    "    test_label = mnist.test.labels[:test_len]\n",
-    "    print \"Testing Accuracy:\", sess.run(accuracy, feed_dict={x: test_data, y: test_label,\n",
-    "                                                             istate: np.zeros((test_len, 2*n_hidden))})"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "IPython (Python 2.7)",
-   "language": "python",
-   "name": "python2"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 2
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.8"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
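
The zero `istate` fed throughout the removed training loop above has width 2*n_hidden because that release's BasicLSTMCell packed the cell state c and the hidden output h into one concatenated tensor. A minimal NumPy sketch of that layout, assuming the concatenated (c, h) convention:

    import numpy as np

    batch_size, n_hidden = 128, 128
    # Zero initial state, as fed for `istate` in the loop above
    istate = np.zeros((batch_size, 2 * n_hidden))
    # Assumed layout: first half is the cell state c, second half the hidden output h
    c, h = np.split(istate, 2, axis=1)
    assert c.shape == h.shape == (batch_size, n_hidden)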

+ 226 - 0
notebooks/3_Neural Networks/autoencoder.ipynb

@@ -0,0 +1,226 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "\"\"\" Auto Encoder Example.\n",
+    "Using an auto encoder on MNIST handwritten digits.\n",
+    "References:\n",
+    "    Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner. \"Gradient-based\n",
+    "    learning applied to document recognition.\" Proceedings of the IEEE,\n",
+    "    86(11):2278-2324, November 1998.\n",
+    "Links:\n",
+    "    [MNIST Dataset] http://yann.lecun.com/exdb/mnist/\n",
+    "\"\"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "from __future__ import division, print_function, absolute_import\n",
+    "\n",
+    "import tensorflow as tf\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "# Import MINST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Parameters\n",
+    "learning_rate = 0.01\n",
+    "training_epochs = 20\n",
+    "batch_size = 256\n",
+    "display_step = 1\n",
+    "examples_to_show = 10\n",
+    "\n",
+    "# Network Parameters\n",
+    "n_hidden_1 = 256 # 1st layer num features\n",
+    "n_hidden_2 = 128 # 2nd layer num features\n",
+    "n_input = 784 # MNIST data input (img shape: 28*28)\n",
+    "\n",
+    "# tf Graph input (only pictures)\n",
+    "X = tf.placeholder(\"float\", [None, n_input])\n",
+    "\n",
+    "weights = {\n",
+    "    'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n",
+    "    'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n",
+    "    'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),\n",
+    "    'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),\n",
+    "}\n",
+    "biases = {\n",
+    "    'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),\n",
+    "    'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),\n",
+    "    'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),\n",
+    "    'decoder_b2': tf.Variable(tf.random_normal([n_input])),\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Building the encoder\n",
+    "def encoder(x):\n",
+    "    # Encoder Hidden layer with sigmoid activation #1\n",
+    "    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),\n",
+    "                                   biases['encoder_b1']))\n",
+    "    # Decoder Hidden layer with sigmoid activation #2\n",
+    "    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),\n",
+    "                                   biases['encoder_b2']))\n",
+    "    return layer_2\n",
+    "\n",
+    "\n",
+    "# Building the decoder\n",
+    "def decoder(x):\n",
+    "    # Encoder Hidden layer with sigmoid activation #1\n",
+    "    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),\n",
+    "                                   biases['decoder_b1']))\n",
+    "    # Decoder Hidden layer with sigmoid activation #2\n",
+    "    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),\n",
+    "                                   biases['decoder_b2']))\n",
+    "    return layer_2\n",
+    "\n",
+    "# Construct model\n",
+    "encoder_op = encoder(X)\n",
+    "decoder_op = decoder(encoder_op)\n",
+    "\n",
+    "# Prediction\n",
+    "y_pred = decoder_op\n",
+    "# Targets (Labels) are the input data.\n",
+    "y_true = X\n",
+    "\n",
+    "# Define loss and optimizer, minimize the squared error\n",
+    "cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))\n",
+    "optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)\n",
+    "\n",
+    "# Initializing the variables\n",
+    "init = tf.initialize_all_variables()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch: 0001 cost= 0.218654603\n",
+      "Epoch: 0002 cost= 0.173306286\n",
+      "Epoch: 0003 cost= 0.154793650\n",
+      "Epoch: 0004 cost= 0.146902516\n",
+      "Epoch: 0005 cost= 0.141993478\n",
+      "Epoch: 0006 cost= 0.132718414\n",
+      "Epoch: 0007 cost= 0.125991374\n",
+      "Epoch: 0008 cost= 0.122500181\n",
+      "Epoch: 0009 cost= 0.115299642\n",
+      "Epoch: 0010 cost= 0.115390278\n",
+      "Epoch: 0011 cost= 0.114480168\n",
+      "Epoch: 0012 cost= 0.113888472\n",
+      "Epoch: 0013 cost= 0.111597553\n",
+      "Epoch: 0014 cost= 0.110663064\n",
+      "Epoch: 0015 cost= 0.108673096\n",
+      "Epoch: 0016 cost= 0.104775786\n",
+      "Epoch: 0017 cost= 0.106273368\n",
+      "Epoch: 0018 cost= 0.104061618\n",
+      "Epoch: 0019 cost= 0.103227913\n",
+      "Epoch: 0020 cost= 0.099696413\n",
+      "Optimization Finished!\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Launch the graph\n",
+    "# Using InteractiveSession (more convenient while using Notebooks)\n",
+    "sess = tf.InteractiveSession()\n",
+    "sess.run(init)\n",
+    "\n",
+    "total_batch = int(mnist.train.num_examples/batch_size)\n",
+    "# Training cycle\n",
+    "for epoch in range(training_epochs):\n",
+    "    # Loop over all batches\n",
+    "    for i in range(total_batch):\n",
+    "        batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n",
+    "        # Run optimization op (backprop) and cost op (to get loss value)\n",
+    "        _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})\n",
+    "    # Display logs per epoch step\n",
+    "    if epoch % display_step == 0:\n",
+    "        print(\"Epoch:\", '%04d' % (epoch+1),\n",
+    "              \"cost=\", \"{:.9f}\".format(c))\n",
+    "\n",
+    "print(\"Optimization Finished!\")\n",
+    "\n",
+    "# Applying encode and decode over test set\n",
+    "encode_decode = sess.run(\n",
+    "    y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})\n",
+    "# Compare original images with their reconstructions\n",
+    "f, a = plt.subplots(2, 10, figsize=(10, 2))\n",
+    "for i in range(examples_to_show):\n",
+    "    a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))\n",
+    "    a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))\n",
+    "f.show()\n",
+    "plt.draw()\n",
+    "plt.waitforbuttonpress()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2.0
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.11"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
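
To see the 784-256-128-256-784 architecture above without running TensorFlow, here is a NumPy sketch of the same encoder/decoder forward pass and the squared-error cost the notebook minimizes; the weights are random stand-ins, not trained values:

    import numpy as np

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    rng = np.random.default_rng(0)
    # Random stand-ins with the notebook's layer sizes (784 -> 256 -> 128 and back)
    W_e1, b_e1 = 0.1 * rng.normal(size=(784, 256)), np.zeros(256)
    W_e2, b_e2 = 0.1 * rng.normal(size=(256, 128)), np.zeros(128)
    W_d1, b_d1 = 0.1 * rng.normal(size=(128, 256)), np.zeros(256)
    W_d2, b_d2 = 0.1 * rng.normal(size=(256, 784)), np.zeros(784)

    X = rng.random((256, 784))                                  # one batch of flattened 28x28 images
    code = sigmoid(sigmoid(X @ W_e1 + b_e1) @ W_e2 + b_e2)      # encoder(X)
    recon = sigmoid(sigmoid(code @ W_d1 + b_d1) @ W_d2 + b_d2)  # decoder(encoder(X))
    cost = ((X - recon) ** 2).mean()                            # tf.reduce_mean(tf.pow(y_true - y_pred, 2))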

+ 293 - 0
notebooks/3_Neural Networks/bidirectional_rnn.ipynb

@@ -0,0 +1,293 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "'''\n",
+    "A Bidirectional Reccurent Neural Network (LSTM) implementation example using TensorFlow library.\n",
+    "This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)\n",
+    "Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf\n",
+    "\n",
+    "Author: Aymeric Damien\n",
+    "Project: https://github.com/aymericdamien/TensorFlow-Examples/\n",
+    "'''"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "import tensorflow as tf\n",
+    "from tensorflow.models.rnn import rnn, rnn_cell\n",
+    "import numpy as np\n",
+    "\n",
+    "# Import MINST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "'''\n",
+    "To classify images using a bidirectional reccurent neural network, we consider\n",
+    "every image row as a sequence of pixels. Because MNIST image shape is 28*28px,\n",
+    "we will then handle 28 sequences of 28 steps for every sample.\n",
+    "'''"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Parameters\n",
+    "learning_rate = 0.001\n",
+    "training_iters = 100000\n",
+    "batch_size = 128\n",
+    "display_step = 10\n",
+    "\n",
+    "# Network Parameters\n",
+    "n_input = 28 # MNIST data input (img shape: 28*28)\n",
+    "n_steps = 28 # timesteps\n",
+    "n_hidden = 128 # hidden layer num of features\n",
+    "n_classes = 10 # MNIST total classes (0-9 digits)\n",
+    "\n",
+    "# tf Graph input\n",
+    "x = tf.placeholder(\"float\", [None, n_steps, n_input])\n",
+    "y = tf.placeholder(\"float\", [None, n_classes])\n",
+    "\n",
+    "# Define weights\n",
+    "weights = {\n",
+    "    # Hidden layer weights => 2*n_hidden because of foward + backward cells\n",
+    "    'hidden': tf.Variable(tf.random_normal([n_input, 2*n_hidden])),\n",
+    "    'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))\n",
+    "}\n",
+    "biases = {\n",
+    "    'hidden': tf.Variable(tf.random_normal([2*n_hidden])),\n",
+    "    'out': tf.Variable(tf.random_normal([n_classes]))\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def BiRNN(x, weights, biases):\n",
+    "\n",
+    "    # Prepare data shape to match `bidirectional_rnn` function requirements\n",
+    "    # Current data input shape: (batch_size, n_steps, n_input)\n",
+    "    # Permuting batch_size and n_steps\n",
+    "    x = tf.transpose(x, [1, 0, 2])\n",
+    "    # Reshape to (n_steps*batch_size, n_input)\n",
+    "    x = tf.reshape(x, [-1, n_input])\n",
+    "    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_hidden)\n",
+    "    x = tf.split(0, n_steps, x)\n",
+    "\n",
+    "    # Define lstm cells with tensorflow\n",
+    "    # Forward direction cell\n",
+    "    lstm_fw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)\n",
+    "    # Backward direction cell\n",
+    "    lstm_bw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)\n",
+    "\n",
+    "    # Get lstm cell output\n",
+    "    outputs = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,\n",
+    "                                    dtype=tf.float32)\n",
+    "\n",
+    "    # Linear activation, using rnn inner loop last output\n",
+    "    return tf.matmul(outputs[-1], weights['out']) + biases['out']\n",
+    "\n",
+    "pred = BiRNN(x, weights, biases)\n",
+    "\n",
+    "# Define loss and optimizer\n",
+    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\n",
+    "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n",
+    "\n",
+    "# Evaluate model\n",
+    "correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\n",
+    "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
+    "\n",
+    "# Initializing the variables\n",
+    "init = tf.initialize_all_variables()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Iter 1280, Minibatch Loss= 1.689740, Training Accuracy= 0.36719\n",
+      "Iter 2560, Minibatch Loss= 1.477009, Training Accuracy= 0.44531\n",
+      "Iter 3840, Minibatch Loss= 1.245874, Training Accuracy= 0.53125\n",
+      "Iter 5120, Minibatch Loss= 0.990923, Training Accuracy= 0.64062\n",
+      "Iter 6400, Minibatch Loss= 0.752950, Training Accuracy= 0.71875\n",
+      "Iter 7680, Minibatch Loss= 1.023025, Training Accuracy= 0.61719\n",
+      "Iter 8960, Minibatch Loss= 0.921414, Training Accuracy= 0.68750\n",
+      "Iter 10240, Minibatch Loss= 0.719829, Training Accuracy= 0.75000\n",
+      "Iter 11520, Minibatch Loss= 0.468657, Training Accuracy= 0.86719\n",
+      "Iter 12800, Minibatch Loss= 0.654315, Training Accuracy= 0.78125\n",
+      "Iter 14080, Minibatch Loss= 0.595391, Training Accuracy= 0.83594\n",
+      "Iter 15360, Minibatch Loss= 0.392862, Training Accuracy= 0.83594\n",
+      "Iter 16640, Minibatch Loss= 0.421122, Training Accuracy= 0.92188\n",
+      "Iter 17920, Minibatch Loss= 0.311471, Training Accuracy= 0.88281\n",
+      "Iter 19200, Minibatch Loss= 0.276949, Training Accuracy= 0.92188\n",
+      "Iter 20480, Minibatch Loss= 0.170499, Training Accuracy= 0.94531\n",
+      "Iter 21760, Minibatch Loss= 0.419481, Training Accuracy= 0.86719\n",
+      "Iter 23040, Minibatch Loss= 0.183765, Training Accuracy= 0.92188\n",
+      "Iter 24320, Minibatch Loss= 0.386232, Training Accuracy= 0.86719\n",
+      "Iter 25600, Minibatch Loss= 0.335571, Training Accuracy= 0.88281\n",
+      "Iter 26880, Minibatch Loss= 0.169092, Training Accuracy= 0.92969\n",
+      "Iter 28160, Minibatch Loss= 0.247623, Training Accuracy= 0.92969\n",
+      "Iter 29440, Minibatch Loss= 0.242989, Training Accuracy= 0.94531\n",
+      "Iter 30720, Minibatch Loss= 0.253811, Training Accuracy= 0.92188\n",
+      "Iter 32000, Minibatch Loss= 0.169660, Training Accuracy= 0.93750\n",
+      "Iter 33280, Minibatch Loss= 0.291349, Training Accuracy= 0.90625\n",
+      "Iter 34560, Minibatch Loss= 0.172026, Training Accuracy= 0.95312\n",
+      "Iter 35840, Minibatch Loss= 0.186019, Training Accuracy= 0.93750\n",
+      "Iter 37120, Minibatch Loss= 0.298480, Training Accuracy= 0.89062\n",
+      "Iter 38400, Minibatch Loss= 0.158750, Training Accuracy= 0.92188\n",
+      "Iter 39680, Minibatch Loss= 0.162706, Training Accuracy= 0.94531\n",
+      "Iter 40960, Minibatch Loss= 0.339814, Training Accuracy= 0.86719\n",
+      "Iter 42240, Minibatch Loss= 0.068817, Training Accuracy= 0.99219\n",
+      "Iter 43520, Minibatch Loss= 0.188742, Training Accuracy= 0.93750\n",
+      "Iter 44800, Minibatch Loss= 0.176708, Training Accuracy= 0.92969\n",
+      "Iter 46080, Minibatch Loss= 0.096726, Training Accuracy= 0.96875\n",
+      "Iter 47360, Minibatch Loss= 0.220973, Training Accuracy= 0.92969\n",
+      "Iter 48640, Minibatch Loss= 0.226749, Training Accuracy= 0.94531\n",
+      "Iter 49920, Minibatch Loss= 0.188906, Training Accuracy= 0.94531\n",
+      "Iter 51200, Minibatch Loss= 0.145194, Training Accuracy= 0.95312\n",
+      "Iter 52480, Minibatch Loss= 0.168948, Training Accuracy= 0.95312\n",
+      "Iter 53760, Minibatch Loss= 0.069116, Training Accuracy= 0.97656\n",
+      "Iter 55040, Minibatch Loss= 0.228721, Training Accuracy= 0.93750\n",
+      "Iter 56320, Minibatch Loss= 0.152915, Training Accuracy= 0.95312\n",
+      "Iter 57600, Minibatch Loss= 0.126974, Training Accuracy= 0.96875\n",
+      "Iter 58880, Minibatch Loss= 0.078870, Training Accuracy= 0.97656\n",
+      "Iter 60160, Minibatch Loss= 0.225498, Training Accuracy= 0.95312\n",
+      "Iter 61440, Minibatch Loss= 0.111760, Training Accuracy= 0.97656\n",
+      "Iter 62720, Minibatch Loss= 0.161434, Training Accuracy= 0.97656\n",
+      "Iter 64000, Minibatch Loss= 0.207190, Training Accuracy= 0.94531\n",
+      "Iter 65280, Minibatch Loss= 0.103831, Training Accuracy= 0.96094\n",
+      "Iter 66560, Minibatch Loss= 0.153846, Training Accuracy= 0.93750\n",
+      "Iter 67840, Minibatch Loss= 0.082717, Training Accuracy= 0.96875\n",
+      "Iter 69120, Minibatch Loss= 0.199301, Training Accuracy= 0.95312\n",
+      "Iter 70400, Minibatch Loss= 0.139725, Training Accuracy= 0.96875\n",
+      "Iter 71680, Minibatch Loss= 0.169596, Training Accuracy= 0.95312\n",
+      "Iter 72960, Minibatch Loss= 0.142444, Training Accuracy= 0.96094\n",
+      "Iter 74240, Minibatch Loss= 0.145822, Training Accuracy= 0.95312\n",
+      "Iter 75520, Minibatch Loss= 0.129086, Training Accuracy= 0.94531\n",
+      "Iter 76800, Minibatch Loss= 0.078082, Training Accuracy= 0.97656\n",
+      "Iter 78080, Minibatch Loss= 0.151803, Training Accuracy= 0.94531\n",
+      "Iter 79360, Minibatch Loss= 0.050142, Training Accuracy= 0.98438\n",
+      "Iter 80640, Minibatch Loss= 0.136788, Training Accuracy= 0.95312\n",
+      "Iter 81920, Minibatch Loss= 0.130100, Training Accuracy= 0.94531\n",
+      "Iter 83200, Minibatch Loss= 0.058298, Training Accuracy= 0.98438\n",
+      "Iter 84480, Minibatch Loss= 0.120124, Training Accuracy= 0.96094\n",
+      "Iter 85760, Minibatch Loss= 0.064916, Training Accuracy= 0.97656\n",
+      "Iter 87040, Minibatch Loss= 0.137179, Training Accuracy= 0.93750\n",
+      "Iter 88320, Minibatch Loss= 0.138268, Training Accuracy= 0.95312\n",
+      "Iter 89600, Minibatch Loss= 0.072827, Training Accuracy= 0.97656\n",
+      "Iter 90880, Minibatch Loss= 0.123839, Training Accuracy= 0.96875\n",
+      "Iter 92160, Minibatch Loss= 0.087194, Training Accuracy= 0.96875\n",
+      "Iter 93440, Minibatch Loss= 0.083489, Training Accuracy= 0.97656\n",
+      "Iter 94720, Minibatch Loss= 0.131827, Training Accuracy= 0.95312\n",
+      "Iter 96000, Minibatch Loss= 0.098764, Training Accuracy= 0.96875\n",
+      "Iter 97280, Minibatch Loss= 0.115553, Training Accuracy= 0.94531\n",
+      "Iter 98560, Minibatch Loss= 0.079704, Training Accuracy= 0.96875\n",
+      "Iter 99840, Minibatch Loss= 0.064562, Training Accuracy= 0.98438\n",
+      "Optimization Finished!\n",
+      "Testing Accuracy: 0.992188\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Launch the graph\n",
+    "with tf.Session() as sess:\n",
+    "    sess.run(init)\n",
+    "    step = 1\n",
+    "    # Keep training until reach max iterations\n",
+    "    while step * batch_size < training_iters:\n",
+    "        batch_x, batch_y = mnist.train.next_batch(batch_size)\n",
+    "        # Reshape data to get 28 seq of 28 elements\n",
+    "        batch_x = batch_x.reshape((batch_size, n_steps, n_input))\n",
+    "        # Run optimization op (backprop)\n",
+    "        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})\n",
+    "        if step % display_step == 0:\n",
+    "            # Calculate batch accuracy\n",
+    "            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})\n",
+    "            # Calculate batch loss\n",
+    "            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})\n",
+    "            print \"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" + \\\n",
+    "                  \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \\\n",
+    "                  \"{:.5f}\".format(acc)\n",
+    "        step += 1\n",
+    "    print \"Optimization Finished!\"\n",
+    "\n",
+    "    # Calculate accuracy for 128 mnist test images\n",
+    "    test_len = 128\n",
+    "    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))\n",
+    "    test_label = mnist.test.labels[:test_len]\n",
+    "    print \"Testing Accuracy:\", \\\n",
+    "        sess.run(accuracy, feed_dict={x: test_data, y: test_label})"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2.0
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.11"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
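
The transpose/reshape/split sequence in `BiRNN` above is purely a layout change from a batch-major tensor to the step-indexed list the old `rnn` API expected. A NumPy sketch of the same three operations with the notebook's dimensions:

    import numpy as np

    batch_size, n_steps, n_input = 128, 28, 28
    batch = np.random.rand(batch_size, n_steps, n_input)

    x = batch.transpose(1, 0, 2)          # tf.transpose(x, [1, 0, 2]): (n_steps, batch_size, n_input)
    x = x.reshape(-1, n_input)            # tf.reshape(x, [-1, n_input]): (n_steps*batch_size, n_input)
    steps = np.split(x, n_steps, axis=0)  # tf.split(0, n_steps, x): list of n_steps arrays
    assert len(steps) == n_steps and steps[0].shape == (batch_size, n_input)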

+ 387 - 0
notebooks/3_Neural Networks/convolutional_network.ipynb

@@ -0,0 +1,387 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "'''\n",
+    "A Convolutional Network implementation example using TensorFlow library.\n",
+    "This example is using the MNIST database of handwritten digits\n",
+    "(http://yann.lecun.com/exdb/mnist/)\n",
+    "\n",
+    "Author: Aymeric Damien\n",
+    "Project: https://github.com/aymericdamien/TensorFlow-Examples/\n",
+    "'''"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "import tensorflow as tf\n",
+    "\n",
+    "# Import MINST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Parameters\n",
+    "learning_rate = 0.001\n",
+    "training_iters = 200000\n",
+    "batch_size = 128\n",
+    "display_step = 10\n",
+    "\n",
+    "# Network Parameters\n",
+    "n_input = 784 # MNIST data input (img shape: 28*28)\n",
+    "n_classes = 10 # MNIST total classes (0-9 digits)\n",
+    "dropout = 0.75 # Dropout, probability to keep units\n",
+    "\n",
+    "# tf Graph input\n",
+    "x = tf.placeholder(tf.float32, [None, n_input])\n",
+    "y = tf.placeholder(tf.float32, [None, n_classes])\n",
+    "keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Create some wrappers for simplicity\n",
+    "def conv2d(x, W, b, strides=1):\n",
+    "    # Conv2D wrapper, with bias and relu activation\n",
+    "    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')\n",
+    "    x = tf.nn.bias_add(x, b)\n",
+    "    return tf.nn.relu(x)\n",
+    "\n",
+    "\n",
+    "def maxpool2d(x, k=2):\n",
+    "    # MaxPool2D wrapper\n",
+    "    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],\n",
+    "                          padding='SAME')\n",
+    "\n",
+    "\n",
+    "# Create model\n",
+    "def conv_net(x, weights, biases, dropout):\n",
+    "    # Reshape input picture\n",
+    "    x = tf.reshape(x, shape=[-1, 28, 28, 1])\n",
+    "\n",
+    "    # Convolution Layer\n",
+    "    conv1 = conv2d(x, weights['wc1'], biases['bc1'])\n",
+    "    # Max Pooling (down-sampling)\n",
+    "    conv1 = maxpool2d(conv1, k=2)\n",
+    "\n",
+    "    # Convolution Layer\n",
+    "    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])\n",
+    "    # Max Pooling (down-sampling)\n",
+    "    conv2 = maxpool2d(conv2, k=2)\n",
+    "\n",
+    "    # Fully connected layer\n",
+    "    # Reshape conv2 output to fit fully connected layer input\n",
+    "    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])\n",
+    "    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])\n",
+    "    fc1 = tf.nn.relu(fc1)\n",
+    "    # Apply Dropout\n",
+    "    fc1 = tf.nn.dropout(fc1, dropout)\n",
+    "\n",
+    "    # Output, class prediction\n",
+    "    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])\n",
+    "    return out"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Store layers weight & bias\n",
+    "weights = {\n",
+    "    # 5x5 conv, 1 input, 32 outputs\n",
+    "    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),\n",
+    "    # 5x5 conv, 32 inputs, 64 outputs\n",
+    "    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),\n",
+    "    # fully connected, 7*7*64 inputs, 1024 outputs\n",
+    "    'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),\n",
+    "    # 1024 inputs, 10 outputs (class prediction)\n",
+    "    'out': tf.Variable(tf.random_normal([1024, n_classes]))\n",
+    "}\n",
+    "\n",
+    "biases = {\n",
+    "    'bc1': tf.Variable(tf.random_normal([32])),\n",
+    "    'bc2': tf.Variable(tf.random_normal([64])),\n",
+    "    'bd1': tf.Variable(tf.random_normal([1024])),\n",
+    "    'out': tf.Variable(tf.random_normal([n_classes]))\n",
+    "}\n",
+    "\n",
+    "# Construct model\n",
+    "pred = conv_net(x, weights, biases, keep_prob)\n",
+    "\n",
+    "# Define loss and optimizer\n",
+    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\n",
+    "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n",
+    "\n",
+    "# Evaluate model\n",
+    "correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n",
+    "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
+    "\n",
+    "# Initializing the variables\n",
+    "init = tf.initialize_all_variables()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Iter 1280, Minibatch Loss= 17231.589844, Training Accuracy= 0.25000\n",
+      "Iter 2560, Minibatch Loss= 10580.260742, Training Accuracy= 0.54688\n",
+      "Iter 3840, Minibatch Loss= 7395.362793, Training Accuracy= 0.64062\n",
+      "Iter 5120, Minibatch Loss= 4864.292480, Training Accuracy= 0.75781\n",
+      "Iter 6400, Minibatch Loss= 3830.062012, Training Accuracy= 0.80469\n",
+      "Iter 7680, Minibatch Loss= 6031.701172, Training Accuracy= 0.72656\n",
+      "Iter 8960, Minibatch Loss= 2549.708740, Training Accuracy= 0.81250\n",
+      "Iter 10240, Minibatch Loss= 2010.484985, Training Accuracy= 0.84375\n",
+      "Iter 11520, Minibatch Loss= 1607.380981, Training Accuracy= 0.89062\n",
+      "Iter 12800, Minibatch Loss= 1983.302856, Training Accuracy= 0.82812\n",
+      "Iter 14080, Minibatch Loss= 401.215088, Training Accuracy= 0.94531\n",
+      "Iter 15360, Minibatch Loss= 976.289307, Training Accuracy= 0.95312\n",
+      "Iter 16640, Minibatch Loss= 1844.699951, Training Accuracy= 0.89844\n",
+      "Iter 17920, Minibatch Loss= 1009.859863, Training Accuracy= 0.92969\n",
+      "Iter 19200, Minibatch Loss= 1397.939453, Training Accuracy= 0.88281\n",
+      "Iter 20480, Minibatch Loss= 540.369995, Training Accuracy= 0.96094\n",
+      "Iter 21760, Minibatch Loss= 2589.246826, Training Accuracy= 0.87500\n",
+      "Iter 23040, Minibatch Loss= 404.981293, Training Accuracy= 0.96094\n",
+      "Iter 24320, Minibatch Loss= 742.155396, Training Accuracy= 0.93750\n",
+      "Iter 25600, Minibatch Loss= 851.599731, Training Accuracy= 0.93750\n",
+      "Iter 26880, Minibatch Loss= 1527.609619, Training Accuracy= 0.90625\n",
+      "Iter 28160, Minibatch Loss= 1209.633301, Training Accuracy= 0.91406\n",
+      "Iter 29440, Minibatch Loss= 1123.146851, Training Accuracy= 0.93750\n",
+      "Iter 30720, Minibatch Loss= 950.860596, Training Accuracy= 0.92188\n",
+      "Iter 32000, Minibatch Loss= 1217.373779, Training Accuracy= 0.92188\n",
+      "Iter 33280, Minibatch Loss= 859.433105, Training Accuracy= 0.91406\n",
+      "Iter 34560, Minibatch Loss= 487.426331, Training Accuracy= 0.95312\n",
+      "Iter 35840, Minibatch Loss= 287.507721, Training Accuracy= 0.96875\n",
+      "Iter 37120, Minibatch Loss= 786.797485, Training Accuracy= 0.91406\n",
+      "Iter 38400, Minibatch Loss= 248.981216, Training Accuracy= 0.97656\n",
+      "Iter 39680, Minibatch Loss= 147.081467, Training Accuracy= 0.97656\n",
+      "Iter 40960, Minibatch Loss= 1198.459106, Training Accuracy= 0.93750\n",
+      "Iter 42240, Minibatch Loss= 717.058716, Training Accuracy= 0.92188\n",
+      "Iter 43520, Minibatch Loss= 351.870453, Training Accuracy= 0.96094\n",
+      "Iter 44800, Minibatch Loss= 271.505554, Training Accuracy= 0.96875\n",
+      "Iter 46080, Minibatch Loss= 0.000000, Training Accuracy= 1.00000\n",
+      "Iter 47360, Minibatch Loss= 806.163818, Training Accuracy= 0.95312\n",
+      "Iter 48640, Minibatch Loss= 1055.359009, Training Accuracy= 0.91406\n",
+      "Iter 49920, Minibatch Loss= 459.845520, Training Accuracy= 0.94531\n",
+      "Iter 51200, Minibatch Loss= 133.995087, Training Accuracy= 0.97656\n",
+      "Iter 52480, Minibatch Loss= 378.886780, Training Accuracy= 0.96094\n",
+      "Iter 53760, Minibatch Loss= 122.112671, Training Accuracy= 0.98438\n",
+      "Iter 55040, Minibatch Loss= 357.410950, Training Accuracy= 0.96875\n",
+      "Iter 56320, Minibatch Loss= 164.791595, Training Accuracy= 0.98438\n",
+      "Iter 57600, Minibatch Loss= 740.711060, Training Accuracy= 0.95312\n",
+      "Iter 58880, Minibatch Loss= 755.948364, Training Accuracy= 0.92969\n",
+      "Iter 60160, Minibatch Loss= 289.819153, Training Accuracy= 0.94531\n",
+      "Iter 61440, Minibatch Loss= 162.940323, Training Accuracy= 0.96875\n",
+      "Iter 62720, Minibatch Loss= 616.192200, Training Accuracy= 0.92969\n",
+      "Iter 64000, Minibatch Loss= 649.317993, Training Accuracy= 0.92188\n",
+      "Iter 65280, Minibatch Loss= 1021.529785, Training Accuracy= 0.93750\n",
+      "Iter 66560, Minibatch Loss= 203.839050, Training Accuracy= 0.96094\n",
+      "Iter 67840, Minibatch Loss= 469.755249, Training Accuracy= 0.96094\n",
+      "Iter 69120, Minibatch Loss= 36.496567, Training Accuracy= 0.98438\n",
+      "Iter 70400, Minibatch Loss= 214.677551, Training Accuracy= 0.95312\n",
+      "Iter 71680, Minibatch Loss= 115.657990, Training Accuracy= 0.96875\n",
+      "Iter 72960, Minibatch Loss= 354.555115, Training Accuracy= 0.96875\n",
+      "Iter 74240, Minibatch Loss= 124.091103, Training Accuracy= 0.97656\n",
+      "Iter 75520, Minibatch Loss= 614.557251, Training Accuracy= 0.94531\n",
+      "Iter 76800, Minibatch Loss= 343.182983, Training Accuracy= 0.95312\n",
+      "Iter 78080, Minibatch Loss= 678.875183, Training Accuracy= 0.94531\n",
+      "Iter 79360, Minibatch Loss= 313.656494, Training Accuracy= 0.95312\n",
+      "Iter 80640, Minibatch Loss= 169.024185, Training Accuracy= 0.96094\n",
+      "Iter 81920, Minibatch Loss= 98.455017, Training Accuracy= 0.96875\n",
+      "Iter 83200, Minibatch Loss= 359.754517, Training Accuracy= 0.92188\n",
+      "Iter 84480, Minibatch Loss= 214.993103, Training Accuracy= 0.96875\n",
+      "Iter 85760, Minibatch Loss= 262.921265, Training Accuracy= 0.97656\n",
+      "Iter 87040, Minibatch Loss= 558.218445, Training Accuracy= 0.89844\n",
+      "Iter 88320, Minibatch Loss= 122.281952, Training Accuracy= 0.99219\n",
+      "Iter 89600, Minibatch Loss= 300.606689, Training Accuracy= 0.93750\n",
+      "Iter 90880, Minibatch Loss= 261.051025, Training Accuracy= 0.98438\n",
+      "Iter 92160, Minibatch Loss= 59.812164, Training Accuracy= 0.98438\n",
+      "Iter 93440, Minibatch Loss= 309.307312, Training Accuracy= 0.96875\n",
+      "Iter 94720, Minibatch Loss= 626.035706, Training Accuracy= 0.95312\n",
+      "Iter 96000, Minibatch Loss= 317.929260, Training Accuracy= 0.96875\n",
+      "Iter 97280, Minibatch Loss= 196.908218, Training Accuracy= 0.97656\n",
+      "Iter 98560, Minibatch Loss= 843.143250, Training Accuracy= 0.95312\n",
+      "Iter 99840, Minibatch Loss= 389.142761, Training Accuracy= 0.96875\n",
+      "Iter 101120, Minibatch Loss= 246.468994, Training Accuracy= 0.96094\n",
+      "Iter 102400, Minibatch Loss= 110.580948, Training Accuracy= 0.98438\n",
+      "Iter 103680, Minibatch Loss= 208.350586, Training Accuracy= 0.96875\n",
+      "Iter 104960, Minibatch Loss= 506.229462, Training Accuracy= 0.94531\n",
+      "Iter 106240, Minibatch Loss= 49.548233, Training Accuracy= 0.98438\n",
+      "Iter 107520, Minibatch Loss= 728.496582, Training Accuracy= 0.92969\n",
+      "Iter 108800, Minibatch Loss= 187.256622, Training Accuracy= 0.97656\n",
+      "Iter 110080, Minibatch Loss= 273.696899, Training Accuracy= 0.97656\n",
+      "Iter 111360, Minibatch Loss= 317.126678, Training Accuracy= 0.96094\n",
+      "Iter 112640, Minibatch Loss= 148.293365, Training Accuracy= 0.98438\n",
+      "Iter 113920, Minibatch Loss= 139.360168, Training Accuracy= 0.97656\n",
+      "Iter 115200, Minibatch Loss= 167.539093, Training Accuracy= 0.98438\n",
+      "Iter 116480, Minibatch Loss= 565.433594, Training Accuracy= 0.94531\n",
+      "Iter 117760, Minibatch Loss= 8.117203, Training Accuracy= 0.99219\n",
+      "Iter 119040, Minibatch Loss= 348.071472, Training Accuracy= 0.96875\n",
+      "Iter 120320, Minibatch Loss= 287.732849, Training Accuracy= 0.97656\n",
+      "Iter 121600, Minibatch Loss= 156.525284, Training Accuracy= 0.96875\n",
+      "Iter 122880, Minibatch Loss= 296.147339, Training Accuracy= 0.98438\n",
+      "Iter 124160, Minibatch Loss= 260.941956, Training Accuracy= 0.98438\n",
+      "Iter 125440, Minibatch Loss= 241.011719, Training Accuracy= 0.98438\n",
+      "Iter 126720, Minibatch Loss= 185.330444, Training Accuracy= 0.98438\n",
+      "Iter 128000, Minibatch Loss= 346.407013, Training Accuracy= 0.96875\n",
+      "Iter 129280, Minibatch Loss= 522.477173, Training Accuracy= 0.94531\n",
+      "Iter 130560, Minibatch Loss= 97.665955, Training Accuracy= 0.96094\n",
+      "Iter 131840, Minibatch Loss= 111.370262, Training Accuracy= 0.96875\n",
+      "Iter 133120, Minibatch Loss= 106.377136, Training Accuracy= 0.97656\n",
+      "Iter 134400, Minibatch Loss= 432.294983, Training Accuracy= 0.96094\n",
+      "Iter 135680, Minibatch Loss= 104.584610, Training Accuracy= 0.98438\n",
+      "Iter 136960, Minibatch Loss= 439.611053, Training Accuracy= 0.95312\n",
+      "Iter 138240, Minibatch Loss= 171.394562, Training Accuracy= 0.96875\n",
+      "Iter 139520, Minibatch Loss= 83.505905, Training Accuracy= 0.98438\n",
+      "Iter 140800, Minibatch Loss= 240.278427, Training Accuracy= 0.98438\n",
+      "Iter 142080, Minibatch Loss= 417.140320, Training Accuracy= 0.96094\n",
+      "Iter 143360, Minibatch Loss= 77.656067, Training Accuracy= 0.97656\n",
+      "Iter 144640, Minibatch Loss= 284.589844, Training Accuracy= 0.97656\n",
+      "Iter 145920, Minibatch Loss= 372.114288, Training Accuracy= 0.96875\n",
+      "Iter 147200, Minibatch Loss= 352.900024, Training Accuracy= 0.96094\n",
+      "Iter 148480, Minibatch Loss= 148.120621, Training Accuracy= 0.97656\n",
+      "Iter 149760, Minibatch Loss= 127.385742, Training Accuracy= 0.98438\n",
+      "Iter 151040, Minibatch Loss= 383.167175, Training Accuracy= 0.96094\n",
+      "Iter 152320, Minibatch Loss= 331.846649, Training Accuracy= 0.94531\n",
+      "Iter 153600, Minibatch Loss= 0.000000, Training Accuracy= 1.00000\n",
+      "Iter 154880, Minibatch Loss= 24.065147, Training Accuracy= 0.99219\n",
+      "Iter 156160, Minibatch Loss= 43.433868, Training Accuracy= 0.99219\n",
+      "Iter 157440, Minibatch Loss= 205.383972, Training Accuracy= 0.96875\n",
+      "Iter 158720, Minibatch Loss= 83.019257, Training Accuracy= 0.97656\n",
+      "Iter 160000, Minibatch Loss= 195.710556, Training Accuracy= 0.96875\n",
+      "Iter 161280, Minibatch Loss= 177.192932, Training Accuracy= 0.95312\n",
+      "Iter 162560, Minibatch Loss= 261.618713, Training Accuracy= 0.96875\n",
+      "Iter 163840, Minibatch Loss= 0.000000, Training Accuracy= 1.00000\n",
+      "Iter 165120, Minibatch Loss= 62.901100, Training Accuracy= 0.97656\n",
+      "Iter 166400, Minibatch Loss= 17.181839, Training Accuracy= 0.98438\n",
+      "Iter 167680, Minibatch Loss= 102.738960, Training Accuracy= 0.96875\n",
+      "Iter 168960, Minibatch Loss= 0.000000, Training Accuracy= 1.00000\n",
+      "Iter 170240, Minibatch Loss= 71.784363, Training Accuracy= 0.99219\n",
+      "Iter 171520, Minibatch Loss= 260.672852, Training Accuracy= 0.96875\n",
+      "Iter 172800, Minibatch Loss= 186.616119, Training Accuracy= 0.96094\n",
+      "Iter 174080, Minibatch Loss= 312.432312, Training Accuracy= 0.96875\n",
+      "Iter 175360, Minibatch Loss= 45.828953, Training Accuracy= 0.99219\n",
+      "Iter 176640, Minibatch Loss= 62.931808, Training Accuracy= 0.98438\n",
+      "Iter 177920, Minibatch Loss= 63.452362, Training Accuracy= 0.97656\n",
+      "Iter 179200, Minibatch Loss= 53.608818, Training Accuracy= 0.98438\n",
+      "Iter 180480, Minibatch Loss= 57.089508, Training Accuracy= 0.97656\n",
+      "Iter 181760, Minibatch Loss= 601.268799, Training Accuracy= 0.93750\n",
+      "Iter 183040, Minibatch Loss= 59.850044, Training Accuracy= 0.97656\n",
+      "Iter 184320, Minibatch Loss= 145.267883, Training Accuracy= 0.96875\n",
+      "Iter 185600, Minibatch Loss= 24.205322, Training Accuracy= 0.99219\n",
+      "Iter 186880, Minibatch Loss= 51.866646, Training Accuracy= 0.98438\n",
+      "Iter 188160, Minibatch Loss= 166.911987, Training Accuracy= 0.96875\n",
+      "Iter 189440, Minibatch Loss= 32.308147, Training Accuracy= 0.98438\n",
+      "Iter 190720, Minibatch Loss= 514.898071, Training Accuracy= 0.92188\n",
+      "Iter 192000, Minibatch Loss= 146.610031, Training Accuracy= 0.98438\n",
+      "Iter 193280, Minibatch Loss= 23.939758, Training Accuracy= 0.99219\n",
+      "Iter 194560, Minibatch Loss= 224.806641, Training Accuracy= 0.97656\n",
+      "Iter 195840, Minibatch Loss= 71.935089, Training Accuracy= 0.98438\n",
+      "Iter 197120, Minibatch Loss= 182.021210, Training Accuracy= 0.96875\n",
+      "Iter 198400, Minibatch Loss= 125.573784, Training Accuracy= 0.96875\n",
+      "Iter 199680, Minibatch Loss= 122.506104, Training Accuracy= 0.96875\n",
+      "Optimization Finished!\n",
+      "Testing Accuracy: 0.972656\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Launch the graph\n",
+    "with tf.Session() as sess:\n",
+    "    sess.run(init)\n",
+    "    step = 1\n",
+    "    # Keep training until reach max iterations\n",
+    "    while step * batch_size < training_iters:\n",
+    "        batch_x, batch_y = mnist.train.next_batch(batch_size)\n",
+    "        # Run optimization op (backprop)\n",
+    "        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,\n",
+    "                                       keep_prob: dropout})\n",
+    "        if step % display_step == 0:\n",
+    "            # Calculate batch loss and accuracy\n",
+    "            loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,\n",
+    "                                                              y: batch_y,\n",
+    "                                                              keep_prob: 1.})\n",
+    "            print \"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" + \\\n",
+    "                  \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \\\n",
+    "                  \"{:.5f}\".format(acc)\n",
+    "        step += 1\n",
+    "    print \"Optimization Finished!\"\n",
+    "\n",
+    "    # Calculate accuracy for 256 mnist test images\n",
+    "    print \"Testing Accuracy:\", \\\n",
+    "        sess.run(accuracy, feed_dict={x: mnist.test.images[:256],\n",
+    "                                      y: mnist.test.labels[:256],\n",
+    "                                      keep_prob: 1.})"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2.0
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.11"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
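
The 7*7*64 input size of the fully connected weight 'wd1' above follows from the pooling schedule: each convolution keeps the 28x28 spatial size ('SAME' padding, stride 1), and each 2x2 max-pool with stride 2 halves it. A quick arithmetic check, assuming those settings:

    size = 28
    for _ in range(2):          # two conv -> maxpool2d(k=2) stages
        size = (size + 1) // 2  # 'SAME' pooling: ceil(size / 2), so 28 -> 14 -> 7
    channels = 64               # output depth of the second conv layer
    assert size * size * channels == 7 * 7 * 64  # 3136 inputs to 'wd1'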

+ 66 - 115
notebooks/3 - Neural Networks/multilayer_perceptron.ipynb

@@ -2,23 +2,25 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {
     "collapsed": true
    },
    "outputs": [],
    "source": [
-    "# A Multilayer Perceptron implementation example using TensorFlow library.\n",
-    "# This example is using the MNIST database of handwritten digits\n",
-    "# (http://yann.lecun.com/exdb/mnist/)\n",
+    "'''\n",
+    "A Multilayer Perceptron implementation example using TensorFlow library.\n",
+    "This example is using the MNIST database of handwritten digits\n",
+    "(http://yann.lecun.com/exdb/mnist/)\n",
     "\n",
-    "# Author: Aymeric Damien\n",
-    "# Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+    "Author: Aymeric Damien\n",
+    "Project: https://github.com/aymericdamien/TensorFlow-Examples/\n",
+    "'''"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 1,
    "metadata": {
     "collapsed": false
    },
@@ -36,24 +38,15 @@
    ],
    "source": [
     "# Import MINST data\n",
-    "import input_data\n",
-    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n",
+    "\n",
     "import tensorflow as tf"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 2,
    "metadata": {
     "collapsed": true
    },
@@ -63,32 +56,14 @@
     "learning_rate = 0.001\n",
     "training_epochs = 15\n",
     "batch_size = 100\n",
-    "display_step = 1"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
+    "display_step = 1\n",
+    "\n",
     "# Network Parameters\n",
-    "n_hidden_1 = 256 # 1st layer num features\n",
-    "n_hidden_2 = 256 # 2nd layer num features\n",
+    "n_hidden_1 = 256 # 1st layer number of features\n",
+    "n_hidden_2 = 256 # 2nd layer number of features\n",
     "n_input = 784 # MNIST data input (img shape: 28*28)\n",
-    "n_classes = 10 # MNIST total classes (0-9 digits)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
+    "n_classes = 10 # MNIST total classes (0-9 digits)\n",
+    "\n",
     "# tf Graph input\n",
     "x = tf.placeholder(\"float\", [None, n_input])\n",
     "y = tf.placeholder(\"float\", [None, n_classes])"
@@ -96,24 +71,28 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 3,
    "metadata": {
     "collapsed": true
    },
    "outputs": [],
    "source": [
     "# Create model\n",
-    "def multilayer_perceptron(_X, _weights, _biases):\n",
-    "    #Hidden layer with RELU activation\n",
-    "    layer_1 = tf.nn.relu(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1'])) \n",
-    "    #Hidden layer with RELU activation\n",
-    "    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, _weights['h2']), _biases['b2'])) \n",
-    "    return tf.matmul(layer_2, weights['out']) + biases['out']"
+    "def multilayer_perceptron(x, weights, biases):\n",
+    "    # Hidden layer with RELU activation\n",
+    "    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n",
+    "    layer_1 = tf.nn.relu(layer_1)\n",
+    "    # Hidden layer with RELU activation\n",
+    "    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n",
+    "    layer_2 = tf.nn.relu(layer_2)\n",
+    "    # Output layer with linear activation\n",
+    "    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n",
+    "    return out_layer"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 4,
    "metadata": {
     "collapsed": true
    },
@@ -129,51 +108,22 @@
     "    'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n",
     "    'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n",
     "    'out': tf.Variable(tf.random_normal([n_classes]))\n",
-    "}"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
+    "}\n",
+    "\n",
     "# Construct model\n",
-    "pred = multilayer_perceptron(x, weights, biases)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 10,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
+    "pred = multilayer_perceptron(x, weights, biases)\n",
+    "\n",
     "# Define loss and optimizer\n",
-    "# Softmax loss\n",
-    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y)) \n",
-    "# Adam Optimizer\n",
-    "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 11,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
+    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\n",
+    "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n",
+    "\n",
     "# Initializing the variables\n",
     "init = tf.initialize_all_variables()"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": 5,
    "metadata": {
     "collapsed": false
    },
@@ -182,23 +132,23 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Epoch: 0001 cost= 160.113980416\n",
-      "Epoch: 0002 cost= 38.665780694\n",
-      "Epoch: 0003 cost= 24.118004577\n",
-      "Epoch: 0004 cost= 16.440921303\n",
-      "Epoch: 0005 cost= 11.689460141\n",
-      "Epoch: 0006 cost= 8.469423468\n",
-      "Epoch: 0007 cost= 6.223237230\n",
-      "Epoch: 0008 cost= 4.560174118\n",
-      "Epoch: 0009 cost= 3.250516910\n",
-      "Epoch: 0010 cost= 2.359658795\n",
-      "Epoch: 0011 cost= 1.694081847\n",
-      "Epoch: 0012 cost= 1.167997509\n",
-      "Epoch: 0013 cost= 0.872986831\n",
-      "Epoch: 0014 cost= 0.630616366\n",
-      "Epoch: 0015 cost= 0.487381571\n",
+      "Epoch: 0001 cost= 185.342230390\n",
+      "Epoch: 0002 cost= 44.266946572\n",
+      "Epoch: 0003 cost= 27.999560453\n",
+      "Epoch: 0004 cost= 19.655567043\n",
+      "Epoch: 0005 cost= 14.284429696\n",
+      "Epoch: 0006 cost= 10.640310403\n",
+      "Epoch: 0007 cost= 7.904047886\n",
+      "Epoch: 0008 cost= 5.989115090\n",
+      "Epoch: 0009 cost= 4.689374613\n",
+      "Epoch: 0010 cost= 3.455884229\n",
+      "Epoch: 0011 cost= 2.733002625\n",
+      "Epoch: 0012 cost= 2.101091420\n",
+      "Epoch: 0013 cost= 1.496508092\n",
+      "Epoch: 0014 cost= 1.245452015\n",
+      "Epoch: 0015 cost= 0.912072906\n",
       "Optimization Finished!\n",
-      "Accuracy: 0.9462\n"
+      "Accuracy: 0.9422\n"
      ]
     }
    ],
@@ -213,15 +163,16 @@
     "        total_batch = int(mnist.train.num_examples/batch_size)\n",
     "        # Loop over all batches\n",
     "        for i in range(total_batch):\n",
-    "            batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n",
-    "            # Fit training using batch data\n",
-    "            sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})\n",
+    "            batch_x, batch_y = mnist.train.next_batch(batch_size)\n",
+    "            # Run optimization op (backprop) and cost op (to get loss value)\n",
+    "            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,\n",
+    "                                                          y: batch_y})\n",
     "            # Compute average loss\n",
-    "            avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch\n",
+    "            avg_cost += c / total_batch\n",
     "        # Display logs per epoch step\n",
     "        if epoch % display_step == 0:\n",
-    "            print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(avg_cost)\n",
-    "\n",
+    "            print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \\\n",
+    "                \"{:.9f}\".format(avg_cost)\n",
     "    print \"Optimization Finished!\"\n",
     "\n",
     "    # Test model\n",
@@ -234,23 +185,23 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "IPython (Python 2.7)",
+   "display_name": "Python 2",
    "language": "python",
    "name": "python2"
   },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 2
+    "version": 2.0
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython2",
-   "version": "2.7.8"
+   "version": "2.7.11"
   }
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}
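
The loss used above, tf.nn.softmax_cross_entropy_with_logits, fuses the softmax and the cross-entropy into one numerically stable op. A NumPy sketch of the same computation on a toy batch, with made-up logits:

    import numpy as np

    def softmax_cross_entropy_with_logits(logits, labels):
        # Stable log-softmax: shift by the row max before exponentiating
        shifted = logits - logits.max(axis=1, keepdims=True)
        log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
        return -(labels * log_probs).sum(axis=1)  # per-example cross-entropy

    logits = np.array([[2.0, 0.5, -1.0]])         # made-up network outputs
    labels = np.array([[1.0, 0.0, 0.0]])          # one-hot target, as for MNIST
    print(softmax_cross_entropy_with_logits(logits, labels).mean())  # ~0.24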

+ 289 - 0
notebooks/3_Neural Networks/recurrent_network.ipynb

@@ -0,0 +1,289 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "'''\n",
+    "A Reccurent Neural Network (LSTM) implementation example using TensorFlow library.\n",
+    "This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)\n",
+    "Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf\n",
+    "\n",
+    "Author: Aymeric Damien\n",
+    "Project: https://github.com/aymericdamien/TensorFlow-Examples/\n",
+    "'''"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "import tensorflow as tf\n",
+    "from tensorflow.models.rnn import rnn, rnn_cell\n",
+    "import numpy as np\n",
+    "\n",
+    "# Import MINST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "'''\n",
+    "To classify images using a reccurent neural network, we consider every image\n",
+    "row as a sequence of pixels. Because MNIST image shape is 28*28px, we will then\n",
+    "handle 28 sequences of 28 steps for every sample.\n",
+    "'''"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Parameters\n",
+    "learning_rate = 0.001\n",
+    "training_iters = 100000\n",
+    "batch_size = 128\n",
+    "display_step = 10\n",
+    "\n",
+    "# Network Parameters\n",
+    "n_input = 28 # MNIST data input (img shape: 28*28)\n",
+    "n_steps = 28 # timesteps\n",
+    "n_hidden = 128 # hidden layer num of features\n",
+    "n_classes = 10 # MNIST total classes (0-9 digits)\n",
+    "\n",
+    "# tf Graph input\n",
+    "x = tf.placeholder(\"float\", [None, n_steps, n_input])\n",
+    "y = tf.placeholder(\"float\", [None, n_classes])\n",
+    "\n",
+    "# Define weights\n",
+    "weights = {\n",
+    "    'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])),\n",
+    "    'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))\n",
+    "}\n",
+    "biases = {\n",
+    "    'hidden': tf.Variable(tf.random_normal([n_hidden])),\n",
+    "    'out': tf.Variable(tf.random_normal([n_classes]))\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def RNN(x, weights, biases):\n",
+    "\n",
+    "    # Prepare data shape to match `rnn` function requirements\n",
+    "    # Current data input shape: (batch_size, n_steps, n_input)\n",
+    "    # Permuting batch_size and n_steps\n",
+    "    x = tf.transpose(x, [1, 0, 2])\n",
+    "    # Reshaping to (n_steps*batch_size, n_input)\n",
+    "    x = tf.reshape(x, [-1, n_input])\n",
+    "    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_hidden)\n",
+    "    # This input shape is required by `rnn` function\n",
+    "    x = tf.split(0, n_steps, x)\n",
+    "\n",
+    "    # Define a lstm cell with tensorflow\n",
+    "    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)\n",
+    "\n",
+    "    # Get lstm cell output\n",
+    "    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)\n",
+    "\n",
+    "    # Linear activation, using rnn inner loop last output\n",
+    "    return tf.matmul(outputs[-1], weights['out']) + biases['out']\n",
+    "\n",
+    "pred = RNN(x, weights, biases)\n",
+    "\n",
+    "# Define loss and optimizer\n",
+    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\n",
+    "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n",
+    "\n",
+    "# Evaluate model\n",
+    "correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\n",
+    "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
+    "\n",
+    "# Initializing the variables\n",
+    "init = tf.initialize_all_variables()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Iter 1280, Minibatch Loss= 1.538532, Training Accuracy= 0.49219\n",
+      "Iter 2560, Minibatch Loss= 1.462834, Training Accuracy= 0.50781\n",
+      "Iter 3840, Minibatch Loss= 1.048393, Training Accuracy= 0.66406\n",
+      "Iter 5120, Minibatch Loss= 0.889872, Training Accuracy= 0.71875\n",
+      "Iter 6400, Minibatch Loss= 0.681855, Training Accuracy= 0.76562\n",
+      "Iter 7680, Minibatch Loss= 0.987207, Training Accuracy= 0.69531\n",
+      "Iter 8960, Minibatch Loss= 0.759543, Training Accuracy= 0.71094\n",
+      "Iter 10240, Minibatch Loss= 0.557055, Training Accuracy= 0.80469\n",
+      "Iter 11520, Minibatch Loss= 0.371352, Training Accuracy= 0.89844\n",
+      "Iter 12800, Minibatch Loss= 0.661293, Training Accuracy= 0.80469\n",
+      "Iter 14080, Minibatch Loss= 0.474259, Training Accuracy= 0.86719\n",
+      "Iter 15360, Minibatch Loss= 0.328436, Training Accuracy= 0.88281\n",
+      "Iter 16640, Minibatch Loss= 0.348017, Training Accuracy= 0.93750\n",
+      "Iter 17920, Minibatch Loss= 0.340086, Training Accuracy= 0.88281\n",
+      "Iter 19200, Minibatch Loss= 0.261532, Training Accuracy= 0.89844\n",
+      "Iter 20480, Minibatch Loss= 0.161785, Training Accuracy= 0.94531\n",
+      "Iter 21760, Minibatch Loss= 0.419619, Training Accuracy= 0.83594\n",
+      "Iter 23040, Minibatch Loss= 0.120714, Training Accuracy= 0.95312\n",
+      "Iter 24320, Minibatch Loss= 0.339519, Training Accuracy= 0.89062\n",
+      "Iter 25600, Minibatch Loss= 0.405463, Training Accuracy= 0.88281\n",
+      "Iter 26880, Minibatch Loss= 0.172193, Training Accuracy= 0.95312\n",
+      "Iter 28160, Minibatch Loss= 0.256769, Training Accuracy= 0.91406\n",
+      "Iter 29440, Minibatch Loss= 0.247753, Training Accuracy= 0.91406\n",
+      "Iter 30720, Minibatch Loss= 0.230820, Training Accuracy= 0.91406\n",
+      "Iter 32000, Minibatch Loss= 0.216861, Training Accuracy= 0.93750\n",
+      "Iter 33280, Minibatch Loss= 0.236337, Training Accuracy= 0.89062\n",
+      "Iter 34560, Minibatch Loss= 0.252351, Training Accuracy= 0.93750\n",
+      "Iter 35840, Minibatch Loss= 0.180090, Training Accuracy= 0.92188\n",
+      "Iter 37120, Minibatch Loss= 0.304125, Training Accuracy= 0.91406\n",
+      "Iter 38400, Minibatch Loss= 0.114474, Training Accuracy= 0.96094\n",
+      "Iter 39680, Minibatch Loss= 0.158405, Training Accuracy= 0.96875\n",
+      "Iter 40960, Minibatch Loss= 0.285858, Training Accuracy= 0.92188\n",
+      "Iter 42240, Minibatch Loss= 0.134199, Training Accuracy= 0.96094\n",
+      "Iter 43520, Minibatch Loss= 0.235847, Training Accuracy= 0.92969\n",
+      "Iter 44800, Minibatch Loss= 0.155971, Training Accuracy= 0.94531\n",
+      "Iter 46080, Minibatch Loss= 0.061549, Training Accuracy= 0.99219\n",
+      "Iter 47360, Minibatch Loss= 0.232569, Training Accuracy= 0.94531\n",
+      "Iter 48640, Minibatch Loss= 0.270348, Training Accuracy= 0.91406\n",
+      "Iter 49920, Minibatch Loss= 0.202416, Training Accuracy= 0.92188\n",
+      "Iter 51200, Minibatch Loss= 0.113857, Training Accuracy= 0.96094\n",
+      "Iter 52480, Minibatch Loss= 0.137900, Training Accuracy= 0.94531\n",
+      "Iter 53760, Minibatch Loss= 0.052416, Training Accuracy= 0.98438\n",
+      "Iter 55040, Minibatch Loss= 0.312064, Training Accuracy= 0.91406\n",
+      "Iter 56320, Minibatch Loss= 0.144335, Training Accuracy= 0.93750\n",
+      "Iter 57600, Minibatch Loss= 0.114723, Training Accuracy= 0.96875\n",
+      "Iter 58880, Minibatch Loss= 0.193597, Training Accuracy= 0.96094\n",
+      "Iter 60160, Minibatch Loss= 0.110877, Training Accuracy= 0.95312\n",
+      "Iter 61440, Minibatch Loss= 0.119864, Training Accuracy= 0.96094\n",
+      "Iter 62720, Minibatch Loss= 0.118780, Training Accuracy= 0.94531\n",
+      "Iter 64000, Minibatch Loss= 0.082259, Training Accuracy= 0.97656\n",
+      "Iter 65280, Minibatch Loss= 0.087364, Training Accuracy= 0.97656\n",
+      "Iter 66560, Minibatch Loss= 0.207975, Training Accuracy= 0.92969\n",
+      "Iter 67840, Minibatch Loss= 0.120612, Training Accuracy= 0.96875\n",
+      "Iter 69120, Minibatch Loss= 0.070608, Training Accuracy= 0.96875\n",
+      "Iter 70400, Minibatch Loss= 0.100786, Training Accuracy= 0.96094\n",
+      "Iter 71680, Minibatch Loss= 0.114746, Training Accuracy= 0.94531\n",
+      "Iter 72960, Minibatch Loss= 0.083427, Training Accuracy= 0.96875\n",
+      "Iter 74240, Minibatch Loss= 0.089978, Training Accuracy= 0.96094\n",
+      "Iter 75520, Minibatch Loss= 0.195322, Training Accuracy= 0.94531\n",
+      "Iter 76800, Minibatch Loss= 0.161109, Training Accuracy= 0.96094\n",
+      "Iter 78080, Minibatch Loss= 0.169762, Training Accuracy= 0.94531\n",
+      "Iter 79360, Minibatch Loss= 0.054240, Training Accuracy= 0.98438\n",
+      "Iter 80640, Minibatch Loss= 0.160100, Training Accuracy= 0.95312\n",
+      "Iter 81920, Minibatch Loss= 0.110728, Training Accuracy= 0.96875\n",
+      "Iter 83200, Minibatch Loss= 0.054918, Training Accuracy= 0.98438\n",
+      "Iter 84480, Minibatch Loss= 0.104170, Training Accuracy= 0.96875\n",
+      "Iter 85760, Minibatch Loss= 0.071871, Training Accuracy= 0.97656\n",
+      "Iter 87040, Minibatch Loss= 0.170529, Training Accuracy= 0.96094\n",
+      "Iter 88320, Minibatch Loss= 0.087350, Training Accuracy= 0.96875\n",
+      "Iter 89600, Minibatch Loss= 0.079943, Training Accuracy= 0.96875\n",
+      "Iter 90880, Minibatch Loss= 0.128451, Training Accuracy= 0.92969\n",
+      "Iter 92160, Minibatch Loss= 0.046963, Training Accuracy= 0.98438\n",
+      "Iter 93440, Minibatch Loss= 0.162998, Training Accuracy= 0.96875\n",
+      "Iter 94720, Minibatch Loss= 0.122588, Training Accuracy= 0.96094\n",
+      "Iter 96000, Minibatch Loss= 0.073954, Training Accuracy= 0.97656\n",
+      "Iter 97280, Minibatch Loss= 0.130790, Training Accuracy= 0.96094\n",
+      "Iter 98560, Minibatch Loss= 0.067689, Training Accuracy= 0.97656\n",
+      "Iter 99840, Minibatch Loss= 0.186411, Training Accuracy= 0.92188\n",
+      "Optimization Finished!\n",
+      "Testing Accuracy: 0.976562\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Launch the graph\n",
+    "with tf.Session() as sess:\n",
+    "    sess.run(init)\n",
+    "    step = 1\n",
+    "    # Keep training until reach max iterations\n",
+    "    while step * batch_size < training_iters:\n",
+    "        batch_x, batch_y = mnist.train.next_batch(batch_size)\n",
+    "        # Reshape data to get 28 seq of 28 elements\n",
+    "        batch_x = batch_x.reshape((batch_size, n_steps, n_input))\n",
+    "        # Run optimization op (backprop)\n",
+    "        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})\n",
+    "        if step % display_step == 0:\n",
+    "            # Calculate batch accuracy\n",
+    "            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})\n",
+    "            # Calculate batch loss\n",
+    "            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})\n",
+    "            print \"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" + \\\n",
+    "                  \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \\\n",
+    "                  \"{:.5f}\".format(acc)\n",
+    "        step += 1\n",
+    "    print \"Optimization Finished!\"\n",
+    "\n",
+    "    # Calculate accuracy for 128 mnist test images\n",
+    "    test_len = 128\n",
+    "    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))\n",
+    "    test_label = mnist.test.labels[:test_len]\n",
+    "    print \"Testing Accuracy:\", \\\n",
+    "        sess.run(accuracy, feed_dict={x: test_data, y: test_label})"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2.0
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.11"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
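
The transpose/reshape/split sequence inside RNN() above is the subtle part of
this notebook. Here is a minimal NumPy sketch of the same index logic, assuming
a toy batch_size of 4; NumPy and these variable names are illustrative and not
part of the commit:

    import numpy as np

    batch_size, n_steps, n_input = 4, 28, 28
    x = np.arange(batch_size * n_steps * n_input, dtype=np.float32)
    x = x.reshape(batch_size, n_steps, n_input)

    # Permute batch_size and n_steps: (n_steps, batch_size, n_input)
    x_t = np.transpose(x, (1, 0, 2))
    # Flatten to (n_steps * batch_size, n_input); rows are now step-major
    x_r = x_t.reshape(-1, n_input)
    # Split into a list of n_steps arrays of shape (batch_size, n_input),
    # mirroring tf.split(0, n_steps, x) in the notebook
    steps = np.split(x_r, n_steps, axis=0)

    print len(steps), steps[0].shape  # 28 (4, 28)

Each list element holds one pixel row from every image in the batch, which is
exactly the per-timestep input shape that the era's rnn.rnn() expects.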

+ 271 - 0
notebooks/4_Utils/save_restore_model.ipynb

@@ -0,0 +1,271 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "'''\n",
+    "Save and Restore a model using TensorFlow.\n",
+    "This example is using the MNIST database of handwritten digits\n",
+    "(http://yann.lecun.com/exdb/mnist/)\n",
+    "\n",
+    "Author: Aymeric Damien\n",
+    "Project: https://github.com/aymericdamien/TensorFlow-Examples/\n",
+    "'''"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Import MINST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n",
+    "\n",
+    "import tensorflow as tf"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Parameters\n",
+    "learning_rate = 0.001\n",
+    "batch_size = 100\n",
+    "display_step = 1\n",
+    "model_path = \"/tmp/model.ckpt\"\n",
+    "\n",
+    "# Network Parameters\n",
+    "n_hidden_1 = 256 # 1st layer number of features\n",
+    "n_hidden_2 = 256 # 2nd layer number of features\n",
+    "n_input = 784 # MNIST data input (img shape: 28*28)\n",
+    "n_classes = 10 # MNIST total classes (0-9 digits)\n",
+    "\n",
+    "# tf Graph input\n",
+    "x = tf.placeholder(\"float\", [None, n_input])\n",
+    "y = tf.placeholder(\"float\", [None, n_classes])\n",
+    "\n",
+    "\n",
+    "# Create model\n",
+    "def multilayer_perceptron(x, weights, biases):\n",
+    "    # Hidden layer with RELU activation\n",
+    "    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n",
+    "    layer_1 = tf.nn.relu(layer_1)\n",
+    "    # Hidden layer with RELU activation\n",
+    "    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n",
+    "    layer_2 = tf.nn.relu(layer_2)\n",
+    "    # Output layer with linear activation\n",
+    "    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n",
+    "    return out_layer\n",
+    "\n",
+    "# Store layers weight & bias\n",
+    "weights = {\n",
+    "    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n",
+    "    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n",
+    "    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))\n",
+    "}\n",
+    "biases = {\n",
+    "    'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n",
+    "    'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n",
+    "    'out': tf.Variable(tf.random_normal([n_classes]))\n",
+    "}\n",
+    "\n",
+    "# Construct model\n",
+    "pred = multilayer_perceptron(x, weights, biases)\n",
+    "\n",
+    "# Define loss and optimizer\n",
+    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\n",
+    "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n",
+    "\n",
+    "# Initializing the variables\n",
+    "init = tf.initialize_all_variables()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# 'Saver' op to save and restore all the variables\n",
+    "saver = tf.train.Saver()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Starting 1st session...\n",
+      "Epoch: 0001 cost= 182.770135574\n",
+      "Epoch: 0002 cost= 44.863718596\n",
+      "Epoch: 0003 cost= 27.965412349\n",
+      "First Optimization Finished!\n",
+      "Accuracy: 0.906\n",
+      "Model saved in file: /tmp/model.ckpt\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Running first session\n",
+    "print \"Starting 1st session...\"\n",
+    "with tf.Session() as sess:\n",
+    "    # Initialize variables\n",
+    "    sess.run(init)\n",
+    "\n",
+    "    # Training cycle\n",
+    "    for epoch in range(3):\n",
+    "        avg_cost = 0.\n",
+    "        total_batch = int(mnist.train.num_examples/batch_size)\n",
+    "        # Loop over all batches\n",
+    "        for i in range(total_batch):\n",
+    "            batch_x, batch_y = mnist.train.next_batch(batch_size)\n",
+    "            # Run optimization op (backprop) and cost op (to get loss value)\n",
+    "            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,\n",
+    "                                                          y: batch_y})\n",
+    "            # Compute average loss\n",
+    "            avg_cost += c / total_batch\n",
+    "        # Display logs per epoch step\n",
+    "        if epoch % display_step == 0:\n",
+    "            print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \\\n",
+    "                \"{:.9f}\".format(avg_cost)\n",
+    "    print \"First Optimization Finished!\"\n",
+    "\n",
+    "    # Test model\n",
+    "    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n",
+    "    # Calculate accuracy\n",
+    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n",
+    "    print \"Accuracy:\", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})\n",
+    "\n",
+    "    # Save model weights to disk\n",
+    "    save_path = saver.save(sess, model_path)\n",
+    "    print \"Model saved in file: %s\" % save_path"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Starting 2nd session...\n",
+      "Model restored from file: /tmp/model.ckpt\n",
+      "Epoch: 0001 cost= 19.658836002\n",
+      "Epoch: 0002 cost= 14.354811554\n",
+      "Epoch: 0003 cost= 10.580801367\n",
+      "Epoch: 0004 cost= 8.012172253\n",
+      "Epoch: 0005 cost= 5.985675981\n",
+      "Epoch: 0006 cost= 4.572637980\n",
+      "Epoch: 0007 cost= 3.329074899\n",
+      "Second Optimization Finished!\n",
+      "Accuracy: 0.9371\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Running a new session\n",
+    "print \"Starting 2nd session...\"\n",
+    "with tf.Session() as sess:\n",
+    "    # Initialize variables\n",
+    "    sess.run(init)\n",
+    "\n",
+    "    # Restore model weights from previously saved model\n",
+    "    load_path = saver.restore(sess, model_path)\n",
+    "    print \"Model restored from file: %s\" % save_path\n",
+    "\n",
+    "    # Resume training\n",
+    "    for epoch in range(7):\n",
+    "        avg_cost = 0.\n",
+    "        total_batch = int(mnist.train.num_examples / batch_size)\n",
+    "        # Loop over all batches\n",
+    "        for i in range(total_batch):\n",
+    "            batch_x, batch_y = mnist.train.next_batch(batch_size)\n",
+    "            # Run optimization op (backprop) and cost op (to get loss value)\n",
+    "            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,\n",
+    "                                                          y: batch_y})\n",
+    "            # Compute average loss\n",
+    "            avg_cost += c / total_batch\n",
+    "        # Display logs per epoch step\n",
+    "        if epoch % display_step == 0:\n",
+    "            print \"Epoch:\", '%04d' % (epoch + 1), \"cost=\", \\\n",
+    "                \"{:.9f}\".format(avg_cost)\n",
+    "    print \"Second Optimization Finished!\"\n",
+    "\n",
+    "    # Test model\n",
+    "    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n",
+    "    # Calculate accuracy\n",
+    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n",
+    "    print \"Accuracy:\", accuracy.eval(\n",
+    "        {x: mnist.test.images, y: mnist.test.labels})"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    ""
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2.0
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.11"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
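
One detail worth flagging in the second session: variables restored by
saver.restore() do not need the sess.run(init) that precedes the call, since
restore assigns every saved variable itself; the notebook's explicit
initialization is harmless but redundant. A minimal round trip under the same
0.x-era API (the variable name and checkpoint path here are illustrative):

    import tensorflow as tf

    v = tf.Variable(42.0, name="v")
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        path = saver.save(sess, "/tmp/minimal.ckpt")  # returns the save path

    with tf.Session() as sess:
        saver.restore(sess, path)  # assigns v; no initializer needed
        print sess.run(v)          # 42.0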

+ 212 - 0
notebooks/4_Utils/tensorboard_basic.ipynb

@@ -0,0 +1,212 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "'''\n",
+    "Graph and Loss visualization using Tensorboard.\n",
+    "This example is using the MNIST database of handwritten digits\n",
+    "(http://yann.lecun.com/exdb/mnist/)\n",
+    "\n",
+    "Author: Aymeric Damien\n",
+    "Project: https://github.com/aymericdamien/TensorFlow-Examples/\n",
+    "'''"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "import tensorflow as tf\n",
+    "\n",
+    "# Import MINST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Parameters\n",
+    "learning_rate = 0.01\n",
+    "training_epochs = 25\n",
+    "batch_size = 100\n",
+    "display_step = 1\n",
+    "logs_path = '/tmp/tensorflow_logs'\n",
+    "\n",
+    "# tf Graph Input\n",
+    "# mnist data image of shape 28*28=784\n",
+    "x = tf.placeholder(tf.float32, [None, 784], name='InputData')\n",
+    "# 0-9 digits recognition => 10 classes\n",
+    "y = tf.placeholder(tf.float32, [None, 10], name='LabelData')\n",
+    "\n",
+    "# Set model weights\n",
+    "W = tf.Variable(tf.zeros([784, 10]), name='Weights')\n",
+    "b = tf.Variable(tf.zeros([10]), name='Bias')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Construct model and encapsulating all ops into scopes, making\n",
+    "# Tensorboard's Graph visualization more convenient\n",
+    "with tf.name_scope('Model'):\n",
+    "    # Model\n",
+    "    pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax\n",
+    "with tf.name_scope('Loss'):\n",
+    "    # Minimize error using cross entropy\n",
+    "    cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))\n",
+    "with tf.name_scope('SGD'):\n",
+    "    # Gradient Descent\n",
+    "    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n",
+    "with tf.name_scope('Accuracy'):\n",
+    "    # Accuracy\n",
+    "    acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n",
+    "    acc = tf.reduce_mean(tf.cast(acc, tf.float32))\n",
+    "\n",
+    "# Initializing the variables\n",
+    "init = tf.initialize_all_variables()\n",
+    "\n",
+    "# Create a summary to monitor cost tensor\n",
+    "tf.scalar_summary(\"loss\", cost)\n",
+    "# Create a summary to monitor accuracy tensor\n",
+    "tf.scalar_summary(\"accuracy\", acc)\n",
+    "# Merge all summaries into a single op\n",
+    "merged_summary_op = tf.merge_all_summaries()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch: 0001 cost= 1.182138957\n",
+      "Epoch: 0002 cost= 0.664735104\n",
+      "Epoch: 0003 cost= 0.552622685\n",
+      "Epoch: 0004 cost= 0.498596912\n",
+      "Epoch: 0005 cost= 0.465510372\n",
+      "Epoch: 0006 cost= 0.442504281\n",
+      "Epoch: 0007 cost= 0.425473650\n",
+      "Epoch: 0008 cost= 0.412175615\n",
+      "Epoch: 0009 cost= 0.401374554\n",
+      "Epoch: 0010 cost= 0.392403109\n",
+      "Epoch: 0011 cost= 0.384748503\n",
+      "Epoch: 0012 cost= 0.378154479\n",
+      "Epoch: 0013 cost= 0.372405099\n",
+      "Epoch: 0014 cost= 0.367272844\n",
+      "Epoch: 0015 cost= 0.362745077\n",
+      "Epoch: 0016 cost= 0.358575674\n",
+      "Epoch: 0017 cost= 0.354862829\n",
+      "Epoch: 0018 cost= 0.351437834\n",
+      "Epoch: 0019 cost= 0.348300697\n",
+      "Epoch: 0020 cost= 0.345401101\n",
+      "Epoch: 0021 cost= 0.342762216\n",
+      "Epoch: 0022 cost= 0.340199728\n",
+      "Epoch: 0023 cost= 0.337916089\n",
+      "Epoch: 0024 cost= 0.335764083\n",
+      "Epoch: 0025 cost= 0.333645939\n",
+      "Optimization Finished!\n",
+      "Accuracy: 0.9143\n",
+      "Run the command line:\n",
+      "--> tensorboard --logdir=/tmp/tensorflow_logs \n",
+      "Then open http://0.0.0.0:6006/ into your web browser\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Launch the graph\n",
+    "with tf.Session() as sess:\n",
+    "    sess.run(init)\n",
+    "\n",
+    "    # op to write logs to Tensorboard\n",
+    "    summary_writer = tf.train.SummaryWriter(logs_path)\n",
+    "\n",
+    "    # Training cycle\n",
+    "    for epoch in range(training_epochs):\n",
+    "        avg_cost = 0.\n",
+    "        total_batch = int(mnist.train.num_examples/batch_size)\n",
+    "        # Loop over all batches\n",
+    "        for i in range(total_batch):\n",
+    "            batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n",
+    "            # Run optimization op (backprop), cost op (to get loss value)\n",
+    "            # and summary nodes\n",
+    "            _, c, summary = sess.run([optimizer, cost, merged_summary_op],\n",
+    "                                     feed_dict={x: batch_xs, y: batch_ys})\n",
+    "            # Write logs at every iteration\n",
+    "            summary_writer.add_summary(summary, epoch * total_batch + i)\n",
+    "            # Compute average loss\n",
+    "            avg_cost += c / total_batch\n",
+    "        # Display logs per epoch step\n",
+    "        if (epoch+1) % display_step == 0:\n",
+    "            print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(avg_cost)\n",
+    "\n",
+    "    print \"Optimization Finished!\"\n",
+    "\n",
+    "    # Test model\n",
+    "    # Calculate accuracy\n",
+    "    print \"Accuracy:\", acc.eval({x: mnist.test.images, y: mnist.test.labels})\n",
+    "\n",
+    "    print \"Run the command line:\\n\" \\\n",
+    "          \"--> tensorboard --logdir=/tmp/tensorflow_logs \" \\\n",
+    "          \"\\nThen open http://0.0.0.0:6006/ into your web browser\""
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2.0
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.11"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
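
The summary plumbing in this notebook (scalar_summary, merge_all_summaries,
then SummaryWriter.add_summary inside the training loop) works the same for
any scalar you want to chart. A minimal sketch with the same 0.x-era names,
assuming the SummaryWriter graph keyword available in this TensorFlow
generation; the placeholder and log directory are illustrative:

    import tensorflow as tf

    value = tf.placeholder(tf.float32, name="value")
    tf.scalar_summary("value", value)
    merged = tf.merge_all_summaries()

    with tf.Session() as sess:
        writer = tf.train.SummaryWriter("/tmp/tensorflow_logs/demo",
                                        graph=tf.get_default_graph())
        for step in range(10):
            s = sess.run(merged, feed_dict={value: float(step)})
            writer.add_summary(s, step)  # one point per step on the chart
        writer.close()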

Diff file omitted because it is too large
+ 0 - 226
notebooks/5 - User Interface/graph_visualization.ipynb


Diff file omitted because it is too large
+ 0 - 196
notebooks/5 - User Interface/loss_visualization.ipynb


+ 2 - 2
notebooks/4 - Multi GPU/multigpu_basics.ipynb

@@ -163,7 +163,7 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 2
+    "version": 2.0
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
@@ -175,4 +175,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}