Merge pull request #85 from normanheckscher/master

begin refactor for TF1.0
Aymeric Damien 8 years ago
commit 9f9371270d

+ 1 - 1
README.md

@@ -98,7 +98,7 @@ The following examples are coming from [TFLearn](https://github.com/tflearn/tfle
 
 ## Dependencies
 ```
-tensorflow
+tensorflow 1.0alpha
 numpy
 matplotlib
 cuda

+ 1 - 1
examples/2_BasicModels/linear_regression.py

@@ -41,7 +41,7 @@ cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
 optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
 
 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 
 # Launch the graph
 with tf.Session() as sess:
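
The initializer rename above is the most mechanical change in this commit and recurs in every example file. A minimal sketch of the TF 1.0 pattern (the variable is illustrative, not from the repo):

```python
import tensorflow as tf

# Illustrative variable; any graph variables are handled the same way.
W = tf.Variable(tf.zeros([1]), name="weight")

# TF 1.0 spelling; tf.initialize_all_variables() is the deprecated 0.x name.
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)      # runs the init op for every global variable
    print(sess.run(W))  # -> [ 0.]
```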

+ 1 - 1
examples/2_BasicModels/logistic_regression.py

@@ -38,7 +38,7 @@ cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
 optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
 
 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 
 # Launch the graph
 with tf.Session() as sess:

+ 2 - 2
examples/2_BasicModels/nearest_neighbor.py

@@ -26,14 +26,14 @@ xte = tf.placeholder("float", [784])
 
 # Nearest Neighbor calculation using L1 Distance
 # Calculate L1 Distance
-distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.neg(xte))), reduction_indices=1)
+distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1)
 # Prediction: Get min distance index (Nearest neighbor)
 pred = tf.arg_min(distance, 0)
 
 accuracy = 0.
 
 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 
 # Launch the graph
 with tf.Session() as sess:
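
tf.neg is one of the unary ops renamed for TF 1.0 (tf.mul and tf.sub became tf.multiply and tf.subtract in the same cleanup). A minimal sketch of the renamed call with the shapes this file uses:

```python
import tensorflow as tf

xtr = tf.placeholder(tf.float32, [None, 784])  # training set
xte = tf.placeholder(tf.float32, [784])        # one test vector

# tf.negative(xte) replaces the old tf.neg(xte); the surrounding L1-distance
# computation is unchanged: sum |xtr - xte| per row, then take the index of
# the closest training vector.
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))),
                         reduction_indices=1)
pred = tf.arg_min(distance, 0)
```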

+ 2 - 2
examples/3_NeuralNetworks/autoencoder.py

@@ -17,7 +17,7 @@ import matplotlib.pyplot as plt
 
 # Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
-mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
 
 # Parameters
 learning_rate = 0.01
@@ -83,7 +83,7 @@ cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
 optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)
 
 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 
 # Launch the graph
 with tf.Session() as sess:
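
The path change is independent of the API refactor: it only moves the MNIST download cache from the shared /tmp/data/ directory to a MNIST_data folder next to the script. A minimal sketch of the loader's behavior (assuming network access on first run):

```python
from tensorflow.examples.tutorials.mnist import input_data

# Downloads the four MNIST .gz archives into MNIST_data/ if they are not
# already there, then reuses the cached copies on later runs.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

batch_xs, batch_ys = mnist.train.next_batch(100)  # (100, 784) and (100, 10)
```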

+ 8 - 8
examples/3_NeuralNetworks/bidirectional_rnn.py

@@ -10,7 +10,7 @@ Project: https://github.com/aymericdamien/TensorFlow-Examples/
 from __future__ import print_function
 
 import tensorflow as tf
-from tensorflow.python.ops import rnn, rnn_cell
+from tensorflow.contrib import rnn
 import numpy as np
 
 # Import MNIST data
@@ -60,20 +60,20 @@ def BiRNN(x, weights, biases):
     # Reshape to (n_steps*batch_size, n_input)
     x = tf.reshape(x, [-1, n_input])
     # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
-    x = tf.split(0, n_steps, x)
+    x = tf.split(x, n_steps, 0)
 
     # Define lstm cells with tensorflow
     # Forward direction cell
-    lstm_fw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
+    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
     # Backward direction cell
-    lstm_bw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
+    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
 
     # Get lstm cell output
     try:
-        outputs, _, _ = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
+        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                               dtype=tf.float32)
     except Exception: # Old TensorFlow version only returns outputs not states
-        outputs = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
+        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                         dtype=tf.float32)
 
     # Linear activation, using rnn inner loop last output
@@ -82,7 +82,7 @@ def BiRNN(x, weights, biases):
 pred = BiRNN(x, weights, biases)
 
 # Define loss and optimizer
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
 optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
 
 # Evaluate model
@@ -90,7 +90,7 @@ correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
 accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
 
 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 
 # Launch the graph
 with tf.Session() as sess:
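
Two breaking changes meet in this file: the RNN helpers moved from tensorflow.python.ops to tf.contrib.rnn and gained a static_ prefix, and tf.split swapped its argument order to tf.split(value, num_or_size_splits, axis). A minimal sketch with illustrative sizes, not the full example:

```python
import tensorflow as tf
from tensorflow.contrib import rnn  # TF 1.0 home of the static RNN helpers

n_steps, n_input, n_hidden = 28, 28, 128
x = tf.placeholder(tf.float32, [None, n_steps, n_input])

x = tf.transpose(x, [1, 0, 2])    # (n_steps, batch_size, n_input)
x = tf.reshape(x, [-1, n_input])  # (n_steps*batch_size, n_input)
x = tf.split(x, n_steps, 0)       # value first, axis last in TF 1.0

lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                             dtype=tf.float32)
```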

+ 2 - 2
examples/3_NeuralNetworks/convolutional_network.py

@@ -96,7 +96,7 @@ biases = {
 pred = conv_net(x, weights, biases, keep_prob)
 
 # Define loss and optimizer
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
 optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
 
 # Evaluate model
@@ -104,7 +104,7 @@ correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
 accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
 
 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 
 # Launch the graph
 with tf.Session() as sess:
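
TF 1.0 also made the arguments of tf.nn.softmax_cross_entropy_with_logits keyword-only, so the positional (pred, y) form in the old code now raises an error. A minimal sketch with illustrative placeholders:

```python
import tensorflow as tf

pred = tf.placeholder(tf.float32, [None, 10])  # unscaled logits from the net
y = tf.placeholder(tf.float32, [None, 10])     # one-hot ground-truth labels

# The explicit names make the logits/labels roles unambiguous.
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
```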

+ 2 - 2
examples/3_NeuralNetworks/multilayer_perceptron.py

@@ -60,11 +60,11 @@ biases = {
 pred = multilayer_perceptron(x, weights, biases)
 
 # Define loss and optimizer
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
 optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
 
 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 
 # Launch the graph
 with tf.Session() as sess:

+ 6 - 6
examples/3_NeuralNetworks/recurrent_network.py

@@ -10,7 +10,7 @@ Project: https://github.com/aymericdamien/TensorFlow-Examples/
 from __future__ import print_function
 
 import tensorflow as tf
-from tensorflow.python.ops import rnn, rnn_cell
+from tensorflow.contrib import rnn
 
 # Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
@@ -58,13 +58,13 @@ def RNN(x, weights, biases):
     # Reshaping to (n_steps*batch_size, n_input)
     x = tf.reshape(x, [-1, n_input])
     # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
-    x = tf.split(0, n_steps, x)
+    x = tf.split(x, n_steps, 0)
 
     # Define a lstm cell with tensorflow
-    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
+    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
 
     # Get lstm cell output
-    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
+    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
 
     # Linear activation, using rnn inner loop last output
     return tf.matmul(outputs[-1], weights['out']) + biases['out']
@@ -72,7 +72,7 @@ def RNN(x, weights, biases):
 pred = RNN(x, weights, biases)
 
 # Define loss and optimizer
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
 optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
 
 # Evaluate model
@@ -80,7 +80,7 @@ correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
 accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
 
 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 
 # Launch the graph
 with tf.Session() as sess:
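
The unidirectional case follows the same pattern: rnn.rnn from the old module becomes rnn.static_rnn in tf.contrib.rnn. A minimal sketch with illustrative sizes:

```python
import tensorflow as tf
from tensorflow.contrib import rnn

n_steps, n_input, n_hidden = 28, 28, 128
x = tf.placeholder(tf.float32, [None, n_steps, n_input])

# tf.unstack yields the list of per-step (batch_size, n_input) tensors that
# the static helpers expect; it is equivalent to the reshape-and-split above.
x_steps = tf.unstack(x, n_steps, 1)

lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
outputs, states = rnn.static_rnn(lstm_cell, x_steps, dtype=tf.float32)
```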

+ 3 - 3
examples/4_Utils/save_restore_model.py

@@ -11,7 +11,7 @@ from __future__ import print_function
 
 # Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
-mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
 
 import tensorflow as tf
 
@@ -60,11 +60,11 @@ biases = {
 pred = multilayer_perceptron(x, weights, biases)
 
 # Define loss and optimizer
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
 optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
 
 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()
 
 # 'Saver' op to save and restore all the variables
 saver = tf.train.Saver()
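
tf.train.Saver itself is untouched by the TF 1.0 refactor; only the initializer feeding it needed renaming. A minimal save/restore sketch (checkpoint path illustrative):

```python
import tensorflow as tf

W = tf.Variable(tf.zeros([784, 10]), name="weights")
init = tf.global_variables_initializer()
saver = tf.train.Saver()  # covers all saveable variables by default

with tf.Session() as sess:
    sess.run(init)
    save_path = saver.save(sess, "/tmp/model.ckpt")  # illustrative path

with tf.Session() as sess:
    saver.restore(sess, save_path)  # restored variables need no init op
```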

+ 8 - 8
examples/4_Utils/tensorboard_advanced.py

@@ -41,12 +41,12 @@ def multilayer_perceptron(x, weights, biases):
     layer_1 = tf.add(tf.matmul(x, weights['w1']), biases['b1'])
     layer_1 = tf.nn.relu(layer_1)
     # Create a summary to visualize the first layer ReLU activation
-    tf.histogram_summary("relu1", layer_1)
+    tf.summary.histogram("relu1", layer_1)
     # Hidden layer with RELU activation
     layer_2 = tf.add(tf.matmul(layer_1, weights['w2']), biases['b2'])
     layer_2 = tf.nn.relu(layer_2)
     # Create another summary to visualize the second layer ReLU activation
-    tf.histogram_summary("relu2", layer_2)
+    tf.summary.histogram("relu2", layer_2)
     # Output layer
     out_layer = tf.add(tf.matmul(layer_2, weights['w3']), biases['b3'])
     return out_layer
@@ -91,24 +91,24 @@ with tf.name_scope('Accuracy'):
 init = tf.initialize_all_variables()
 
 # Create a summary to monitor cost tensor
-tf.scalar_summary("loss", loss)
+tf.summary.scalar("loss", loss)
 # Create a summary to monitor accuracy tensor
-tf.scalar_summary("accuracy", acc)
+tf.summary.scalar("accuracy", acc)
 # Create summaries to visualize weights
 for var in tf.trainable_variables():
-    tf.histogram_summary(var.name, var)
+    tf.summary.histogram(var.name, var)
 # Summarize all gradients
 for grad, var in grads:
-    tf.histogram_summary(var.name + '/gradient', grad)
+    tf.summary.histogram(var.name + '/gradient', grad)
 # Merge all summaries into a single op
-merged_summary_op = tf.merge_all_summaries()
+merged_summary_op = tf.summary.merge_all()
 
 # Launch the graph
 with tf.Session() as sess:
     sess.run(init)
 
     # op to write logs to Tensorboard
-    summary_writer = tf.train.SummaryWriter(logs_path,
+    summary_writer = tf.summary.FileWriter(logs_path,
                                             graph=tf.get_default_graph())
 
     # Training cycle
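
The summary ops were consolidated into a tf.summary module for 1.0: tf.scalar_summary, tf.histogram_summary and tf.merge_all_summaries become tf.summary.scalar, tf.summary.histogram and tf.summary.merge_all, and tf.train.SummaryWriter becomes tf.summary.FileWriter. A minimal sketch (log directory illustrative):

```python
import tensorflow as tf

loss = tf.constant(0.5, name="loss_value")  # stand-in for a real loss tensor
tf.summary.scalar("loss", loss)
merged_summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    # FileWriter replaces tf.train.SummaryWriter: same role, new namespace.
    summary_writer = tf.summary.FileWriter("/tmp/tensorflow_logs",
                                           graph=tf.get_default_graph())
    summary = sess.run(merged_summary_op)
    summary_writer.add_summary(summary, global_step=0)
    summary_writer.close()
```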

+ 4 - 4
examples/4_Utils/tensorboard_basic.py

@@ -52,18 +52,18 @@ with tf.name_scope('Accuracy'):
 init = tf.initialize_all_variables()
 
 # Create a summary to monitor cost tensor
-tf.scalar_summary("loss", cost)
+tf.summary.scalar("loss", cost)
 # Create a summary to monitor accuracy tensor
-tf.scalar_summary("accuracy", acc)
+tf.summary.scalar("accuracy", acc)
 # Merge all summaries into a single op
-merged_summary_op = tf.merge_all_summaries()
+merged_summary_op = tf.summary.merge_all()
 
 # Launch the graph
 with tf.Session() as sess:
     sess.run(init)
 
     # op to write logs to Tensorboard
-    summary_writer = tf.train.SummaryWriter(logs_path, graph=tf.get_default_graph())
+    summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
 
     # Training cycle
     for epoch in range(training_epochs):

File diff suppressed because it is too large
+ 41 - 31
notebooks/2_BasicModels/linear_regression.ipynb


+ 38 - 39
notebooks/2_BasicModels/logistic_regression.ipynb

@@ -18,7 +18,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 1,
    "metadata": {
     "collapsed": false
    },
@@ -27,10 +27,10 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+      "Extracting MNIST_data/train-images-idx3-ubyte.gz\n",
+      "Extracting MNIST_data/train-labels-idx1-ubyte.gz\n",
+      "Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n"
      ]
     }
    ],
@@ -39,14 +39,14 @@
     "\n",
     "# Import MINST data\n",
     "from tensorflow.examples.tutorials.mnist import input_data\n",
-    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+    "mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 3,
    "metadata": {
-    "collapsed": true
+    "collapsed": false
    },
    "outputs": [],
    "source": [
@@ -73,12 +73,12 @@
     "optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n",
     "\n",
     "# Initializing the variables\n",
-    "init = tf.initialize_all_variables()"
+    "init = tf.global_variables_initializer()"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "metadata": {
     "collapsed": false
    },
@@ -87,33 +87,23 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Epoch: 0001 cost= 1.182138961\n",
-      "Epoch: 0002 cost= 0.664670898\n",
-      "Epoch: 0003 cost= 0.552613988\n",
-      "Epoch: 0004 cost= 0.498497931\n",
-      "Epoch: 0005 cost= 0.465418769\n",
-      "Epoch: 0006 cost= 0.442546219\n",
-      "Epoch: 0007 cost= 0.425473814\n",
-      "Epoch: 0008 cost= 0.412171735\n",
-      "Epoch: 0009 cost= 0.401359516\n",
-      "Epoch: 0010 cost= 0.392401536\n",
-      "Epoch: 0011 cost= 0.384750201\n",
-      "Epoch: 0012 cost= 0.378185581\n",
-      "Epoch: 0013 cost= 0.372401533\n",
-      "Epoch: 0014 cost= 0.367302442\n",
-      "Epoch: 0015 cost= 0.362702316\n",
-      "Epoch: 0016 cost= 0.358568827\n",
-      "Epoch: 0017 cost= 0.354882155\n",
-      "Epoch: 0018 cost= 0.351430912\n",
-      "Epoch: 0019 cost= 0.348316068\n",
-      "Epoch: 0020 cost= 0.345392556\n",
-      "Epoch: 0021 cost= 0.342737278\n",
-      "Epoch: 0022 cost= 0.340264994\n",
-      "Epoch: 0023 cost= 0.337890242\n",
-      "Epoch: 0024 cost= 0.335708558\n",
-      "Epoch: 0025 cost= 0.333686476\n",
-      "Optimization Finished!\n",
-      "Accuracy: 0.889667\n"
+      "Epoch: 0001 cost= 1.182138959\n",
+      "Epoch: 0002 cost= 0.664778162\n",
+      "Epoch: 0003 cost= 0.552686284\n",
+      "Epoch: 0004 cost= 0.498628905\n",
+      "Epoch: 0005 cost= 0.465469866\n",
+      "Epoch: 0006 cost= 0.442537872\n",
+      "Epoch: 0007 cost= 0.425462044\n",
+      "Epoch: 0008 cost= 0.412185303\n",
+      "Epoch: 0009 cost= 0.401311587\n",
+      "Epoch: 0010 cost= 0.392326203\n",
+      "Epoch: 0011 cost= 0.384736038\n",
+      "Epoch: 0012 cost= 0.378137191\n",
+      "Epoch: 0013 cost= 0.372363752\n",
+      "Epoch: 0014 cost= 0.367308579\n",
+      "Epoch: 0015 cost= 0.362704660\n",
+      "Epoch: 0016 cost= 0.358588599\n",
+      "Epoch: 0017 cost= 0.354823110\n"
      ]
     }
    ],
@@ -146,6 +136,15 @@
     "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
     "    print \"Accuracy:\", accuracy.eval({x: mnist.test.images[:3000], y: mnist.test.labels[:3000]})"
    ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": []
   }
  ],
 "metadata": {
@@ -157,16 +156,16 @@
   "language_info": {
   "language_info": {
    "codemirror_mode": {
    "codemirror_mode": {
     "name": "ipython",
     "name": "ipython",
-    "version": 2.0
+    "version": 2
    },
    },
    "file_extension": ".py",
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "mimetype": "text/x-python",
    "name": "python",
    "name": "python",
    "nbconvert_exporter": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython2",
    "pygments_lexer": "ipython2",
-   "version": "2.7.11"
+   "version": "2.7.13"
   }
   }
  },
  },
  "nbformat": 4,
  "nbformat": 4,
  "nbformat_minor": 0
  "nbformat_minor": 0
-}
+}

+ 23 - 14
notebooks/2_BasicModels/nearest_neighbor.ipynb

@@ -18,7 +18,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 1,
    "metadata": {
     "collapsed": false
    },
@@ -27,10 +27,10 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+      "Extracting MNIST_data/train-images-idx3-ubyte.gz\n",
+      "Extracting MNIST_data/train-labels-idx1-ubyte.gz\n",
+      "Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n"
      ]
     }
    ],
@@ -40,14 +40,14 @@
     "\n",
     "# Import MINST data\n",
     "from tensorflow.examples.tutorials.mnist import input_data\n",
-    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+    "mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 2,
    "metadata": {
-    "collapsed": true
+    "collapsed": false
    },
    "outputs": [],
    "source": [
@@ -61,19 +61,19 @@
     "\n",
     "# Nearest Neighbor calculation using L1 Distance\n",
     "# Calculate L1 Distance\n",
-    "distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.neg(xte))), reduction_indices=1)\n",
+    "distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1)\n",
     "# Prediction: Get min distance index (Nearest neighbor)\n",
     "pred = tf.arg_min(distance, 0)\n",
     "\n",
     "accuracy = 0.\n",
     "\n",
     "# Initializing the variables\n",
-    "init = tf.initialize_all_variables()"
+    "init = tf.global_variables_initializer()"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 3,
    "metadata": {
     "collapsed": false
    },
@@ -305,6 +305,15 @@
     "    print \"Done!\"\n",
     "    print \"Accuracy:\", accuracy"
    ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": []
   }
  ],
 "metadata": {
@@ -316,16 +325,16 @@
   "language_info": {
   "language_info": {
    "codemirror_mode": {
    "codemirror_mode": {
     "name": "ipython",
     "name": "ipython",
-    "version": 2.0
+    "version": 2
    },
    },
    "file_extension": ".py",
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "mimetype": "text/x-python",
    "name": "python",
    "name": "python",
    "nbconvert_exporter": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython2",
    "pygments_lexer": "ipython2",
-   "version": "2.7.11"
+   "version": "2.7.13"
   }
   }
  },
  },
  "nbformat": 4,
  "nbformat": 4,
  "nbformat_minor": 0
  "nbformat_minor": 0
-}
+}

File diff suppressed because it is too large
+ 35 - 56
notebooks/3_NeuralNetworks/autoencoder.ipynb


+ 121 - 115
notebooks/3_NeuralNetworks/bidirectional_rnn.ipynb

@@ -1,10 +1,10 @@
 {
  "cells": [
   {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
    "source": [
     "'''\n",
     "A Bidirectional Reccurent Neural Network (LSTM) implementation example using TensorFlow library.\n",
@@ -18,35 +18,26 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
-     ]
-    }
-   ],
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
    "source": [
     "import tensorflow as tf\n",
-    "from tensorflow.models.rnn import rnn, rnn_cell\n",
+    "from tensorflow.contrib import rnn\n",
     "import numpy as np\n",
     "\n",
     "# Import MINST data\n",
     "from tensorflow.examples.tutorials.mnist import input_data\n",
-    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+    "mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)"
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
    "source": [
     "'''\n",
     "To classify images using a bidirectional reccurent neural network, we consider\n",
@@ -58,7 +49,9 @@
   {
    "cell_type": "code",
    "execution_count": 2,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "# Parameters\n",
@@ -90,7 +83,9 @@
   {
    "cell_type": "code",
    "execution_count": 3,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "def BiRNN(x, weights, biases):\n",
@@ -104,20 +99,20 @@
     "    # Reshape to (n_steps*batch_size, n_input)\n",
     "    # Reshape to (n_steps*batch_size, n_input)\n",
     "    x = tf.reshape(x, [-1, n_input])\n",
     "    x = tf.reshape(x, [-1, n_input])\n",
     "    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)\n",
     "    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)\n",
-    "    x = tf.split(0, n_steps, x)\n",
+    "    x = tf.split(x, n_steps, 0)\n",
     "\n",
     "\n",
     "    # Define lstm cells with tensorflow\n",
     "    # Define lstm cells with tensorflow\n",
     "    # Forward direction cell\n",
     "    # Forward direction cell\n",
-    "    lstm_fw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)\n",
+    "    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)\n",
     "    # Backward direction cell\n",
     "    # Backward direction cell\n",
-    "    lstm_bw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)\n",
+    "    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)\n",
     "\n",
     "\n",
     "    # Get lstm cell output\n",
     "    # Get lstm cell output\n",
     "    try:\n",
     "    try:\n",
-    "        outputs, _, _ = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,\n",
+    "        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,\n",
     "                                              dtype=tf.float32)\n",
     "                                              dtype=tf.float32)\n",
     "    except Exception: # Old TensorFlow version only returns outputs not states\n",
     "    except Exception: # Old TensorFlow version only returns outputs not states\n",
-    "        outputs = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,\n",
+    "        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,\n",
     "                                        dtype=tf.float32)\n",
     "                                        dtype=tf.float32)\n",
     "\n",
     "\n",
     "    # Linear activation, using rnn inner loop last output\n",
     "    # Linear activation, using rnn inner loop last output\n",
@@ -126,7 +121,7 @@
     "pred = BiRNN(x, weights, biases)\n",
     "pred = BiRNN(x, weights, biases)\n",
     "\n",
     "\n",
     "# Define loss and optimizer\n",
     "# Define loss and optimizer\n",
-    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\n",
+    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n",
     "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n",
     "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n",
     "\n",
     "\n",
     "# Evaluate model\n",
     "# Evaluate model\n",
@@ -134,98 +129,100 @@
     "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
     "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
     "\n",
     "\n",
     "# Initializing the variables\n",
     "# Initializing the variables\n",
-    "init = tf.initialize_all_variables()"
+    "init = tf.global_variables_initializer()"
    ]
    ]
   },
   },
   {
   {
    "cell_type": "code",
    "cell_type": "code",
    "execution_count": 4,
    "execution_count": 4,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [
    "outputs": [
     {
     {
      "name": "stdout",
      "name": "stdout",
      "output_type": "stream",
      "output_type": "stream",
      "text": [
      "text": [
-      "Iter 1280, Minibatch Loss= 1.689740, Training Accuracy= 0.36719\n",
-      "Iter 2560, Minibatch Loss= 1.477009, Training Accuracy= 0.44531\n",
-      "Iter 3840, Minibatch Loss= 1.245874, Training Accuracy= 0.53125\n",
-      "Iter 5120, Minibatch Loss= 0.990923, Training Accuracy= 0.64062\n",
-      "Iter 6400, Minibatch Loss= 0.752950, Training Accuracy= 0.71875\n",
-      "Iter 7680, Minibatch Loss= 1.023025, Training Accuracy= 0.61719\n",
-      "Iter 8960, Minibatch Loss= 0.921414, Training Accuracy= 0.68750\n",
-      "Iter 10240, Minibatch Loss= 0.719829, Training Accuracy= 0.75000\n",
-      "Iter 11520, Minibatch Loss= 0.468657, Training Accuracy= 0.86719\n",
-      "Iter 12800, Minibatch Loss= 0.654315, Training Accuracy= 0.78125\n",
-      "Iter 14080, Minibatch Loss= 0.595391, Training Accuracy= 0.83594\n",
-      "Iter 15360, Minibatch Loss= 0.392862, Training Accuracy= 0.83594\n",
-      "Iter 16640, Minibatch Loss= 0.421122, Training Accuracy= 0.92188\n",
-      "Iter 17920, Minibatch Loss= 0.311471, Training Accuracy= 0.88281\n",
-      "Iter 19200, Minibatch Loss= 0.276949, Training Accuracy= 0.92188\n",
-      "Iter 20480, Minibatch Loss= 0.170499, Training Accuracy= 0.94531\n",
-      "Iter 21760, Minibatch Loss= 0.419481, Training Accuracy= 0.86719\n",
-      "Iter 23040, Minibatch Loss= 0.183765, Training Accuracy= 0.92188\n",
-      "Iter 24320, Minibatch Loss= 0.386232, Training Accuracy= 0.86719\n",
-      "Iter 25600, Minibatch Loss= 0.335571, Training Accuracy= 0.88281\n",
-      "Iter 26880, Minibatch Loss= 0.169092, Training Accuracy= 0.92969\n",
-      "Iter 28160, Minibatch Loss= 0.247623, Training Accuracy= 0.92969\n",
-      "Iter 29440, Minibatch Loss= 0.242989, Training Accuracy= 0.94531\n",
-      "Iter 30720, Minibatch Loss= 0.253811, Training Accuracy= 0.92188\n",
-      "Iter 32000, Minibatch Loss= 0.169660, Training Accuracy= 0.93750\n",
-      "Iter 33280, Minibatch Loss= 0.291349, Training Accuracy= 0.90625\n",
-      "Iter 34560, Minibatch Loss= 0.172026, Training Accuracy= 0.95312\n",
-      "Iter 35840, Minibatch Loss= 0.186019, Training Accuracy= 0.93750\n",
-      "Iter 37120, Minibatch Loss= 0.298480, Training Accuracy= 0.89062\n",
-      "Iter 38400, Minibatch Loss= 0.158750, Training Accuracy= 0.92188\n",
-      "Iter 39680, Minibatch Loss= 0.162706, Training Accuracy= 0.94531\n",
-      "Iter 40960, Minibatch Loss= 0.339814, Training Accuracy= 0.86719\n",
-      "Iter 42240, Minibatch Loss= 0.068817, Training Accuracy= 0.99219\n",
-      "Iter 43520, Minibatch Loss= 0.188742, Training Accuracy= 0.93750\n",
-      "Iter 44800, Minibatch Loss= 0.176708, Training Accuracy= 0.92969\n",
-      "Iter 46080, Minibatch Loss= 0.096726, Training Accuracy= 0.96875\n",
-      "Iter 47360, Minibatch Loss= 0.220973, Training Accuracy= 0.92969\n",
-      "Iter 48640, Minibatch Loss= 0.226749, Training Accuracy= 0.94531\n",
-      "Iter 49920, Minibatch Loss= 0.188906, Training Accuracy= 0.94531\n",
-      "Iter 51200, Minibatch Loss= 0.145194, Training Accuracy= 0.95312\n",
-      "Iter 52480, Minibatch Loss= 0.168948, Training Accuracy= 0.95312\n",
-      "Iter 53760, Minibatch Loss= 0.069116, Training Accuracy= 0.97656\n",
-      "Iter 55040, Minibatch Loss= 0.228721, Training Accuracy= 0.93750\n",
-      "Iter 56320, Minibatch Loss= 0.152915, Training Accuracy= 0.95312\n",
-      "Iter 57600, Minibatch Loss= 0.126974, Training Accuracy= 0.96875\n",
-      "Iter 58880, Minibatch Loss= 0.078870, Training Accuracy= 0.97656\n",
-      "Iter 60160, Minibatch Loss= 0.225498, Training Accuracy= 0.95312\n",
-      "Iter 61440, Minibatch Loss= 0.111760, Training Accuracy= 0.97656\n",
-      "Iter 62720, Minibatch Loss= 0.161434, Training Accuracy= 0.97656\n",
-      "Iter 64000, Minibatch Loss= 0.207190, Training Accuracy= 0.94531\n",
-      "Iter 65280, Minibatch Loss= 0.103831, Training Accuracy= 0.96094\n",
-      "Iter 66560, Minibatch Loss= 0.153846, Training Accuracy= 0.93750\n",
-      "Iter 67840, Minibatch Loss= 0.082717, Training Accuracy= 0.96875\n",
-      "Iter 69120, Minibatch Loss= 0.199301, Training Accuracy= 0.95312\n",
-      "Iter 70400, Minibatch Loss= 0.139725, Training Accuracy= 0.96875\n",
-      "Iter 71680, Minibatch Loss= 0.169596, Training Accuracy= 0.95312\n",
-      "Iter 72960, Minibatch Loss= 0.142444, Training Accuracy= 0.96094\n",
-      "Iter 74240, Minibatch Loss= 0.145822, Training Accuracy= 0.95312\n",
-      "Iter 75520, Minibatch Loss= 0.129086, Training Accuracy= 0.94531\n",
-      "Iter 76800, Minibatch Loss= 0.078082, Training Accuracy= 0.97656\n",
-      "Iter 78080, Minibatch Loss= 0.151803, Training Accuracy= 0.94531\n",
-      "Iter 79360, Minibatch Loss= 0.050142, Training Accuracy= 0.98438\n",
-      "Iter 80640, Minibatch Loss= 0.136788, Training Accuracy= 0.95312\n",
-      "Iter 81920, Minibatch Loss= 0.130100, Training Accuracy= 0.94531\n",
-      "Iter 83200, Minibatch Loss= 0.058298, Training Accuracy= 0.98438\n",
-      "Iter 84480, Minibatch Loss= 0.120124, Training Accuracy= 0.96094\n",
-      "Iter 85760, Minibatch Loss= 0.064916, Training Accuracy= 0.97656\n",
-      "Iter 87040, Minibatch Loss= 0.137179, Training Accuracy= 0.93750\n",
-      "Iter 88320, Minibatch Loss= 0.138268, Training Accuracy= 0.95312\n",
-      "Iter 89600, Minibatch Loss= 0.072827, Training Accuracy= 0.97656\n",
-      "Iter 90880, Minibatch Loss= 0.123839, Training Accuracy= 0.96875\n",
-      "Iter 92160, Minibatch Loss= 0.087194, Training Accuracy= 0.96875\n",
-      "Iter 93440, Minibatch Loss= 0.083489, Training Accuracy= 0.97656\n",
-      "Iter 94720, Minibatch Loss= 0.131827, Training Accuracy= 0.95312\n",
-      "Iter 96000, Minibatch Loss= 0.098764, Training Accuracy= 0.96875\n",
-      "Iter 97280, Minibatch Loss= 0.115553, Training Accuracy= 0.94531\n",
-      "Iter 98560, Minibatch Loss= 0.079704, Training Accuracy= 0.96875\n",
-      "Iter 99840, Minibatch Loss= 0.064562, Training Accuracy= 0.98438\n",
+      "Iter 1280, Minibatch Loss= 1.557283, Training Accuracy= 0.49219\n",
+      "Iter 2560, Minibatch Loss= 1.358445, Training Accuracy= 0.56250\n",
+      "Iter 3840, Minibatch Loss= 1.043732, Training Accuracy= 0.64062\n",
+      "Iter 5120, Minibatch Loss= 0.796770, Training Accuracy= 0.72656\n",
+      "Iter 6400, Minibatch Loss= 0.626206, Training Accuracy= 0.72656\n",
+      "Iter 7680, Minibatch Loss= 1.025919, Training Accuracy= 0.65625\n",
+      "Iter 8960, Minibatch Loss= 0.744850, Training Accuracy= 0.76562\n",
+      "Iter 10240, Minibatch Loss= 0.530111, Training Accuracy= 0.84375\n",
+      "Iter 11520, Minibatch Loss= 0.383806, Training Accuracy= 0.86719\n",
+      "Iter 12800, Minibatch Loss= 0.607816, Training Accuracy= 0.82812\n",
+      "Iter 14080, Minibatch Loss= 0.410879, Training Accuracy= 0.89062\n",
+      "Iter 15360, Minibatch Loss= 0.335351, Training Accuracy= 0.89844\n",
+      "Iter 16640, Minibatch Loss= 0.428004, Training Accuracy= 0.91406\n",
+      "Iter 17920, Minibatch Loss= 0.307468, Training Accuracy= 0.91406\n",
+      "Iter 19200, Minibatch Loss= 0.249527, Training Accuracy= 0.92188\n",
+      "Iter 20480, Minibatch Loss= 0.148163, Training Accuracy= 0.96094\n",
+      "Iter 21760, Minibatch Loss= 0.445275, Training Accuracy= 0.83594\n",
+      "Iter 23040, Minibatch Loss= 0.173083, Training Accuracy= 0.93750\n",
+      "Iter 24320, Minibatch Loss= 0.373696, Training Accuracy= 0.87500\n",
+      "Iter 25600, Minibatch Loss= 0.509869, Training Accuracy= 0.85938\n",
+      "Iter 26880, Minibatch Loss= 0.198096, Training Accuracy= 0.92969\n",
+      "Iter 28160, Minibatch Loss= 0.228221, Training Accuracy= 0.92188\n",
+      "Iter 29440, Minibatch Loss= 0.280088, Training Accuracy= 0.89844\n",
+      "Iter 30720, Minibatch Loss= 0.300495, Training Accuracy= 0.91406\n",
+      "Iter 32000, Minibatch Loss= 0.171746, Training Accuracy= 0.95312\n",
+      "Iter 33280, Minibatch Loss= 0.263745, Training Accuracy= 0.89844\n",
+      "Iter 34560, Minibatch Loss= 0.177300, Training Accuracy= 0.93750\n",
+      "Iter 35840, Minibatch Loss= 0.160621, Training Accuracy= 0.95312\n",
+      "Iter 37120, Minibatch Loss= 0.321745, Training Accuracy= 0.91406\n",
+      "Iter 38400, Minibatch Loss= 0.188322, Training Accuracy= 0.93750\n",
+      "Iter 39680, Minibatch Loss= 0.104025, Training Accuracy= 0.96875\n",
+      "Iter 40960, Minibatch Loss= 0.291053, Training Accuracy= 0.89062\n",
+      "Iter 42240, Minibatch Loss= 0.131189, Training Accuracy= 0.95312\n",
+      "Iter 43520, Minibatch Loss= 0.154949, Training Accuracy= 0.92969\n",
+      "Iter 44800, Minibatch Loss= 0.150411, Training Accuracy= 0.93750\n",
+      "Iter 46080, Minibatch Loss= 0.117008, Training Accuracy= 0.96094\n",
+      "Iter 47360, Minibatch Loss= 0.181344, Training Accuracy= 0.96094\n",
+      "Iter 48640, Minibatch Loss= 0.209197, Training Accuracy= 0.94531\n",
+      "Iter 49920, Minibatch Loss= 0.159350, Training Accuracy= 0.96094\n",
+      "Iter 51200, Minibatch Loss= 0.124001, Training Accuracy= 0.95312\n",
+      "Iter 52480, Minibatch Loss= 0.165183, Training Accuracy= 0.94531\n",
+      "Iter 53760, Minibatch Loss= 0.046438, Training Accuracy= 0.97656\n",
+      "Iter 55040, Minibatch Loss= 0.199995, Training Accuracy= 0.91406\n",
+      "Iter 56320, Minibatch Loss= 0.057071, Training Accuracy= 0.97656\n",
+      "Iter 57600, Minibatch Loss= 0.177065, Training Accuracy= 0.92188\n",
+      "Iter 58880, Minibatch Loss= 0.091666, Training Accuracy= 0.96094\n",
+      "Iter 60160, Minibatch Loss= 0.069232, Training Accuracy= 0.96875\n",
+      "Iter 61440, Minibatch Loss= 0.127353, Training Accuracy= 0.94531\n",
+      "Iter 62720, Minibatch Loss= 0.095795, Training Accuracy= 0.96094\n",
+      "Iter 64000, Minibatch Loss= 0.202651, Training Accuracy= 0.96875\n",
+      "Iter 65280, Minibatch Loss= 0.118779, Training Accuracy= 0.95312\n",
+      "Iter 66560, Minibatch Loss= 0.043173, Training Accuracy= 0.98438\n",
+      "Iter 67840, Minibatch Loss= 0.152280, Training Accuracy= 0.95312\n",
+      "Iter 69120, Minibatch Loss= 0.085301, Training Accuracy= 0.96875\n",
+      "Iter 70400, Minibatch Loss= 0.093421, Training Accuracy= 0.96094\n",
+      "Iter 71680, Minibatch Loss= 0.096358, Training Accuracy= 0.96875\n",
+      "Iter 72960, Minibatch Loss= 0.053386, Training Accuracy= 0.98438\n",
+      "Iter 74240, Minibatch Loss= 0.065237, Training Accuracy= 0.97656\n",
+      "Iter 75520, Minibatch Loss= 0.228090, Training Accuracy= 0.92188\n",
+      "Iter 76800, Minibatch Loss= 0.106751, Training Accuracy= 0.95312\n",
+      "Iter 78080, Minibatch Loss= 0.187795, Training Accuracy= 0.94531\n",
+      "Iter 79360, Minibatch Loss= 0.092611, Training Accuracy= 0.96094\n",
+      "Iter 80640, Minibatch Loss= 0.137386, Training Accuracy= 0.96875\n",
+      "Iter 81920, Minibatch Loss= 0.106634, Training Accuracy= 0.98438\n",
+      "Iter 83200, Minibatch Loss= 0.111749, Training Accuracy= 0.94531\n",
+      "Iter 84480, Minibatch Loss= 0.191184, Training Accuracy= 0.94531\n",
+      "Iter 85760, Minibatch Loss= 0.063982, Training Accuracy= 0.96094\n",
+      "Iter 87040, Minibatch Loss= 0.092380, Training Accuracy= 0.96875\n",
+      "Iter 88320, Minibatch Loss= 0.089899, Training Accuracy= 0.97656\n",
+      "Iter 89600, Minibatch Loss= 0.141107, Training Accuracy= 0.94531\n",
+      "Iter 90880, Minibatch Loss= 0.075549, Training Accuracy= 0.96094\n",
+      "Iter 92160, Minibatch Loss= 0.186539, Training Accuracy= 0.94531\n",
+      "Iter 93440, Minibatch Loss= 0.079639, Training Accuracy= 0.97656\n",
+      "Iter 94720, Minibatch Loss= 0.156895, Training Accuracy= 0.95312\n",
+      "Iter 96000, Minibatch Loss= 0.088042, Training Accuracy= 0.97656\n",
+      "Iter 97280, Minibatch Loss= 0.076670, Training Accuracy= 0.96875\n",
+      "Iter 98560, Minibatch Loss= 0.051336, Training Accuracy= 0.97656\n",
+      "Iter 99840, Minibatch Loss= 0.086923, Training Accuracy= 0.98438\n",
       "Optimization Finished!\n",
       "Optimization Finished!\n",
-      "Testing Accuracy: 0.992188\n"
+      "Testing Accuracy: 0.960938\n"
      ]
     }
    ],
@@ -259,6 +256,15 @@
     "    print \"Testing Accuracy:\", \\\n",
     "    print \"Testing Accuracy:\", \\\n",
     "        sess.run(accuracy, feed_dict={x: test_data, y: test_label})"
     "        sess.run(accuracy, feed_dict={x: test_data, y: test_label})"
    ]
    ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": []
   }
  ],
 "metadata": {
@@ -270,14 +276,14 @@
   "language_info": {
   "language_info": {
    "codemirror_mode": {
    "codemirror_mode": {
     "name": "ipython",
     "name": "ipython",
-    "version": 2.0
+    "version": 2
    },
    },
    "file_extension": ".py",
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "mimetype": "text/x-python",
    "name": "python",
    "name": "python",
    "nbconvert_exporter": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython2",
    "pygments_lexer": "ipython2",
-   "version": "2.7.11"
+   "version": "2.7.13"
   }
   }
  },
  },
  "nbformat": 4,
  "nbformat": 4,

+ 173 - 175
notebooks/3_NeuralNetworks/convolutional_network.ipynb

@@ -20,28 +20,17 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {
     "collapsed": false
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
    "source": [
     "import tensorflow as tf\n",
     "import tensorflow as tf\n",
     "\n",
     "\n",
     "# Import MNIST data\n",
     "# Import MNIST data\n",
     "from tensorflow.examples.tutorials.mnist import input_data\n",
     "from tensorflow.examples.tutorials.mnist import input_data\n",
-    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+    "mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)"
    ]
    ]
   },
   },
   {
   {
@@ -150,7 +139,7 @@
     "pred = conv_net(x, weights, biases, keep_prob)\n",
     "pred = conv_net(x, weights, biases, keep_prob)\n",
     "\n",
     "\n",
     "# Define loss and optimizer\n",
     "# Define loss and optimizer\n",
-    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\n",
+    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n",
     "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n",
     "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n",
     "\n",
     "\n",
     "# Evaluate model\n",
     "# Evaluate model\n",
@@ -158,7 +147,7 @@
     "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
     "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
     "\n",
     "\n",
     "# Initializing the variables\n",
     "# Initializing the variables\n",
-    "init = tf.initialize_all_variables()"
+    "init = tf.global_variables_initializer()"
    ]
    ]
   },
   },
   {
   {
@@ -172,164 +161,164 @@
      "name": "stdout",
      "name": "stdout",
      "output_type": "stream",
      "output_type": "stream",
      "text": [
      "text": [
-      "Iter 1280, Minibatch Loss= 17231.589844, Training Accuracy= 0.25000\n",
-      "Iter 2560, Minibatch Loss= 10580.260742, Training Accuracy= 0.54688\n",
-      "Iter 3840, Minibatch Loss= 7395.362793, Training Accuracy= 0.64062\n",
-      "Iter 5120, Minibatch Loss= 4864.292480, Training Accuracy= 0.75781\n",
-      "Iter 6400, Minibatch Loss= 3830.062012, Training Accuracy= 0.80469\n",
-      "Iter 7680, Minibatch Loss= 6031.701172, Training Accuracy= 0.72656\n",
-      "Iter 8960, Minibatch Loss= 2549.708740, Training Accuracy= 0.81250\n",
-      "Iter 10240, Minibatch Loss= 2010.484985, Training Accuracy= 0.84375\n",
-      "Iter 11520, Minibatch Loss= 1607.380981, Training Accuracy= 0.89062\n",
-      "Iter 12800, Minibatch Loss= 1983.302856, Training Accuracy= 0.82812\n",
-      "Iter 14080, Minibatch Loss= 401.215088, Training Accuracy= 0.94531\n",
-      "Iter 15360, Minibatch Loss= 976.289307, Training Accuracy= 0.95312\n",
-      "Iter 16640, Minibatch Loss= 1844.699951, Training Accuracy= 0.89844\n",
-      "Iter 17920, Minibatch Loss= 1009.859863, Training Accuracy= 0.92969\n",
-      "Iter 19200, Minibatch Loss= 1397.939453, Training Accuracy= 0.88281\n",
-      "Iter 20480, Minibatch Loss= 540.369995, Training Accuracy= 0.96094\n",
-      "Iter 21760, Minibatch Loss= 2589.246826, Training Accuracy= 0.87500\n",
-      "Iter 23040, Minibatch Loss= 404.981293, Training Accuracy= 0.96094\n",
-      "Iter 24320, Minibatch Loss= 742.155396, Training Accuracy= 0.93750\n",
-      "Iter 25600, Minibatch Loss= 851.599731, Training Accuracy= 0.93750\n",
-      "Iter 26880, Minibatch Loss= 1527.609619, Training Accuracy= 0.90625\n",
-      "Iter 28160, Minibatch Loss= 1209.633301, Training Accuracy= 0.91406\n",
-      "Iter 29440, Minibatch Loss= 1123.146851, Training Accuracy= 0.93750\n",
-      "Iter 30720, Minibatch Loss= 950.860596, Training Accuracy= 0.92188\n",
-      "Iter 32000, Minibatch Loss= 1217.373779, Training Accuracy= 0.92188\n",
-      "Iter 33280, Minibatch Loss= 859.433105, Training Accuracy= 0.91406\n",
-      "Iter 34560, Minibatch Loss= 487.426331, Training Accuracy= 0.95312\n",
-      "Iter 35840, Minibatch Loss= 287.507721, Training Accuracy= 0.96875\n",
-      "Iter 37120, Minibatch Loss= 786.797485, Training Accuracy= 0.91406\n",
-      "Iter 38400, Minibatch Loss= 248.981216, Training Accuracy= 0.97656\n",
-      "Iter 39680, Minibatch Loss= 147.081467, Training Accuracy= 0.97656\n",
-      "Iter 40960, Minibatch Loss= 1198.459106, Training Accuracy= 0.93750\n",
-      "Iter 42240, Minibatch Loss= 717.058716, Training Accuracy= 0.92188\n",
-      "Iter 43520, Minibatch Loss= 351.870453, Training Accuracy= 0.96094\n",
-      "Iter 44800, Minibatch Loss= 271.505554, Training Accuracy= 0.96875\n",
-      "Iter 46080, Minibatch Loss= 0.000000, Training Accuracy= 1.00000\n",
-      "Iter 47360, Minibatch Loss= 806.163818, Training Accuracy= 0.95312\n",
-      "Iter 48640, Minibatch Loss= 1055.359009, Training Accuracy= 0.91406\n",
-      "Iter 49920, Minibatch Loss= 459.845520, Training Accuracy= 0.94531\n",
-      "Iter 51200, Minibatch Loss= 133.995087, Training Accuracy= 0.97656\n",
-      "Iter 52480, Minibatch Loss= 378.886780, Training Accuracy= 0.96094\n",
-      "Iter 53760, Minibatch Loss= 122.112671, Training Accuracy= 0.98438\n",
-      "Iter 55040, Minibatch Loss= 357.410950, Training Accuracy= 0.96875\n",
-      "Iter 56320, Minibatch Loss= 164.791595, Training Accuracy= 0.98438\n",
-      "Iter 57600, Minibatch Loss= 740.711060, Training Accuracy= 0.95312\n",
-      "Iter 58880, Minibatch Loss= 755.948364, Training Accuracy= 0.92969\n",
-      "Iter 60160, Minibatch Loss= 289.819153, Training Accuracy= 0.94531\n",
-      "Iter 61440, Minibatch Loss= 162.940323, Training Accuracy= 0.96875\n",
-      "Iter 62720, Minibatch Loss= 616.192200, Training Accuracy= 0.92969\n",
-      "Iter 64000, Minibatch Loss= 649.317993, Training Accuracy= 0.92188\n",
-      "Iter 65280, Minibatch Loss= 1021.529785, Training Accuracy= 0.93750\n",
-      "Iter 66560, Minibatch Loss= 203.839050, Training Accuracy= 0.96094\n",
-      "Iter 67840, Minibatch Loss= 469.755249, Training Accuracy= 0.96094\n",
-      "Iter 69120, Minibatch Loss= 36.496567, Training Accuracy= 0.98438\n",
-      "Iter 70400, Minibatch Loss= 214.677551, Training Accuracy= 0.95312\n",
-      "Iter 71680, Minibatch Loss= 115.657990, Training Accuracy= 0.96875\n",
-      "Iter 72960, Minibatch Loss= 354.555115, Training Accuracy= 0.96875\n",
-      "Iter 74240, Minibatch Loss= 124.091103, Training Accuracy= 0.97656\n",
-      "Iter 75520, Minibatch Loss= 614.557251, Training Accuracy= 0.94531\n",
-      "Iter 76800, Minibatch Loss= 343.182983, Training Accuracy= 0.95312\n",
-      "Iter 78080, Minibatch Loss= 678.875183, Training Accuracy= 0.94531\n",
-      "Iter 79360, Minibatch Loss= 313.656494, Training Accuracy= 0.95312\n",
-      "Iter 80640, Minibatch Loss= 169.024185, Training Accuracy= 0.96094\n",
-      "Iter 81920, Minibatch Loss= 98.455017, Training Accuracy= 0.96875\n",
-      "Iter 83200, Minibatch Loss= 359.754517, Training Accuracy= 0.92188\n",
-      "Iter 84480, Minibatch Loss= 214.993103, Training Accuracy= 0.96875\n",
-      "Iter 85760, Minibatch Loss= 262.921265, Training Accuracy= 0.97656\n",
-      "Iter 87040, Minibatch Loss= 558.218445, Training Accuracy= 0.89844\n",
-      "Iter 88320, Minibatch Loss= 122.281952, Training Accuracy= 0.99219\n",
-      "Iter 89600, Minibatch Loss= 300.606689, Training Accuracy= 0.93750\n",
-      "Iter 90880, Minibatch Loss= 261.051025, Training Accuracy= 0.98438\n",
-      "Iter 92160, Minibatch Loss= 59.812164, Training Accuracy= 0.98438\n",
-      "Iter 93440, Minibatch Loss= 309.307312, Training Accuracy= 0.96875\n",
-      "Iter 94720, Minibatch Loss= 626.035706, Training Accuracy= 0.95312\n",
-      "Iter 96000, Minibatch Loss= 317.929260, Training Accuracy= 0.96875\n",
-      "Iter 97280, Minibatch Loss= 196.908218, Training Accuracy= 0.97656\n",
-      "Iter 98560, Minibatch Loss= 843.143250, Training Accuracy= 0.95312\n",
-      "Iter 99840, Minibatch Loss= 389.142761, Training Accuracy= 0.96875\n",
-      "Iter 101120, Minibatch Loss= 246.468994, Training Accuracy= 0.96094\n",
-      "Iter 102400, Minibatch Loss= 110.580948, Training Accuracy= 0.98438\n",
-      "Iter 103680, Minibatch Loss= 208.350586, Training Accuracy= 0.96875\n",
-      "Iter 104960, Minibatch Loss= 506.229462, Training Accuracy= 0.94531\n",
-      "Iter 106240, Minibatch Loss= 49.548233, Training Accuracy= 0.98438\n",
-      "Iter 107520, Minibatch Loss= 728.496582, Training Accuracy= 0.92969\n",
-      "Iter 108800, Minibatch Loss= 187.256622, Training Accuracy= 0.97656\n",
-      "Iter 110080, Minibatch Loss= 273.696899, Training Accuracy= 0.97656\n",
-      "Iter 111360, Minibatch Loss= 317.126678, Training Accuracy= 0.96094\n",
-      "Iter 112640, Minibatch Loss= 148.293365, Training Accuracy= 0.98438\n",
-      "Iter 113920, Minibatch Loss= 139.360168, Training Accuracy= 0.97656\n",
-      "Iter 115200, Minibatch Loss= 167.539093, Training Accuracy= 0.98438\n",
-      "Iter 116480, Minibatch Loss= 565.433594, Training Accuracy= 0.94531\n",
-      "Iter 117760, Minibatch Loss= 8.117203, Training Accuracy= 0.99219\n",
-      "Iter 119040, Minibatch Loss= 348.071472, Training Accuracy= 0.96875\n",
-      "Iter 120320, Minibatch Loss= 287.732849, Training Accuracy= 0.97656\n",
-      "Iter 121600, Minibatch Loss= 156.525284, Training Accuracy= 0.96875\n",
-      "Iter 122880, Minibatch Loss= 296.147339, Training Accuracy= 0.98438\n",
-      "Iter 124160, Minibatch Loss= 260.941956, Training Accuracy= 0.98438\n",
-      "Iter 125440, Minibatch Loss= 241.011719, Training Accuracy= 0.98438\n",
-      "Iter 126720, Minibatch Loss= 185.330444, Training Accuracy= 0.98438\n",
-      "Iter 128000, Minibatch Loss= 346.407013, Training Accuracy= 0.96875\n",
-      "Iter 129280, Minibatch Loss= 522.477173, Training Accuracy= 0.94531\n",
-      "Iter 130560, Minibatch Loss= 97.665955, Training Accuracy= 0.96094\n",
-      "Iter 131840, Minibatch Loss= 111.370262, Training Accuracy= 0.96875\n",
-      "Iter 133120, Minibatch Loss= 106.377136, Training Accuracy= 0.97656\n",
-      "Iter 134400, Minibatch Loss= 432.294983, Training Accuracy= 0.96094\n",
-      "Iter 135680, Minibatch Loss= 104.584610, Training Accuracy= 0.98438\n",
-      "Iter 136960, Minibatch Loss= 439.611053, Training Accuracy= 0.95312\n",
-      "Iter 138240, Minibatch Loss= 171.394562, Training Accuracy= 0.96875\n",
-      "Iter 139520, Minibatch Loss= 83.505905, Training Accuracy= 0.98438\n",
-      "Iter 140800, Minibatch Loss= 240.278427, Training Accuracy= 0.98438\n",
-      "Iter 142080, Minibatch Loss= 417.140320, Training Accuracy= 0.96094\n",
-      "Iter 143360, Minibatch Loss= 77.656067, Training Accuracy= 0.97656\n",
-      "Iter 144640, Minibatch Loss= 284.589844, Training Accuracy= 0.97656\n",
-      "Iter 145920, Minibatch Loss= 372.114288, Training Accuracy= 0.96875\n",
-      "Iter 147200, Minibatch Loss= 352.900024, Training Accuracy= 0.96094\n",
-      "Iter 148480, Minibatch Loss= 148.120621, Training Accuracy= 0.97656\n",
-      "Iter 149760, Minibatch Loss= 127.385742, Training Accuracy= 0.98438\n",
-      "Iter 151040, Minibatch Loss= 383.167175, Training Accuracy= 0.96094\n",
-      "Iter 152320, Minibatch Loss= 331.846649, Training Accuracy= 0.94531\n",
-      "Iter 153600, Minibatch Loss= 0.000000, Training Accuracy= 1.00000\n",
-      "Iter 154880, Minibatch Loss= 24.065147, Training Accuracy= 0.99219\n",
-      "Iter 156160, Minibatch Loss= 43.433868, Training Accuracy= 0.99219\n",
-      "Iter 157440, Minibatch Loss= 205.383972, Training Accuracy= 0.96875\n",
-      "Iter 158720, Minibatch Loss= 83.019257, Training Accuracy= 0.97656\n",
-      "Iter 160000, Minibatch Loss= 195.710556, Training Accuracy= 0.96875\n",
-      "Iter 161280, Minibatch Loss= 177.192932, Training Accuracy= 0.95312\n",
-      "Iter 162560, Minibatch Loss= 261.618713, Training Accuracy= 0.96875\n",
-      "Iter 163840, Minibatch Loss= 0.000000, Training Accuracy= 1.00000\n",
-      "Iter 165120, Minibatch Loss= 62.901100, Training Accuracy= 0.97656\n",
-      "Iter 166400, Minibatch Loss= 17.181839, Training Accuracy= 0.98438\n",
-      "Iter 167680, Minibatch Loss= 102.738960, Training Accuracy= 0.96875\n",
-      "Iter 168960, Minibatch Loss= 0.000000, Training Accuracy= 1.00000\n",
-      "Iter 170240, Minibatch Loss= 71.784363, Training Accuracy= 0.99219\n",
-      "Iter 171520, Minibatch Loss= 260.672852, Training Accuracy= 0.96875\n",
-      "Iter 172800, Minibatch Loss= 186.616119, Training Accuracy= 0.96094\n",
-      "Iter 174080, Minibatch Loss= 312.432312, Training Accuracy= 0.96875\n",
-      "Iter 175360, Minibatch Loss= 45.828953, Training Accuracy= 0.99219\n",
-      "Iter 176640, Minibatch Loss= 62.931808, Training Accuracy= 0.98438\n",
-      "Iter 177920, Minibatch Loss= 63.452362, Training Accuracy= 0.97656\n",
-      "Iter 179200, Minibatch Loss= 53.608818, Training Accuracy= 0.98438\n",
-      "Iter 180480, Minibatch Loss= 57.089508, Training Accuracy= 0.97656\n",
-      "Iter 181760, Minibatch Loss= 601.268799, Training Accuracy= 0.93750\n",
-      "Iter 183040, Minibatch Loss= 59.850044, Training Accuracy= 0.97656\n",
-      "Iter 184320, Minibatch Loss= 145.267883, Training Accuracy= 0.96875\n",
-      "Iter 185600, Minibatch Loss= 24.205322, Training Accuracy= 0.99219\n",
-      "Iter 186880, Minibatch Loss= 51.866646, Training Accuracy= 0.98438\n",
-      "Iter 188160, Minibatch Loss= 166.911987, Training Accuracy= 0.96875\n",
-      "Iter 189440, Minibatch Loss= 32.308147, Training Accuracy= 0.98438\n",
-      "Iter 190720, Minibatch Loss= 514.898071, Training Accuracy= 0.92188\n",
-      "Iter 192000, Minibatch Loss= 146.610031, Training Accuracy= 0.98438\n",
-      "Iter 193280, Minibatch Loss= 23.939758, Training Accuracy= 0.99219\n",
-      "Iter 194560, Minibatch Loss= 224.806641, Training Accuracy= 0.97656\n",
-      "Iter 195840, Minibatch Loss= 71.935089, Training Accuracy= 0.98438\n",
-      "Iter 197120, Minibatch Loss= 182.021210, Training Accuracy= 0.96875\n",
-      "Iter 198400, Minibatch Loss= 125.573784, Training Accuracy= 0.96875\n",
-      "Iter 199680, Minibatch Loss= 122.506104, Training Accuracy= 0.96875\n",
+      "Iter 1280, Minibatch Loss= 26574.855469, Training Accuracy= 0.25781\n",
+      "Iter 2560, Minibatch Loss= 11454.494141, Training Accuracy= 0.49219\n",
+      "Iter 3840, Minibatch Loss= 10070.515625, Training Accuracy= 0.55469\n",
+      "Iter 5120, Minibatch Loss= 4008.586426, Training Accuracy= 0.78125\n",
+      "Iter 6400, Minibatch Loss= 3148.004639, Training Accuracy= 0.80469\n",
+      "Iter 7680, Minibatch Loss= 6740.440430, Training Accuracy= 0.71875\n",
+      "Iter 8960, Minibatch Loss= 4103.991699, Training Accuracy= 0.80469\n",
+      "Iter 10240, Minibatch Loss= 2631.275391, Training Accuracy= 0.85938\n",
+      "Iter 11520, Minibatch Loss= 1428.798828, Training Accuracy= 0.91406\n",
+      "Iter 12800, Minibatch Loss= 3909.772705, Training Accuracy= 0.78906\n",
+      "Iter 14080, Minibatch Loss= 1423.095947, Training Accuracy= 0.88281\n",
+      "Iter 15360, Minibatch Loss= 1524.569824, Training Accuracy= 0.89062\n",
+      "Iter 16640, Minibatch Loss= 2234.539795, Training Accuracy= 0.86719\n",
+      "Iter 17920, Minibatch Loss= 933.932800, Training Accuracy= 0.90625\n",
+      "Iter 19200, Minibatch Loss= 2039.046021, Training Accuracy= 0.89062\n",
+      "Iter 20480, Minibatch Loss= 674.179932, Training Accuracy= 0.95312\n",
+      "Iter 21760, Minibatch Loss= 3778.958984, Training Accuracy= 0.82812\n",
+      "Iter 23040, Minibatch Loss= 1038.217773, Training Accuracy= 0.91406\n",
+      "Iter 24320, Minibatch Loss= 1689.513672, Training Accuracy= 0.89062\n",
+      "Iter 25600, Minibatch Loss= 1800.954956, Training Accuracy= 0.85938\n",
+      "Iter 26880, Minibatch Loss= 1086.292847, Training Accuracy= 0.90625\n",
+      "Iter 28160, Minibatch Loss= 656.042847, Training Accuracy= 0.94531\n",
+      "Iter 29440, Minibatch Loss= 1210.589844, Training Accuracy= 0.91406\n",
+      "Iter 30720, Minibatch Loss= 1099.606323, Training Accuracy= 0.90625\n",
+      "Iter 32000, Minibatch Loss= 1073.128174, Training Accuracy= 0.92969\n",
+      "Iter 33280, Minibatch Loss= 518.844543, Training Accuracy= 0.95312\n",
+      "Iter 34560, Minibatch Loss= 540.856689, Training Accuracy= 0.92188\n",
+      "Iter 35840, Minibatch Loss= 353.990906, Training Accuracy= 0.97656\n",
+      "Iter 37120, Minibatch Loss= 1488.962891, Training Accuracy= 0.91406\n",
+      "Iter 38400, Minibatch Loss= 231.191864, Training Accuracy= 0.98438\n",
+      "Iter 39680, Minibatch Loss= 171.154480, Training Accuracy= 0.98438\n",
+      "Iter 40960, Minibatch Loss= 2092.023682, Training Accuracy= 0.90625\n",
+      "Iter 42240, Minibatch Loss= 480.594299, Training Accuracy= 0.95312\n",
+      "Iter 43520, Minibatch Loss= 504.128143, Training Accuracy= 0.96875\n",
+      "Iter 44800, Minibatch Loss= 143.534485, Training Accuracy= 0.97656\n",
+      "Iter 46080, Minibatch Loss= 325.875580, Training Accuracy= 0.96094\n",
+      "Iter 47360, Minibatch Loss= 602.813049, Training Accuracy= 0.91406\n",
+      "Iter 48640, Minibatch Loss= 794.595093, Training Accuracy= 0.94531\n",
+      "Iter 49920, Minibatch Loss= 415.539032, Training Accuracy= 0.95312\n",
+      "Iter 51200, Minibatch Loss= 146.016022, Training Accuracy= 0.96094\n",
+      "Iter 52480, Minibatch Loss= 294.180786, Training Accuracy= 0.94531\n",
+      "Iter 53760, Minibatch Loss= 50.955730, Training Accuracy= 0.99219\n",
+      "Iter 55040, Minibatch Loss= 1026.607056, Training Accuracy= 0.92188\n",
+      "Iter 56320, Minibatch Loss= 283.756134, Training Accuracy= 0.96875\n",
+      "Iter 57600, Minibatch Loss= 691.538208, Training Accuracy= 0.95312\n",
+      "Iter 58880, Minibatch Loss= 491.075073, Training Accuracy= 0.96094\n",
+      "Iter 60160, Minibatch Loss= 571.951660, Training Accuracy= 0.95312\n",
+      "Iter 61440, Minibatch Loss= 284.041168, Training Accuracy= 0.97656\n",
+      "Iter 62720, Minibatch Loss= 1041.941528, Training Accuracy= 0.92969\n",
+      "Iter 64000, Minibatch Loss= 664.833923, Training Accuracy= 0.93750\n",
+      "Iter 65280, Minibatch Loss= 1582.112793, Training Accuracy= 0.88281\n",
+      "Iter 66560, Minibatch Loss= 783.135376, Training Accuracy= 0.94531\n",
+      "Iter 67840, Minibatch Loss= 245.942398, Training Accuracy= 0.96094\n",
+      "Iter 69120, Minibatch Loss= 752.858948, Training Accuracy= 0.96875\n",
+      "Iter 70400, Minibatch Loss= 623.243286, Training Accuracy= 0.94531\n",
+      "Iter 71680, Minibatch Loss= 846.498230, Training Accuracy= 0.93750\n",
+      "Iter 72960, Minibatch Loss= 586.516479, Training Accuracy= 0.95312\n",
+      "Iter 74240, Minibatch Loss= 92.774963, Training Accuracy= 0.98438\n",
+      "Iter 75520, Minibatch Loss= 644.039612, Training Accuracy= 0.95312\n",
+      "Iter 76800, Minibatch Loss= 693.247681, Training Accuracy= 0.96094\n",
+      "Iter 78080, Minibatch Loss= 466.491882, Training Accuracy= 0.96094\n",
+      "Iter 79360, Minibatch Loss= 964.212341, Training Accuracy= 0.93750\n",
+      "Iter 80640, Minibatch Loss= 230.451904, Training Accuracy= 0.97656\n",
+      "Iter 81920, Minibatch Loss= 280.434570, Training Accuracy= 0.95312\n",
+      "Iter 83200, Minibatch Loss= 213.208252, Training Accuracy= 0.97656\n",
+      "Iter 84480, Minibatch Loss= 774.836060, Training Accuracy= 0.94531\n",
+      "Iter 85760, Minibatch Loss= 164.687729, Training Accuracy= 0.96094\n",
+      "Iter 87040, Minibatch Loss= 419.967407, Training Accuracy= 0.96875\n",
+      "Iter 88320, Minibatch Loss= 160.920151, Training Accuracy= 0.96875\n",
+      "Iter 89600, Minibatch Loss= 586.063599, Training Accuracy= 0.96094\n",
+      "Iter 90880, Minibatch Loss= 345.598145, Training Accuracy= 0.96875\n",
+      "Iter 92160, Minibatch Loss= 931.361145, Training Accuracy= 0.92188\n",
+      "Iter 93440, Minibatch Loss= 170.107117, Training Accuracy= 0.97656\n",
+      "Iter 94720, Minibatch Loss= 497.162750, Training Accuracy= 0.93750\n",
+      "Iter 96000, Minibatch Loss= 906.600464, Training Accuracy= 0.94531\n",
+      "Iter 97280, Minibatch Loss= 303.382202, Training Accuracy= 0.92969\n",
+      "Iter 98560, Minibatch Loss= 509.161652, Training Accuracy= 0.97656\n",
+      "Iter 99840, Minibatch Loss= 359.561981, Training Accuracy= 0.97656\n",
+      "Iter 101120, Minibatch Loss= 136.516541, Training Accuracy= 0.97656\n",
+      "Iter 102400, Minibatch Loss= 517.199341, Training Accuracy= 0.96875\n",
+      "Iter 103680, Minibatch Loss= 487.793335, Training Accuracy= 0.95312\n",
+      "Iter 104960, Minibatch Loss= 407.351929, Training Accuracy= 0.96094\n",
+      "Iter 106240, Minibatch Loss= 70.495193, Training Accuracy= 0.98438\n",
+      "Iter 107520, Minibatch Loss= 344.783508, Training Accuracy= 0.96094\n",
+      "Iter 108800, Minibatch Loss= 242.682465, Training Accuracy= 0.95312\n",
+      "Iter 110080, Minibatch Loss= 169.181458, Training Accuracy= 0.96094\n",
+      "Iter 111360, Minibatch Loss= 152.638245, Training Accuracy= 0.98438\n",
+      "Iter 112640, Minibatch Loss= 170.795868, Training Accuracy= 0.96875\n",
+      "Iter 113920, Minibatch Loss= 133.262726, Training Accuracy= 0.98438\n",
+      "Iter 115200, Minibatch Loss= 296.063293, Training Accuracy= 0.95312\n",
+      "Iter 116480, Minibatch Loss= 254.247543, Training Accuracy= 0.96094\n",
+      "Iter 117760, Minibatch Loss= 506.795715, Training Accuracy= 0.94531\n",
+      "Iter 119040, Minibatch Loss= 446.006897, Training Accuracy= 0.96094\n",
+      "Iter 120320, Minibatch Loss= 149.467377, Training Accuracy= 0.97656\n",
+      "Iter 121600, Minibatch Loss= 52.783600, Training Accuracy= 0.98438\n",
+      "Iter 122880, Minibatch Loss= 49.041794, Training Accuracy= 0.98438\n",
+      "Iter 124160, Minibatch Loss= 184.371246, Training Accuracy= 0.97656\n",
+      "Iter 125440, Minibatch Loss= 129.838501, Training Accuracy= 0.97656\n",
+      "Iter 126720, Minibatch Loss= 288.006531, Training Accuracy= 0.96875\n",
+      "Iter 128000, Minibatch Loss= 187.284653, Training Accuracy= 0.97656\n",
+      "Iter 129280, Minibatch Loss= 197.969955, Training Accuracy= 0.96875\n",
+      "Iter 130560, Minibatch Loss= 299.969818, Training Accuracy= 0.96875\n",
+      "Iter 131840, Minibatch Loss= 537.602173, Training Accuracy= 0.96094\n",
+      "Iter 133120, Minibatch Loss= 4.519302, Training Accuracy= 0.99219\n",
+      "Iter 134400, Minibatch Loss= 133.264191, Training Accuracy= 0.97656\n",
+      "Iter 135680, Minibatch Loss= 89.662292, Training Accuracy= 0.97656\n",
+      "Iter 136960, Minibatch Loss= 107.774078, Training Accuracy= 0.96875\n",
+      "Iter 138240, Minibatch Loss= 335.904572, Training Accuracy= 0.96094\n",
+      "Iter 139520, Minibatch Loss= 457.494568, Training Accuracy= 0.96094\n",
+      "Iter 140800, Minibatch Loss= 259.131531, Training Accuracy= 0.95312\n",
+      "Iter 142080, Minibatch Loss= 152.205383, Training Accuracy= 0.96094\n",
+      "Iter 143360, Minibatch Loss= 252.535828, Training Accuracy= 0.95312\n",
+      "Iter 144640, Minibatch Loss= 109.477585, Training Accuracy= 0.96875\n",
+      "Iter 145920, Minibatch Loss= 24.468613, Training Accuracy= 0.99219\n",
+      "Iter 147200, Minibatch Loss= 51.722107, Training Accuracy= 0.97656\n",
+      "Iter 148480, Minibatch Loss= 69.715233, Training Accuracy= 0.97656\n",
+      "Iter 149760, Minibatch Loss= 405.289246, Training Accuracy= 0.92969\n",
+      "Iter 151040, Minibatch Loss= 282.976379, Training Accuracy= 0.95312\n",
+      "Iter 152320, Minibatch Loss= 134.991119, Training Accuracy= 0.97656\n",
+      "Iter 153600, Minibatch Loss= 491.618103, Training Accuracy= 0.92188\n",
+      "Iter 154880, Minibatch Loss= 154.299988, Training Accuracy= 0.99219\n",
+      "Iter 156160, Minibatch Loss= 79.480019, Training Accuracy= 0.96875\n",
+      "Iter 157440, Minibatch Loss= 68.093750, Training Accuracy= 0.99219\n",
+      "Iter 158720, Minibatch Loss= 459.739685, Training Accuracy= 0.92188\n",
+      "Iter 160000, Minibatch Loss= 168.076843, Training Accuracy= 0.94531\n",
+      "Iter 161280, Minibatch Loss= 256.141846, Training Accuracy= 0.97656\n",
+      "Iter 162560, Minibatch Loss= 236.400391, Training Accuracy= 0.94531\n",
+      "Iter 163840, Minibatch Loss= 177.011261, Training Accuracy= 0.96875\n",
+      "Iter 165120, Minibatch Loss= 48.583298, Training Accuracy= 0.97656\n",
+      "Iter 166400, Minibatch Loss= 413.800293, Training Accuracy= 0.96094\n",
+      "Iter 167680, Minibatch Loss= 209.587387, Training Accuracy= 0.96875\n",
+      "Iter 168960, Minibatch Loss= 239.407318, Training Accuracy= 0.98438\n",
+      "Iter 170240, Minibatch Loss= 183.567017, Training Accuracy= 0.96875\n",
+      "Iter 171520, Minibatch Loss= 87.937515, Training Accuracy= 0.96875\n",
+      "Iter 172800, Minibatch Loss= 203.777039, Training Accuracy= 0.98438\n",
+      "Iter 174080, Minibatch Loss= 566.378052, Training Accuracy= 0.94531\n",
+      "Iter 175360, Minibatch Loss= 325.170898, Training Accuracy= 0.95312\n",
+      "Iter 176640, Minibatch Loss= 300.142212, Training Accuracy= 0.97656\n",
+      "Iter 177920, Minibatch Loss= 205.370193, Training Accuracy= 0.95312\n",
+      "Iter 179200, Minibatch Loss= 5.594437, Training Accuracy= 0.99219\n",
+      "Iter 180480, Minibatch Loss= 110.732109, Training Accuracy= 0.98438\n",
+      "Iter 181760, Minibatch Loss= 33.320297, Training Accuracy= 0.99219\n",
+      "Iter 183040, Minibatch Loss= 6.885544, Training Accuracy= 0.99219\n",
+      "Iter 184320, Minibatch Loss= 221.144806, Training Accuracy= 0.96875\n",
+      "Iter 185600, Minibatch Loss= 365.337372, Training Accuracy= 0.94531\n",
+      "Iter 186880, Minibatch Loss= 186.558258, Training Accuracy= 0.96094\n",
+      "Iter 188160, Minibatch Loss= 149.720322, Training Accuracy= 0.98438\n",
+      "Iter 189440, Minibatch Loss= 105.281998, Training Accuracy= 0.97656\n",
+      "Iter 190720, Minibatch Loss= 289.980011, Training Accuracy= 0.96094\n",
+      "Iter 192000, Minibatch Loss= 214.382278, Training Accuracy= 0.96094\n",
+      "Iter 193280, Minibatch Loss= 461.044312, Training Accuracy= 0.93750\n",
+      "Iter 194560, Minibatch Loss= 138.653076, Training Accuracy= 0.98438\n",
+      "Iter 195840, Minibatch Loss= 112.004883, Training Accuracy= 0.98438\n",
+      "Iter 197120, Minibatch Loss= 212.691467, Training Accuracy= 0.97656\n",
+      "Iter 198400, Minibatch Loss= 57.642502, Training Accuracy= 0.97656\n",
+      "Iter 199680, Minibatch Loss= 80.503563, Training Accuracy= 0.96875\n",
       "Optimization Finished!\n",
       "Optimization Finished!\n",
-      "Testing Accuracy: 0.972656\n"
+      "Testing Accuracy: 0.984375\n"
      ]
      ]
     }
     }
    ],
    ],
@@ -361,6 +350,15 @@
    "                                      y: mnist.test.labels[:256],\n",
    "                                      keep_prob: 1.})"
   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {
@@ -372,14 +370,14 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 2.0
+    "version": 2
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython2",
-   "version": "2.7.11"
+   "version": "2.7.13"
   }
  },
  "nbformat": 4,

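Note on the evaluation hunk above: the feed dict pins `keep_prob: 1.` at test time, so dropout is disabled while accuracy is measured. A minimal sketch of that pattern, assuming TensorFlow 1.0; the toy graph and shapes here are illustrative, not the notebook's own:

```
import numpy as np
import tensorflow as tf

# Dropout strength is a placeholder, so one graph serves both phases.
x = tf.placeholder(tf.float32, [None, 4])
keep_prob = tf.placeholder(tf.float32)  # keep probability for dropout
w = tf.Variable(tf.random_normal([4, 2]))
logits = tf.matmul(tf.nn.dropout(x, keep_prob), w)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    batch = np.random.rand(3, 4).astype(np.float32)
    # A training step would feed keep_prob < 1 (e.g. 0.75); evaluation
    # feeds 1. so no units are dropped and no rescaling is applied.
    sess.run(logits, feed_dict={x: batch, keep_prob: 0.75})
    sess.run(logits, feed_dict={x: batch, keep_prob: 1.})
```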
+ 38 - 29
notebooks/3_NeuralNetworks/multilayer_perceptron.ipynb

@@ -29,17 +29,17 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+      "Extracting MNIST_data/train-images-idx3-ubyte.gz\n",
+      "Extracting MNIST_data/train-labels-idx1-ubyte.gz\n",
+      "Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Import MNIST data\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
-    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n",
+    "mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n",
    "\n",
    "import tensorflow as tf"
   ]
@@ -92,9 +92,9 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 5,
   "metadata": {
-    "collapsed": true
+    "collapsed": false
   },
   "outputs": [],
   "source": [
@@ -114,16 +114,16 @@
    "pred = multilayer_perceptron(x, weights, biases)\n",
    "\n",
    "# Define loss and optimizer\n",
-    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\n",
+    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n",
    "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n",
    "\n",
    "# Initializing the variables\n",
-    "init = tf.initialize_all_variables()"
+    "init = tf.global_variables_initializer()"
   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 6,
   "metadata": {
    "collapsed": false
   },
@@ -132,23 +132,23 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-      "Epoch: 0001 cost= 185.342230390\n",
-      "Epoch: 0002 cost= 44.266946572\n",
-      "Epoch: 0003 cost= 27.999560453\n",
-      "Epoch: 0004 cost= 19.655567043\n",
-      "Epoch: 0005 cost= 14.284429696\n",
-      "Epoch: 0006 cost= 10.640310403\n",
-      "Epoch: 0007 cost= 7.904047886\n",
-      "Epoch: 0008 cost= 5.989115090\n",
-      "Epoch: 0009 cost= 4.689374613\n",
-      "Epoch: 0010 cost= 3.455884229\n",
-      "Epoch: 0011 cost= 2.733002625\n",
-      "Epoch: 0012 cost= 2.101091420\n",
-      "Epoch: 0013 cost= 1.496508092\n",
-      "Epoch: 0014 cost= 1.245452015\n",
-      "Epoch: 0015 cost= 0.912072906\n",
+      "Epoch: 0001 cost= 173.056566575\n",
+      "Epoch: 0002 cost= 44.054413928\n",
+      "Epoch: 0003 cost= 27.455470655\n",
+      "Epoch: 0004 cost= 19.008652363\n",
+      "Epoch: 0005 cost= 13.654873594\n",
+      "Epoch: 0006 cost= 10.059267435\n",
+      "Epoch: 0007 cost= 7.436018432\n",
+      "Epoch: 0008 cost= 5.587794416\n",
+      "Epoch: 0009 cost= 4.209882509\n",
+      "Epoch: 0010 cost= 3.203879515\n",
+      "Epoch: 0011 cost= 2.319920681\n",
+      "Epoch: 0012 cost= 1.676204545\n",
+      "Epoch: 0013 cost= 1.248805338\n",
+      "Epoch: 0014 cost= 1.052676844\n",
+      "Epoch: 0015 cost= 0.890117338\n",
       "Optimization Finished!\n",
       "Optimization Finished!\n",
-      "Accuracy: 0.9422\n"
+      "Accuracy: 0.9459\n"
      ]
      ]
     }
     }
    ],
    ],
@@ -181,6 +181,15 @@
    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n",
    "    print \"Accuracy:\", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})"
   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {
@@ -192,16 +201,16 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 2.0
+    "version": 2
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython2",
-   "version": "2.7.11"
+   "version": "2.7.13"
   }
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}

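The two substitutions repeated through this notebook, `tf.initialize_all_variables()` → `tf.global_variables_initializer()` and positional → keyword arguments for the cross-entropy loss, are the core TF 1.0 breaking changes this PR tracks. A minimal sketch, assuming TensorFlow 1.0 (the toy logits and labels are illustrative):

```
import tensorflow as tf

pred = tf.constant([[2.0, 0.5, 0.1]])  # unscaled logits
y = tf.constant([[1.0, 0.0, 0.0]])     # one-hot labels

# TF 1.0 reordered this op's signature and added a sentinel argument,
# so logits and labels must now be passed by keyword.
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))

init = tf.global_variables_initializer()  # replaces tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    print(sess.run(cost))
```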
+ 119 - 113
notebooks/3_NeuralNetworks/recurrent_network.ipynb

@@ -1,10 +1,10 @@
 {
  "cells": [
   {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
    "source": [
     "'''\n",
     "A Recurrent Neural Network (LSTM) implementation example using TensorFlow library.\n",
@@ -18,35 +18,26 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
-     ]
-    }
-   ],
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
    "source": [
    "source": [
     "import tensorflow as tf\n",
     "import tensorflow as tf\n",
-    "from tensorflow.python.ops import rnn, rnn_cell\n",
+    "from tensorflow.contrib import rnn\n",
     "import numpy as np\n",
     "import numpy as np\n",
     "\n",
     "\n",
     "# Import MINST data\n",
     "# Import MINST data\n",
     "from tensorflow.examples.tutorials.mnist import input_data\n",
     "from tensorflow.examples.tutorials.mnist import input_data\n",
-    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+    "mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)"
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
    "source": [
    "source": [
     "'''\n",
     "'''\n",
     "To classify images using a reccurent neural network, we consider every image\n",
     "To classify images using a reccurent neural network, we consider every image\n",
@@ -58,7 +49,9 @@
   {
    "cell_type": "code",
    "execution_count": 2,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "outputs": [],
    "source": [
    "source": [
     "# Parameters\n",
     "# Parameters\n",
@@ -89,7 +82,9 @@
   {
    "cell_type": "code",
    "execution_count": 3,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "outputs": [],
    "source": [
    "source": [
     "def RNN(x, weights, biases):\n",
     "def RNN(x, weights, biases):\n",
@@ -103,13 +98,13 @@
     "    # Reshaping to (n_steps*batch_size, n_input)\n",
     "    x = tf.reshape(x, [-1, n_input])\n",
     "    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)\n",
-    "    x = tf.split(0, n_steps, x)\n",
+    "    x = tf.split(x, n_steps, 0)\n",
     "\n",
     "    # Define a lstm cell with tensorflow\n",
-    "    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)\n",
+    "    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)\n",
     "\n",
     "    # Get lstm cell output\n",
-    "    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)\n",
+    "    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)\n",
     "\n",
     "    # Linear activation, using rnn inner loop last output\n",
     "    return tf.matmul(outputs[-1], weights['out']) + biases['out']\n",
@@ -117,7 +112,7 @@
     "pred = RNN(x, weights, biases)\n",
     "\n",
     "# Define loss and optimizer\n",
-    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\n",
+    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n",
     "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n",
     "\n",
     "# Evaluate model\n",
@@ -125,98 +120,100 @@
     "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
     "\n",
     "# Initializing the variables\n",
-    "init = tf.initialize_all_variables()"
+    "init = tf.global_variables_initializer()"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 4,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [
    "outputs": [
     {
     {
      "name": "stdout",
      "name": "stdout",
      "output_type": "stream",
      "output_type": "stream",
      "text": [
      "text": [
-      "Iter 1280, Minibatch Loss= 1.538532, Training Accuracy= 0.49219\n",
-      "Iter 2560, Minibatch Loss= 1.462834, Training Accuracy= 0.50781\n",
-      "Iter 3840, Minibatch Loss= 1.048393, Training Accuracy= 0.66406\n",
-      "Iter 5120, Minibatch Loss= 0.889872, Training Accuracy= 0.71875\n",
-      "Iter 6400, Minibatch Loss= 0.681855, Training Accuracy= 0.76562\n",
-      "Iter 7680, Minibatch Loss= 0.987207, Training Accuracy= 0.69531\n",
-      "Iter 8960, Minibatch Loss= 0.759543, Training Accuracy= 0.71094\n",
-      "Iter 10240, Minibatch Loss= 0.557055, Training Accuracy= 0.80469\n",
-      "Iter 11520, Minibatch Loss= 0.371352, Training Accuracy= 0.89844\n",
-      "Iter 12800, Minibatch Loss= 0.661293, Training Accuracy= 0.80469\n",
-      "Iter 14080, Minibatch Loss= 0.474259, Training Accuracy= 0.86719\n",
-      "Iter 15360, Minibatch Loss= 0.328436, Training Accuracy= 0.88281\n",
-      "Iter 16640, Minibatch Loss= 0.348017, Training Accuracy= 0.93750\n",
-      "Iter 17920, Minibatch Loss= 0.340086, Training Accuracy= 0.88281\n",
-      "Iter 19200, Minibatch Loss= 0.261532, Training Accuracy= 0.89844\n",
-      "Iter 20480, Minibatch Loss= 0.161785, Training Accuracy= 0.94531\n",
-      "Iter 21760, Minibatch Loss= 0.419619, Training Accuracy= 0.83594\n",
-      "Iter 23040, Minibatch Loss= 0.120714, Training Accuracy= 0.95312\n",
-      "Iter 24320, Minibatch Loss= 0.339519, Training Accuracy= 0.89062\n",
-      "Iter 25600, Minibatch Loss= 0.405463, Training Accuracy= 0.88281\n",
-      "Iter 26880, Minibatch Loss= 0.172193, Training Accuracy= 0.95312\n",
-      "Iter 28160, Minibatch Loss= 0.256769, Training Accuracy= 0.91406\n",
-      "Iter 29440, Minibatch Loss= 0.247753, Training Accuracy= 0.91406\n",
-      "Iter 30720, Minibatch Loss= 0.230820, Training Accuracy= 0.91406\n",
-      "Iter 32000, Minibatch Loss= 0.216861, Training Accuracy= 0.93750\n",
-      "Iter 33280, Minibatch Loss= 0.236337, Training Accuracy= 0.89062\n",
-      "Iter 34560, Minibatch Loss= 0.252351, Training Accuracy= 0.93750\n",
-      "Iter 35840, Minibatch Loss= 0.180090, Training Accuracy= 0.92188\n",
-      "Iter 37120, Minibatch Loss= 0.304125, Training Accuracy= 0.91406\n",
-      "Iter 38400, Minibatch Loss= 0.114474, Training Accuracy= 0.96094\n",
-      "Iter 39680, Minibatch Loss= 0.158405, Training Accuracy= 0.96875\n",
-      "Iter 40960, Minibatch Loss= 0.285858, Training Accuracy= 0.92188\n",
-      "Iter 42240, Minibatch Loss= 0.134199, Training Accuracy= 0.96094\n",
-      "Iter 43520, Minibatch Loss= 0.235847, Training Accuracy= 0.92969\n",
-      "Iter 44800, Minibatch Loss= 0.155971, Training Accuracy= 0.94531\n",
-      "Iter 46080, Minibatch Loss= 0.061549, Training Accuracy= 0.99219\n",
-      "Iter 47360, Minibatch Loss= 0.232569, Training Accuracy= 0.94531\n",
-      "Iter 48640, Minibatch Loss= 0.270348, Training Accuracy= 0.91406\n",
-      "Iter 49920, Minibatch Loss= 0.202416, Training Accuracy= 0.92188\n",
-      "Iter 51200, Minibatch Loss= 0.113857, Training Accuracy= 0.96094\n",
-      "Iter 52480, Minibatch Loss= 0.137900, Training Accuracy= 0.94531\n",
-      "Iter 53760, Minibatch Loss= 0.052416, Training Accuracy= 0.98438\n",
-      "Iter 55040, Minibatch Loss= 0.312064, Training Accuracy= 0.91406\n",
-      "Iter 56320, Minibatch Loss= 0.144335, Training Accuracy= 0.93750\n",
-      "Iter 57600, Minibatch Loss= 0.114723, Training Accuracy= 0.96875\n",
-      "Iter 58880, Minibatch Loss= 0.193597, Training Accuracy= 0.96094\n",
-      "Iter 60160, Minibatch Loss= 0.110877, Training Accuracy= 0.95312\n",
-      "Iter 61440, Minibatch Loss= 0.119864, Training Accuracy= 0.96094\n",
-      "Iter 62720, Minibatch Loss= 0.118780, Training Accuracy= 0.94531\n",
-      "Iter 64000, Minibatch Loss= 0.082259, Training Accuracy= 0.97656\n",
-      "Iter 65280, Minibatch Loss= 0.087364, Training Accuracy= 0.97656\n",
-      "Iter 66560, Minibatch Loss= 0.207975, Training Accuracy= 0.92969\n",
-      "Iter 67840, Minibatch Loss= 0.120612, Training Accuracy= 0.96875\n",
-      "Iter 69120, Minibatch Loss= 0.070608, Training Accuracy= 0.96875\n",
-      "Iter 70400, Minibatch Loss= 0.100786, Training Accuracy= 0.96094\n",
-      "Iter 71680, Minibatch Loss= 0.114746, Training Accuracy= 0.94531\n",
-      "Iter 72960, Minibatch Loss= 0.083427, Training Accuracy= 0.96875\n",
-      "Iter 74240, Minibatch Loss= 0.089978, Training Accuracy= 0.96094\n",
-      "Iter 75520, Minibatch Loss= 0.195322, Training Accuracy= 0.94531\n",
-      "Iter 76800, Minibatch Loss= 0.161109, Training Accuracy= 0.96094\n",
-      "Iter 78080, Minibatch Loss= 0.169762, Training Accuracy= 0.94531\n",
-      "Iter 79360, Minibatch Loss= 0.054240, Training Accuracy= 0.98438\n",
-      "Iter 80640, Minibatch Loss= 0.160100, Training Accuracy= 0.95312\n",
-      "Iter 81920, Minibatch Loss= 0.110728, Training Accuracy= 0.96875\n",
-      "Iter 83200, Minibatch Loss= 0.054918, Training Accuracy= 0.98438\n",
-      "Iter 84480, Minibatch Loss= 0.104170, Training Accuracy= 0.96875\n",
-      "Iter 85760, Minibatch Loss= 0.071871, Training Accuracy= 0.97656\n",
-      "Iter 87040, Minibatch Loss= 0.170529, Training Accuracy= 0.96094\n",
-      "Iter 88320, Minibatch Loss= 0.087350, Training Accuracy= 0.96875\n",
-      "Iter 89600, Minibatch Loss= 0.079943, Training Accuracy= 0.96875\n",
-      "Iter 90880, Minibatch Loss= 0.128451, Training Accuracy= 0.92969\n",
-      "Iter 92160, Minibatch Loss= 0.046963, Training Accuracy= 0.98438\n",
-      "Iter 93440, Minibatch Loss= 0.162998, Training Accuracy= 0.96875\n",
-      "Iter 94720, Minibatch Loss= 0.122588, Training Accuracy= 0.96094\n",
-      "Iter 96000, Minibatch Loss= 0.073954, Training Accuracy= 0.97656\n",
-      "Iter 97280, Minibatch Loss= 0.130790, Training Accuracy= 0.96094\n",
-      "Iter 98560, Minibatch Loss= 0.067689, Training Accuracy= 0.97656\n",
-      "Iter 99840, Minibatch Loss= 0.186411, Training Accuracy= 0.92188\n",
+      "Iter 1280, Minibatch Loss= 1.576423, Training Accuracy= 0.51562\n",
+      "Iter 2560, Minibatch Loss= 1.450179, Training Accuracy= 0.53906\n",
+      "Iter 3840, Minibatch Loss= 1.160066, Training Accuracy= 0.64844\n",
+      "Iter 5120, Minibatch Loss= 0.898589, Training Accuracy= 0.73438\n",
+      "Iter 6400, Minibatch Loss= 0.685712, Training Accuracy= 0.75781\n",
+      "Iter 7680, Minibatch Loss= 1.085666, Training Accuracy= 0.64844\n",
+      "Iter 8960, Minibatch Loss= 0.681488, Training Accuracy= 0.73438\n",
+      "Iter 10240, Minibatch Loss= 0.557049, Training Accuracy= 0.82812\n",
+      "Iter 11520, Minibatch Loss= 0.340857, Training Accuracy= 0.92188\n",
+      "Iter 12800, Minibatch Loss= 0.596482, Training Accuracy= 0.78906\n",
+      "Iter 14080, Minibatch Loss= 0.486564, Training Accuracy= 0.84375\n",
+      "Iter 15360, Minibatch Loss= 0.302493, Training Accuracy= 0.90625\n",
+      "Iter 16640, Minibatch Loss= 0.334277, Training Accuracy= 0.92188\n",
+      "Iter 17920, Minibatch Loss= 0.222026, Training Accuracy= 0.90625\n",
+      "Iter 19200, Minibatch Loss= 0.228581, Training Accuracy= 0.92188\n",
+      "Iter 20480, Minibatch Loss= 0.150356, Training Accuracy= 0.96094\n",
+      "Iter 21760, Minibatch Loss= 0.415417, Training Accuracy= 0.86719\n",
+      "Iter 23040, Minibatch Loss= 0.159742, Training Accuracy= 0.94531\n",
+      "Iter 24320, Minibatch Loss= 0.333764, Training Accuracy= 0.89844\n",
+      "Iter 25600, Minibatch Loss= 0.379070, Training Accuracy= 0.88281\n",
+      "Iter 26880, Minibatch Loss= 0.241612, Training Accuracy= 0.91406\n",
+      "Iter 28160, Minibatch Loss= 0.200397, Training Accuracy= 0.93750\n",
+      "Iter 29440, Minibatch Loss= 0.197994, Training Accuracy= 0.93750\n",
+      "Iter 30720, Minibatch Loss= 0.330214, Training Accuracy= 0.89062\n",
+      "Iter 32000, Minibatch Loss= 0.174626, Training Accuracy= 0.92969\n",
+      "Iter 33280, Minibatch Loss= 0.202369, Training Accuracy= 0.93750\n",
+      "Iter 34560, Minibatch Loss= 0.240835, Training Accuracy= 0.94531\n",
+      "Iter 35840, Minibatch Loss= 0.207867, Training Accuracy= 0.93750\n",
+      "Iter 37120, Minibatch Loss= 0.313306, Training Accuracy= 0.90625\n",
+      "Iter 38400, Minibatch Loss= 0.089850, Training Accuracy= 0.96875\n",
+      "Iter 39680, Minibatch Loss= 0.184803, Training Accuracy= 0.92188\n",
+      "Iter 40960, Minibatch Loss= 0.236523, Training Accuracy= 0.92969\n",
+      "Iter 42240, Minibatch Loss= 0.174834, Training Accuracy= 0.94531\n",
+      "Iter 43520, Minibatch Loss= 0.127905, Training Accuracy= 0.93750\n",
+      "Iter 44800, Minibatch Loss= 0.120045, Training Accuracy= 0.96875\n",
+      "Iter 46080, Minibatch Loss= 0.068337, Training Accuracy= 0.98438\n",
+      "Iter 47360, Minibatch Loss= 0.141118, Training Accuracy= 0.95312\n",
+      "Iter 48640, Minibatch Loss= 0.182404, Training Accuracy= 0.92188\n",
+      "Iter 49920, Minibatch Loss= 0.176778, Training Accuracy= 0.93750\n",
+      "Iter 51200, Minibatch Loss= 0.098927, Training Accuracy= 0.97656\n",
+      "Iter 52480, Minibatch Loss= 0.158776, Training Accuracy= 0.96094\n",
+      "Iter 53760, Minibatch Loss= 0.031863, Training Accuracy= 0.99219\n",
+      "Iter 55040, Minibatch Loss= 0.101799, Training Accuracy= 0.96094\n",
+      "Iter 56320, Minibatch Loss= 0.176387, Training Accuracy= 0.96094\n",
+      "Iter 57600, Minibatch Loss= 0.096277, Training Accuracy= 0.96875\n",
+      "Iter 58880, Minibatch Loss= 0.137416, Training Accuracy= 0.94531\n",
+      "Iter 60160, Minibatch Loss= 0.062801, Training Accuracy= 0.97656\n",
+      "Iter 61440, Minibatch Loss= 0.036346, Training Accuracy= 0.98438\n",
+      "Iter 62720, Minibatch Loss= 0.153030, Training Accuracy= 0.92969\n",
+      "Iter 64000, Minibatch Loss= 0.117716, Training Accuracy= 0.95312\n",
+      "Iter 65280, Minibatch Loss= 0.048387, Training Accuracy= 0.99219\n",
+      "Iter 66560, Minibatch Loss= 0.070802, Training Accuracy= 0.97656\n",
+      "Iter 67840, Minibatch Loss= 0.221085, Training Accuracy= 0.96875\n",
+      "Iter 69120, Minibatch Loss= 0.184049, Training Accuracy= 0.93750\n",
+      "Iter 70400, Minibatch Loss= 0.094883, Training Accuracy= 0.95312\n",
+      "Iter 71680, Minibatch Loss= 0.087278, Training Accuracy= 0.96875\n",
+      "Iter 72960, Minibatch Loss= 0.153267, Training Accuracy= 0.95312\n",
+      "Iter 74240, Minibatch Loss= 0.161794, Training Accuracy= 0.94531\n",
+      "Iter 75520, Minibatch Loss= 0.103779, Training Accuracy= 0.96875\n",
+      "Iter 76800, Minibatch Loss= 0.165586, Training Accuracy= 0.96094\n",
+      "Iter 78080, Minibatch Loss= 0.137721, Training Accuracy= 0.95312\n",
+      "Iter 79360, Minibatch Loss= 0.124014, Training Accuracy= 0.96094\n",
+      "Iter 80640, Minibatch Loss= 0.051460, Training Accuracy= 0.99219\n",
+      "Iter 81920, Minibatch Loss= 0.185836, Training Accuracy= 0.96094\n",
+      "Iter 83200, Minibatch Loss= 0.147694, Training Accuracy= 0.94531\n",
+      "Iter 84480, Minibatch Loss= 0.061550, Training Accuracy= 0.98438\n",
+      "Iter 85760, Minibatch Loss= 0.093457, Training Accuracy= 0.96875\n",
+      "Iter 87040, Minibatch Loss= 0.094497, Training Accuracy= 0.98438\n",
+      "Iter 88320, Minibatch Loss= 0.093934, Training Accuracy= 0.96094\n",
+      "Iter 89600, Minibatch Loss= 0.061550, Training Accuracy= 0.96875\n",
+      "Iter 90880, Minibatch Loss= 0.082452, Training Accuracy= 0.97656\n",
+      "Iter 92160, Minibatch Loss= 0.087423, Training Accuracy= 0.97656\n",
+      "Iter 93440, Minibatch Loss= 0.032694, Training Accuracy= 0.99219\n",
+      "Iter 94720, Minibatch Loss= 0.069597, Training Accuracy= 0.97656\n",
+      "Iter 96000, Minibatch Loss= 0.193636, Training Accuracy= 0.96094\n",
+      "Iter 97280, Minibatch Loss= 0.134405, Training Accuracy= 0.96094\n",
+      "Iter 98560, Minibatch Loss= 0.072992, Training Accuracy= 0.96875\n",
+      "Iter 99840, Minibatch Loss= 0.041049, Training Accuracy= 0.99219\n",
       "Optimization Finished!\n",
       "Optimization Finished!\n",
-      "Testing Accuracy: 0.976562\n"
+      "Testing Accuracy: 0.960938\n"
      ]
      ]
     }
     }
    ],
    ],
@@ -250,6 +247,15 @@
    "    print \"Testing Accuracy:\", \\\n",
    "        sess.run(accuracy, feed_dict={x: test_data, y: test_label})"
   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {
@@ -261,14 +267,14 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 2.0
+    "version": 2
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython2",
-   "version": "2.7.11"
+   "version": "2.7.13"
   }
  },
  "nbformat": 4,

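This file collects the RNN-related renames: `tf.split` now takes the tensor first and the split axis last, the cell classes moved to `tf.contrib.rnn`, and `rnn.rnn` became `rnn.static_rnn`. A minimal sketch of the migrated input pipeline, assuming TensorFlow 1.0 and the notebook's 28-step, 28-input MNIST setup (the `n_hidden` value here is illustrative):

```
import tensorflow as tf
from tensorflow.contrib import rnn  # replaces tensorflow.python.ops rnn/rnn_cell

n_steps, n_input, n_hidden = 28, 28, 128
x = tf.placeholder(tf.float32, [None, n_steps, n_input])

# Permute to (n_steps, batch, n_input), flatten, then split into a list
# of n_steps tensors; TF 1.0 moved the axis argument of tf.split last.
x_steps = tf.split(tf.reshape(tf.transpose(x, [1, 0, 2]), [-1, n_input]),
                   n_steps, 0)  # was tf.split(0, n_steps, ...)

lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)  # was rnn_cell.BasicLSTMCell
outputs, states = rnn.static_rnn(lstm_cell, x_steps, dtype=tf.float32)  # was rnn.rnn
```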
+ 28 - 30
notebooks/4_Utils/save_restore_model.ipynb

@@ -29,26 +29,26 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+      "Extracting MNIST_data/train-images-idx3-ubyte.gz\n",
+      "Extracting MNIST_data/train-labels-idx1-ubyte.gz\n",
+      "Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Import MNIST data\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
-    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n",
+    "mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n",
    "\n",
    "import tensorflow as tf"
   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 3,
   "metadata": {
-    "collapsed": true
+    "collapsed": false
   },
   "outputs": [],
   "source": [
@@ -97,16 +97,16 @@
    "pred = multilayer_perceptron(x, weights, biases)\n",
    "\n",
    "# Define loss and optimizer\n",
-    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\n",
+    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n",
    "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n",
    "\n",
    "# Initializing the variables\n",
-    "init = tf.initialize_all_variables()"
+    "init = tf.global_variables_initializer()"
   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 4,
   "metadata": {
    "collapsed": true
   },
@@ -118,7 +118,7 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 5,
   "metadata": {
    "collapsed": false
   },
@@ -128,11 +128,11 @@
     "output_type": "stream",
     "text": [
      "Starting 1st session...\n",
-      "Epoch: 0001 cost= 182.770135574\n",
-      "Epoch: 0002 cost= 44.863718596\n",
-      "Epoch: 0003 cost= 27.965412349\n",
+      "Epoch: 0001 cost= 187.778896380\n",
+      "Epoch: 0002 cost= 42.367902536\n",
+      "Epoch: 0003 cost= 26.488964058\n",
       "First Optimization Finished!\n",
       "First Optimization Finished!\n",
-      "Accuracy: 0.906\n",
+      "Accuracy: 0.9075\n",
       "Model saved in file: /tmp/model.ckpt\n"
       "Model saved in file: /tmp/model.ckpt\n"
      ]
      ]
     }
     }
@@ -175,7 +175,7 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 6,
   "metadata": {
    "collapsed": false
   },
@@ -186,15 +186,15 @@
      "text": [
      "Starting 2nd session...\n",
      "Model restored from file: /tmp/model.ckpt\n",
-      "Epoch: 0001 cost= 19.658836002\n",
-      "Epoch: 0002 cost= 14.354811554\n",
-      "Epoch: 0003 cost= 10.580801367\n",
-      "Epoch: 0004 cost= 8.012172253\n",
-      "Epoch: 0005 cost= 5.985675981\n",
-      "Epoch: 0006 cost= 4.572637980\n",
-      "Epoch: 0007 cost= 3.329074899\n",
+      "Epoch: 0001 cost= 18.292712951\n",
+      "Epoch: 0002 cost= 13.404136196\n",
+      "Epoch: 0003 cost= 9.855191723\n",
+      "Epoch: 0004 cost= 7.276933088\n",
+      "Epoch: 0005 cost= 5.564581285\n",
+      "Epoch: 0006 cost= 4.165259939\n",
+      "Epoch: 0007 cost= 3.139393926\n",
       "Second Optimization Finished!\n",
       "Second Optimization Finished!\n",
-      "Accuracy: 0.9371\n"
+      "Accuracy: 0.9385\n"
      ]
      ]
     }
     }
    ],
    ],
@@ -242,9 +242,7 @@
    "collapsed": true
   },
   "outputs": [],
-   "source": [
-    ""
-   ]
+   "source": []
   }
  ],
  "metadata": {
@@ -256,16 +254,16 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 2.0
+    "version": 2
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython2",
-   "version": "2.7.11"
+   "version": "2.7.13"
   }
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}

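The outputs above trace a save-then-restore cycle around `/tmp/model.ckpt`; the `tf.train.Saver` API itself is unchanged by this refactor. A minimal sketch of that cycle, assuming TensorFlow 1.0, with an illustrative variable:

```
import tensorflow as tf

v = tf.Variable(42.0, name="v")
init = tf.global_variables_initializer()
saver = tf.train.Saver()  # saves/restores all variables by default

# First session: initialize, train (omitted here), then checkpoint.
with tf.Session() as sess:
    sess.run(init)
    save_path = saver.save(sess, "/tmp/model.ckpt")

# Second session: restore instead of re-initializing, then keep training.
with tf.Session() as sess:
    saver.restore(sess, "/tmp/model.ckpt")
    print(sess.run(v))  # 42.0, read back from the checkpoint
```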
+ 39 - 50
notebooks/4_Utils/tensorboard_basic.ipynb

@@ -20,33 +20,22 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
-      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
    "source": [
     "import tensorflow as tf\n",
     "import tensorflow as tf\n",
     "\n",
     "\n",
     "# Import MINST data\n",
     "# Import MINST data\n",
     "from tensorflow.examples.tutorials.mnist import input_data\n",
     "from tensorflow.examples.tutorials.mnist import input_data\n",
-    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+    "mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)"
    ]
    ]
   },
   },
   {
   {
    "cell_type": "code",
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 6,
    "metadata": {
    "metadata": {
     "collapsed": true
     "collapsed": true
    },
    },
@@ -72,9 +61,9 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 9,
   "metadata": {
-    "collapsed": true
+    "collapsed": false
   },
   "outputs": [],
   "source": [
@@ -95,19 +84,19 @@
    "    acc = tf.reduce_mean(tf.cast(acc, tf.float32))\n",
    "\n",
    "# Initializing the variables\n",
-    "init = tf.initialize_all_variables()\n",
+    "init = tf.global_variables_initializer()\n",
    "\n",
    "# Create a summary to monitor cost tensor\n",
-    "tf.scalar_summary(\"loss\", cost)\n",
+    "tf.summary.scalar(\"loss\", cost)\n",
    "# Create a summary to monitor accuracy tensor\n",
-    "tf.scalar_summary(\"accuracy\", acc)\n",
+    "tf.summary.scalar(\"accuracy\", acc)\n",
    "# Merge all summaries into a single op\n",
-    "merged_summary_op = tf.merge_all_summaries()"
+    "merged_summary_op = tf.summary.merge_all()"
   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 11,
    "metadata": {
    "metadata": {
     "collapsed": false
     "collapsed": false
    },
    },
@@ -116,31 +105,31 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-      "Epoch: 0001 cost= 1.182138957\n",
-      "Epoch: 0002 cost= 0.664735104\n",
-      "Epoch: 0003 cost= 0.552622685\n",
-      "Epoch: 0004 cost= 0.498596912\n",
-      "Epoch: 0005 cost= 0.465510372\n",
-      "Epoch: 0006 cost= 0.442504281\n",
-      "Epoch: 0007 cost= 0.425473650\n",
-      "Epoch: 0008 cost= 0.412175615\n",
-      "Epoch: 0009 cost= 0.401374554\n",
-      "Epoch: 0010 cost= 0.392403109\n",
-      "Epoch: 0011 cost= 0.384748503\n",
-      "Epoch: 0012 cost= 0.378154479\n",
-      "Epoch: 0013 cost= 0.372405099\n",
-      "Epoch: 0014 cost= 0.367272844\n",
-      "Epoch: 0015 cost= 0.362745077\n",
-      "Epoch: 0016 cost= 0.358575674\n",
-      "Epoch: 0017 cost= 0.354862829\n",
-      "Epoch: 0018 cost= 0.351437834\n",
-      "Epoch: 0019 cost= 0.348300697\n",
-      "Epoch: 0020 cost= 0.345401101\n",
-      "Epoch: 0021 cost= 0.342762216\n",
-      "Epoch: 0022 cost= 0.340199728\n",
-      "Epoch: 0023 cost= 0.337916089\n",
-      "Epoch: 0024 cost= 0.335764083\n",
-      "Epoch: 0025 cost= 0.333645939\n",
+      "Epoch: 0001 cost= 1.182138961\n",
+      "Epoch: 0002 cost= 0.664609327\n",
+      "Epoch: 0003 cost= 0.552565036\n",
+      "Epoch: 0004 cost= 0.498541865\n",
+      "Epoch: 0005 cost= 0.465393374\n",
+      "Epoch: 0006 cost= 0.442491178\n",
+      "Epoch: 0007 cost= 0.425474149\n",
+      "Epoch: 0008 cost= 0.412152022\n",
+      "Epoch: 0009 cost= 0.401320939\n",
+      "Epoch: 0010 cost= 0.392305281\n",
+      "Epoch: 0011 cost= 0.384732356\n",
+      "Epoch: 0012 cost= 0.378109478\n",
+      "Epoch: 0013 cost= 0.372409370\n",
+      "Epoch: 0014 cost= 0.367236996\n",
+      "Epoch: 0015 cost= 0.362727492\n",
+      "Epoch: 0016 cost= 0.358627345\n",
+      "Epoch: 0017 cost= 0.354815522\n",
+      "Epoch: 0018 cost= 0.351413656\n",
+      "Epoch: 0019 cost= 0.348314827\n",
+      "Epoch: 0020 cost= 0.345429416\n",
+      "Epoch: 0021 cost= 0.342749324\n",
+      "Epoch: 0022 cost= 0.340224642\n",
+      "Epoch: 0023 cost= 0.337897302\n",
+      "Epoch: 0024 cost= 0.335720168\n",
+      "Epoch: 0025 cost= 0.333691911\n",
       "Optimization Finished!\n",
       "Optimization Finished!\n",
       "Accuracy: 0.9143\n",
       "Accuracy: 0.9143\n",
       "Run the command line:\n",
       "Run the command line:\n",
@@ -155,7 +144,7 @@
    "    sess.run(init)\n",
    "\n",
    "    # op to write logs to Tensorboard\n",
-    "    summary_writer = tf.train.SummaryWriter(logs_path, graph=tf.get_default_graph())\n",
+    "    summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())\n",
     "\n",
     "\n",
     "    # Training cycle\n",
     "    # Training cycle\n",
     "    for epoch in range(training_epochs):\n",
     "    for epoch in range(training_epochs):\n",
@@ -234,7 +223,7 @@
 ],
 "metadata": {
  "kernelspec": {
-   "display_name": "IPython (Python 2.7)",
+   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
@@ -248,7 +237,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
-   "version": "2.7.11"
+   "version": "2.7.13"
  }
 },
 "nbformat": 4,