Aymeric Damien 5 years ago
parent
commit
3a767b1e71
72 changed files with 14174 additions and 48 deletions
  1. README.md (+81, -48)
  2. examples/README.md (+5, -0)
  3. notebooks/README.md (+5, -0)
  4. tensorflow_v1/README.md (+93, -0)
  5. tensorflow_v1/examples/1_Introduction/basic_eager_api.py (+68, -0)
  6. tensorflow_v1/examples/1_Introduction/basic_operations.py (+75, -0)
  7. tensorflow_v1/examples/1_Introduction/helloworld.py (+25, -0)
  8. tensorflow_v1/examples/2_BasicModels/gradient_boosted_decision_tree.py (+85, -0)
  9. tensorflow_v1/examples/2_BasicModels/kmeans.py (+93, -0)
  10. tensorflow_v1/examples/2_BasicModels/linear_regression.py (+89, -0)
  11. tensorflow_v1/examples/2_BasicModels/linear_regression_eager_api.py (+69, -0)
  12. tensorflow_v1/examples/2_BasicModels/logistic_regression.py (+71, -0)
  13. tensorflow_v1/examples/2_BasicModels/logistic_regression_eager_api.py (+105, -0)
  14. tensorflow_v1/examples/2_BasicModels/nearest_neighbor.py (+55, -0)
  15. tensorflow_v1/examples/2_BasicModels/random_forest.py (+77, -0)
  16. tensorflow_v1/examples/2_BasicModels/word2vec.py (+195, -0)
  17. tensorflow_v1/examples/3_NeuralNetworks/autoencoder.py (+142, -0)
  18. tensorflow_v1/examples/3_NeuralNetworks/bidirectional_rnn.py (+126, -0)
  19. tensorflow_v1/examples/3_NeuralNetworks/convolutional_network.py (+125, -0)
  20. tensorflow_v1/examples/3_NeuralNetworks/convolutional_network_raw.py (+141, -0)
  21. tensorflow_v1/examples/3_NeuralNetworks/dcgan.py (+167, -0)
  22. tensorflow_v1/examples/3_NeuralNetworks/dynamic_rnn.py (+193, -0)
  23. tensorflow_v1/examples/3_NeuralNetworks/gan.py (+157, -0)
  24. tensorflow_v1/examples/3_NeuralNetworks/multilayer_perceptron.py (+104, -0)
  25. tensorflow_v1/examples/3_NeuralNetworks/neural_network.py (+103, -0)
  26. tensorflow_v1/examples/3_NeuralNetworks/neural_network_eager_api.py (+133, -0)
  27. tensorflow_v1/examples/3_NeuralNetworks/neural_network_raw.py (+101, -0)
  28. tensorflow_v1/examples/3_NeuralNetworks/recurrent_network.py (+115, -0)
  29. tensorflow_v1/examples/3_NeuralNetworks/variational_autoencoder.py (+143, -0)
  30. tensorflow_v1/examples/4_Utils/save_restore_model.py (+140, -0)
  31. tensorflow_v1/examples/4_Utils/tensorboard_advanced.py (+143, -0)
  32. tensorflow_v1/examples/4_Utils/tensorboard_basic.py (+97, -0)
  33. tensorflow_v1/examples/5_DataManagement/build_an_image_dataset.py (+212, -0)
  34. tensorflow_v1/examples/5_DataManagement/tensorflow_dataset_api.py (+130, -0)
  35. tensorflow_v1/examples/6_MultiGPU/multigpu_basics.py (+94, -0)
  36. tensorflow_v1/examples/6_MultiGPU/multigpu_cnn.py (+198, -0)
  37. tensorflow_v1/notebooks/0_Prerequisite/ml_introduction.ipynb (+48, -0)
  38. tensorflow_v1/notebooks/0_Prerequisite/mnist_dataset_intro.ipynb (+94, -0)
  39. tensorflow_v1/notebooks/1_Introduction/basic_eager_api.ipynb (+238, -0)
  40. tensorflow_v1/notebooks/1_Introduction/basic_operations.ipynb (+220, -0)
  41. tensorflow_v1/notebooks/1_Introduction/helloworld.ipynb (+87, -0)
  42. tensorflow_v1/notebooks/2_BasicModels/gradient_boosted_decision_tree.ipynb (+266, -0)
  43. tensorflow_v1/notebooks/2_BasicModels/kmeans.ipynb (+226, -0)
  44. tensorflow_v1/notebooks/2_BasicModels/linear_regression.ipynb (+236, -0)
  45. tensorflow_v1/notebooks/2_BasicModels/linear_regression_eager_api.ipynb (+181, -0)
  46. tensorflow_v1/notebooks/2_BasicModels/logistic_regression.ipynb (+174, -0)
  47. tensorflow_v1/notebooks/2_BasicModels/logistic_regression_eager_api.ipynb (+258, -0)
  48. tensorflow_v1/notebooks/2_BasicModels/nearest_neighbor.ipynb (+332, -0)
  49. tensorflow_v1/notebooks/2_BasicModels/random_forest.ipynb (+229, -0)
  50. tensorflow_v1/notebooks/2_BasicModels/word2vec.ipynb (+724, -0)
  51. tensorflow_v1/notebooks/3_NeuralNetworks/autoencoder.ipynb (+310, -0)
  52. tensorflow_v1/notebooks/3_NeuralNetworks/bidirectional_rnn.ipynb (+301, -0)
  53. tensorflow_v1/notebooks/3_NeuralNetworks/convolutional_network.ipynb (+423, -0)
  54. tensorflow_v1/notebooks/3_NeuralNetworks/convolutional_network_raw.ipynb (+303, -0)
  55. tensorflow_v1/notebooks/3_NeuralNetworks/dcgan.ipynb (+333, -0)
  56. tensorflow_v1/notebooks/3_NeuralNetworks/dynamic_rnn.ipynb (+352, -0)
  57. tensorflow_v1/notebooks/3_NeuralNetworks/gan.ipynb (+323, -0)
  58. tensorflow_v1/notebooks/3_NeuralNetworks/neural_network.ipynb (+390, -0)
  59. tensorflow_v1/notebooks/3_NeuralNetworks/neural_network_eager_api.ipynb (+287, -0)
  60. tensorflow_v1/notebooks/3_NeuralNetworks/neural_network_raw.ipynb (+224, -0)
  61. tensorflow_v1/notebooks/3_NeuralNetworks/recurrent_network.ipynb (+292, -0)
  62. tensorflow_v1/notebooks/3_NeuralNetworks/variational_autoencoder.ipynb (+316, -0)
  63. tensorflow_v1/notebooks/4_Utils/save_restore_model.ipynb (+252, -0)
  64. tensorflow_v1/notebooks/4_Utils/tensorboard_advanced.ipynb (+307, -0)
  65. tensorflow_v1/notebooks/4_Utils/tensorboard_basic.ipynb (+217, -0)
  66. tensorflow_v1/notebooks/5_DataManagement/build_an_image_dataset.ipynb (+291, -0)
  67. tensorflow_v1/notebooks/5_DataManagement/image_transformation.ipynb (+418, -0)
  68. tensorflow_v1/notebooks/5_DataManagement/load_data.ipynb (+577, -0)
  69. tensorflow_v1/notebooks/5_DataManagement/tensorflow_dataset_api.ipynb (+222, -0)
  70. tensorflow_v1/notebooks/5_DataManagement/tfrecords.ipynb (+261, -0)
  71. tensorflow_v1/notebooks/6_MultiGPU/multigpu_basics.ipynb (+179, -0)
  72. tensorflow_v1/notebooks/6_MultiGPU/multigpu_cnn.ipynb (+328, -0)

README.md (+81, -48)

@@ -4,69 +4,51 @@ This tutorial was designed for easily diving into TensorFlow, through examples.
 
 It is suitable for beginners who want to find clear and concise examples about TensorFlow. Besides the traditional 'raw' TensorFlow implementations, you can also find the latest TensorFlow API practices (such as `layers`, `estimator`, `dataset`, ...).
 
-**Update (08/17/2019):** Added new [TensorFlow 2.0 examples](tensorflow_v2)! (more coming soon).
-
-*If you are using older TensorFlow version (0.11 and under), please take a [look here](https://github.com/aymericdamien/TensorFlow-Examples/tree/0.11).*
+**Update (05/16/2020):** Moving all default examples to TF2. For TF v1 examples: [check here](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v1).
 
 ## Tutorial index
 
 #### 0 - Prerequisite
-- [Introduction to Machine Learning](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/0_Prerequisite/ml_introduction.ipynb).
-- [Introduction to MNIST Dataset](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/0_Prerequisite/mnist_dataset_intro.ipynb).
+- [Introduction to Machine Learning](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/0_Prerequisite/ml_introduction.ipynb).
+- [Introduction to MNIST Dataset](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/0_Prerequisite/mnist_dataset_intro.ipynb).
 
 #### 1 - Introduction
-- **Hello World** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/1_Introduction/helloworld.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/1_Introduction/helloworld.py)). Very simple example to learn how to print "hello world" using TensorFlow.
-- **Basic Operations** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/1_Introduction/basic_operations.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/1_Introduction/basic_operations.py)). A simple example that cover TensorFlow basic operations.
-- **TensorFlow Eager API basics** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/1_Introduction/basic_eager_api.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/1_Introduction/basic_eager_api.py)). Get started with TensorFlow's Eager API.
+- **Hello World** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/1_Introduction/helloworld.ipynb)). Very simple example to learn how to print "hello world" using TensorFlow 2.0.
+- **Basic Operations** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/1_Introduction/basic_operations.ipynb)). A simple example that covers TensorFlow 2.0 basic operations.
 
 #### 2 - Basic Models
-- **Linear Regression** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2_BasicModels/linear_regression.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/linear_regression.py)). Implement a Linear Regression with TensorFlow.
-- **Linear Regression (eager api)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2_BasicModels/linear_regression_eager_api.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/linear_regression_eager_api.py)). Implement a Linear Regression using TensorFlow's Eager API.
-- **Logistic Regression** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2_BasicModels/logistic_regression.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/logistic_regression.py)). Implement a Logistic Regression with TensorFlow.
-- **Logistic Regression (eager api)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2_BasicModels/logistic_regression_eager_api.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/logistic_regression_eager_api.py)). Implement a Logistic Regression using TensorFlow's Eager API.
-- **Nearest Neighbor** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2_BasicModels/nearest_neighbor.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/nearest_neighbor.py)). Implement Nearest Neighbor algorithm with TensorFlow.
-- **K-Means** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2_BasicModels/kmeans.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/kmeans.py)). Build a K-Means classifier with TensorFlow.
-- **Random Forest** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2_BasicModels/random_forest.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/random_forest.py)). Build a Random Forest classifier with TensorFlow.
-- **Gradient Boosted Decision Tree (GBDT)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2_BasicModels/gradient_boosted_decision_tree.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/gradient_boosted_decision_tree.py)). Build a Gradient Boosted Decision Tree (GBDT) with TensorFlow.
-- **Word2Vec (Word Embedding)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2_BasicModels/word2vec.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/word2vec.py)). Build a Word Embedding Model (Word2Vec) from Wikipedia data, with TensorFlow.
+- **Linear Regression** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/2_BasicModels/linear_regression.ipynb)). Implement a Linear Regression with TensorFlow 2.0.
+- **Logistic Regression** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/2_BasicModels/logistic_regression.ipynb)). Implement a Logistic Regression with TensorFlow 2.0.
+- **Word2Vec (Word Embedding)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/2_BasicModels/word2vec.ipynb)). Build a Word Embedding Model (Word2Vec) from Wikipedia data, with TensorFlow 2.0.
 
 #### 3 - Neural Networks
 ##### Supervised
 
-- **Simple Neural Network** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/neural_network_raw.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/neural_network_raw.py)). Build a simple neural network (a.k.a Multi-layer Perceptron) to classify MNIST digits dataset. Raw TensorFlow implementation.
-- **Simple Neural Network (tf.layers/estimator api)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/neural_network.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/neural_network.py)). Use TensorFlow 'layers' and 'estimator' API to build a simple neural network (a.k.a Multi-layer Perceptron) to classify MNIST digits dataset.
-- **Simple Neural Network (eager api)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/neural_network_eager_api.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/neural_network_eager_api.py)). Use TensorFlow Eager API to build a simple neural network (a.k.a Multi-layer Perceptron) to classify MNIST digits dataset.
-- **Convolutional Neural Network** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/convolutional_network_raw.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/convolutional_network_raw.py)). Build a convolutional neural network to classify MNIST digits dataset. Raw TensorFlow implementation.
-- **Convolutional Neural Network (tf.layers/estimator api)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/convolutional_network.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/convolutional_network.py)). Use TensorFlow 'layers' and 'estimator' API to build a convolutional neural network to classify MNIST digits dataset.
-- **Recurrent Neural Network (LSTM)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/recurrent_network.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/recurrent_network.py)). Build a recurrent neural network (LSTM) to classify MNIST digits dataset.
-- **Bi-directional Recurrent Neural Network (LSTM)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/bidirectional_rnn.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/bidirectional_rnn.py)). Build a bi-directional recurrent neural network (LSTM) to classify MNIST digits dataset.
-- **Dynamic Recurrent Neural Network (LSTM)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/dynamic_rnn.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/dynamic_rnn.py)). Build a recurrent neural network (LSTM) that performs dynamic calculation to classify sequences of different length.
+- **Simple Neural Network** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/3_NeuralNetworks/neural_network.ipynb)). Use TensorFlow 2.0 'layers' and 'model' API to build a simple neural network to classify MNIST digits dataset.
+- **Simple Neural Network (low-level)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/3_NeuralNetworks/neural_network_raw.ipynb)). Raw implementation of a simple neural network to classify MNIST digits dataset.
+- **Convolutional Neural Network** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/3_NeuralNetworks/convolutional_network.ipynb)). Use TensorFlow 2.0 'layers' and 'model' API to build a convolutional neural network to classify MNIST digits dataset.
+- **Convolutional Neural Network (low-level)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/3_NeuralNetworks/convolutional_network_raw.ipynb)). Raw implementation of a convolutional neural network to classify MNIST digits dataset.
+- **Recurrent Neural Network (LSTM)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/3_NeuralNetworks/recurrent_network.ipynb)). Build a recurrent neural network (LSTM) to classify MNIST digits dataset, using TensorFlow 2.0 'layers' and 'model' API.
+- **Bi-directional Recurrent Neural Network (LSTM)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/3_NeuralNetworks/bidirectional_rnn.ipynb)). Build a bi-directional recurrent neural network (LSTM) to classify MNIST digits dataset, using TensorFlow 2.0 'layers' and 'model' API.
+- **Dynamic Recurrent Neural Network (LSTM)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/3_NeuralNetworks/dynamic_rnn.ipynb)). Build a recurrent neural network (LSTM) that performs dynamic calculation to classify sequences of variable length, using TensorFlow 2.0 'layers' and 'model' API.
 
 ##### Unsupervised
-- **Auto-Encoder** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/autoencoder.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/autoencoder.py)). Build an auto-encoder to encode an image to a lower dimension and re-construct it.
-- **Variational Auto-Encoder** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/variational_autoencoder.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/variational_autoencoder.py)). Build a variational auto-encoder (VAE), to encode and generate images from noise.
-- **GAN (Generative Adversarial Networks)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/gan.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/gan.py)). Build a Generative Adversarial Network (GAN) to generate images from noise.
-- **DCGAN (Deep Convolutional Generative Adversarial Networks)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/dcgan.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/dcgan.py)). Build a Deep Convolutional Generative Adversarial Network (DCGAN) to generate images from noise.
+- **Auto-Encoder** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/3_NeuralNetworks/autoencoder.ipynb)). Build an auto-encoder to encode an image to a lower dimension and re-construct it.
+- **DCGAN (Deep Convolutional Generative Adversarial Networks)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/3_NeuralNetworks/dcgan.ipynb)). Build a Deep Convolutional Generative Adversarial Network (DCGAN) to generate images from noise.
 
 #### 4 - Utilities
-- **Save and Restore a model** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/4_Utils/save_restore_model.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/4_Utils/save_restore_model.py)). Save and Restore a model with TensorFlow.
-- **Tensorboard - Graph and loss visualization** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/4_Utils/tensorboard_basic.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/4_Utils/tensorboard_basic.py)). Use Tensorboard to visualize the computation Graph and plot the loss.
-- **Tensorboard - Advanced visualization** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/4_Utils/tensorboard_advanced.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/4_Utils/tensorboard_advanced.py)). Going deeper into Tensorboard; visualize the variables, gradients, and more...
+- **Save and Restore a model** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/4_Utils/save_restore_model.ipynb)). Save and Restore a model with TensorFlow 2.0.
+- **Build Custom Layers & Modules** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/4_Utils/build_custom_layers.ipynb)). Learn how to build your own layers / modules and integrate them into TensorFlow 2.0 Models.
 
 #### 5 - Data Management
-- **Build an image dataset** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/5_DataManagement/build_an_image_dataset.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/5_DataManagement/build_an_image_dataset.py)). Build your own images dataset with TensorFlow data queues, from image folders or a dataset file.
-- **TensorFlow Dataset API** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/5_DataManagement/tensorflow_dataset_api.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/5_DataManagement/tensorflow_dataset_api.py)). Introducing TensorFlow Dataset API for optimizing the input data pipeline.
-- **Load and Parse data** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/5_DataManagement/load_data.ipynb)). Build efficient data pipeline (Numpy arrays, Images, CSV files, custom data, ...).
-- **Build and Load TFRecords** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/5_DataManagement/tfrecords.ipynb)). Convert data into TFRecords format, and load them.
-- **Image Transformation (i.e. Image Augmentation)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/5_DataManagement/image_transformation.ipynb)). Apply various image augmentation techniques, to generate distorted images for training.
+- **Load and Parse data** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/5_DataManagement/load_data.ipynb)). Build efficient data pipeline with TensorFlow 2.0 (Numpy arrays, Images, CSV files, custom data, ...).
+- **Build and Load TFRecords** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/5_DataManagement/tfrecords.ipynb)). Convert data into TFRecords format, and load them with TensorFlow 2.0.
+- **Image Transformation (i.e. Image Augmentation)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/5_DataManagement/image_transformation.ipynb)). Apply various image augmentation techniques with TensorFlow 2.0, to generate distorted images for training.
 
-#### 6 - Multi GPU
-- **Basic Operations on multi-GPU** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/6_MultiGPU/multigpu_basics.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/6_MultiGPU/multigpu_basics.py)). A simple example to introduce multi-GPU in TensorFlow.
-- **Train a Neural Network on multi-GPU** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/6_MultiGPU/multigpu_cnn.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/6_MultiGPU/multigpu_cnn.py)). A clear and simple TensorFlow implementation to train a convolutional neural network on multiple GPUs.
 
-## TensorFlow 2.0
+## TensorFlow v1
 
-The tutorial index for TF v2 is available here: [TensorFlow 2.0 Examples](tensorflow_v2).
+The tutorial index for TF v1 is available here: [TensorFlow v1.15 Examples](tensorflow_v1). Or see below for a list of the examples.
 
 ## Dataset
 Some examples require MNIST dataset for training and testing. Don't worry, this dataset will automatically be downloaded when running examples.
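
As the Dataset paragraph just above says, MNIST is downloaded automatically when an example runs. Purely as an illustration (this is not the loader the scripts themselves use, which varies per example), a minimal sketch of fetching MNIST by hand through the Keras datasets helper:

```python
# Illustrative sketch only: fetch MNIST manually via tf.keras.datasets
# (assumption: a recent TensorFlow 1.x/2.x install exposes this helper).
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
print(x_train.shape, y_train.shape)  # (60000, 28, 28) (60000,)
print(x_test.shape, y_test.shape)    # (10000, 28, 28) (10000,)
```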
@@ -93,11 +75,62 @@ pip install tensorflow_gpu
 
 For more details about TensorFlow installation, you can check [TensorFlow Installation Guide](https://www.tensorflow.org/install/)
 
-## More Examples
-The following examples are coming from [TFLearn](https://github.com/tflearn/tflearn), a library that provides a simplified interface for TensorFlow. You can have a look, there are many [examples](https://github.com/tflearn/tflearn/tree/master/examples) and [pre-built operations and layers](http://tflearn.org/doc_index/#api).
 
-### Tutorials
-- [TFLearn Quickstart](https://github.com/tflearn/tflearn/blob/master/tutorials/intro/quickstart.md). Learn the basics of TFLearn through a concrete machine learning task. Build and train a deep neural network classifier.
+## TensorFlow v1 Examples - Index
+
+The tutorial index for TF v1 is available here: [TensorFlow v1.15 Examples](tensorflow_v1).
+
+#### 0 - Prerequisite
+- [Introduction to Machine Learning](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/0_Prerequisite/ml_introduction.ipynb).
+- [Introduction to MNIST Dataset](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/0_Prerequisite/mnist_dataset_intro.ipynb).
+
+#### 1 - Introduction
+- **Hello World** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/1_Introduction/helloworld.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/1_Introduction/helloworld.py)). Very simple example to learn how to print "hello world" using TensorFlow.
+- **Basic Operations** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/1_Introduction/basic_operations.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/1_Introduction/basic_operations.py)). A simple example that cover TensorFlow basic operations.
+- **TensorFlow Eager API basics** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/1_Introduction/basic_eager_api.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/1_Introduction/basic_eager_api.py)). Get started with TensorFlow's Eager API.
+
+#### 2 - Basic Models
+- **Linear Regression** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/2_BasicModels/linear_regression.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/2_BasicModels/linear_regression.py)). Implement a Linear Regression with TensorFlow.
+- **Linear Regression (eager api)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/2_BasicModels/linear_regression_eager_api.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/2_BasicModels/linear_regression_eager_api.py)). Implement a Linear Regression using TensorFlow's Eager API.
+- **Logistic Regression** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/2_BasicModels/logistic_regression.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/2_BasicModels/logistic_regression.py)). Implement a Logistic Regression with TensorFlow.
+- **Logistic Regression (eager api)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/2_BasicModels/logistic_regression_eager_api.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/2_BasicModels/logistic_regression_eager_api.py)). Implement a Logistic Regression using TensorFlow's Eager API.
+- **Nearest Neighbor** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/2_BasicModels/nearest_neighbor.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/2_BasicModels/nearest_neighbor.py)). Implement Nearest Neighbor algorithm with TensorFlow.
+- **K-Means** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/2_BasicModels/kmeans.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/2_BasicModels/kmeans.py)). Build a K-Means classifier with TensorFlow.
+- **Random Forest** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/2_BasicModels/random_forest.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/2_BasicModels/random_forest.py)). Build a Random Forest classifier with TensorFlow.
+- **Gradient Boosted Decision Tree (GBDT)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/2_BasicModels/gradient_boosted_decision_tree.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/2_BasicModels/gradient_boosted_decision_tree.py)). Build a Gradient Boosted Decision Tree (GBDT) with TensorFlow.
+- **Word2Vec (Word Embedding)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/2_BasicModels/word2vec.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/2_BasicModels/word2vec.py)). Build a Word Embedding Model (Word2Vec) from Wikipedia data, with TensorFlow.
+
+#### 3 - Neural Networks
+##### Supervised
+
+- **Simple Neural Network** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/neural_network_raw.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/neural_network_raw.py)). Build a simple neural network (a.k.a Multi-layer Perceptron) to classify MNIST digits dataset. Raw TensorFlow implementation.
+- **Simple Neural Network (tf.layers/estimator api)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/neural_network.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/neural_network.py)). Use TensorFlow 'layers' and 'estimator' API to build a simple neural network (a.k.a Multi-layer Perceptron) to classify MNIST digits dataset.
+- **Simple Neural Network (eager api)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/neural_network_eager_api.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/neural_network_eager_api.py)). Use TensorFlow Eager API to build a simple neural network (a.k.a Multi-layer Perceptron) to classify MNIST digits dataset.
+- **Convolutional Neural Network** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/convolutional_network_raw.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/convolutional_network_raw.py)). Build a convolutional neural network to classify MNIST digits dataset. Raw TensorFlow implementation.
+- **Convolutional Neural Network (tf.layers/estimator api)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/convolutional_network.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/convolutional_network.py)). Use TensorFlow 'layers' and 'estimator' API to build a convolutional neural network to classify MNIST digits dataset.
+- **Recurrent Neural Network (LSTM)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/recurrent_network.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/recurrent_network.py)). Build a recurrent neural network (LSTM) to classify MNIST digits dataset.
+- **Bi-directional Recurrent Neural Network (LSTM)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/bidirectional_rnn.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/bidirectional_rnn.py)). Build a bi-directional recurrent neural network (LSTM) to classify MNIST digits dataset.
+- **Dynamic Recurrent Neural Network (LSTM)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/dynamic_rnn.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/dynamic_rnn.py)). Build a recurrent neural network (LSTM) that performs dynamic calculation to classify sequences of different length.
+
+##### Unsupervised
+- **Auto-Encoder** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/autoencoder.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/autoencoder.py)). Build an auto-encoder to encode an image to a lower dimension and re-construct it.
+- **Variational Auto-Encoder** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/variational_autoencoder.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/variational_autoencoder.py)). Build a variational auto-encoder (VAE), to encode and generate images from noise.
+- **GAN (Generative Adversarial Networks)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/gan.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/gan.py)). Build a Generative Adversarial Network (GAN) to generate images from noise.
+- **DCGAN (Deep Convolutional Generative Adversarial Networks)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/dcgan.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/dcgan.py)). Build a Deep Convolutional Generative Adversarial Network (DCGAN) to generate images from noise.
+
+#### 4 - Utilities
+- **Save and Restore a model** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/4_Utils/save_restore_model.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/4_Utils/save_restore_model.py)). Save and Restore a model with TensorFlow.
+- **Tensorboard - Graph and loss visualization** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/4_Utils/tensorboard_basic.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/4_Utils/tensorboard_basic.py)). Use Tensorboard to visualize the computation Graph and plot the loss.
+- **Tensorboard - Advanced visualization** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/4_Utils/tensorboard_advanced.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/4_Utils/tensorboard_advanced.py)). Going deeper into Tensorboard; visualize the variables, gradients, and more...
+
+#### 5 - Data Management
+- **Build an image dataset** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/5_DataManagement/build_an_image_dataset.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/5_DataManagement/build_an_image_dataset.py)). Build your own images dataset with TensorFlow data queues, from image folders or a dataset file.
+- **TensorFlow Dataset API** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/5_DataManagement/tensorflow_dataset_api.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/5_DataManagement/tensorflow_dataset_api.py)). Introducing TensorFlow Dataset API for optimizing the input data pipeline.
+- **Load and Parse data** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/5_DataManagement/load_data.ipynb)). Build efficient data pipeline (Numpy arrays, Images, CSV files, custom data, ...).
+- **Build and Load TFRecords** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/5_DataManagement/tfrecords.ipynb)). Convert data into TFRecords format, and load them.
+- **Image Transformation (i.e. Image Augmentation)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/5_DataManagement/image_transformation.ipynb)). Apply various image augmentation techniques, to generate distorted images for training.
+
+#### 6 - Multi GPU
+- **Basic Operations on multi-GPU** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/6_MultiGPU/multigpu_basics.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/6_MultiGPU/multigpu_basics.py)). A simple example to introduce multi-GPU in TensorFlow.
+- **Train a Neural Network on multi-GPU** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/6_MultiGPU/multigpu_cnn.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/6_MultiGPU/multigpu_cnn.py)). A clear and simple TensorFlow implementation to train a convolutional neural network on multiple GPUs.
 
-### Examples
-- [TFLearn Examples](https://github.com/tflearn/tflearn/blob/master/examples). A large collection of examples using TFLearn.

examples/README.md (+5, -0)

@@ -0,0 +1,5 @@
+## Deprecated - Please Read
+
+Due to TensorFlow radically changing its API in v2, the examples index has been split between [v1](../tensorflow_v1) and [v2](../tensorflow_v2).
+
+The following examples are the original TF v1 examples, and will be deprecated entirely in favor of [tensorflow_v1](../tensorflow_v1) directory in a future release.

notebooks/README.md (+5, -0)

@@ -0,0 +1,5 @@
+## Deprecated - Please Read
+
+Due to TensorFlow radically changing its API in v2, the examples index has been split between [v1](../tensorflow_v1) and [v2](../tensorflow_v2).
+
+The following examples are the original TF v1 examples, and will be deprecated entirely in favor of [tensorflow_v1](../tensorflow_v1) directory in a future release.

tensorflow_v1/README.md (+93, -0)

@@ -0,0 +1,93 @@
+# TensorFlow v1 Examples
+
+All the following examples are the original TF v1 examples.
+
+*If you are using older TensorFlow version (0.11 and under), please take a [look here](https://github.com/aymericdamien/TensorFlow-Examples/tree/0.11).*
+
+#### 0 - Prerequisite
+- [Introduction to Machine Learning](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/0_Prerequisite/ml_introduction.ipynb).
+- [Introduction to MNIST Dataset](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/0_Prerequisite/mnist_dataset_intro.ipynb).
+
+#### 1 - Introduction
+- **Hello World** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/1_Introduction/helloworld.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/1_Introduction/helloworld.py)). Very simple example to learn how to print "hello world" using TensorFlow.
+- **Basic Operations** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/1_Introduction/basic_operations.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/1_Introduction/basic_operations.py)). A simple example that covers TensorFlow basic operations.
+- **TensorFlow Eager API basics** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/1_Introduction/basic_eager_api.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/1_Introduction/basic_eager_api.py)). Get started with TensorFlow's Eager API.
+
+#### 2 - Basic Models
+- **Linear Regression** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/2_BasicModels/linear_regression.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/2_BasicModels/linear_regression.py)). Implement a Linear Regression with TensorFlow.
+- **Linear Regression (eager api)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/2_BasicModels/linear_regression_eager_api.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/2_BasicModels/linear_regression_eager_api.py)). Implement a Linear Regression using TensorFlow's Eager API.
+- **Logistic Regression** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/2_BasicModels/logistic_regression.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/2_BasicModels/logistic_regression.py)). Implement a Logistic Regression with TensorFlow.
+- **Logistic Regression (eager api)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/2_BasicModels/logistic_regression_eager_api.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/2_BasicModels/logistic_regression_eager_api.py)). Implement a Logistic Regression using TensorFlow's Eager API.
+- **Nearest Neighbor** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/2_BasicModels/nearest_neighbor.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/2_BasicModels/nearest_neighbor.py)). Implement Nearest Neighbor algorithm with TensorFlow.
+- **K-Means** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/2_BasicModels/kmeans.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/2_BasicModels/kmeans.py)). Build a K-Means classifier with TensorFlow.
+- **Random Forest** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/2_BasicModels/random_forest.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/2_BasicModels/random_forest.py)). Build a Random Forest classifier with TensorFlow.
+- **Gradient Boosted Decision Tree (GBDT)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/2_BasicModels/gradient_boosted_decision_tree.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/2_BasicModels/gradient_boosted_decision_tree.py)). Build a Gradient Boosted Decision Tree (GBDT) with TensorFlow.
+- **Word2Vec (Word Embedding)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/2_BasicModels/word2vec.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/2_BasicModels/word2vec.py)). Build a Word Embedding Model (Word2Vec) from Wikipedia data, with TensorFlow.
+
+#### 3 - Neural Networks
+##### Supervised
+
+- **Simple Neural Network** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/neural_network_raw.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/neural_network_raw.py)). Build a simple neural network (a.k.a Multi-layer Perceptron) to classify MNIST digits dataset. Raw TensorFlow implementation.
+- **Simple Neural Network (tf.layers/estimator api)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/neural_network.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/neural_network.py)). Use TensorFlow 'layers' and 'estimator' API to build a simple neural network (a.k.a Multi-layer Perceptron) to classify MNIST digits dataset.
+- **Simple Neural Network (eager api)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/neural_network_eager_api.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/neural_network_eager_api.py)). Use TensorFlow Eager API to build a simple neural network (a.k.a Multi-layer Perceptron) to classify MNIST digits dataset.
+- **Convolutional Neural Network** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/convolutional_network_raw.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/convolutional_network_raw.py)). Build a convolutional neural network to classify MNIST digits dataset. Raw TensorFlow implementation.
+- **Convolutional Neural Network (tf.layers/estimator api)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/convolutional_network.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/convolutional_network.py)). Use TensorFlow 'layers' and 'estimator' API to build a convolutional neural network to classify MNIST digits dataset.
+- **Recurrent Neural Network (LSTM)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/recurrent_network.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/recurrent_network.py)). Build a recurrent neural network (LSTM) to classify MNIST digits dataset.
+- **Bi-directional Recurrent Neural Network (LSTM)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/bidirectional_rnn.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/bidirectional_rnn.py)). Build a bi-directional recurrent neural network (LSTM) to classify MNIST digits dataset.
+- **Dynamic Recurrent Neural Network (LSTM)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/dynamic_rnn.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/dynamic_rnn.py)). Build a recurrent neural network (LSTM) that performs dynamic calculation to classify sequences of different length.
+
+##### Unsupervised
+- **Auto-Encoder** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/autoencoder.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/autoencoder.py)). Build an auto-encoder to encode an image to a lower dimension and re-construct it.
+- **Variational Auto-Encoder** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/variational_autoencoder.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/variational_autoencoder.py)). Build a variational auto-encoder (VAE), to encode and generate images from noise.
+- **GAN (Generative Adversarial Networks)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/gan.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/gan.py)). Build a Generative Adversarial Network (GAN) to generate images from noise.
+- **DCGAN (Deep Convolutional Generative Adversarial Networks)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/3_NeuralNetworks/dcgan.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/3_NeuralNetworks/dcgan.py)). Build a Deep Convolutional Generative Adversarial Network (DCGAN) to generate images from noise.
+
+#### 4 - Utilities
+- **Save and Restore a model** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/4_Utils/save_restore_model.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/4_Utils/save_restore_model.py)). Save and Restore a model with TensorFlow.
+- **Tensorboard - Graph and loss visualization** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/4_Utils/tensorboard_basic.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/4_Utils/tensorboard_basic.py)). Use Tensorboard to visualize the computation Graph and plot the loss.
+- **Tensorboard - Advanced visualization** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/4_Utils/tensorboard_advanced.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/4_Utils/tensorboard_advanced.py)). Going deeper into Tensorboard; visualize the variables, gradients, and more...
+
+#### 5 - Data Management
+- **Build an image dataset** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/5_DataManagement/build_an_image_dataset.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/5_DataManagement/build_an_image_dataset.py)). Build your own images dataset with TensorFlow data queues, from image folders or a dataset file.
+- **TensorFlow Dataset API** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/5_DataManagement/tensorflow_dataset_api.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/5_DataManagement/tensorflow_dataset_api.py)). Introducing TensorFlow Dataset API for optimizing the input data pipeline.
+- **Load and Parse data** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/5_DataManagement/load_data.ipynb)). Build efficient data pipeline (Numpy arrays, Images, CSV files, custom data, ...).
+- **Build and Load TFRecords** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/5_DataManagement/tfrecords.ipynb)). Convert data into TFRecords format, and load them.
+- **Image Transformation (i.e. Image Augmentation)** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/5_DataManagement/image_transformation.ipynb)). Apply various image augmentation techniques, to generate distorted images for training.
+
+#### 6 - Multi GPU
+- **Basic Operations on multi-GPU** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/6_MultiGPU/multigpu_basics.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/6_MultiGPU/multigpu_basics.py)). A simple example to introduce multi-GPU in TensorFlow.
+- **Train a Neural Network on multi-GPU** ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/tensorflow_v1/6_MultiGPU/multigpu_cnn.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/tensorflow_v1/6_MultiGPU/multigpu_cnn.py)). A clear and simple TensorFlow implementation to train a convolutional neural network on multiple GPUs.
+
+## Installation
+
+To download all the examples, simply clone this repository:
+```
+git clone https://github.com/aymericdamien/TensorFlow-Examples
+```
+
+To run them, you also need the latest version of TensorFlow. To install it:
+```
+pip install tensorflow==1.15.0
+```
+
+or (with GPU support):
+```
+pip install tensorflow_gpu==1.15.0
+```
+
+For more details about TensorFlow installation, you can check [TensorFlow Installation Guide](https://www.tensorflow.org/install/)
+
+## Dataset
+Some examples require MNIST dataset for training and testing. Don't worry, this dataset will automatically be downloaded when running examples.
+MNIST is a database of handwritten digits, for a quick description of that dataset, you can check [this notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/0_Prerequisite/mnist_dataset_intro.ipynb).
+
+Official Website: [http://yann.lecun.com/exdb/mnist/](http://yann.lecun.com/exdb/mnist/).
+
+## More Examples
+The following examples are coming from [TFLearn](https://github.com/tflearn/tflearn), a library that provides a simplified interface for TensorFlow. You can have a look, there are many [examples](https://github.com/tflearn/tflearn/tree/master/examples) and [pre-built operations and layers](http://tflearn.org/doc_index/#api).
+
+### Tutorials
+- [TFLearn Quickstart](https://github.com/tflearn/tflearn/blob/master/tutorials/intro/quickstart.md). Learn the basics of TFLearn through a concrete machine learning task. Build and train a deep neural network classifier.
+
+### Examples
+- [TFLearn Examples](https://github.com/tflearn/tflearn/blob/master/examples). A large collection of examples using TFLearn.
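
The installation block above pins TensorFlow to 1.15.0 for these v1 examples. As a quick, hedged sanity check after installing (assuming a standard pip install of either the v1 or the v2 package), you can print the version that actually ended up active:

```python
# Sanity check after installation (assumption: TensorFlow installed via pip).
import tensorflow as tf

print(tf.__version__)  # expect 1.15.0 for the v1 examples, 2.x for the default examples
```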

tensorflow_v1/examples/1_Introduction/basic_eager_api.py (+68, -0)

@@ -0,0 +1,68 @@
+'''
+Basic introduction to TensorFlow's Eager API.
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+
+What is Eager API?
+" Eager execution is an imperative, define-by-run interface where operations are
+executed immediately as they are called from Python. This makes it easier to
+get started with TensorFlow, and can make research and development more
+intuitive. A vast majority of the TensorFlow API remains the same whether eager
+execution is enabled or not. As a result, the exact same code that constructs
+TensorFlow graphs (e.g. using the layers API) can be executed imperatively
+by using eager execution. Conversely, most models written with Eager enabled
+can be converted to a graph that can be further optimized and/or extracted
+for deployment in production without changing code. " - Rajat Monga
+
+'''
+from __future__ import absolute_import, division, print_function
+
+import numpy as np
+import tensorflow as tf
+import tensorflow.contrib.eager as tfe
+
+# Set Eager API
+print("Setting Eager mode...")
+tfe.enable_eager_execution()
+
+# Define constant tensors
+print("Define constant tensors")
+a = tf.constant(2)
+print("a = %i" % a)
+b = tf.constant(3)
+print("b = %i" % b)
+
+# Run the operation without the need for tf.Session
+print("Running operations, without tf.Session")
+c = a + b
+print("a + b = %i" % c)
+d = a * b
+print("a * b = %i" % d)
+
+
+# Full compatibility with Numpy
+print("Mixing operations with Tensors and Numpy Arrays")
+
+# Define constant tensors
+a = tf.constant([[2., 1.],
+                 [1., 0.]], dtype=tf.float32)
+print("Tensor:\n a = %s" % a)
+b = np.array([[3., 0.],
+              [5., 1.]], dtype=np.float32)
+print("NumpyArray:\n b = %s" % b)
+
+# Run the operation without the need for tf.Session
+print("Running operations, without tf.Session")
+
+c = a + b
+print("a + b = %s" % c)
+
+d = tf.matmul(a, b)
+print("a * b = %s" % d)
+
+print("Iterate through Tensor 'a':")
+for i in range(a.shape[0]):
+    for j in range(a.shape[1]):
+        print(a[i][j])
+

+ 75 - 0
tensorflow_v1/examples/1_Introduction/basic_operations.py

@@ -0,0 +1,75 @@
+'''
+Basic Operations example using TensorFlow library.
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+
+from __future__ import print_function
+
+import tensorflow as tf
+
+# Basic constant operations
+# The value returned by the constructor represents the output
+# of the Constant op.
+a = tf.constant(2)
+b = tf.constant(3)
+
+# Launch the default graph.
+with tf.Session() as sess:
+    print("a=2, b=3")
+    print("Addition with constants: %i" % sess.run(a+b))
+    print("Multiplication with constants: %i" % sess.run(a*b))
+
+# Basic Operations with variable as graph input
+# The value returned by the constructor represents the output
+# of the placeholder op (its value is fed as input when running the session)
+# tf Graph input
+a = tf.placeholder(tf.int16)
+b = tf.placeholder(tf.int16)
+
+# Define some operations
+add = tf.add(a, b)
+mul = tf.multiply(a, b)
+
+# Launch the default graph.
+with tf.Session() as sess:
+    # Run every operation with variable input
+    print("Addition with variables: %i" % sess.run(add, feed_dict={a: 2, b: 3}))
+    print("Multiplication with variables: %i" % sess.run(mul, feed_dict={a: 2, b: 3}))
+
+
+# ----------------
+# More in details:
+# Matrix Multiplication from TensorFlow official tutorial
+
+# Create a Constant op that produces a 1x2 matrix.  The op is
+# added as a node to the default graph.
+#
+# The value returned by the constructor represents the output
+# of the Constant op.
+matrix1 = tf.constant([[3., 3.]])
+
+# Create another Constant that produces a 2x1 matrix.
+matrix2 = tf.constant([[2.],[2.]])
+
+# Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs.
+# The returned value, 'product', represents the result of the matrix
+# multiplication.
+product = tf.matmul(matrix1, matrix2)
+
+# To run the matmul op we call the session 'run()' method, passing 'product'
+# which represents the output of the matmul op.  This indicates to the call
+# that we want to get the output of the matmul op back.
+#
+# All inputs needed by the op are run automatically by the session.  They
+# typically are run in parallel.
+#
+# The call 'run(product)' thus causes the execution of three ops in the
+# graph: the two constants and matmul.
+#
+# The output of the op is returned in 'result' as a numpy `ndarray` object.
+with tf.Session() as sess:
+    result = sess.run(product)
+    print(result)
+    # ==> [[ 12.]]

+ 25 - 0
tensorflow_v1/examples/1_Introduction/helloworld.py

@@ -0,0 +1,25 @@
+'''
+HelloWorld example using TensorFlow library.
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+
+from __future__ import print_function
+
+import tensorflow as tf
+
+# Simple hello world using TensorFlow
+
+# Create a Constant op
+# The op is added as a node to the default graph.
+#
+# The value returned by the constructor represents the output
+# of the Constant op.
+hello = tf.constant('Hello, TensorFlow!')
+
+# Start tf session
+sess = tf.Session()
+
+# Run the op
+print(sess.run(hello))
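+# Note: with Python 3, the output is a bytes object: b'Hello, TensorFlow!'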

+ 85 - 0
tensorflow_v1/examples/2_BasicModels/gradient_boosted_decision_tree.py

@@ -0,0 +1,85 @@
+""" Gradient Boosted Decision Tree (GBDT).
+
+Implement a Gradient Boosted Decision tree with TensorFlow to classify
+handwritten digit images. This example is using the MNIST database of
+handwritten digits as training samples (http://yann.lecun.com/exdb/mnist/).
+
+Links:
+    [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+
+from __future__ import print_function
+
+import tensorflow as tf
+from tensorflow.contrib.boosted_trees.estimator_batch.estimator import GradientBoostedDecisionTreeClassifier
+from tensorflow.contrib.boosted_trees.proto import learner_pb2 as gbdt_learner
+
+# Ignore all GPUs (current TF GBDT does not support GPU).
+import os
+os.environ["CUDA_VISIBLE_DEVICES"] = ""
+
+# Import MNIST data
+# Set verbosity to display errors only (Remove this line for showing warnings)
+tf.logging.set_verbosity(tf.logging.ERROR)
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=False,
+                                  source_url='http://yann.lecun.com/exdb/mnist/')
+
+# Parameters
+batch_size = 4096 # The number of samples per batch
+num_classes = 10 # The 10 digits
+num_features = 784 # Each image is 28x28 pixels
+max_steps = 10000
+
+# GBDT Parameters
+learning_rate = 0.1
+l1_regul = 0.
+l2_regul = 1.
+examples_per_layer = 1000
+num_trees = 10
+max_depth = 16
+
+# Fill GBDT parameters into the config proto
+learner_config = gbdt_learner.LearnerConfig()
+learner_config.learning_rate_tuner.fixed.learning_rate = learning_rate
+learner_config.regularization.l1 = l1_regul
+learner_config.regularization.l2 = l2_regul / examples_per_layer
+learner_config.constraints.max_tree_depth = max_depth
+growing_mode = gbdt_learner.LearnerConfig.LAYER_BY_LAYER
+learner_config.growing_mode = growing_mode
+run_config = tf.contrib.learn.RunConfig(save_checkpoints_secs=300)
+learner_config.multi_class_strategy = (
+    gbdt_learner.LearnerConfig.DIAGONAL_HESSIAN)
+
+# Create a TensorFlow GBDT Estimator
+gbdt_model = GradientBoostedDecisionTreeClassifier(
+    model_dir=None, # No save directory specified
+    learner_config=learner_config,
+    n_classes=num_classes,
+    examples_per_layer=examples_per_layer,
+    num_trees=num_trees,
+    center_bias=False,
+    config=run_config)
+
+# Display TF info logs
+tf.logging.set_verbosity(tf.logging.INFO)
+
+# Define the input function for training
+input_fn = tf.estimator.inputs.numpy_input_fn(
+    x={'images': mnist.train.images}, y=mnist.train.labels,
+    batch_size=batch_size, num_epochs=None, shuffle=True)
+# Train the Model
+gbdt_model.fit(input_fn=input_fn, max_steps=max_steps)
+
+# Evaluate the Model
+# Define the input function for evaluating
+input_fn = tf.estimator.inputs.numpy_input_fn(
+    x={'images': mnist.test.images}, y=mnist.test.labels,
+    batch_size=batch_size, shuffle=False)
+# Use the Estimator 'evaluate' method
+e = gbdt_model.evaluate(input_fn=input_fn)
+
+print("Testing Accuracy:", e['accuracy'])

+ 93 - 0
tensorflow_v1/examples/2_BasicModels/kmeans.py

@@ -0,0 +1,93 @@
+""" K-Means.
+
+Implement K-Means algorithm with TensorFlow, and apply it to classify
+handwritten digit images. This example is using the MNIST database of
+handwritten digits as training samples (http://yann.lecun.com/exdb/mnist/).
+
+Note: This example requires TensorFlow v1.1.0 or over.
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+
+from __future__ import print_function
+
+import numpy as np
+import tensorflow as tf
+from tensorflow.contrib.factorization import KMeans
+
+# Ignore all GPUs, tf k-means does not benefit from it.
+import os
+os.environ["CUDA_VISIBLE_DEVICES"] = ""
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+full_data_x = mnist.train.images
+
+# Parameters
+num_steps = 50 # Total steps to train
+batch_size = 1024 # The number of samples per batch
+k = 25 # The number of clusters
+num_classes = 10 # The 10 digits
+num_features = 784 # Each image is 28x28 pixels
+
+# Input images
+X = tf.placeholder(tf.float32, shape=[None, num_features])
+# Labels (for assigning a label to a centroid and testing)
+Y = tf.placeholder(tf.float32, shape=[None, num_classes])
+
+# K-Means Parameters
+kmeans = KMeans(inputs=X, num_clusters=k, distance_metric='cosine',
+                use_mini_batch=True)
+
+# Build KMeans graph
+training_graph = kmeans.training_graph()
+
+if len(training_graph) > 6: # Tensorflow 1.4+
+    (all_scores, cluster_idx, scores, cluster_centers_initialized,
+     cluster_centers_var, init_op, train_op) = training_graph
+else:
+    (all_scores, cluster_idx, scores, cluster_centers_initialized,
+     init_op, train_op) = training_graph
+
+cluster_idx = cluster_idx[0] # fix for cluster_idx being a tuple
+avg_distance = tf.reduce_mean(scores)
+
+# Initialize the variables (i.e. assign their default value)
+init_vars = tf.global_variables_initializer()
+
+# Start TensorFlow session
+sess = tf.Session()
+
+# Run the initializer
+sess.run(init_vars, feed_dict={X: full_data_x})
+sess.run(init_op, feed_dict={X: full_data_x})
+
+# Training
+for i in range(1, num_steps + 1):
+    _, d, idx = sess.run([train_op, avg_distance, cluster_idx],
+                         feed_dict={X: full_data_x})
+    if i % 10 == 0 or i == 1:
+        print("Step %i, Avg Distance: %f" % (i, d))
+
+# Assign a label to each centroid
+# Count the total number of labels per centroid, by adding the one-hot label of each
+# training sample to its closest centroid (given by 'idx')
+counts = np.zeros(shape=(k, num_classes))
+for i in range(len(idx)):
+    counts[idx[i]] += mnist.train.labels[i]
+# Assign the most frequent label to the centroid
+labels_map = [np.argmax(c) for c in counts]
+labels_map = tf.convert_to_tensor(labels_map)
+
+# Evaluation ops
+# Lookup: centroid_id -> label
+cluster_label = tf.nn.embedding_lookup(labels_map, cluster_idx)
+# Compute accuracy
+correct_prediction = tf.equal(cluster_label, tf.cast(tf.argmax(Y, 1), tf.int32))
+accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
+
+# Test Model
+test_x, test_y = mnist.test.images, mnist.test.labels
+print("Test Accuracy:", sess.run(accuracy_op, feed_dict={X: test_x, Y: test_y}))

+ 89 - 0
tensorflow_v1/examples/2_BasicModels/linear_regression.py

@@ -0,0 +1,89 @@
+'''
+A linear regression learning algorithm example using TensorFlow library.
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+
+from __future__ import print_function
+
+import tensorflow as tf
+import numpy
+import matplotlib.pyplot as plt
+rng = numpy.random
+
+# Parameters
+learning_rate = 0.01
+training_epochs = 1000
+display_step = 50
+
+# Training Data
+train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
+                         7.042,10.791,5.313,7.997,5.654,9.27,3.1])
+train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
+                         2.827,3.465,1.65,2.904,2.42,2.94,1.3])
+n_samples = train_X.shape[0]
+
+# tf Graph Input
+X = tf.placeholder("float")
+Y = tf.placeholder("float")
+
+# Set model weights
+W = tf.Variable(rng.randn(), name="weight")
+b = tf.Variable(rng.randn(), name="bias")
+
+# Construct a linear model
+pred = tf.add(tf.multiply(X, W), b)
+
+# Mean squared error
+cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
+# Gradient descent
+#  Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
+optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+# Start training
+with tf.Session() as sess:
+
+    # Run the initializer
+    sess.run(init)
+
+    # Fit all training data
+    for epoch in range(training_epochs):
+        for (x, y) in zip(train_X, train_Y):
+            sess.run(optimizer, feed_dict={X: x, Y: y})
+
+        # Display logs per epoch step
+        if (epoch+1) % display_step == 0:
+            c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})
+            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
+                "W=", sess.run(W), "b=", sess.run(b))
+
+    print("Optimization Finished!")
+    training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
+    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
+
+    # Graphic display
+    plt.plot(train_X, train_Y, 'ro', label='Original data')
+    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
+    plt.legend()
+    plt.show()
+
+    # Testing example, as requested (Issue #2)
+    test_X = numpy.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1])
+    test_Y = numpy.asarray([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03])
+
+    print("Testing... (Mean square loss Comparison)")
+    testing_cost = sess.run(
+        tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]),
+        feed_dict={X: test_X, Y: test_Y})  # same function as cost above
+    print("Testing cost=", testing_cost)
+    print("Absolute mean square loss difference:", abs(
+        training_cost - testing_cost))
+
+    plt.plot(test_X, test_Y, 'bo', label='Testing data')
+    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
+    plt.legend()
+    plt.show()

+ 69 - 0
tensorflow_v1/examples/2_BasicModels/linear_regression_eager_api.py

@@ -0,0 +1,69 @@
+''' Linear Regression with Eager API.
+
+A linear regression learning algorithm example using TensorFlow's Eager API.
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+from __future__ import absolute_import, division, print_function
+
+import matplotlib.pyplot as plt
+import numpy as np
+import tensorflow as tf
+
+# Set Eager API
+tf.enable_eager_execution()
+tfe = tf.contrib.eager
+
+# Training Data
+train_X = [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
+           7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1]
+train_Y = [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
+           2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3]
+n_samples = len(train_X)
+
+# Parameters
+learning_rate = 0.01
+display_step = 100
+num_steps = 1000
+
+# Weight and Bias
+W = tfe.Variable(np.random.randn())
+b = tfe.Variable(np.random.randn())
+
+
+# Linear regression (Wx + b)
+def linear_regression(inputs):
+    return inputs * W + b
+
+
+# Mean square error
+def mean_square_fn(model_fn, inputs, labels):
+    return tf.reduce_sum(tf.pow(model_fn(inputs) - labels, 2)) / (2 * n_samples)
+
+
+# SGD Optimizer
+optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
+# Compute gradients
+grad = tfe.implicit_gradients(mean_square_fn)
+
+# Initial cost, before optimizing
+print("Initial cost= {:.9f}".format(
+    mean_square_fn(linear_regression, train_X, train_Y)),
+    "W=", W.numpy(), "b=", b.numpy())
+
+# Training
+for step in range(num_steps):
+
+    optimizer.apply_gradients(grad(linear_regression, train_X, train_Y))
+
+    if (step + 1) % display_step == 0 or step == 0:
+        print("Epoch:", '%04d' % (step + 1), "cost=",
+              "{:.9f}".format(mean_square_fn(linear_regression, train_X, train_Y)),
+              "W=", W.numpy(), "b=", b.numpy())
+
+# Graphic display
+plt.plot(train_X, train_Y, 'ro', label='Original data')
+plt.plot(train_X, np.array(W * train_X + b), label='Fitted line')
+plt.legend()
+plt.show()

+ 71 - 0
tensorflow_v1/examples/2_BasicModels/logistic_regression.py

@@ -0,0 +1,71 @@
+'''
+A logistic regression learning algorithm example using TensorFlow library.
+This example is using the MNIST database of handwritten digits
+(http://yann.lecun.com/exdb/mnist/)
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+
+from __future__ import print_function
+
+import tensorflow as tf
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+# Parameters
+learning_rate = 0.01
+training_epochs = 25
+batch_size = 100
+display_step = 1
+
+# tf Graph Input
+x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784
+y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes
+
+# Set model weights
+W = tf.Variable(tf.zeros([784, 10]))
+b = tf.Variable(tf.zeros([10]))
+
+# Construct model
+pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
+
+# Minimize error using cross entropy
+cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
+# Gradient Descent
+optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+# Start training
+with tf.Session() as sess:
+
+    # Run the initializer
+    sess.run(init)
+
+    # Training cycle
+    for epoch in range(training_epochs):
+        avg_cost = 0.
+        total_batch = int(mnist.train.num_examples/batch_size)
+        # Loop over all batches
+        for i in range(total_batch):
+            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
+            # Run optimization op (backprop) and cost op (to get loss value)
+            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs,
+                                                          y: batch_ys})
+            # Compute average loss
+            avg_cost += c / total_batch
+        # Display logs per epoch step
+        if (epoch+1) % display_step == 0:
+            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
+
+    print("Optimization Finished!")
+
+    # Test model
+    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
+    # Calculate accuracy
+    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
+    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

+ 105 - 0
tensorflow_v1/examples/2_BasicModels/logistic_regression_eager_api.py

@@ -0,0 +1,105 @@
+''' Logistic Regression with Eager API.
+
+A logistic regression learning algorithm example using TensorFlow's Eager API.
+This example is using the MNIST database of handwritten digits
+(http://yann.lecun.com/exdb/mnist/)
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+from __future__ import absolute_import, division, print_function
+
+import tensorflow as tf
+
+# Set Eager API
+tf.enable_eager_execution()
+tfe = tf.contrib.eager
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
+
+# Parameters
+learning_rate = 0.1
+batch_size = 128
+num_steps = 1000
+display_step = 100
+
+dataset = tf.data.Dataset.from_tensor_slices(
+    (mnist.train.images, mnist.train.labels))
+dataset = dataset.repeat().batch(batch_size).prefetch(batch_size)
+dataset_iter = tfe.Iterator(dataset)
+
+# Variables
+W = tfe.Variable(tf.zeros([784, 10]), name='weights')
+b = tfe.Variable(tf.zeros([10]), name='bias')
+
+
+# Logistic regression (Wx + b)
+def logistic_regression(inputs):
+    return tf.matmul(inputs, W) + b
+
+
+# Cross-Entropy loss function
+def loss_fn(inference_fn, inputs, labels):
+    # Using sparse_softmax cross entropy
+    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
+        logits=inference_fn(inputs), labels=labels))
+
+
+# Calculate accuracy
+def accuracy_fn(inference_fn, inputs, labels):
+    prediction = tf.nn.softmax(inference_fn(inputs))
+    correct_pred = tf.equal(tf.argmax(prediction, 1), labels)
+    return tf.reduce_mean(tf.cast(correct_pred, tf.float32))
+
+
+# SGD Optimizer
+optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
+# Compute gradients
+grad = tfe.implicit_gradients(loss_fn)
+
+# Training
+average_loss = 0.
+average_acc = 0.
+for step in range(num_steps):
+
+    # Iterate through the dataset
+    d = dataset_iter.next()
+
+    # Images
+    x_batch = d[0]
+    # Labels
+    y_batch = tf.cast(d[1], dtype=tf.int64)
+
+    # Compute the batch loss
+    batch_loss = loss_fn(logistic_regression, x_batch, y_batch)
+    average_loss += batch_loss
+    # Compute the batch accuracy
+    batch_accuracy = accuracy_fn(logistic_regression, x_batch, y_batch)
+    average_acc += batch_accuracy
+
+    if step == 0:
+        # Display the initial cost, before optimizing
+        print("Initial loss= {:.9f}".format(average_loss))
+
+    # Update the variables following gradients info
+    optimizer.apply_gradients(grad(logistic_regression, x_batch, y_batch))
+
+    # Display info
+    if (step + 1) % display_step == 0 or step == 0:
+        if step > 0:
+            average_loss /= display_step
+            average_acc /= display_step
+        print("Step:", '%04d' % (step + 1), " loss=",
+              "{:.9f}".format(average_loss), " accuracy=",
+              "{:.4f}".format(average_acc))
+        average_loss = 0.
+        average_acc = 0.
+
+# Evaluate model on the test image set
+testX = mnist.test.images
+testY = mnist.test.labels
+
+test_acc = accuracy_fn(logistic_regression, testX, testY)
+print("Testset Accuracy: {:.4f}".format(test_acc))

+ 55 - 0
tensorflow_v1/examples/2_BasicModels/nearest_neighbor.py

@@ -0,0 +1,55 @@
+'''
+A nearest neighbor learning algorithm example using TensorFlow library.
+This example is using the MNIST database of handwritten digits
+(http://yann.lecun.com/exdb/mnist/)
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+
+from __future__ import print_function
+
+import numpy as np
+import tensorflow as tf
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+# In this example, we limit mnist data
+Xtr, Ytr = mnist.train.next_batch(5000) #5000 for training (nn candidates)
+Xte, Yte = mnist.test.next_batch(200) #200 for testing
+
+# tf Graph Input
+xtr = tf.placeholder("float", [None, 784])
+xte = tf.placeholder("float", [784])
+
+# Nearest Neighbor calculation using L1 Distance
+# Calculate L1 Distance
+distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1)
+# Prediction: Get min distance index (Nearest neighbor)
+pred = tf.arg_min(distance, 0)
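+# Note: tf.arg_min is the older alias of tf.argmin; it returns the index of the
+# training sample with the smallest L1 distance to the test sample.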
+
+accuracy = 0.
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+# Start training
+with tf.Session() as sess:
+
+    # Run the initializer
+    sess.run(init)
+
+    # loop over test data
+    for i in range(len(Xte)):
+        # Get nearest neighbor
+        nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]})
+        # Get nearest neighbor class label and compare it to its true label
+        print("Test", i, "Prediction:", np.argmax(Ytr[nn_index]), \
+            "True Class:", np.argmax(Yte[i]))
+        # Calculate accuracy
+        if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):
+            accuracy += 1./len(Xte)
+    print("Done!")
+    print("Accuracy:", accuracy)

+ 77 - 0
tensorflow_v1/examples/2_BasicModels/random_forest.py

@@ -0,0 +1,77 @@
+""" Random Forest.
+
+Implement Random Forest algorithm with TensorFlow, and apply it to classify 
+handwritten digit images. This example is using the MNIST database of 
+handwritten digits as training samples (http://yann.lecun.com/exdb/mnist/).
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+
+from __future__ import print_function
+
+import tensorflow as tf
+from tensorflow.contrib.tensor_forest.python import tensor_forest
+from tensorflow.python.ops import resources
+
+# Ignore all GPUs, tf random forest does not benefit from it.
+import os
+os.environ["CUDA_VISIBLE_DEVICES"] = ""
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
+
+# Parameters
+num_steps = 500 # Total steps to train
+batch_size = 1024 # The number of samples per batch
+num_classes = 10 # The 10 digits
+num_features = 784 # Each image is 28x28 pixels
+num_trees = 10
+max_nodes = 1000
+
+# Input and Target data
+X = tf.placeholder(tf.float32, shape=[None, num_features])
+# For random forest, labels must be integers (the class id)
+Y = tf.placeholder(tf.int32, shape=[None])
+
+# Random Forest Parameters
+hparams = tensor_forest.ForestHParams(num_classes=num_classes,
+                                      num_features=num_features,
+                                      num_trees=num_trees,
+                                      max_nodes=max_nodes).fill()
+
+# Build the Random Forest
+forest_graph = tensor_forest.RandomForestGraphs(hparams)
+# Get training graph and loss
+train_op = forest_graph.training_graph(X, Y)
+loss_op = forest_graph.training_loss(X, Y)
+
+# Measure the accuracy
+infer_op, _, _ = forest_graph.inference_graph(X)
+correct_prediction = tf.equal(tf.argmax(infer_op, 1), tf.cast(Y, tf.int64))
+accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
+
+# Initialize the variables (i.e. assign their default value) and forest resources
+init_vars = tf.group(tf.global_variables_initializer(),
+    resources.initialize_resources(resources.shared_resources()))
+
+# Start TensorFlow session
+sess = tf.Session()
+
+# Run the initializer
+sess.run(init_vars)
+
+# Training
+for i in range(1, num_steps + 1):
+    # Prepare Data
+    # Get the next batch of MNIST data (images and their class labels)
+    batch_x, batch_y = mnist.train.next_batch(batch_size)
+    _, l = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y})
+    if i % 50 == 0 or i == 1:
+        acc = sess.run(accuracy_op, feed_dict={X: batch_x, Y: batch_y})
+        print('Step %i, Loss: %f, Acc: %f' % (i, l, acc))
+
+# Test Model
+test_x, test_y = mnist.test.images, mnist.test.labels
+print("Test Accuracy:", sess.run(accuracy_op, feed_dict={X: test_x, Y: test_y}))

+ 195 - 0
tensorflow_v1/examples/2_BasicModels/word2vec.py

@@ -0,0 +1,195 @@
+""" Word2Vec.
+
+Implement Word2Vec algorithm to compute vector representations of words.
+This example is using a small chunk of Wikipedia articles to train from.
+
+References:
+    - Mikolov, Tomas et al. "Efficient Estimation of Word Representations
+    in Vector Space.", 2013.
+
+Links:
+    - [Word2Vec] https://arxiv.org/pdf/1301.3781.pdf
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+from __future__ import division, print_function, absolute_import
+
+import collections
+import os
+import random
+import zipfile
+
+try:
+    from urllib.request import urlretrieve  # Python 3
+except ImportError:
+    from urllib import urlretrieve  # Python 2
+
+import numpy as np
+import tensorflow as tf
+
+# Training Parameters
+learning_rate = 0.1
+batch_size = 128
+num_steps = 3000000
+display_step = 10000
+eval_step = 200000
+
+# Evaluation Parameters
+eval_words = ['five', 'of', 'going', 'hardware', 'american', 'britain']
+
+# Word2Vec Parameters
+embedding_size = 200 # Dimension of the embedding vector
+max_vocabulary_size = 50000 # Total number of different words in the vocabulary
+min_occurrence = 10 # Remove all words that do not appear at least n times
+skip_window = 3 # How many words to consider left and right
+num_skips = 2 # How many times to reuse an input to generate a label
+num_sampled = 64 # Number of negative examples to sample
+
+
+# Download a small chunk of Wikipedia articles collection
+url = 'http://mattmahoney.net/dc/text8.zip'
+data_path = 'text8.zip'
+if not os.path.exists(data_path):
+    print("Downloading the dataset... (It may take some time)")
+    filename, _ = urlretrieve(url, data_path)
+    print("Done!")
+# Unzip the dataset file. Text has already been processed
+with zipfile.ZipFile(data_path) as f:
+    text_words = f.read(f.namelist()[0]).decode('utf-8').lower().split()
+
+# Build the dictionary and replace rare words with UNK token
+count = [('UNK', -1)]
+# Retrieve the most common words
+count.extend(collections.Counter(text_words).most_common(max_vocabulary_size - 1))
+# Remove samples with less than 'min_occurrence' occurrences
+for i in range(len(count) - 1, -1, -1):
+    if count[i][1] < min_occurrence:
+        count.pop(i)
+    else:
+        # The collection is ordered, so stop when 'min_occurrence' is reached
+        break
+# Compute the vocabulary size
+vocabulary_size = len(count)
+# Assign an id to each word
+word2id = dict()
+for i, (word, _) in enumerate(count):
+    word2id[word] = i
+
+data = list()
+unk_count = 0
+for word in text_words:
+    # Retrieve a word id, or assign it index 0 ('UNK') if not in dictionary
+    index = word2id.get(word, 0)
+    if index == 0:
+        unk_count += 1
+    data.append(index)
+count[0] = ('UNK', unk_count)
+id2word = dict(zip(word2id.values(), word2id.keys()))
+
+print("Words count:", len(text_words))
+print("Unique words:", len(set(text_words)))
+print("Vocabulary size:", vocabulary_size)
+print("Most common words:", count[:10])
+
+data_index = 0
+# Generate training batch for the skip-gram model
+def next_batch(batch_size, num_skips, skip_window):
+    global data_index
+    assert batch_size % num_skips == 0
+    assert num_skips <= 2 * skip_window
+    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
+    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
+    # get window size (words left and right + current one)
+    span = 2 * skip_window + 1
+    buffer = collections.deque(maxlen=span)
+    if data_index + span > len(data):
+        data_index = 0
+    buffer.extend(data[data_index:data_index + span])
+    data_index += span
+    for i in range(batch_size // num_skips):
+        context_words = [w for w in range(span) if w != skip_window]
+        words_to_use = random.sample(context_words, num_skips)
+        for j, context_word in enumerate(words_to_use):
+            batch[i * num_skips + j] = buffer[skip_window]
+            labels[i * num_skips + j, 0] = buffer[context_word]
+        if data_index == len(data):
+            buffer.extend(data[0:span])
+            data_index = span
+        else:
+            buffer.append(data[data_index])
+            data_index += 1
+    # Backtrack a little bit to avoid skipping words in the end of a batch
+    data_index = (data_index + len(data) - span) % len(data)
+    return batch, labels
+
+
+# Input data
+X = tf.placeholder(tf.int32, shape=[None])
+# Input label
+Y = tf.placeholder(tf.int32, shape=[None, 1])
+
+# Ensure the following ops & var are assigned on CPU
+# (some ops are not compatible on GPU)
+with tf.device('/cpu:0'):
+    # Create the embedding variable (each row represent a word embedding vector)
+    embedding = tf.Variable(tf.random_normal([vocabulary_size, embedding_size]))
+    # Lookup the corresponding embedding vectors for each sample in X
+    X_embed = tf.nn.embedding_lookup(embedding, X)
+
+    # Construct the variables for the NCE loss
+    nce_weights = tf.Variable(tf.random_normal([vocabulary_size, embedding_size]))
+    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
+
+# Compute the average NCE loss for the batch
+loss_op = tf.reduce_mean(
+    tf.nn.nce_loss(weights=nce_weights,
+                   biases=nce_biases,
+                   labels=Y,
+                   inputs=X_embed,
+                   num_sampled=num_sampled,
+                   num_classes=vocabulary_size))
+
+# Define the optimizer
+optimizer = tf.train.GradientDescentOptimizer(learning_rate)
+train_op = optimizer.minimize(loss_op)
+
+# Evaluation
+# Compute the cosine similarity between input data embedding and every embedding vectors
+X_embed_norm = X_embed / tf.sqrt(tf.reduce_sum(tf.square(X_embed)))
+embedding_norm = embedding / tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keepdims=True))
+cosine_sim_op = tf.matmul(X_embed_norm, embedding_norm, transpose_b=True)
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+with tf.Session() as sess:
+
+    # Run the initializer
+    sess.run(init)
+
+    # Testing data
+    x_test = np.array([word2id[w] for w in eval_words])
+
+    average_loss = 0
+    for step in range(1, num_steps + 1):
+        # Get a new batch of data
+        batch_x, batch_y = next_batch(batch_size, num_skips, skip_window)
+        # Run training op
+        _, loss = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y})
+        average_loss += loss
+
+        if step % display_step == 0 or step == 1:
+            if step > 1:
+                average_loss /= display_step
+            print("Step " + str(step) + ", Average Loss= " + \
+                  "{:.4f}".format(average_loss))
+            average_loss = 0
+
+        # Evaluation
+        if step % eval_step == 0 or step == 1:
+            print("Evaluation...")
+            sim = sess.run(cosine_sim_op, feed_dict={X: x_test})
+            for i in range(len(eval_words)):
+                top_k = 8  # number of nearest neighbors
+                nearest = (-sim[i, :]).argsort()[1:top_k + 1]
+                log_str = '"%s" nearest neighbors:' % eval_words[i]
+                for k in range(top_k):
+                    log_str = '%s %s,' % (log_str, id2word[nearest[k]])
+                print(log_str)

+ 142 - 0
tensorflow_v1/examples/3_NeuralNetworks/autoencoder.py

@@ -0,0 +1,142 @@
+""" Auto Encoder Example.
+
+Build a 2 layers auto-encoder with TensorFlow to compress images to a
+lower latent space and then reconstruct them.
+
+References:
+    Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner. "Gradient-based
+    learning applied to document recognition." Proceedings of the IEEE,
+    86(11):2278-2324, November 1998.
+
+Links:
+    [MNIST Dataset] http://yann.lecun.com/exdb/mnist/
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+from __future__ import division, print_function, absolute_import
+
+import tensorflow as tf
+import numpy as np
+import matplotlib.pyplot as plt
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+# Training Parameters
+learning_rate = 0.01
+num_steps = 30000
+batch_size = 256
+
+display_step = 1000
+examples_to_show = 10
+
+# Network Parameters
+num_hidden_1 = 256 # 1st layer num features
+num_hidden_2 = 128 # 2nd layer num features (the latent dim)
+num_input = 784 # MNIST data input (img shape: 28*28)
+
+# tf Graph input (only pictures)
+X = tf.placeholder("float", [None, num_input])
+
+weights = {
+    'encoder_h1': tf.Variable(tf.random_normal([num_input, num_hidden_1])),
+    'encoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_hidden_2])),
+    'decoder_h1': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_1])),
+    'decoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_input])),
+}
+biases = {
+    'encoder_b1': tf.Variable(tf.random_normal([num_hidden_1])),
+    'encoder_b2': tf.Variable(tf.random_normal([num_hidden_2])),
+    'decoder_b1': tf.Variable(tf.random_normal([num_hidden_1])),
+    'decoder_b2': tf.Variable(tf.random_normal([num_input])),
+}
+
+# Building the encoder
+def encoder(x):
+    # Encoder Hidden layer with sigmoid activation #1
+    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
+                                   biases['encoder_b1']))
+    # Encoder Hidden layer with sigmoid activation #2
+    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
+                                   biases['encoder_b2']))
+    return layer_2
+
+
+# Building the decoder
+def decoder(x):
+    # Decoder Hidden layer with sigmoid activation #1
+    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
+                                   biases['decoder_b1']))
+    # Decoder Hidden layer with sigmoid activation #2
+    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
+                                   biases['decoder_b2']))
+    return layer_2
+
+# Construct model
+encoder_op = encoder(X)
+decoder_op = decoder(encoder_op)
+
+# Prediction
+y_pred = decoder_op
+# Targets (Labels) are the input data.
+y_true = X
+
+# Define loss and optimizer, minimize the squared error
+loss = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
+optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss)
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+# Start Training
+# Start a new TF session
+with tf.Session() as sess:
+
+    # Run the initializer
+    sess.run(init)
+
+    # Training
+    for i in range(1, num_steps+1):
+        # Prepare Data
+        # Get the next batch of MNIST data (only images are needed, not labels)
+        batch_x, _ = mnist.train.next_batch(batch_size)
+
+        # Run optimization op (backprop) and cost op (to get loss value)
+        _, l = sess.run([optimizer, loss], feed_dict={X: batch_x})
+        # Display logs per step
+        if i % display_step == 0 or i == 1:
+            print('Step %i: Minibatch Loss: %f' % (i, l))
+
+    # Testing
+    # Encode and decode images from test set and visualize their reconstruction.
+    n = 4
+    canvas_orig = np.empty((28 * n, 28 * n))
+    canvas_recon = np.empty((28 * n, 28 * n))
+    for i in range(n):
+        # MNIST test set
+        batch_x, _ = mnist.test.next_batch(n)
+        # Encode and decode the digit image
+        g = sess.run(decoder_op, feed_dict={X: batch_x})
+
+        # Display original images
+        for j in range(n):
+            # Draw the original digits
+            canvas_orig[i * 28:(i + 1) * 28, j * 28:(j + 1) * 28] = \
+                batch_x[j].reshape([28, 28])
+        # Display reconstructed images
+        for j in range(n):
+            # Draw the reconstructed digits
+            canvas_recon[i * 28:(i + 1) * 28, j * 28:(j + 1) * 28] = \
+                g[j].reshape([28, 28])
+
+    print("Original Images")
+    plt.figure(figsize=(n, n))
+    plt.imshow(canvas_orig, origin="upper", cmap="gray")
+    plt.show()
+
+    print("Reconstructed Images")
+    plt.figure(figsize=(n, n))
+    plt.imshow(canvas_recon, origin="upper", cmap="gray")
+    plt.show()

+ 126 - 0
tensorflow_v1/examples/3_NeuralNetworks/bidirectional_rnn.py

@@ -0,0 +1,126 @@
+""" Bi-directional Recurrent Neural Network.
+
+A Bi-directional Recurrent Neural Network (LSTM) implementation example using 
+TensorFlow library. This example is using the MNIST database of handwritten 
+digits (http://yann.lecun.com/exdb/mnist/)
+
+Links:
+    [Long Short Term Memory](http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf)
+    [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+
+from __future__ import print_function
+
+import tensorflow as tf
+from tensorflow.contrib import rnn
+import numpy as np
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+'''
+To classify images using a bidirectional recurrent neural network, we consider
+every image row as a sequence of pixels. Because MNIST image shape is 28*28px,
+we will then handle 28 sequences of 28 steps for every sample.
+'''
+
+# Training Parameters
+learning_rate = 0.001
+training_steps = 10000
+batch_size = 128
+display_step = 200
+
+# Network Parameters
+num_input = 28 # MNIST data input (img shape: 28*28)
+timesteps = 28 # timesteps
+num_hidden = 128 # hidden layer num of features
+num_classes = 10 # MNIST total classes (0-9 digits)
+
+# tf Graph input
+X = tf.placeholder("float", [None, timesteps, num_input])
+Y = tf.placeholder("float", [None, num_classes])
+
+# Define weights
+weights = {
+    # Hidden layer weights => 2*n_hidden because of forward + backward cells
+    'out': tf.Variable(tf.random_normal([2*num_hidden, num_classes]))
+}
+biases = {
+    'out': tf.Variable(tf.random_normal([num_classes]))
+}
+
+
+def BiRNN(x, weights, biases):
+
+    # Prepare data shape to match `rnn` function requirements
+    # Current data input shape: (batch_size, timesteps, n_input)
+    # Required shape: 'timesteps' tensors list of shape (batch_size, num_input)
+
+    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, num_input)
+    x = tf.unstack(x, timesteps, 1)
+
+    # Define lstm cells with tensorflow
+    # Forward direction cell
+    lstm_fw_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
+    # Backward direction cell
+    lstm_bw_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
+
+    # Get lstm cell output
+    try:
+        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
+                                              dtype=tf.float32)
+    except Exception: # Old TensorFlow version only returns outputs not states
+        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
+                                        dtype=tf.float32)
+
+    # Linear activation, using rnn inner loop last output
+    return tf.matmul(outputs[-1], weights['out']) + biases['out']
+
+logits = BiRNN(X, weights, biases)
+prediction = tf.nn.softmax(logits)
+
+# Define loss and optimizer
+loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
+    logits=logits, labels=Y))
+optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
+train_op = optimizer.minimize(loss_op)
+
+# Evaluate model (with test logits, for dropout to be disabled)
+correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
+accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+# Start training
+with tf.Session() as sess:
+
+    # Run the initializer
+    sess.run(init)
+
+    for step in range(1, training_steps+1):
+        batch_x, batch_y = mnist.train.next_batch(batch_size)
+        # Reshape data to get 28 seq of 28 elements
+        batch_x = batch_x.reshape((batch_size, timesteps, num_input))
+        # Run optimization op (backprop)
+        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
+        if step % display_step == 0 or step == 1:
+            # Calculate batch loss and accuracy
+            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
+                                                                 Y: batch_y})
+            print("Step " + str(step) + ", Minibatch Loss= " + \
+                  "{:.4f}".format(loss) + ", Training Accuracy= " + \
+                  "{:.3f}".format(acc))
+
+    print("Optimization Finished!")
+
+    # Calculate accuracy for 128 mnist test images
+    test_len = 128
+    test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input))
+    test_label = mnist.test.labels[:test_len]
+    print("Testing Accuracy:", \
+        sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))

+ 125 - 0
tensorflow_v1/examples/3_NeuralNetworks/convolutional_network.py

@@ -0,0 +1,125 @@
+""" Convolutional Neural Network.
+
+Build and train a convolutional neural network with TensorFlow.
+This example is using the MNIST database of handwritten digits
+(http://yann.lecun.com/exdb/mnist/)
+
+This example is using TensorFlow layers API, see 'convolutional_network_raw' 
+example for a raw implementation with variables.
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+from __future__ import division, print_function, absolute_import
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
+
+import tensorflow as tf
+
+# Training Parameters
+learning_rate = 0.001
+num_steps = 2000
+batch_size = 128
+
+# Network Parameters
+num_input = 784 # MNIST data input (img shape: 28*28)
+num_classes = 10 # MNIST total classes (0-9 digits)
+dropout = 0.25 # Dropout, probability to drop a unit
+
+
+# Create the neural network
+def conv_net(x_dict, n_classes, dropout, reuse, is_training):
+    # Define a scope for reusing the variables
+    with tf.variable_scope('ConvNet', reuse=reuse):
+        # TF Estimator input is a dict, in case of multiple inputs
+        x = x_dict['images']
+
+        # MNIST data input is a 1-D vector of 784 features (28*28 pixels)
+        # Reshape to match picture format [Height x Width x Channel]
+        # Tensor input become 4-D: [Batch Size, Height, Width, Channel]
+        x = tf.reshape(x, shape=[-1, 28, 28, 1])
+
+        # Convolution Layer with 32 filters and a kernel size of 5
+        conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
+        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
+        conv1 = tf.layers.max_pooling2d(conv1, 2, 2)
+
+        # Convolution Layer with 64 filters and a kernel size of 3
+        conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
+        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
+        conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
+
+        # Flatten the data to a 1-D vector for the fully connected layer
+        fc1 = tf.contrib.layers.flatten(conv2)
+
+        # Fully connected layer
+        fc1 = tf.layers.dense(fc1, 1024)
+        # Apply Dropout (if is_training is False, dropout is not applied)
+        fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
+
+        # Output layer, class prediction
+        out = tf.layers.dense(fc1, n_classes)
+
+    return out
+
+
+# Define the model function (following TF Estimator Template)
+def model_fn(features, labels, mode):
+    # Build the neural network
+    # Because Dropout has a different behavior at training and prediction time, we
+    # need to create 2 distinct computation graphs that still share the same weights.
+    logits_train = conv_net(features, num_classes, dropout, reuse=False,
+                            is_training=True)
+    logits_test = conv_net(features, num_classes, dropout, reuse=True,
+                           is_training=False)
+
+    # Predictions
+    pred_classes = tf.argmax(logits_test, axis=1)
+    pred_probas = tf.nn.softmax(logits_test)
+
+    # If prediction mode, early return
+    if mode == tf.estimator.ModeKeys.PREDICT:
+        return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)
+
+    # Define loss and optimizer
+    loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
+        logits=logits_train, labels=tf.cast(labels, dtype=tf.int32)))
+    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
+    train_op = optimizer.minimize(loss_op,
+                                  global_step=tf.train.get_global_step())
+
+    # Evaluate the accuracy of the model
+    acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)
+
+    # TF Estimators requires to return a EstimatorSpec, that specify
+    # the different ops for training, evaluating, ...
+    estim_specs = tf.estimator.EstimatorSpec(
+        mode=mode,
+        predictions=pred_classes,
+        loss=loss_op,
+        train_op=train_op,
+        eval_metric_ops={'accuracy': acc_op})
+
+    return estim_specs
+
+# Build the Estimator
+model = tf.estimator.Estimator(model_fn)
+
+# Define the input function for training
+input_fn = tf.estimator.inputs.numpy_input_fn(
+    x={'images': mnist.train.images}, y=mnist.train.labels,
+    batch_size=batch_size, num_epochs=None, shuffle=True)
+# Train the Model
+model.train(input_fn, steps=num_steps)
+
+# Evaluate the Model
+# Define the input function for evaluating
+input_fn = tf.estimator.inputs.numpy_input_fn(
+    x={'images': mnist.test.images}, y=mnist.test.labels,
+    batch_size=batch_size, shuffle=False)
+# Use the Estimator 'evaluate' method
+e = model.evaluate(input_fn)
+
+print("Testing Accuracy:", e['accuracy'])

+ 141 - 0
tensorflow_v1/examples/3_NeuralNetworks/convolutional_network_raw.py

@@ -0,0 +1,141 @@
+""" Convolutional Neural Network.
+
+Build and train a convolutional neural network with TensorFlow.
+This example is using the MNIST database of handwritten digits
+(http://yann.lecun.com/exdb/mnist/)
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+
+from __future__ import division, print_function, absolute_import
+
+import tensorflow as tf
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+# Training Parameters
+learning_rate = 0.001
+num_steps = 200
+batch_size = 128
+display_step = 10
+
+# Network Parameters
+num_input = 784 # MNIST data input (img shape: 28*28)
+num_classes = 10 # MNIST total classes (0-9 digits)
+dropout = 0.75 # Dropout, probability to keep units
+
+# tf Graph input
+X = tf.placeholder(tf.float32, [None, num_input])
+Y = tf.placeholder(tf.float32, [None, num_classes])
+keep_prob = tf.placeholder(tf.float32) # dropout (keep probability)
+
+
+# Create some wrappers for simplicity
+def conv2d(x, W, b, strides=1):
+    # Conv2D wrapper, with bias and relu activation
+    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
+    x = tf.nn.bias_add(x, b)
+    return tf.nn.relu(x)
+
+
+def maxpool2d(x, k=2):
+    # MaxPool2D wrapper
+    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
+                          padding='SAME')
+
+
+# Create model
+def conv_net(x, weights, biases, dropout):
+    # MNIST data input is a 1-D vector of 784 features (28*28 pixels)
+    # Reshape to match picture format [Height x Width x Channel]
+    # Tensor input become 4-D: [Batch Size, Height, Width, Channel]
+    x = tf.reshape(x, shape=[-1, 28, 28, 1])
+
+    # Convolution Layer
+    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
+    # Max Pooling (down-sampling)
+    conv1 = maxpool2d(conv1, k=2)
+
+    # Convolution Layer
+    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
+    # Max Pooling (down-sampling)
+    conv2 = maxpool2d(conv2, k=2)
+
+    # Fully connected layer
+    # Reshape conv2 output to fit fully connected layer input
+    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
+    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
+    fc1 = tf.nn.relu(fc1)
+    # Apply Dropout
+    fc1 = tf.nn.dropout(fc1, dropout)
+
+    # Output, class prediction
+    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
+    return out
+
+# Store layers weight & bias
+weights = {
+    # 5x5 conv, 1 input, 32 outputs
+    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
+    # 5x5 conv, 32 inputs, 64 outputs
+    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
+    # fully connected, 7*7*64 inputs, 1024 outputs
+    'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),
+    # 1024 inputs, 10 outputs (class prediction)
+    'out': tf.Variable(tf.random_normal([1024, num_classes]))
+}
+
+biases = {
+    'bc1': tf.Variable(tf.random_normal([32])),
+    'bc2': tf.Variable(tf.random_normal([64])),
+    'bd1': tf.Variable(tf.random_normal([1024])),
+    'out': tf.Variable(tf.random_normal([num_classes]))
+}
+
+# Construct model
+logits = conv_net(X, weights, biases, keep_prob)
+prediction = tf.nn.softmax(logits)
+
+# Define loss and optimizer
+loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
+    logits=logits, labels=Y))
+optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
+train_op = optimizer.minimize(loss_op)
+
+
+# Evaluate model
+correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
+accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+# Start training
+with tf.Session() as sess:
+
+    # Run the initializer
+    sess.run(init)
+
+    for step in range(1, num_steps+1):
+        batch_x, batch_y = mnist.train.next_batch(batch_size)
+        # Run optimization op (backprop)
+        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.8})
+        if step % display_step == 0 or step == 1:
+            # Calculate batch loss and accuracy
+            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
+                                                                 Y: batch_y,
+                                                                 keep_prob: 1.0})
+            print("Step " + str(step) + ", Minibatch Loss= " + \
+                  "{:.4f}".format(loss) + ", Training Accuracy= " + \
+                  "{:.3f}".format(acc))
+
+    print("Optimization Finished!")
+
+    # Calculate accuracy for 256 MNIST test images
+    print("Testing Accuracy:", \
+        sess.run(accuracy, feed_dict={X: mnist.test.images[:256],
+                                      Y: mnist.test.labels[:256],
+                                      keep_prob: 1.0}))

+ 167 - 0
tensorflow_v1/examples/3_NeuralNetworks/dcgan.py

@@ -0,0 +1,167 @@
+""" Deep Convolutional Generative Adversarial Network (DCGAN).
+
+Using deep convolutional generative adversarial networks (DCGAN) to generate
+digit images from a noise distribution.
+
+References:
+    - Unsupervised representation learning with deep convolutional generative
+    adversarial networks. A Radford, L Metz, S Chintala. arXiv:1511.06434.
+
+Links:
+    - [DCGAN Paper](https://arxiv.org/abs/1511.06434).
+    - [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+
+from __future__ import division, print_function, absolute_import
+
+import matplotlib.pyplot as plt
+import numpy as np
+import tensorflow as tf
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+# Training Params
+num_steps = 20000
+batch_size = 32
+
+# Network Params
+image_dim = 784 # 28*28 pixels * 1 channel
+gen_hidden_dim = 256
+disc_hidden_dim = 256
+noise_dim = 200 # Noise data points
+
+
+# Generator Network
+# Input: Noise, Output: Image
+def generator(x, reuse=False):
+    with tf.variable_scope('Generator', reuse=reuse):
+        # TensorFlow Layers automatically create variables and calculate their
+        # shape, based on the input.
+        x = tf.layers.dense(x, units=6 * 6 * 128)
+        x = tf.nn.tanh(x)
+        # Reshape to a 4-D array of images: (batch, height, width, channels)
+        # New shape: (batch, 6, 6, 128)
+        x = tf.reshape(x, shape=[-1, 6, 6, 128])
+        # Deconvolution, image shape: (batch, 14, 14, 64)
+        x = tf.layers.conv2d_transpose(x, 64, 4, strides=2)
+        # Deconvolution, image shape: (batch, 28, 28, 1)
+        x = tf.layers.conv2d_transpose(x, 1, 2, strides=2)
+        # Apply sigmoid to clip values between 0 and 1
+        x = tf.nn.sigmoid(x)
+        return x
+
+
+# Discriminator Network
+# Input: Image, Output: Prediction Real/Fake Image
+def discriminator(x, reuse=False):
+    with tf.variable_scope('Discriminator', reuse=reuse):
+        # Typical convolutional neural network to classify images.
+        x = tf.layers.conv2d(x, 64, 5)
+        x = tf.nn.tanh(x)
+        x = tf.layers.average_pooling2d(x, 2, 2)
+        x = tf.layers.conv2d(x, 128, 5)
+        x = tf.nn.tanh(x)
+        x = tf.layers.average_pooling2d(x, 2, 2)
+        x = tf.contrib.layers.flatten(x)
+        x = tf.layers.dense(x, 1024)
+        x = tf.nn.tanh(x)
+        # Output 2 classes: Real and Fake images
+        x = tf.layers.dense(x, 2)
+    return x
+
+# Build Networks
+# Network Inputs
+noise_input = tf.placeholder(tf.float32, shape=[None, noise_dim])
+real_image_input = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
+
+# Build Generator Network
+gen_sample = generator(noise_input)
+
+# Build 2 Discriminator Networks (one from real image input, one from generated samples)
+disc_real = discriminator(real_image_input)
+disc_fake = discriminator(gen_sample, reuse=True)
+disc_concat = tf.concat([disc_real, disc_fake], axis=0)
+
+# Build the stacked generator/discriminator
+stacked_gan = discriminator(gen_sample, reuse=True)
+
+# Build Targets (real or fake images)
+disc_target = tf.placeholder(tf.int32, shape=[None])
+gen_target = tf.placeholder(tf.int32, shape=[None])
+
+# Build Loss
+disc_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
+    logits=disc_concat, labels=disc_target))
+gen_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
+    logits=stacked_gan, labels=gen_target))
+
+# Build Optimizers
+optimizer_gen = tf.train.AdamOptimizer(learning_rate=0.001)
+optimizer_disc = tf.train.AdamOptimizer(learning_rate=0.001)
+
+# Training Variables for each optimizer
+# By default in TensorFlow, all variables are updated by each optimizer, so we
+# need to specify, for each optimizer, the exact variables it should update.
+# Generator Network Variables
+gen_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Generator')
+# Discriminator Network Variables
+disc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Discriminator')
+
+# Create training operations
+train_gen = optimizer_gen.minimize(gen_loss, var_list=gen_vars)
+train_disc = optimizer_disc.minimize(disc_loss, var_list=disc_vars)
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+# Start training
+with tf.Session() as sess:
+
+    # Run the initializer
+    sess.run(init)
+
+    for i in range(1, num_steps+1):
+
+        # Prepare Input Data
+        # Get the next batch of MNIST data (only images are needed, not labels)
+        batch_x, _ = mnist.train.next_batch(batch_size)
+        batch_x = np.reshape(batch_x, newshape=[-1, 28, 28, 1])
+        # Generate noise to feed to the generator
+        z = np.random.uniform(-1., 1., size=[batch_size, noise_dim])
+
+        # Prepare Targets (Real image: 1, Fake image: 0)
+        # The first half of data fed to the discriminator are real images,
+        # the other half are fake images (coming from the generator).
+        batch_disc_y = np.concatenate(
+            [np.ones([batch_size]), np.zeros([batch_size])], axis=0)
+        # Generator tries to fool the discriminator, thus targets are 1.
+        batch_gen_y = np.ones([batch_size])
+
+        # Training
+        feed_dict = {real_image_input: batch_x, noise_input: z,
+                     disc_target: batch_disc_y, gen_target: batch_gen_y}
+        _, _, gl, dl = sess.run([train_gen, train_disc, gen_loss, disc_loss],
+                                feed_dict=feed_dict)
+        if i % 100 == 0 or i == 1:
+            print('Step %i: Generator Loss: %f, Discriminator Loss: %f' % (i, gl, dl))
+
+    # Generate images from noise, using the generator network.
+    f, a = plt.subplots(4, 10, figsize=(10, 4))
+    for i in range(10):
+        # Noise input.
+        z = np.random.uniform(-1., 1., size=[4, noise_dim])
+        g = sess.run(gen_sample, feed_dict={noise_input: z})
+        for j in range(4):
+            # Generate image from noise. Extend to 3 channels for matplot figure.
+            img = np.reshape(np.repeat(g[j][:, :, np.newaxis], 3, axis=2),
+                             newshape=(28, 28, 3))
+            a[j][i].imshow(img)
+
+    f.show()
+    plt.draw()
+    plt.waitforbuttonpress()

+ 193 - 0
tensorflow_v1/examples/3_NeuralNetworks/dynamic_rnn.py

@@ -0,0 +1,193 @@
+""" Dynamic Recurrent Neural Network.
+
+TensorFlow implementation of a Recurrent Neural Network (LSTM) that performs
+dynamic computation over sequences with variable length. This example is using
+a toy dataset to classify linear sequences. The generated sequences have
+variable length.
+
+Links:
+    [Long Short Term Memory](http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf)
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+
+from __future__ import print_function
+
+import tensorflow as tf
+import random
+
+
+# ====================
+#  TOY DATA GENERATOR
+# ====================
+class ToySequenceData(object):
+    """ Generate sequence of data with dynamic length.
+    This class generates samples for training:
+    - Class 0: linear sequences (i.e. [0, 1, 2, 3,...])
+    - Class 1: random sequences (i.e. [1, 3, 10, 7,...])
+
+    NOTICE:
+    We have to pad each sequence to reach 'max_seq_len' for TensorFlow
+    consistency (we cannot feed a numpy array with inconsistent
+    dimensions). The dynamic calculation is then performed thanks to the
+    'seqlen' attribute, which records the actual length of every sequence.
+    """
+    def __init__(self, n_samples=1000, max_seq_len=20, min_seq_len=3,
+                 max_value=1000):
+        self.data = []
+        self.labels = []
+        self.seqlen = []
+        for i in range(n_samples):
+            # Random sequence length
+            seq_len = random.randint(min_seq_len, max_seq_len)
+            # Monitor sequence length for TensorFlow dynamic calculation
+            self.seqlen.append(seq_len)
+            # Add a random or linear int sequence (50% prob)
+            if random.random() < .5:
+                # Generate a linear sequence
+                rand_start = random.randint(0, max_value - seq_len)
+                s = [[float(i)/max_value] for i in
+                     range(rand_start, rand_start + seq_len)]
+                # Pad sequence for dimension consistency
+                s += [[0.] for i in range(max_seq_len - seq_len)]
+                self.data.append(s)
+                self.labels.append([1., 0.])
+            else:
+                # Generate a random sequence
+                s = [[float(random.randint(0, max_value))/max_value]
+                     for i in range(seq_len)]
+                # Pad sequence for dimension consistency
+                s += [[0.] for i in range(max_seq_len - seq_len)]
+                self.data.append(s)
+                self.labels.append([0., 1.])
+        self.batch_id = 0
+
+    def next(self, batch_size):
+        """ Return a batch of data. When dataset end is reached, start over.
+        """
+        if self.batch_id == len(self.data):
+            self.batch_id = 0
+        batch_data = (self.data[self.batch_id:min(self.batch_id +
+                                                  batch_size, len(self.data))])
+        batch_labels = (self.labels[self.batch_id:min(self.batch_id +
+                                                  batch_size, len(self.data))])
+        batch_seqlen = (self.seqlen[self.batch_id:min(self.batch_id +
+                                                  batch_size, len(self.data))])
+        self.batch_id = min(self.batch_id + batch_size, len(self.data))
+        return batch_data, batch_labels, batch_seqlen
+
+
+# ==========
+#   MODEL
+# ==========
+
+# Parameters
+learning_rate = 0.01
+training_steps = 10000
+batch_size = 128
+display_step = 200
+
+# Network Parameters
+seq_max_len = 20 # Sequence max length
+n_hidden = 64 # hidden layer num of features
+n_classes = 2 # linear sequence or not
+
+trainset = ToySequenceData(n_samples=1000, max_seq_len=seq_max_len)
+testset = ToySequenceData(n_samples=500, max_seq_len=seq_max_len)
+
+# tf Graph input
+x = tf.placeholder("float", [None, seq_max_len, 1])
+y = tf.placeholder("float", [None, n_classes])
+# A placeholder for indicating each sequence length
+seqlen = tf.placeholder(tf.int32, [None])
+
+# Define weights
+weights = {
+    'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
+}
+biases = {
+    'out': tf.Variable(tf.random_normal([n_classes]))
+}
+
+
+def dynamicRNN(x, seqlen, weights, biases):
+
+    # Prepare data shape to match `rnn` function requirements
+    # Current data input shape: (batch_size, n_steps, n_input)
+    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
+    
+    # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)
+    x = tf.unstack(x, seq_max_len, 1)
+
+    # Define a lstm cell with tensorflow
+    lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden)
+
+    # Get lstm cell output, providing 'sequence_length' will perform dynamic
+    # calculation.
+    outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x, dtype=tf.float32,
+                                sequence_length=seqlen)
+
+    # When performing dynamic calculation, we must retrieve the last
+    # dynamically computed output, i.e., if a sequence length is 10, we need
+    # to retrieve the 10th output.
+    # However, TensorFlow doesn't support advanced indexing yet, so we build
+    # a custom indexing scheme that, for each sample in the batch, looks up
+    # its length and gathers the corresponding output.
+
+    # 'outputs' is a list of outputs at every timestep; we stack them into a
+    # Tensor and transpose back to shape [batch_size, seq_max_len, n_hidden].
+    outputs = tf.stack(outputs)
+    outputs = tf.transpose(outputs, [1, 0, 2])
+
+    # Hack to build the indexing and retrieve the right output.
+    batch_size = tf.shape(outputs)[0]
+    # Start indices for each sample
+    index = tf.range(0, batch_size) * seq_max_len + (seqlen - 1)
+    # Indexing
+    outputs = tf.gather(tf.reshape(outputs, [-1, n_hidden]), index)
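+    # Illustration: after flattening, row (b * seq_max_len + t) holds the
+    # output of sample b at timestep t. For example, with seq_max_len = 20
+    # and seqlen = [5, 20], 'index' is [4, 39]: the 5th output of sample 0
+    # and the 20th output of sample 1.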
+
+    # Linear activation, using outputs computed above
+    return tf.matmul(outputs, weights['out']) + biases['out']
+
+pred = dynamicRNN(x, seqlen, weights, biases)
+
+# Define loss and optimizer
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
+optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
+
+# Evaluate model
+correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
+accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+# Start training
+with tf.Session() as sess:
+
+    # Run the initializer
+    sess.run(init)
+
+    for step in range(1, training_steps + 1):
+        batch_x, batch_y, batch_seqlen = trainset.next(batch_size)
+        # Run optimization op (backprop)
+        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
+                                       seqlen: batch_seqlen})
+        if step % display_step == 0 or step == 1:
+            # Calculate batch accuracy & loss
+            acc, loss = sess.run([accuracy, cost], feed_dict={x: batch_x, y: batch_y,
+                                                seqlen: batch_seqlen})
+            print("Step " + str(step*batch_size) + ", Minibatch Loss= " + \
+                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
+                  "{:.5f}".format(acc))
+
+    print("Optimization Finished!")
+
+    # Calculate accuracy
+    test_data = testset.data
+    test_label = testset.labels
+    test_seqlen = testset.seqlen
+    print("Testing Accuracy:", \
+        sess.run(accuracy, feed_dict={x: test_data, y: test_label,
+                                      seqlen: test_seqlen}))

+ 157 - 0
tensorflow_v1/examples/3_NeuralNetworks/gan.py

@@ -0,0 +1,157 @@
+""" Generative Adversarial Networks (GAN).
+
+Using generative adversarial networks (GAN) to generate digit images from a
+noise distribution.
+
+References:
+    - Generative adversarial nets. I Goodfellow, J Pouget-Abadie, M Mirza,
+    B Xu, D Warde-Farley, S Ozair, Y. Bengio. Advances in neural information
+    processing systems, 2672-2680.
+    - Understanding the difficulty of training deep feedforward neural networks.
+    X Glorot, Y Bengio. Aistats 9, 249-256
+
+Links:
+    - [GAN Paper](https://arxiv.org/pdf/1406.2661.pdf).
+    - [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
+    - [Xavier Glorot Init](www.cs.cmu.edu/~bhiksha/courses/deeplearning/Fall.../AISTATS2010_Glorot.pdf).
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+
+from __future__ import division, print_function, absolute_import
+
+import matplotlib.pyplot as plt
+import numpy as np
+import tensorflow as tf
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+# Training Params
+num_steps = 100000
+batch_size = 128
+learning_rate = 0.0002
+
+# Network Params
+image_dim = 784 # 28*28 pixels
+gen_hidden_dim = 256
+disc_hidden_dim = 256
+noise_dim = 100 # Noise data points
+
+# A custom initialization (see Xavier Glorot init)
+def glorot_init(shape):
+    return tf.random_normal(shape=shape, stddev=1. / tf.sqrt(shape[0] / 2.))
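+# For reference: the stddev above works out to sqrt(2 / fan_in), with
+# fan_in = shape[0] -- a fan-in based scaling in the spirit of the Glorot
+# initialization referenced in the docstring.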
+
+# Store layers weight & bias
+weights = {
+    'gen_hidden1': tf.Variable(glorot_init([noise_dim, gen_hidden_dim])),
+    'gen_out': tf.Variable(glorot_init([gen_hidden_dim, image_dim])),
+    'disc_hidden1': tf.Variable(glorot_init([image_dim, disc_hidden_dim])),
+    'disc_out': tf.Variable(glorot_init([disc_hidden_dim, 1])),
+}
+biases = {
+    'gen_hidden1': tf.Variable(tf.zeros([gen_hidden_dim])),
+    'gen_out': tf.Variable(tf.zeros([image_dim])),
+    'disc_hidden1': tf.Variable(tf.zeros([disc_hidden_dim])),
+    'disc_out': tf.Variable(tf.zeros([1])),
+}
+
+
+# Generator
+def generator(x):
+    hidden_layer = tf.matmul(x, weights['gen_hidden1'])
+    hidden_layer = tf.add(hidden_layer, biases['gen_hidden1'])
+    hidden_layer = tf.nn.relu(hidden_layer)
+    out_layer = tf.matmul(hidden_layer, weights['gen_out'])
+    out_layer = tf.add(out_layer, biases['gen_out'])
+    out_layer = tf.nn.sigmoid(out_layer)
+    return out_layer
+
+
+# Discriminator
+def discriminator(x):
+    hidden_layer = tf.matmul(x, weights['disc_hidden1'])
+    hidden_layer = tf.add(hidden_layer, biases['disc_hidden1'])
+    hidden_layer = tf.nn.relu(hidden_layer)
+    out_layer = tf.matmul(hidden_layer, weights['disc_out'])
+    out_layer = tf.add(out_layer, biases['disc_out'])
+    out_layer = tf.nn.sigmoid(out_layer)
+    return out_layer
+
+# Build Networks
+# Network Inputs
+gen_input = tf.placeholder(tf.float32, shape=[None, noise_dim], name='input_noise')
+disc_input = tf.placeholder(tf.float32, shape=[None, image_dim], name='disc_input')
+
+# Build Generator Network
+gen_sample = generator(gen_input)
+
+# Build 2 Discriminator Networks (one from real image input, one from generated samples)
+disc_real = discriminator(disc_input)
+disc_fake = discriminator(gen_sample)
+
+# Build Loss
+gen_loss = -tf.reduce_mean(tf.log(disc_fake))
+disc_loss = -tf.reduce_mean(tf.log(disc_real) + tf.log(1. - disc_fake))
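+# Note: the discriminator loss above is the standard GAN objective
+# -E[log D(x)] - E[log(1 - D(G(z)))], while the generator loss uses the
+# non-saturating heuristic -E[log D(G(z))] instead of E[log(1 - D(G(z)))],
+# which gives stronger gradients early in training (Goodfellow et al.).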
+
+# Build Optimizers
+optimizer_gen = tf.train.AdamOptimizer(learning_rate=learning_rate)
+optimizer_disc = tf.train.AdamOptimizer(learning_rate=learning_rate)
+
+# Training Variables for each optimizer
+# By default in TensorFlow, all variables are updated by each optimizer, so we
+# need to specify, for each optimizer, the exact variables it should update.
+# Generator Network Variables
+gen_vars = [weights['gen_hidden1'], weights['gen_out'],
+            biases['gen_hidden1'], biases['gen_out']]
+# Discriminator Network Variables
+disc_vars = [weights['disc_hidden1'], weights['disc_out'],
+            biases['disc_hidden1'], biases['disc_out']]
+
+# Create training operations
+train_gen = optimizer_gen.minimize(gen_loss, var_list=gen_vars)
+train_disc = optimizer_disc.minimize(disc_loss, var_list=disc_vars)
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+# Start training
+with tf.Session() as sess:
+
+    # Run the initializer
+    sess.run(init)
+
+    for i in range(1, num_steps+1):
+        # Prepare Data
+        # Get the next batch of MNIST data (only images are needed, not labels)
+        batch_x, _ = mnist.train.next_batch(batch_size)
+        # Generate noise to feed to the generator
+        z = np.random.uniform(-1., 1., size=[batch_size, noise_dim])
+
+        # Train
+        feed_dict = {disc_input: batch_x, gen_input: z}
+        _, _, gl, dl = sess.run([train_gen, train_disc, gen_loss, disc_loss],
+                                feed_dict=feed_dict)
+        if i % 1000 == 0 or i == 1:
+            print('Step %i: Generator Loss: %f, Discriminator Loss: %f' % (i, gl, dl))
+
+    # Generate images from noise, using the generator network.
+    f, a = plt.subplots(4, 10, figsize=(10, 4))
+    for i in range(10):
+        # Noise input.
+        z = np.random.uniform(-1., 1., size=[4, noise_dim])
+        g = sess.run([gen_sample], feed_dict={gen_input: z})
+        g = np.reshape(g, newshape=(4, 28, 28, 1))
+        # Reverse colours for better display
+        g = -1 * (g - 1)
+        for j in range(4):
+            # Generate image from noise. Extend to 3 channels for matplot figure.
+            img = np.reshape(np.repeat(g[j][:, :, np.newaxis], 3, axis=2),
+                             newshape=(28, 28, 3))
+            a[j][i].imshow(img)
+
+    f.show()
+    plt.draw()
+    plt.waitforbuttonpress()

+ 104 - 0
tensorflow_v1/examples/3_NeuralNetworks/multilayer_perceptron.py

@@ -0,0 +1,104 @@
+""" Multilayer Perceptron.
+
+A Multilayer Perceptron (Neural Network) implementation example using
+TensorFlow library. This example is using the MNIST database of handwritten
+digits (http://yann.lecun.com/exdb/mnist/).
+
+Links:
+    [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+
+# ------------------------------------------------------------------
+#
+# THIS EXAMPLE HAS BEEN RENAMED 'neural_network.py', FOR SIMPLICITY.
+#
+# ------------------------------------------------------------------
+
+
+from __future__ import print_function
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+import tensorflow as tf
+
+# Parameters
+learning_rate = 0.001
+training_epochs = 15
+batch_size = 100
+display_step = 1
+
+# Network Parameters
+n_hidden_1 = 256 # 1st layer number of neurons
+n_hidden_2 = 256 # 2nd layer number of neurons
+n_input = 784 # MNIST data input (img shape: 28*28)
+n_classes = 10 # MNIST total classes (0-9 digits)
+
+# tf Graph input
+X = tf.placeholder("float", [None, n_input])
+Y = tf.placeholder("float", [None, n_classes])
+
+# Store layers weight & bias
+weights = {
+    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
+    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
+    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
+}
+biases = {
+    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
+    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
+    'out': tf.Variable(tf.random_normal([n_classes]))
+}
+
+
+# Create model
+def multilayer_perceptron(x):
+    # Hidden fully connected layer with 256 neurons
+    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
+    # Hidden fully connected layer with 256 neurons
+    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
+    # Output fully connected layer with a neuron for each class
+    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
+    return out_layer
+
+# Construct model
+logits = multilayer_perceptron(X)
+
+# Define loss and optimizer
+loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
+    logits=logits, labels=Y))
+optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
+train_op = optimizer.minimize(loss_op)
+# Initializing the variables
+init = tf.global_variables_initializer()
+
+with tf.Session() as sess:
+    sess.run(init)
+
+    # Training cycle
+    for epoch in range(training_epochs):
+        avg_cost = 0.
+        total_batch = int(mnist.train.num_examples/batch_size)
+        # Loop over all batches
+        for i in range(total_batch):
+            batch_x, batch_y = mnist.train.next_batch(batch_size)
+            # Run optimization op (backprop) and cost op (to get loss value)
+            _, c = sess.run([train_op, loss_op], feed_dict={X: batch_x,
+                                                            Y: batch_y})
+            # Compute average loss
+            avg_cost += c / total_batch
+        # Display logs per epoch step
+        if epoch % display_step == 0:
+            print("Epoch:", '%04d' % (epoch+1), "cost={:.9f}".format(avg_cost))
+    print("Optimization Finished!")
+
+    # Test model
+    pred = tf.nn.softmax(logits)  # Apply softmax to logits
+    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1))
+    # Calculate accuracy
+    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
+    print("Accuracy:", accuracy.eval({X: mnist.test.images, Y: mnist.test.labels}))

+ 103 - 0
tensorflow_v1/examples/3_NeuralNetworks/neural_network.py

@@ -0,0 +1,103 @@
+""" Neural Network.
+
+A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron)
+implementation with TensorFlow. This example is using the MNIST database
+of handwritten digits (http://yann.lecun.com/exdb/mnist/).
+
+This example is using TensorFlow layers, see 'neural_network_raw' example for
+a raw implementation with variables.
+
+Links:
+    [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+
+from __future__ import print_function
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
+
+import tensorflow as tf
+
+# Parameters
+learning_rate = 0.1
+num_steps = 1000
+batch_size = 128
+display_step = 100
+
+# Network Parameters
+n_hidden_1 = 256 # 1st layer number of neurons
+n_hidden_2 = 256 # 2nd layer number of neurons
+num_input = 784 # MNIST data input (img shape: 28*28)
+num_classes = 10 # MNIST total classes (0-9 digits)
+
+
+# Define the neural network
+def neural_net(x_dict):
+    # TF Estimator input is a dict, in case of multiple inputs
+    x = x_dict['images']
+    # Hidden fully connected layer with 256 neurons
+    layer_1 = tf.layers.dense(x, n_hidden_1)
+    # Hidden fully connected layer with 256 neurons
+    layer_2 = tf.layers.dense(layer_1, n_hidden_2)
+    # Output fully connected layer with a neuron for each class
+    out_layer = tf.layers.dense(layer_2, num_classes)
+    return out_layer
+
+
+# Define the model function (following TF Estimator Template)
+def model_fn(features, labels, mode):
+    # Build the neural network
+    logits = neural_net(features)
+
+    # Predictions
+    pred_classes = tf.argmax(logits, axis=1)
+    pred_probas = tf.nn.softmax(logits)
+
+    # If prediction mode, early return
+    if mode == tf.estimator.ModeKeys.PREDICT:
+        return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)
+
+    # Define loss and optimizer
+    loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
+        logits=logits, labels=tf.cast(labels, dtype=tf.int32)))
+    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
+    train_op = optimizer.minimize(loss_op,
+                                  global_step=tf.train.get_global_step())
+
+    # Evaluate the accuracy of the model
+    acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)
+
+    # TF Estimator requires returning an EstimatorSpec that specifies
+    # the different ops for training, evaluating, etc.
+    estim_specs = tf.estimator.EstimatorSpec(
+        mode=mode,
+        predictions=pred_classes,
+        loss=loss_op,
+        train_op=train_op,
+        eval_metric_ops={'accuracy': acc_op})
+
+    return estim_specs
+
+# Build the Estimator
+model = tf.estimator.Estimator(model_fn)
+
+# Define the input function for training
+input_fn = tf.estimator.inputs.numpy_input_fn(
+    x={'images': mnist.train.images}, y=mnist.train.labels,
+    batch_size=batch_size, num_epochs=None, shuffle=True)
+# Train the Model
+model.train(input_fn, steps=num_steps)
+
+# Evaluate the Model
+# Define the input function for evaluating
+input_fn = tf.estimator.inputs.numpy_input_fn(
+    x={'images': mnist.test.images}, y=mnist.test.labels,
+    batch_size=batch_size, shuffle=False)
+# Use the Estimator 'evaluate' method
+e = model.evaluate(input_fn)
+
+print("Testing Accuracy:", e['accuracy'])

+ 133 - 0
tensorflow_v1/examples/3_NeuralNetworks/neural_network_eager_api.py

@@ -0,0 +1,133 @@
+""" Neural Network with Eager API.
+
+A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron)
+implementation with TensorFlow's Eager API. This example is using the MNIST database
+of handwritten digits (http://yann.lecun.com/exdb/mnist/).
+
+This example is using TensorFlow layers, see 'neural_network_raw' example for
+a raw implementation with variables.
+
+Links:
+    [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+from __future__ import print_function
+
+import tensorflow as tf
+
+# Set Eager API
+tf.enable_eager_execution()
+tfe = tf.contrib.eager
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
+
+# Parameters
+learning_rate = 0.001
+num_steps = 1000
+batch_size = 128
+display_step = 100
+
+# Network Parameters
+n_hidden_1 = 256 # 1st layer number of neurons
+n_hidden_2 = 256 # 2nd layer number of neurons
+num_input = 784 # MNIST data input (img shape: 28*28)
+num_classes = 10 # MNIST total classes (0-9 digits)
+
+# Using TF Dataset to split data into batches
+dataset = tf.data.Dataset.from_tensor_slices(
+    (mnist.train.images, mnist.train.labels))
+dataset = dataset.repeat().batch(batch_size).prefetch(batch_size)
+dataset_iter = tfe.Iterator(dataset)
+
+
+# Define the neural network. To use eager API and tf.layers API together,
+# we must instantiate a tfe.Network class as follows:
+class NeuralNet(tfe.Network):
+    def __init__(self):
+        # Define each layer
+        super(NeuralNet, self).__init__()
+        # Hidden fully connected layer with 256 neurons
+        self.layer1 = self.track_layer(
+            tf.layers.Dense(n_hidden_1, activation=tf.nn.relu))
+        # Hidden fully connected layer with 256 neurons
+        self.layer2 = self.track_layer(
+            tf.layers.Dense(n_hidden_2, activation=tf.nn.relu))
+        # Output fully connected layer with a neuron for each class
+        self.out_layer = self.track_layer(tf.layers.Dense(num_classes))
+
+    def call(self, x):
+        x = self.layer1(x)
+        x = self.layer2(x)
+        return self.out_layer(x)
+
+
+neural_net = NeuralNet()
+
+
+# Cross-Entropy loss function
+def loss_fn(inference_fn, inputs, labels):
+    # Using sparse_softmax cross entropy
+    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
+        logits=inference_fn(inputs), labels=labels))
+
+
+# Calculate accuracy
+def accuracy_fn(inference_fn, inputs, labels):
+    prediction = tf.nn.softmax(inference_fn(inputs))
+    correct_pred = tf.equal(tf.argmax(prediction, 1), labels)
+    return tf.reduce_mean(tf.cast(correct_pred, tf.float32))
+
+
+# Adam Optimizer
+optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
+# Compute gradients
+grad = tfe.implicit_gradients(loss_fn)
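+# 'grad' is now a function with the same signature as loss_fn; calling
+# grad(neural_net, x_batch, y_batch) returns a list of (gradient, variable)
+# pairs for every trainable variable touched by loss_fn, ready to be passed
+# to optimizer.apply_gradients (as done in the training loop below).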
+
+# Training
+average_loss = 0.
+average_acc = 0.
+for step in range(num_steps):
+
+    # Iterate through the dataset
+    d = dataset_iter.next()
+
+    # Images
+    x_batch = d[0]
+    # Labels
+    y_batch = tf.cast(d[1], dtype=tf.int64)
+
+    # Compute the batch loss
+    batch_loss = loss_fn(neural_net, x_batch, y_batch)
+    average_loss += batch_loss
+    # Compute the batch accuracy
+    batch_accuracy = accuracy_fn(neural_net, x_batch, y_batch)
+    average_acc += batch_accuracy
+
+    if step == 0:
+        # Display the initial cost, before optimizing
+        print("Initial loss= {:.9f}".format(average_loss))
+
+    # Update the variables following gradients info
+    optimizer.apply_gradients(grad(neural_net, x_batch, y_batch))
+
+    # Display info
+    if (step + 1) % display_step == 0 or step == 0:
+        if step > 0:
+            average_loss /= display_step
+            average_acc /= display_step
+        print("Step:", '%04d' % (step + 1), " loss=",
+              "{:.9f}".format(average_loss), " accuracy=",
+              "{:.4f}".format(average_acc))
+        average_loss = 0.
+        average_acc = 0.
+
+# Evaluate model on the test image set
+testX = mnist.test.images
+testY = mnist.test.labels
+
+test_acc = accuracy_fn(neural_net, testX, testY)
+print("Testset Accuracy: {:.4f}".format(test_acc))

+ 101 - 0
tensorflow_v1/examples/3_NeuralNetworks/neural_network_raw.py

@@ -0,0 +1,101 @@
+""" Neural Network.
+
+A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron)
+implementation with TensorFlow. This example is using the MNIST database
+of handwritten digits (http://yann.lecun.com/exdb/mnist/).
+
+Links:
+    [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+
+from __future__ import print_function
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+import tensorflow as tf
+
+# Parameters
+learning_rate = 0.1
+num_steps = 500
+batch_size = 128
+display_step = 100
+
+# Network Parameters
+n_hidden_1 = 256 # 1st layer number of neurons
+n_hidden_2 = 256 # 2nd layer number of neurons
+num_input = 784 # MNIST data input (img shape: 28*28)
+num_classes = 10 # MNIST total classes (0-9 digits)
+
+# tf Graph input
+X = tf.placeholder("float", [None, num_input])
+Y = tf.placeholder("float", [None, num_classes])
+
+# Store layers weight & bias
+weights = {
+    'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
+    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
+    'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))
+}
+biases = {
+    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
+    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
+    'out': tf.Variable(tf.random_normal([num_classes]))
+}
+
+
+# Create model
+def neural_net(x):
+    # Hidden fully connected layer with 256 neurons
+    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
+    # Hidden fully connected layer with 256 neurons
+    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
+    # Output fully connected layer with a neuron for each class
+    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
+    return out_layer
+
+# Construct model
+logits = neural_net(X)
+prediction = tf.nn.softmax(logits)
+
+# Define loss and optimizer
+loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
+    logits=logits, labels=Y))
+optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
+train_op = optimizer.minimize(loss_op)
+
+# Evaluate model
+correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
+accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+# Start training
+with tf.Session() as sess:
+
+    # Run the initializer
+    sess.run(init)
+
+    for step in range(1, num_steps+1):
+        batch_x, batch_y = mnist.train.next_batch(batch_size)
+        # Run optimization op (backprop)
+        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
+        if step % display_step == 0 or step == 1:
+            # Calculate batch loss and accuracy
+            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
+                                                                 Y: batch_y})
+            print("Step " + str(step) + ", Minibatch Loss= " + \
+                  "{:.4f}".format(loss) + ", Training Accuracy= " + \
+                  "{:.3f}".format(acc))
+
+    print("Optimization Finished!")
+
+    # Calculate accuracy for MNIST test images
+    print("Testing Accuracy:", \
+        sess.run(accuracy, feed_dict={X: mnist.test.images,
+                                      Y: mnist.test.labels}))

+ 115 - 0
tensorflow_v1/examples/3_NeuralNetworks/recurrent_network.py

@@ -0,0 +1,115 @@
+""" Recurrent Neural Network.
+
+A Recurrent Neural Network (LSTM) implementation example using TensorFlow library.
+This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
+
+Links:
+    [Long Short Term Memory](http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf)
+    [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+
+from __future__ import print_function
+
+import tensorflow as tf
+from tensorflow.contrib import rnn
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+'''
+To classify images using a recurrent neural network, we consider every image
+row as a sequence of pixels. Because MNIST image shape is 28*28px, we will then
+handle 28 sequences of 28 steps for every sample.
+'''
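+# For example, a batch of flat 784-pixel vectors of shape (batch_size, 784)
+# is reshaped to (batch_size, 28, 28) in the training loop below, i.e. 28
+# timesteps of 28 features per sample.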
+
+# Training Parameters
+learning_rate = 0.001
+training_steps = 10000
+batch_size = 128
+display_step = 200
+
+# Network Parameters
+num_input = 28 # MNIST data input (img shape: 28*28)
+timesteps = 28 # timesteps
+num_hidden = 128 # hidden layer num of features
+num_classes = 10 # MNIST total classes (0-9 digits)
+
+# tf Graph input
+X = tf.placeholder("float", [None, timesteps, num_input])
+Y = tf.placeholder("float", [None, num_classes])
+
+# Define weights
+weights = {
+    'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))
+}
+biases = {
+    'out': tf.Variable(tf.random_normal([num_classes]))
+}
+
+
+def RNN(x, weights, biases):
+
+    # Prepare data shape to match `rnn` function requirements
+    # Current data input shape: (batch_size, timesteps, n_input)
+    # Required shape: 'timesteps' tensors list of shape (batch_size, n_input)
+
+    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input)
+    x = tf.unstack(x, timesteps, 1)
+
+    # Define a lstm cell with tensorflow
+    lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
+
+    # Get lstm cell output
+    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
+
+    # Linear activation, using rnn inner loop last output
+    return tf.matmul(outputs[-1], weights['out']) + biases['out']
+
+logits = RNN(X, weights, biases)
+prediction = tf.nn.softmax(logits)
+
+# Define loss and optimizer
+loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
+    logits=logits, labels=Y))
+optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
+train_op = optimizer.minimize(loss_op)
+
+# Evaluate model
+correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
+accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+# Start training
+with tf.Session() as sess:
+
+    # Run the initializer
+    sess.run(init)
+
+    for step in range(1, training_steps+1):
+        batch_x, batch_y = mnist.train.next_batch(batch_size)
+        # Reshape data to get 28 seq of 28 elements
+        batch_x = batch_x.reshape((batch_size, timesteps, num_input))
+        # Run optimization op (backprop)
+        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
+        if step % display_step == 0 or step == 1:
+            # Calculate batch loss and accuracy
+            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
+                                                                 Y: batch_y})
+            print("Step " + str(step) + ", Minibatch Loss= " + \
+                  "{:.4f}".format(loss) + ", Training Accuracy= " + \
+                  "{:.3f}".format(acc))
+
+    print("Optimization Finished!")
+
+    # Calculate accuracy for 128 mnist test images
+    test_len = 128
+    test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input))
+    test_label = mnist.test.labels[:test_len]
+    print("Testing Accuracy:", \
+        sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))

+ 143 - 0
tensorflow_v1/examples/3_NeuralNetworks/variational_autoencoder.py

@@ -0,0 +1,143 @@
+""" Variational Auto-Encoder Example.
+
+Using a variational auto-encoder to generate digits images from noise.
+MNIST handwritten digits are used as training examples.
+
+References:
+    - Auto-Encoding Variational Bayes The International Conference on Learning
+    Representations (ICLR), Banff, 2014. D.P. Kingma, M. Welling
+    - Understanding the difficulty of training deep feedforward neural networks.
+    X Glorot, Y Bengio. Aistats 9, 249-256
+    - Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner. "Gradient-based
+    learning applied to document recognition." Proceedings of the IEEE,
+    86(11):2278-2324, November 1998.
+
+Links:
+    - [VAE Paper](https://arxiv.org/abs/1312.6114).
+    - [Xavier Glorot Init](www.cs.cmu.edu/~bhiksha/courses/deeplearning/Fall.../AISTATS2010_Glorot.pdf).
+    - [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+import matplotlib.pyplot as plt
+from scipy.stats import norm
+import tensorflow as tf
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+# Parameters
+learning_rate = 0.001
+num_steps = 30000
+batch_size = 64
+
+# Network Parameters
+image_dim = 784 # MNIST images are 28x28 pixels
+hidden_dim = 512
+latent_dim = 2
+
+# A custom initialization (see Xavier Glorot init)
+def glorot_init(shape):
+    return tf.random_normal(shape=shape, stddev=1. / tf.sqrt(shape[0] / 2.))
+
+# Variables
+weights = {
+    'encoder_h1': tf.Variable(glorot_init([image_dim, hidden_dim])),
+    'z_mean': tf.Variable(glorot_init([hidden_dim, latent_dim])),
+    'z_std': tf.Variable(glorot_init([hidden_dim, latent_dim])),
+    'decoder_h1': tf.Variable(glorot_init([latent_dim, hidden_dim])),
+    'decoder_out': tf.Variable(glorot_init([hidden_dim, image_dim]))
+}
+biases = {
+    'encoder_b1': tf.Variable(glorot_init([hidden_dim])),
+    'z_mean': tf.Variable(glorot_init([latent_dim])),
+    'z_std': tf.Variable(glorot_init([latent_dim])),
+    'decoder_b1': tf.Variable(glorot_init([hidden_dim])),
+    'decoder_out': tf.Variable(glorot_init([image_dim]))
+}
+
+# Building the encoder
+input_image = tf.placeholder(tf.float32, shape=[None, image_dim])
+encoder = tf.matmul(input_image, weights['encoder_h1']) + biases['encoder_b1']
+encoder = tf.nn.tanh(encoder)
+z_mean = tf.matmul(encoder, weights['z_mean']) + biases['z_mean']
+z_std = tf.matmul(encoder, weights['z_std']) + biases['z_std']
+
+# Sampler: Normal (gaussian) random distribution
+eps = tf.random_normal(tf.shape(z_std), dtype=tf.float32, mean=0., stddev=1.0,
+                       name='epsilon')
+z = z_mean + tf.exp(z_std / 2) * eps
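+# This is the usual reparameterization trick: z = mu + sigma * eps with
+# eps ~ N(0, 1). Here 'z_std' is treated as the log-variance, so the
+# standard deviation is exp(z_std / 2).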
+
+# Building the decoder (these weights are re-used later to generate images from noise)
+decoder = tf.matmul(z, weights['decoder_h1']) + biases['decoder_b1']
+decoder = tf.nn.tanh(decoder)
+decoder = tf.matmul(decoder, weights['decoder_out']) + biases['decoder_out']
+decoder = tf.nn.sigmoid(decoder)
+
+
+# Define VAE Loss
+def vae_loss(x_reconstructed, x_true):
+    # Reconstruction loss
+    encode_decode_loss = x_true * tf.log(1e-10 + x_reconstructed) \
+                         + (1 - x_true) * tf.log(1e-10 + 1 - x_reconstructed)
+    encode_decode_loss = -tf.reduce_sum(encode_decode_loss, 1)
+    # KL Divergence loss
+    kl_div_loss = 1 + z_std - tf.square(z_mean) - tf.exp(z_std)
+    kl_div_loss = -0.5 * tf.reduce_sum(kl_div_loss, 1)
+    return tf.reduce_mean(encode_decode_loss + kl_div_loss)
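+# The KL term above is the closed-form KL divergence between the approximate
+# posterior N(z_mean, exp(z_std)) and the standard normal prior:
+# KL = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2), with z_std = log(sigma^2).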
+
+loss_op = vae_loss(decoder, input_image)
+optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
+train_op = optimizer.minimize(loss_op)
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+# Start training
+with tf.Session() as sess:
+
+    # Run the initializer
+    sess.run(init)
+
+    for i in range(1, num_steps+1):
+        # Prepare Data
+        # Get the next batch of MNIST data (only images are needed, not labels)
+        batch_x, _ = mnist.train.next_batch(batch_size)
+
+        # Train
+        feed_dict = {input_image: batch_x}
+        _, l = sess.run([train_op, loss_op], feed_dict=feed_dict)
+        if i % 1000 == 0 or i == 1:
+            print('Step %i, Loss: %f' % (i, l))
+
+    # Testing
+    # Generator takes noise as input
+    noise_input = tf.placeholder(tf.float32, shape=[None, latent_dim])
+    # Rebuild the decoder to create image from noise
+    decoder = tf.matmul(noise_input, weights['decoder_h1']) + biases['decoder_b1']
+    decoder = tf.nn.tanh(decoder)
+    decoder = tf.matmul(decoder, weights['decoder_out']) + biases['decoder_out']
+    decoder = tf.nn.sigmoid(decoder)
+
+    # Building a manifold of generated digits
+    n = 20
+    x_axis = np.linspace(-3, 3, n)
+    y_axis = np.linspace(-3, 3, n)
+
+    canvas = np.empty((28 * n, 28 * n))
+    for i, yi in enumerate(x_axis):
+        for j, xi in enumerate(y_axis):
+            z_mu = np.array([[xi, yi]] * batch_size)
+            x_mean = sess.run(decoder, feed_dict={noise_input: z_mu})
+            canvas[(n - i - 1) * 28:(n - i) * 28, j * 28:(j + 1) * 28] = \
+            x_mean[0].reshape(28, 28)
+
+    plt.figure(figsize=(8, 10))
+    Xi, Yi = np.meshgrid(x_axis, y_axis)
+    plt.imshow(canvas, origin="upper", cmap="gray")
+    plt.show()

+ 140 - 0
tensorflow_v1/examples/4_Utils/save_restore_model.py

@@ -0,0 +1,140 @@
+'''
+Save and Restore a model using TensorFlow.
+This example is using the MNIST database of handwritten digits
+(http://yann.lecun.com/exdb/mnist/)
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+
+from __future__ import print_function
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
+
+import tensorflow as tf
+
+# Parameters
+learning_rate = 0.001
+batch_size = 100
+display_step = 1
+model_path = "/tmp/model.ckpt"
+
+# Network Parameters
+n_hidden_1 = 256 # 1st layer number of features
+n_hidden_2 = 256 # 2nd layer number of features
+n_input = 784 # MNIST data input (img shape: 28*28)
+n_classes = 10 # MNIST total classes (0-9 digits)
+
+# tf Graph input
+x = tf.placeholder("float", [None, n_input])
+y = tf.placeholder("float", [None, n_classes])
+
+
+# Create model
+def multilayer_perceptron(x, weights, biases):
+    # Hidden layer with RELU activation
+    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
+    layer_1 = tf.nn.relu(layer_1)
+    # Hidden layer with RELU activation
+    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
+    layer_2 = tf.nn.relu(layer_2)
+    # Output layer with linear activation
+    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
+    return out_layer
+
+# Store layers weight & bias
+weights = {
+    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
+    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
+    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
+}
+biases = {
+    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
+    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
+    'out': tf.Variable(tf.random_normal([n_classes]))
+}
+
+# Construct model
+pred = multilayer_perceptron(x, weights, biases)
+
+# Define loss and optimizer
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
+optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+# 'Saver' op to save and restore all the variables
+saver = tf.train.Saver()
+
+# Running first session
+print("Starting 1st session...")
+with tf.Session() as sess:
+
+    # Run the initializer
+    sess.run(init)
+
+    # Training cycle
+    for epoch in range(3):
+        avg_cost = 0.
+        total_batch = int(mnist.train.num_examples/batch_size)
+        # Loop over all batches
+        for i in range(total_batch):
+            batch_x, batch_y = mnist.train.next_batch(batch_size)
+            # Run optimization op (backprop) and cost op (to get loss value)
+            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
+                                                          y: batch_y})
+            # Compute average loss
+            avg_cost += c / total_batch
+        # Display logs per epoch step
+        if epoch % display_step == 0:
+            print("Epoch:", '%04d' % (epoch+1), "cost=", \
+                "{:.9f}".format(avg_cost))
+    print("First Optimization Finished!")
+
+    # Test model
+    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
+    # Calculate accuracy
+    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
+    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
+
+    # Save model weights to disk
+    save_path = saver.save(sess, model_path)
+    print("Model saved in file: %s" % save_path)
+
+# Running a new session
+print("Starting 2nd session...")
+with tf.Session() as sess:
+    # Initialize variables
+    sess.run(init)
+
+    # Restore model weights from previously saved model
+    saver.restore(sess, model_path)
+    print("Model restored from file: %s" % save_path)
+
+    # Resume training
+    for epoch in range(7):
+        avg_cost = 0.
+        total_batch = int(mnist.train.num_examples / batch_size)
+        # Loop over all batches
+        for i in range(total_batch):
+            batch_x, batch_y = mnist.train.next_batch(batch_size)
+            # Run optimization op (backprop) and cost op (to get loss value)
+            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
+                                                          y: batch_y})
+            # Compute average loss
+            avg_cost += c / total_batch
+        # Display logs per epoch step
+        if epoch % display_step == 0:
+            print("Epoch:", '%04d' % (epoch + 1), "cost=", \
+                "{:.9f}".format(avg_cost))
+    print("Second Optimization Finished!")
+
+    # Test model
+    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
+    # Calculate accuracy
+    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
+    print("Accuracy:", accuracy.eval(
+        {x: mnist.test.images, y: mnist.test.labels}))

+ 143 - 0
tensorflow_v1/examples/4_Utils/tensorboard_advanced.py

@@ -0,0 +1,143 @@
+'''
+Graph and Loss visualization using Tensorboard.
+This example is using the MNIST database of handwritten digits
+(http://yann.lecun.com/exdb/mnist/)
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+
+from __future__ import print_function
+
+import tensorflow as tf
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+# Parameters
+learning_rate = 0.01
+training_epochs = 25
+batch_size = 100
+display_step = 1
+logs_path = '/tmp/tensorflow_logs/example/'
+
+# Network Parameters
+n_hidden_1 = 256 # 1st layer number of features
+n_hidden_2 = 256 # 2nd layer number of features
+n_input = 784 # MNIST data input (img shape: 28*28)
+n_classes = 10 # MNIST total classes (0-9 digits)
+
+# tf Graph Input
+# mnist data image of shape 28*28=784
+x = tf.placeholder(tf.float32, [None, 784], name='InputData')
+# 0-9 digits recognition => 10 classes
+y = tf.placeholder(tf.float32, [None, 10], name='LabelData')
+
+
+# Create model
+def multilayer_perceptron(x, weights, biases):
+    # Hidden layer with RELU activation
+    layer_1 = tf.add(tf.matmul(x, weights['w1']), biases['b1'])
+    layer_1 = tf.nn.relu(layer_1)
+    # Create a summary to visualize the first layer ReLU activation
+    tf.summary.histogram("relu1", layer_1)
+    # Hidden layer with RELU activation
+    layer_2 = tf.add(tf.matmul(layer_1, weights['w2']), biases['b2'])
+    layer_2 = tf.nn.relu(layer_2)
+    # Create another summary to visualize the second layer ReLU activation
+    tf.summary.histogram("relu2", layer_2)
+    # Output layer
+    out_layer = tf.add(tf.matmul(layer_2, weights['w3']), biases['b3'])
+    return out_layer
+
+# Store layers weight & bias
+weights = {
+    'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1]), name='W1'),
+    'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]), name='W2'),
+    'w3': tf.Variable(tf.random_normal([n_hidden_2, n_classes]), name='W3')
+}
+biases = {
+    'b1': tf.Variable(tf.random_normal([n_hidden_1]), name='b1'),
+    'b2': tf.Variable(tf.random_normal([n_hidden_2]), name='b2'),
+    'b3': tf.Variable(tf.random_normal([n_classes]), name='b3')
+}
+
+# Encapsulating all ops into scopes, making Tensorboard's Graph
+# Visualization more convenient
+with tf.name_scope('Model'):
+    # Build model
+    pred = multilayer_perceptron(x, weights, biases)
+
+with tf.name_scope('Loss'):
+    # Softmax Cross entropy (cost function)
+    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
+
+with tf.name_scope('SGD'):
+    # Gradient Descent
+    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
+    # Op to calculate every variable gradient
+    grads = tf.gradients(loss, tf.trainable_variables())
+    grads = list(zip(grads, tf.trainable_variables()))
+    # Op to update all variables according to their gradient
+    apply_grads = optimizer.apply_gradients(grads_and_vars=grads)
+
+with tf.name_scope('Accuracy'):
+    # Accuracy
+    acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
+    acc = tf.reduce_mean(tf.cast(acc, tf.float32))
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+# Create a summary to monitor cost tensor
+tf.summary.scalar("loss", loss)
+# Create a summary to monitor accuracy tensor
+tf.summary.scalar("accuracy", acc)
+# Create summaries to visualize weights
+for var in tf.trainable_variables():
+    tf.summary.histogram(var.name, var)
+# Summarize all gradients
+for grad, var in grads:
+    tf.summary.histogram(var.name + '/gradient', grad)
+# Merge all summaries into a single op
+merged_summary_op = tf.summary.merge_all()
+
+# Start training
+with tf.Session() as sess:
+
+    # Run the initializer
+    sess.run(init)
+
+    # op to write logs to Tensorboard
+    summary_writer = tf.summary.FileWriter(logs_path,
+                                            graph=tf.get_default_graph())
+
+    # Training cycle
+    for epoch in range(training_epochs):
+        avg_cost = 0.
+        total_batch = int(mnist.train.num_examples/batch_size)
+        # Loop over all batches
+        for i in range(total_batch):
+            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
+            # Run optimization op (backprop), cost op (to get loss value)
+            # and summary nodes
+            _, c, summary = sess.run([apply_grads, loss, merged_summary_op],
+                                     feed_dict={x: batch_xs, y: batch_ys})
+            # Write logs at every iteration
+            summary_writer.add_summary(summary, epoch * total_batch + i)
+            # Compute average loss
+            avg_cost += c / total_batch
+        # Display logs per epoch step
+        if (epoch+1) % display_step == 0:
+            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
+
+    print("Optimization Finished!")
+
+    # Test model
+    # Calculate accuracy
+    print("Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels}))
+
+    print("Run the command line:\n" \
+          "--> tensorboard --logdir=/tmp/tensorflow_logs " \
+          "\nThen open http://0.0.0.0:6006/ into your web browser")

+ 97 - 0
tensorflow_v1/examples/4_Utils/tensorboard_basic.py

@@ -0,0 +1,97 @@
+'''
+Graph and Loss visualization using Tensorboard.
+This example is using the MNIST database of handwritten digits
+(http://yann.lecun.com/exdb/mnist/)
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+
+from __future__ import print_function
+
+import tensorflow as tf
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+# Parameters
+learning_rate = 0.01
+training_epochs = 25
+batch_size = 100
+display_epoch = 1
+logs_path = '/tmp/tensorflow_logs/example/'
+
+# tf Graph Input
+# mnist data image of shape 28*28=784
+x = tf.placeholder(tf.float32, [None, 784], name='InputData')
+# 0-9 digits recognition => 10 classes
+y = tf.placeholder(tf.float32, [None, 10], name='LabelData')
+
+# Set model weights
+W = tf.Variable(tf.zeros([784, 10]), name='Weights')
+b = tf.Variable(tf.zeros([10]), name='Bias')
+
+# Construct model and encapsulating all ops into scopes, making
+# Tensorboard's Graph visualization more convenient
+with tf.name_scope('Model'):
+    # Model
+    pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
+with tf.name_scope('Loss'):
+    # Minimize error using cross entropy
+    cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
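+    # Note: taking log(pred) directly can produce NaN when a predicted
+    # probability reaches 0; a more numerically stable alternative is
+    # tf.nn.softmax_cross_entropy_with_logits applied to the pre-softmax
+    # logits (tf.matmul(x, W) + b), as done in the other examples.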
+with tf.name_scope('SGD'):
+    # Gradient Descent
+    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
+with tf.name_scope('Accuracy'):
+    # Accuracy
+    acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
+    acc = tf.reduce_mean(tf.cast(acc, tf.float32))
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+# Create a summary to monitor cost tensor
+tf.summary.scalar("loss", cost)
+# Create a summary to monitor accuracy tensor
+tf.summary.scalar("accuracy", acc)
+# Merge all summaries into a single op
+merged_summary_op = tf.summary.merge_all()
+
+# Start training
+with tf.Session() as sess:
+
+    # Run the initializer
+    sess.run(init)
+
+    # op to write logs to Tensorboard
+    summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
+
+    # Training cycle
+    for epoch in range(training_epochs):
+        avg_cost = 0.
+        total_batch = int(mnist.train.num_examples/batch_size)
+        # Loop over all batches
+        for i in range(total_batch):
+            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
+            # Run optimization op (backprop), cost op (to get loss value)
+            # and summary nodes
+            _, c, summary = sess.run([optimizer, cost, merged_summary_op],
+                                     feed_dict={x: batch_xs, y: batch_ys})
+            # Write logs at every iteration
+            summary_writer.add_summary(summary, epoch * total_batch + i)
+            # Compute average loss
+            avg_cost += c / total_batch
+        # Display logs per epoch step
+        if (epoch+1) % display_epoch == 0:
+            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
+
+    print("Optimization Finished!")
+
+    # Test model
+    # Calculate accuracy
+    print("Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels}))
+
+    print("Run the command line:\n" \
+          "--> tensorboard --logdir=/tmp/tensorflow_logs " \
+          "\nThen open http://0.0.0.0:6006/ into your web browser")

+ 212 - 0
tensorflow_v1/examples/5_DataManagement/build_an_image_dataset.py

@@ -0,0 +1,212 @@
+""" Build an Image Dataset in TensorFlow.
+
+For this example, you need to make your own set of images (JPEG).
+We will show 2 different ways to build that dataset:
+
+- From a root folder that contains one sub-folder per class, each holding that class's images
+    ```
+    ROOT_FOLDER
+       |-------- SUBFOLDER (CLASS 0)
+       |             |
+       |             | ----- image1.jpg
+       |             | ----- image2.jpg
+       |             | ----- etc...
+       |             
+       |-------- SUBFOLDER (CLASS 1)
+       |             |
+       |             | ----- image1.jpg
+       |             | ----- image2.jpg
+       |             | ----- etc...
+    ```
+
+- From a plain text file that lists each image path with its class ID:
+    ```
+    /path/to/image/1.jpg CLASS_ID
+    /path/to/image/2.jpg CLASS_ID
+    /path/to/image/3.jpg CLASS_ID
+    /path/to/image/4.jpg CLASS_ID
+    etc...
+    ```
+
+Below, there are some parameters that you need to change (marked 'CHANGE HERE'),
+such as the dataset path.
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+from __future__ import print_function
+
+import tensorflow as tf
+import os
+
+# Dataset Parameters - CHANGE HERE
+MODE = 'folder' # or 'file', if you choose a plain text file (see above).
+DATASET_PATH = '/path/to/dataset/' # the dataset file or root folder path.
+
+# Image Parameters
+N_CLASSES = 2 # CHANGE HERE, total number of classes
+IMG_HEIGHT = 64 # CHANGE HERE, the image height to be resized to
+IMG_WIDTH = 64 # CHANGE HERE, the image width to be resized to
+CHANNELS = 3 # The 3 color channels, change to 1 if grayscale
+
+
+# Reading the dataset
+# 2 modes: 'file' or 'folder'
+def read_images(dataset_path, mode, batch_size):
+    imagepaths, labels = list(), list()
+    if mode == 'file':
+        # Read dataset file
+        with open(dataset_path) as f:
+            data = f.read().splitlines()
+        for d in data:
+            imagepaths.append(d.split(' ')[0])
+            labels.append(int(d.split(' ')[1]))
+    elif mode == 'folder':
+        # A class ID is assigned to each sub-folder, in alphabetical order
+        label = 0
+        # List the directory
+        classes = sorted(next(os.walk(dataset_path))[1])
+        # List each sub-directory (the classes)
+        for c in classes:
+            c_dir = os.path.join(dataset_path, c)
+            walk = next(os.walk(c_dir))
+            # Add each image to the training set
+            for sample in walk[2]:
+                # Only keep JPEG images
+                if sample.endswith('.jpg') or sample.endswith('.jpeg'):
+                    imagepaths.append(os.path.join(c_dir, sample))
+                    labels.append(label)
+            label += 1
+    else:
+        raise Exception("Unknown mode.")
+
+    # Convert to Tensor
+    imagepaths = tf.convert_to_tensor(imagepaths, dtype=tf.string)
+    labels = tf.convert_to_tensor(labels, dtype=tf.int32)
+    # Build a TF Queue, shuffle data
+    image, label = tf.train.slice_input_producer([imagepaths, labels],
+                                                 shuffle=True)
+
+    # Read images from disk
+    image = tf.read_file(image)
+    image = tf.image.decode_jpeg(image, channels=CHANNELS)
+
+    # Resize images to a common size
+    image = tf.image.resize_images(image, [IMG_HEIGHT, IMG_WIDTH])
+
+    # Normalize
+    image = image * 1.0/127.5 - 1.0
+
+    # Create batches
+    X, Y = tf.train.batch([image, label], batch_size=batch_size,
+                          capacity=batch_size * 8,
+                          num_threads=4)
+
+    return X, Y
+
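+# Illustrative sketch (not part of the original example): for the 'file' mode
+# described in the docstring above, a listing file can be generated from a
+# class-per-subfolder tree. The output name 'dataset_list.txt' is hypothetical.
+def write_dataset_listing(root_folder, output_file='dataset_list.txt'):
+    with open(output_file, 'w') as out:
+        class_id = 0
+        for class_name in sorted(os.listdir(root_folder)):
+            class_dir = os.path.join(root_folder, class_name)
+            if not os.path.isdir(class_dir):
+                continue
+            for fname in sorted(os.listdir(class_dir)):
+                if fname.endswith('.jpg') or fname.endswith('.jpeg'):
+                    out.write('%s %d\n' % (os.path.join(class_dir, fname), class_id))
+            class_id += 1
+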
+# -----------------------------------------------
+# THIS IS A CLASSIC CNN (see examples, section 3)
+# -----------------------------------------------
+# Note that a few elements have changed (usage of queues).
+
+# Parameters
+learning_rate = 0.001
+num_steps = 10000
+batch_size = 128
+display_step = 100
+
+# Network Parameters
+dropout = 0.75 # Dropout, probability to drop a unit (passed as 'rate' to tf.layers.dropout)
+
+# Build the data input
+X, Y = read_images(DATASET_PATH, MODE, batch_size)
+
+
+# Create model
+def conv_net(x, n_classes, dropout, reuse, is_training):
+    # Define a scope for reusing the variables
+    with tf.variable_scope('ConvNet', reuse=reuse):
+
+        # Convolution Layer with 32 filters and a kernel size of 5
+        conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
+        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
+        conv1 = tf.layers.max_pooling2d(conv1, 2, 2)
+
+        # Convolution Layer with 64 filters and a kernel size of 3
+        conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
+        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
+        conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
+
+        # Flatten the data to a 1-D vector for the fully connected layer
+        fc1 = tf.contrib.layers.flatten(conv2)
+
+        # Fully connected layer
+        fc1 = tf.layers.dense(fc1, 1024)
+        # Apply Dropout (if is_training is False, dropout is not applied)
+        fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
+
+        # Output layer, class prediction
+        out = tf.layers.dense(fc1, n_classes)
+        # Because 'softmax_cross_entropy_with_logits' already applies softmax,
+        # we only apply softmax to the testing network
+        out = tf.nn.softmax(out) if not is_training else out
+
+    return out
+
+
+# Because Dropout has different behavior at training and prediction time, we
+# need to create 2 distinct computation graphs that share the same weights.
+
+# Create a graph for training
+logits_train = conv_net(X, N_CLASSES, dropout, reuse=False, is_training=True)
+# Create another graph for testing that reuses the same weights
+logits_test = conv_net(X, N_CLASSES, dropout, reuse=True, is_training=False)
+
+# Define loss and optimizer (with train logits, for dropout to take effect)
+loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
+    logits=logits_train, labels=Y))
+optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
+train_op = optimizer.minimize(loss_op)
+
+# Evaluate model (with test logits, for dropout to be disabled)
+correct_pred = tf.equal(tf.argmax(logits_test, 1), tf.cast(Y, tf.int64))
+accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+# Saver object
+saver = tf.train.Saver()
+
+# Start training
+with tf.Session() as sess:
+
+    # Run the initializer
+    sess.run(init)
+
+    # Start the data queue
+    tf.train.start_queue_runners()
+
+    # Training cycle
+    for step in range(1, num_steps+1):
+
+        if step % display_step == 0:
+            # Run optimization and calculate batch loss and accuracy
+            _, loss, acc = sess.run([train_op, loss_op, accuracy])
+            print("Step " + str(step) + ", Minibatch Loss= " + \
+                  "{:.4f}".format(loss) + ", Training Accuracy= " + \
+                  "{:.3f}".format(acc))
+        else:
+            # Only run the optimization op (backprop)
+            sess.run(train_op)
+
+    print("Optimization Finished!")
+
+    # Save your model
+    saver.save(sess, 'my_tf_model')
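+
+    # Illustrative follow-up (not part of the original example): the checkpoint
+    # written above can be restored later in the same way, e.g. to resume
+    # training or run inference ('my_tf_model' is the prefix used above).
+    saver.restore(sess, 'my_tf_model')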

+ 130 - 0
tensorflow_v1/examples/5_DataManagement/tensorflow_dataset_api.py

@@ -0,0 +1,130 @@
+""" TensorFlow Dataset API.
+
+In this example, we will show how to load numpy array data into the new
+TensorFlow 'Dataset' API. The Dataset API implements an optimized data pipeline
+with queues that makes data processing and training faster (especially on GPU).
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+from __future__ import print_function
+
+import tensorflow as tf
+
+# Import MNIST data (Numpy format)
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+# Parameters
+learning_rate = 0.001
+num_steps = 2000
+batch_size = 128
+display_step = 100
+
+# Network Parameters
+n_input = 784 # MNIST data input (img shape: 28*28)
+n_classes = 10 # MNIST total classes (0-9 digits)
+dropout = 0.75 # Dropout, probability to drop a unit (passed as 'rate' to tf.layers.dropout)
+
+sess = tf.Session()
+
+# Create a dataset tensor from the images and the labels
+dataset = tf.data.Dataset.from_tensor_slices(
+    (mnist.train.images, mnist.train.labels))
+# Automatically refill the data queue when empty
+dataset = dataset.repeat()
+# Create batches of data
+dataset = dataset.batch(batch_size)
+# Prefetch data for faster consumption
+dataset = dataset.prefetch(batch_size)
+
+# Create an iterator over the dataset
+iterator = dataset.make_initializable_iterator()
+# Initialize the iterator
+sess.run(iterator.initializer)
+
+# Neural Net Input (images, labels)
+X, Y = iterator.get_next()
+
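+# Illustrative sketch (not part of the original example): the same pipeline can
+# be wrapped in a helper for arbitrary numpy arrays. 'shuffle_buffer' is a
+# hypothetical parameter; the original script does not shuffle.
+def make_numpy_dataset(features, labels, batch, shuffle_buffer=10000):
+    ds = tf.data.Dataset.from_tensor_slices((features, labels))
+    ds = ds.shuffle(shuffle_buffer).repeat().batch(batch).prefetch(batch)
+    return ds
+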
+
+# -----------------------------------------------
+# THIS IS A CLASSIC CNN (see examples, section 3)
+# -----------------------------------------------
+# Note that a few elements have changed (usage of sess.run).
+
+# Create model
+def conv_net(x, n_classes, dropout, reuse, is_training):
+    # Define a scope for reusing the variables
+    with tf.variable_scope('ConvNet', reuse=reuse):
+        # MNIST data input is a 1-D vector of 784 features (28*28 pixels)
+        # Reshape to match picture format [Height x Width x Channel]
+        # Tensor input become 4-D: [Batch Size, Height, Width, Channel]
+        x = tf.reshape(x, shape=[-1, 28, 28, 1])
+
+        # Convolution Layer with 32 filters and a kernel size of 5
+        conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
+        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
+        conv1 = tf.layers.max_pooling2d(conv1, 2, 2)
+
+        # Convolution Layer with 64 filters and a kernel size of 3
+        conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
+        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
+        conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
+
+        # Flatten the data to a 1-D vector for the fully connected layer
+        fc1 = tf.contrib.layers.flatten(conv2)
+
+        # Fully connected layer
+        fc1 = tf.layers.dense(fc1, 1024)
+        # Apply Dropout (if is_training is False, dropout is not applied)
+        fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
+
+        # Output layer, class prediction
+        out = tf.layers.dense(fc1, n_classes)
+        # Because 'softmax_cross_entropy_with_logits' already applies softmax,
+        # we only apply softmax to the testing network
+        out = tf.nn.softmax(out) if not is_training else out
+
+    return out
+
+
+# Because Dropout has different behavior at training and prediction time, we
+# need to create 2 distinct computation graphs that share the same weights.
+
+# Create a graph for training
+logits_train = conv_net(X, n_classes, dropout, reuse=False, is_training=True)
+# Create another graph for testing that reuses the same weights, but has
+# different behavior for 'dropout' (not applied).
+logits_test = conv_net(X, n_classes, dropout, reuse=True, is_training=False)
+
+# Define loss and optimizer (with train logits, for dropout to take effect)
+loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
+    logits=logits_train, labels=Y))
+optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
+train_op = optimizer.minimize(loss_op)
+
+# Evaluate model (with test logits, for dropout to be disabled)
+correct_pred = tf.equal(tf.argmax(logits_test, 1), tf.argmax(Y, 1))
+accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+# Run the initializer
+sess.run(init)
+
+# Training cycle
+for step in range(1, num_steps + 1):
+
+    # Run optimization
+    sess.run(train_op)
+
+    if step % display_step == 0 or step == 1:
+        # Calculate batch loss and accuracy
+        # (note that this consumes a new batch of data)
+        loss, acc = sess.run([loss_op, accuracy])
+        print("Step " + str(step) + ", Minibatch Loss= " + \
+              "{:.4f}".format(loss) + ", Training Accuracy= " + \
+              "{:.3f}".format(acc))
+
+print("Optimization Finished!")

+ 94 - 0
tensorflow_v1/examples/6_MultiGPU/multigpu_basics.py

@@ -0,0 +1,94 @@
+from __future__ import print_function
+'''
+Basic Multi GPU computation example using TensorFlow library.
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+
+'''
+This tutorial requires your machine to have 2 GPUs
+"/cpu:0": The CPU of your machine.
+"/gpu:0": The first GPU of your machine
+"/gpu:1": The second GPU of your machine
+'''
+
+
+
+import numpy as np
+import tensorflow as tf
+import datetime
+
+# Processing Units logs
+log_device_placement = True
+
+# Num of multiplications to perform
+n = 10
+
+'''
+Example: compute A^n + B^n on 2 GPUs
+Results on 8 cores with 2 GTX-980:
+ * Single GPU computation time: 0:00:11.277449
+ * Multi GPU computation time: 0:00:07.131701
+'''
+# Create random large matrix
+A = np.random.rand(10000, 10000).astype('float32')
+B = np.random.rand(10000, 10000).astype('float32')
+
+# Create a graph to store results
+c1 = []
+c2 = []
+
+def matpow(M, n):
+    if n < 1:  # Base case: stop the recursion once n < 1
+        return M
+    else:
+        return tf.matmul(M, matpow(M, n-1))
+
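+# Illustrative aside (not part of the original example): matpow above chains
+# n matmuls; exponentiation by squaring needs only O(log n) matmuls. Assumes
+# n >= 1, matching how matpow is used in this script.
+def matpow_by_squaring(M, n):
+    if n <= 1:
+        return M
+    half = matpow_by_squaring(M, n // 2)
+    squared = tf.matmul(half, half)
+    return squared if n % 2 == 0 else tf.matmul(M, squared)
+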
+'''
+Single GPU computing
+'''
+with tf.device('/gpu:0'):
+    a = tf.placeholder(tf.float32, [10000, 10000])
+    b = tf.placeholder(tf.float32, [10000, 10000])
+    # Compute A^n and B^n and store results in c1
+    c1.append(matpow(a, n))
+    c1.append(matpow(b, n))
+
+with tf.device('/cpu:0'):
+    sum = tf.add_n(c1)  # Addition of all elements in c1, i.e. A^n + B^n
+
+t1_1 = datetime.datetime.now()
+with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
+    # Run the op.
+    sess.run(sum, {a:A, b:B})
+t2_1 = datetime.datetime.now()
+
+
+'''
+Multi GPU computing
+'''
+# GPU:0 computes A^n
+with tf.device('/gpu:0'):
+    # Compute A^n and store result in c2
+    a = tf.placeholder(tf.float32, [10000, 10000])
+    c2.append(matpow(a, n))
+
+# GPU:1 computes B^n
+with tf.device('/gpu:1'):
+    # Compute B^n and store result in c2
+    b = tf.placeholder(tf.float32, [10000, 10000])
+    c2.append(matpow(b, n))
+
+with tf.device('/cpu:0'):
+    sum = tf.add_n(c2)  # Addition of all elements in c2, i.e. A^n + B^n
+
+t1_2 = datetime.datetime.now()
+with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
+    # Run the op.
+    sess.run(sum, {a:A, b:B})
+t2_2 = datetime.datetime.now()
+
+
+print("Single GPU computation time: " + str(t2_1-t1_1))
+print("Multi GPU computation time: " + str(t2_2-t1_2))
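+
+# Illustrative addition (not part of the original script): report the relative
+# speedup of the multi-GPU run over the single-GPU run.
+single_gpu_seconds = (t2_1 - t1_1).total_seconds()
+multi_gpu_seconds = (t2_2 - t1_2).total_seconds()
+print("Speedup (single / multi): %.2fx" % (single_gpu_seconds / multi_gpu_seconds))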

+ 198 - 0
tensorflow_v1/examples/6_MultiGPU/multigpu_cnn.py

@@ -0,0 +1,198 @@
+''' Multi-GPU Training Example.
+
+Train a convolutional neural network on multiple GPUs with TensorFlow.
+
+This example is using TensorFlow layers, see 'convolutional_network_raw' example
+for a raw TensorFlow implementation with variables.
+
+This example is using the MNIST database of handwritten digits
+(http://yann.lecun.com/exdb/mnist/)
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+
+from __future__ import division, print_function, absolute_import
+
+import numpy as np
+import tensorflow as tf
+import time
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+# Training Parameters
+num_gpus = 2
+num_steps = 200
+learning_rate = 0.001
+batch_size = 1024
+display_step = 10
+
+# Network Parameters
+num_input = 784 # MNIST data input (img shape: 28*28)
+num_classes = 10 # MNIST total classes (0-9 digits)
+dropout = 0.75 # Dropout, probability to drop a unit (passed as 'rate' to tf.layers.dropout)
+
+
+# Build a convolutional neural network
+def conv_net(x, n_classes, dropout, reuse, is_training):
+    # Define a scope for reusing the variables
+    with tf.variable_scope('ConvNet', reuse=reuse):
+        # MNIST data input is a 1-D vector of 784 features (28*28 pixels)
+        # Reshape to match picture format [Height x Width x Channel]
+        # Tensor input become 4-D: [Batch Size, Height, Width, Channel]
+        x = tf.reshape(x, shape=[-1, 28, 28, 1])
+
+        # Convolution Layer with 64 filters and a kernel size of 5
+        x = tf.layers.conv2d(x, 64, 5, activation=tf.nn.relu)
+        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
+        x = tf.layers.max_pooling2d(x, 2, 2)
+
+        # Convolution Layer with 256 filters and a kernel size of 3
+        x = tf.layers.conv2d(x, 256, 3, activation=tf.nn.relu)
+        # Convolution Layer with 512 filters and a kernel size of 3
+        x = tf.layers.conv2d(x, 512, 3, activation=tf.nn.relu)
+        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
+        x = tf.layers.max_pooling2d(x, 2, 2)
+
+        # Flatten the data to a 1-D vector for the fully connected layer
+        x = tf.contrib.layers.flatten(x)
+
+        # Fully connected layer
+        x = tf.layers.dense(x, 2048)
+        # Apply Dropout (if is_training is False, dropout is not applied)
+        x = tf.layers.dropout(x, rate=dropout, training=is_training)
+
+        # Fully connected layer
+        x = tf.layers.dense(x, 1024)
+        # Apply Dropout (if is_training is False, dropout is not applied)
+        x = tf.layers.dropout(x, rate=dropout, training=is_training)
+
+        # Output layer, class prediction
+        out = tf.layers.dense(x, n_classes)
+        # Because 'softmax_cross_entropy_with_logits' loss already applies
+        # softmax, we only apply softmax to the testing network
+        out = tf.nn.softmax(out) if not is_training else out
+
+    return out
+
+
+def average_gradients(tower_grads):
+    average_grads = []
+    for grad_and_vars in zip(*tower_grads):
+        # Note that each grad_and_vars looks like the following:
+        #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
+        grads = []
+        for g, _ in grad_and_vars:
+            # Add 0 dimension to the gradients to represent the tower.
+            expanded_g = tf.expand_dims(g, 0)
+
+            # Append on a 'tower' dimension which we will average over below.
+            grads.append(expanded_g)
+
+        # Average over the 'tower' dimension.
+        grad = tf.concat(grads, 0)
+        grad = tf.reduce_mean(grad, 0)
+
+        # Keep in mind that the Variables are redundant because they are shared
+        # across towers. So we will just return the first tower's pointer to
+        # the Variable.
+        v = grad_and_vars[0][1]
+        grad_and_var = (grad, v)
+        average_grads.append(grad_and_var)
+    return average_grads
+
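+# Illustrative sketch (not part of the original example): a tiny sanity check
+# of average_gradients with two towers that share a single variable. The demo
+# variable name is arbitrary; this function is not called by the training code.
+def _average_gradients_demo():
+    v = tf.Variable(0.0, name='avg_grad_demo_var')
+    towers = [[(tf.constant(1.0), v)], [(tf.constant(3.0), v)]]
+    avg_grad, _ = average_gradients(towers)[0]
+    return avg_grad  # evaluates to (1.0 + 3.0) / 2 = 2.0 in a session
+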
+
+# By default, all variables will be placed on '/gpu:0', so we need a custom
+# device function to assign all variables to '/cpu:0'.
+# Note: If GPUs are peered, '/gpu:0' can be a faster option
+PS_OPS = ['Variable', 'VariableV2', 'AutoReloadVariable']
+
+def assign_to_device(device, ps_device='/cpu:0'):
+    def _assign(op):
+        node_def = op if isinstance(op, tf.NodeDef) else op.node_def
+        if node_def.op in PS_OPS:
+            return "/" + ps_device
+        else:
+            return device
+
+    return _assign
+
+
+# Place all ops on CPU by default
+with tf.device('/cpu:0'):
+    tower_grads = []
+    reuse_vars = False
+
+    # tf Graph input
+    X = tf.placeholder(tf.float32, [None, num_input])
+    Y = tf.placeholder(tf.float32, [None, num_classes])
+
+    # Loop over all GPUs and construct their own computation graph
+    for i in range(num_gpus):
+        with tf.device(assign_to_device('/gpu:{}'.format(i), ps_device='/cpu:0')):
+
+            # Split data between GPUs
+            _x = X[i * batch_size: (i+1) * batch_size]
+            _y = Y[i * batch_size: (i+1) * batch_size]
+
+            # Because Dropout has different behavior at training and prediction time, we
+            # need to create 2 distinct computation graphs that share the same weights.
+
+            # Create a graph for training
+            logits_train = conv_net(_x, num_classes, dropout,
+                                    reuse=reuse_vars, is_training=True)
+            # Create another graph for testing that reuses the same weights
+            logits_test = conv_net(_x, num_classes, dropout,
+                                   reuse=True, is_training=False)
+
+            # Define loss and optimizer (with train logits, for dropout to take effect)
+            loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
+                logits=logits_train, labels=_y))
+            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
+            grads = optimizer.compute_gradients(loss_op)
+
+            # Only the first GPU computes accuracy
+            if i == 0:
+                # Evaluate model (with test logits, for dropout to be disabled)
+                correct_pred = tf.equal(tf.argmax(logits_test, 1), tf.argmax(_y, 1))
+                accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
+
+            reuse_vars = True
+            tower_grads.append(grads)
+
+    tower_grads = average_gradients(tower_grads)
+    train_op = optimizer.apply_gradients(tower_grads)
+
+    # Initialize the variables (i.e. assign their default value)
+    init = tf.global_variables_initializer()
+
+    # Start Training
+    with tf.Session() as sess:
+
+        # Run the initializer
+        sess.run(init)
+
+        # Keep training until the maximum number of steps is reached
+        for step in range(1, num_steps + 1):
+            # Get a batch for each GPU
+            batch_x, batch_y = mnist.train.next_batch(batch_size * num_gpus)
+            # Run optimization op (backprop)
+            ts = time.time()
+            sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
+            te = time.time() - ts
+            if step % display_step == 0 or step == 1:
+                # Calculate batch loss and accuracy
+                loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
+                                                                     Y: batch_y})
+                print("Step " + str(step) + ": Minibatch Loss= " + \
+                      "{:.4f}".format(loss) + ", Training Accuracy= " + \
+                      "{:.3f}".format(acc) + ", %i Examples/sec" % int(len(batch_x)/te))
+        print("Optimization Finished!")
+
+        # Calculate accuracy for MNIST test images
+        print("Testing Accuracy:", \
+            np.mean([sess.run(accuracy, feed_dict={X: mnist.test.images[i:i+batch_size],
+            Y: mnist.test.labels[i:i+batch_size]}) for i in range(0, len(mnist.test.images), batch_size)]))

+ 48 - 0
tensorflow_v1/notebooks/0_Prerequisite/ml_introduction.ipynb

@@ -0,0 +1,48 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Machine Learning\n",
+    "\n",
+    "Before you start browsing the examples, it may be useful to get familiar with machine learning, as TensorFlow is mostly used for machine learning tasks (especially Neural Networks). Below is a list of useful links that can give you the basic knowledge required for this TensorFlow tutorial.\n",
+    "\n",
+    "## Machine Learning\n",
+    "\n",
+    "- [An Introduction to Machine Learning Theory and Its Applications: A Visual Tutorial with Examples](https://www.toptal.com/machine-learning/machine-learning-theory-an-introductory-primer)\n",
+    "- [A Gentle Guide to Machine Learning](https://blog.monkeylearn.com/a-gentle-guide-to-machine-learning/)\n",
+    "- [A Visual Introduction to Machine Learning](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/)\n",
+    "- [Introduction to Machine Learning](http://alex.smola.org/drafts/thebook.pdf)\n",
+    "\n",
+    "## Deep Learning & Neural Networks\n",
+    "\n",
+    "- [An Introduction to Neural Networks](http://www.cs.stir.ac.uk/~lss/NNIntro/InvSlides.html)\n",
+    "- [An Introduction to Image Recognition with Deep Learning](https://medium.com/@ageitgey/machine-learning-is-fun-part-3-deep-learning-and-convolutional-neural-networks-f40359318721)\n",
+    "- [Neural Networks and Deep Learning](http://neuralnetworksanddeeplearning.com/index.html)\n",
+    "\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "IPython (Python 2.7)",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.11"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

+ 94 - 0
tensorflow_v1/notebooks/0_Prerequisite/mnist_dataset_intro.ipynb

@@ -0,0 +1,94 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "\n",
+    "# MNIST Dataset Introduction\n",
+    "\n",
+    "Most examples use the MNIST dataset of handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28x28 pixels) with values from 0 to 1. For simplicity, each image has been flattened and converted to a 1-D numpy array of 784 features (28*28).\n",
+    "\n",
+    "## Overview\n",
+    "\n",
+    "![MNIST Digits](http://neuralnetworksanddeeplearning.com/images/mnist_100_digits.png)\n",
+    "\n",
+    "## Usage\n",
+    "In our examples, we are using the TensorFlow [input_data.py](https://github.com/tensorflow/tensorflow/blob/r0.7/tensorflow/examples/tutorials/mnist/input_data.py) script to load that dataset.\n",
+    "It is quite useful for managing our data, and handles:\n",
+    "\n",
+    "- Dataset downloading\n",
+    "\n",
+    "- Loading the entire dataset into numpy arrays:\n",
+    "\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Import MNIST\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n",
+    "\n",
+    "# Load data\n",
+    "X_train = mnist.train.images\n",
+    "Y_train = mnist.train.labels\n",
+    "X_test = mnist.test.images\n",
+    "Y_test = mnist.test.labels"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "- A `next_batch` function that can iterate over the whole dataset and return only the desired fraction of the dataset samples (in order to save memory and avoid loading the entire dataset)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Get the next 64 images array and labels\n",
+    "batch_X, batch_Y = mnist.train.next_batch(64)"
+   ]
+  },
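+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Illustrative sketch (not part of the original notebook): each flattened\n",
+    "# 784-feature vector can be reshaped back to a 28x28 image, e.g. for plotting.\n",
+    "first_image = X_train[0].reshape(28, 28)\n",
+    "print(first_image.shape)"
+   ]
+  },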
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Link: http://yann.lecun.com/exdb/mnist/"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

+ 238 - 0
tensorflow_v1/notebooks/1_Introduction/basic_eager_api.ipynb

@@ -0,0 +1,238 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Basic introduction to TensorFlow's Eager API\n",
+    "\n",
+    "A simple introduction to get started with TensorFlow's Eager API.\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### What is TensorFlow's Eager API?\n",
+    "\n",
+    "*Eager execution is an imperative, define-by-run interface where operations are\n",
+    "executed immediately as they are called from Python. This makes it easier to\n",
+    "get started with TensorFlow, and can make research and development more\n",
+    "intuitive. A vast majority of the TensorFlow API remains the same whether eager\n",
+    "execution is enabled or not. As a result, the exact same code that constructs\n",
+    "TensorFlow graphs (e.g. using the layers API) can be executed imperatively\n",
+    "by using eager execution. Conversely, most models written with Eager enabled\n",
+    "can be converted to a graph that can be further optimized and/or extracted\n",
+    "for deployment in production without changing code. - Rajat Monga*\n",
+    "\n",
+    "More info: https://research.googleblog.com/2017/10/eager-execution-imperative-define-by.html"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "from __future__ import absolute_import, division, print_function\n",
+    "\n",
+    "import numpy as np\n",
+    "import tensorflow as tf"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Setting Eager mode...\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Set Eager API\n",
+    "print(\"Setting Eager mode...\")\n",
+    "tf.enable_eager_execution()\n",
+    "tfe = tf.contrib.eager"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Define constant tensors\n",
+      "a = 2\n",
+      "b = 3\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Define constant tensors\n",
+    "print(\"Define constant tensors\")\n",
+    "a = tf.constant(2)\n",
+    "print(\"a = %i\" % a)\n",
+    "b = tf.constant(3)\n",
+    "print(\"b = %i\" % b)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Running operations, without tf.Session\n",
+      "a + b = 5\n",
+      "a * b = 6\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Run the operation without the need for tf.Session\n",
+    "print(\"Running operations, without tf.Session\")\n",
+    "c = a + b\n",
+    "print(\"a + b = %i\" % c)\n",
+    "d = a * b\n",
+    "print(\"a * b = %i\" % d)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Mixing operations with Tensors and Numpy Arrays\n",
+      "Tensor:\n",
+      " a = tf.Tensor(\n",
+      "[[2. 1.]\n",
+      " [1. 0.]], shape=(2, 2), dtype=float32)\n",
+      "NumpyArray:\n",
+      " b = [[3. 0.]\n",
+      " [5. 1.]]\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Full compatibility with Numpy\n",
+    "print(\"Mixing operations with Tensors and Numpy Arrays\")\n",
+    "\n",
+    "# Define constant tensors\n",
+    "a = tf.constant([[2., 1.],\n",
+    "                 [1., 0.]], dtype=tf.float32)\n",
+    "print(\"Tensor:\\n a = %s\" % a)\n",
+    "b = np.array([[3., 0.],\n",
+    "              [5., 1.]], dtype=np.float32)\n",
+    "print(\"NumpyArray:\\n b = %s\" % b)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Running operations, without tf.Session\n",
+      "a + b = tf.Tensor(\n",
+      "[[5. 1.]\n",
+      " [6. 1.]], shape=(2, 2), dtype=float32)\n",
+      "a * b = tf.Tensor(\n",
+      "[[11.  1.]\n",
+      " [ 3.  0.]], shape=(2, 2), dtype=float32)\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Run the operation without the need for tf.Session\n",
+    "print(\"Running operations, without tf.Session\")\n",
+    "\n",
+    "c = a + b\n",
+    "print(\"a + b = %s\" % c)\n",
+    "\n",
+    "d = tf.matmul(a, b)\n",
+    "print(\"a * b = %s\" % d)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Iterate through Tensor 'a':\n",
+      "tf.Tensor(2.0, shape=(), dtype=float32)\n",
+      "tf.Tensor(1.0, shape=(), dtype=float32)\n",
+      "tf.Tensor(1.0, shape=(), dtype=float32)\n",
+      "tf.Tensor(0.0, shape=(), dtype=float32)\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(\"Iterate through Tensor 'a':\")\n",
+    "for i in range(a.shape[0]):\n",
+    "    for j in range(a.shape[1]):\n",
+    "        print(a[i][j])"
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python [default]",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}

+ 220 - 0
tensorflow_v1/notebooks/1_Introduction/basic_operations.ipynb

@@ -0,0 +1,220 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Basic Operations example using TensorFlow library.\n",
+    "# Author: Aymeric Damien\n",
+    "# Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "import tensorflow as tf"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Basic constant operations\n",
+    "# The value returned by the constructor represents the output\n",
+    "# of the Constant op.\n",
+    "a = tf.constant(2)\n",
+    "b = tf.constant(3)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "a: 2 b: 3\n",
+      "Addition with constants: 5\n",
+      "Multiplication with constants: 6\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Launch the default graph.\n",
+    "with tf.Session() as sess:\n",
+    "    print \"a: %i\" % sess.run(a), \"b: %i\" % sess.run(b)\n",
+    "    print \"Addition with constants: %i\" % sess.run(a+b)\n",
+    "    print \"Multiplication with constants: %i\" % sess.run(a*b)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Basic Operations with variable as graph input\n",
+    "# The value returned by the constructor represents the output\n",
+    "# of the Variable op. (define as input when running session)\n",
+    "# tf Graph input\n",
+    "a = tf.placeholder(tf.int16)\n",
+    "b = tf.placeholder(tf.int16)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Define some operations\n",
+    "add = tf.add(a, b)\n",
+    "mul = tf.multiply(a, b)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Addition with variables: 5\n",
+      "Multiplication with variables: 6\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Launch the default graph.\n",
+    "with tf.Session() as sess:\n",
+    "    # Run every operation with variable input\n",
+    "    print \"Addition with variables: %i\" % sess.run(add, feed_dict={a: 2, b: 3})\n",
+    "    print \"Multiplication with variables: %i\" % sess.run(mul, feed_dict={a: 2, b: 3})"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# ----------------\n",
+    "# More in details:\n",
+    "# Matrix Multiplication from TensorFlow official tutorial\n",
+    "\n",
+    "# Create a Constant op that produces a 1x2 matrix.  The op is\n",
+    "# added as a node to the default graph.\n",
+    "#\n",
+    "# The value returned by the constructor represents the output\n",
+    "# of the Constant op.\n",
+    "matrix1 = tf.constant([[3., 3.]])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Create another Constant that produces a 2x1 matrix.\n",
+    "matrix2 = tf.constant([[2.],[2.]])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs.\n",
+    "# The returned value, 'product', represents the result of the matrix\n",
+    "# multiplication.\n",
+    "product = tf.matmul(matrix1, matrix2)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[[ 12.]]\n"
+     ]
+    }
+   ],
+   "source": [
+    "# To run the matmul op we call the session 'run()' method, passing 'product'\n",
+    "# which represents the output of the matmul op.  This indicates to the call\n",
+    "# that we want to get the output of the matmul op back.\n",
+    "#\n",
+    "# All inputs needed by the op are run automatically by the session.  They\n",
+    "# typically are run in parallel.\n",
+    "#\n",
+    "# The call 'run(product)' thus causes the execution of three ops in the\n",
+    "# graph: the two constants and matmul.\n",
+    "#\n",
+    "# The output of the op is returned in 'result' as a numpy `ndarray` object.\n",
+    "with tf.Session() as sess:\n",
+    "    result = sess.run(product)\n",
+    "    print result"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "IPython (Python 2.7)",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2.0
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.8"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

+ 87 - 0
tensorflow_v1/notebooks/1_Introduction/helloworld.ipynb

@@ -0,0 +1,87 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "import tensorflow as tf"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Simple hello world using TensorFlow\n",
+    "\n",
+    "# Create a Constant op\n",
+    "# The op is added as a node to the default graph.\n",
+    "#\n",
+    "# The value returned by the constructor represents the output\n",
+    "# of the Constant op.\n",
+    "\n",
+    "hello = tf.constant('Hello, TensorFlow!')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Start tf session\n",
+    "sess = tf.Session()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Hello, TensorFlow!\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Run graph\n",
+    "print(sess.run(hello))"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "IPython (Python 2.7)",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2.0
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.8"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

Diff file is too large
+ 266 - 0
tensorflow_v1/notebooks/2_BasicModels/gradient_boosted_decision_tree.ipynb


+ 226 - 0
tensorflow_v1/notebooks/2_BasicModels/kmeans.ipynb

@@ -0,0 +1,226 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# K-Means Example\n",
+    "\n",
+    "Implement K-Means algorithm with TensorFlow, and apply it to classify\n",
+    "handwritten digit images. This example is using the MNIST database of\n",
+    "handwritten digits as training samples (http://yann.lecun.com/exdb/mnist/).\n",
+    "\n",
+    "Note: This example requires TensorFlow v1.1.0 or later.\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "from __future__ import print_function\n",
+    "\n",
+    "import numpy as np\n",
+    "import tensorflow as tf\n",
+    "from tensorflow.contrib.factorization import KMeans\n",
+    "\n",
+    "# Ignore all GPUs (this example does not benefit from GPU).\n",
+    "import os\n",
+    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Import MNIST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n",
+    "full_data_x = mnist.train.images"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Parameters\n",
+    "num_steps = 50 # Total steps to train\n",
+    "batch_size = 1024 # The number of samples per batch\n",
+    "k = 25 # The number of clusters\n",
+    "num_classes = 10 # The 10 digits\n",
+    "num_features = 784 # Each image is 28x28 pixels\n",
+    "\n",
+    "# Input images\n",
+    "X = tf.placeholder(tf.float32, shape=[None, num_features])\n",
+    "# Labels (for assigning a label to a centroid and testing)\n",
+    "Y = tf.placeholder(tf.float32, shape=[None, num_classes])\n",
+    "\n",
+    "# K-Means Parameters\n",
+    "kmeans = KMeans(inputs=X, num_clusters=k, distance_metric='cosine',\n",
+    "                use_mini_batch=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Build KMeans graph\n",
+    "(all_scores, cluster_idx, scores, cluster_centers_initialized, \n",
+    " cluster_centers_vars,init_op,train_op) = kmeans.training_graph()\n",
+    "cluster_idx = cluster_idx[0] # fix for cluster_idx being a tuple\n",
+    "avg_distance = tf.reduce_mean(scores)\n",
+    "\n",
+    "# Initialize the variables (i.e. assign their default value)\n",
+    "init_vars = tf.global_variables_initializer()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Step 1, Avg Distance: 0.341471\n",
+      "Step 10, Avg Distance: 0.221609\n",
+      "Step 20, Avg Distance: 0.220328\n",
+      "Step 30, Avg Distance: 0.219776\n",
+      "Step 40, Avg Distance: 0.219419\n",
+      "Step 50, Avg Distance: 0.219154\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Start TensorFlow session\n",
+    "sess = tf.Session()\n",
+    "\n",
+    "# Run the initializer\n",
+    "sess.run(init_vars, feed_dict={X: full_data_x})\n",
+    "sess.run(init_op, feed_dict={X: full_data_x})\n",
+    "\n",
+    "# Training\n",
+    "for i in range(1, num_steps + 1):\n",
+    "    _, d, idx = sess.run([train_op, avg_distance, cluster_idx],\n",
+    "                         feed_dict={X: full_data_x})\n",
+    "    if i % 10 == 0 or i == 1:\n",
+    "        print(\"Step %i, Avg Distance: %f\" % (i, d))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Test Accuracy: 0.7127\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Assign a label to each centroid\n",
+    "# Count the total number of labels per centroid, by assigning each training\n",
+    "# sample's label to its closest centroid (given by 'idx')\n",
+    "counts = np.zeros(shape=(k, num_classes))\n",
+    "for i in range(len(idx)):\n",
+    "    counts[idx[i]] += mnist.train.labels[i]\n",
+    "# Assign the most frequent label to the centroid\n",
+    "labels_map = [np.argmax(c) for c in counts]\n",
+    "labels_map = tf.convert_to_tensor(labels_map)\n",
+    "\n",
+    "# Evaluation ops\n",
+    "# Lookup: centroid_id -> label\n",
+    "cluster_label = tf.nn.embedding_lookup(labels_map, cluster_idx)\n",
+    "# Compute accuracy\n",
+    "correct_prediction = tf.equal(cluster_label, tf.cast(tf.argmax(Y, 1), tf.int32))\n",
+    "accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
+    "\n",
+    "# Test Model\n",
+    "test_x, test_y = mnist.test.images, mnist.test.labels\n",
+    "print(\"Test Accuracy:\", sess.run(accuracy_op, feed_dict={X: test_x, Y: test_y}))"
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.12"
+  },
+  "varInspector": {
+   "cols": {
+    "lenName": 16,
+    "lenType": 16,
+    "lenVar": 40
+   },
+   "kernels_config": {
+    "python": {
+     "delete_cmd_postfix": "",
+     "delete_cmd_prefix": "del ",
+     "library": "var_list.py",
+     "varRefreshCmd": "print(var_dic_list())"
+    },
+    "r": {
+     "delete_cmd_postfix": ") ",
+     "delete_cmd_prefix": "rm(",
+     "library": "var_list.r",
+     "varRefreshCmd": "cat(var_dic_list()) "
+    }
+   },
+   "types_to_exclude": [
+    "module",
+    "function",
+    "builtin_function_or_method",
+    "instance",
+    "_Feature"
+   ],
+   "window_display": false
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}

Diff file is too large
+ 236 - 0
tensorflow_v1/notebooks/2_BasicModels/linear_regression.ipynb


Diff file is too large
+ 181 - 0
tensorflow_v1/notebooks/2_BasicModels/linear_regression_eager_api.ipynb


+ 174 - 0
tensorflow_v1/notebooks/2_BasicModels/logistic_regression.ipynb

@@ -0,0 +1,174 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
+   "source": [
+    "# Logistic Regression Example\n",
+    "\n",
+    "A logistic regression learning algorithm example using TensorFlow library.\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## MNIST Dataset Overview\n",
+    "\n",
+    "This example is using MNIST handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28x28 pixels) with values from 0 to 1. For simplicity, each image has been flattened and converted to a 1-D numpy array of 784 features (28*28).\n",
+    "\n",
+    "![MNIST Dataset](http://neuralnetworksanddeeplearning.com/images/mnist_100_digits.png)\n",
+    "\n",
+    "More info: http://yann.lecun.com/exdb/mnist/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting MNIST_data/train-images-idx3-ubyte.gz\n",
+      "Extracting MNIST_data/train-labels-idx1-ubyte.gz\n",
+      "Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "import tensorflow as tf\n",
+    "\n",
+    "# Import MNIST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# Parameters\n",
+    "learning_rate = 0.01\n",
+    "training_epochs = 25\n",
+    "batch_size = 100\n",
+    "display_step = 1\n",
+    "\n",
+    "# tf Graph Input\n",
+    "x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784\n",
+    "y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes\n",
+    "\n",
+    "# Set model weights\n",
+    "W = tf.Variable(tf.zeros([784, 10]))\n",
+    "b = tf.Variable(tf.zeros([10]))\n",
+    "\n",
+    "# Construct model\n",
+    "pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax\n",
+    "\n",
+    "# Minimize error using cross entropy\n",
+    "cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))\n",
+    "# Gradient Descent\n",
+    "optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n",
+    "\n",
+    "# Initialize the variables (i.e. assign their default value)\n",
+    "init = tf.global_variables_initializer()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch: 0001 cost= 1.182138959\n",
+      "Epoch: 0002 cost= 0.664778162\n",
+      "Epoch: 0003 cost= 0.552686284\n",
+      "Epoch: 0004 cost= 0.498628905\n",
+      "Epoch: 0005 cost= 0.465469866\n",
+      "Epoch: 0006 cost= 0.442537872\n",
+      "Epoch: 0007 cost= 0.425462044\n",
+      "Epoch: 0008 cost= 0.412185303\n",
+      "Epoch: 0009 cost= 0.401311587\n",
+      "Epoch: 0010 cost= 0.392326203\n",
+      "Epoch: 0011 cost= 0.384736038\n",
+      "Epoch: 0012 cost= 0.378137191\n",
+      "Epoch: 0013 cost= 0.372363752\n",
+      "Epoch: 0014 cost= 0.367308579\n",
+      "Epoch: 0015 cost= 0.362704660\n",
+      "Epoch: 0016 cost= 0.358588599\n",
+      "Epoch: 0017 cost= 0.354823110\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Start training\n",
+    "with tf.Session() as sess:\n",
+    "    sess.run(init)\n",
+    "\n",
+    "    # Training cycle\n",
+    "    for epoch in range(training_epochs):\n",
+    "        avg_cost = 0.\n",
+    "        total_batch = int(mnist.train.num_examples/batch_size)\n",
+    "        # Loop over all batches\n",
+    "        for i in range(total_batch):\n",
+    "            batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n",
+    "            # Fit training using batch data\n",
+    "            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs,\n",
+    "                                                          y: batch_ys})\n",
+    "            # Compute average loss\n",
+    "            avg_cost += c / total_batch\n",
+    "        # Display logs per epoch step\n",
+    "        if (epoch+1) % display_step == 0:\n",
+    "            print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(avg_cost)\n",
+    "\n",
+    "    print \"Optimization Finished!\"\n",
+    "\n",
+    "    # Test model\n",
+    "    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n",
+    "    # Calculate accuracy for 3000 examples\n",
+    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
+    "    print \"Accuracy:\", accuracy.eval({x: mnist.test.images[:3000], y: mnist.test.labels[:3000]})"
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python [default]",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

+ 258 - 0
tensorflow_v1/notebooks/2_BasicModels/logistic_regression_eager_api.ipynb

@@ -0,0 +1,258 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Logistic Regression with Eager API\n",
+    "\n",
+    "A logistic regression implemented using TensorFlow's Eager API.\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## MNIST Dataset Overview\n",
+    "\n",
+    "This example is using MNIST handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28x28 pixels) with values from 0 to 1. For simplicity, each image has been flattened and converted to a 1-D numpy array of 784 features (28*28).\n",
+    "\n",
+    "![MNIST Dataset](http://neuralnetworksanddeeplearning.com/images/mnist_100_digits.png)\n",
+    "\n",
+    "More info: http://yann.lecun.com/exdb/mnist/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "from __future__ import absolute_import, division, print_function\n",
+    "\n",
+    "import tensorflow as tf"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Set Eager API\n",
+    "tf.enable_eager_execution()\n",
+    "tfe = tf.contrib.eager"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Import MNIST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=False)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Parameters\n",
+    "learning_rate = 0.1\n",
+    "batch_size = 128\n",
+    "num_steps = 1000\n",
+    "display_step = 100"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Iterator for the dataset\n",
+    "dataset = tf.data.Dataset.from_tensor_slices(\n",
+    "    (mnist.train.images, mnist.train.labels))\n",
+    "dataset = dataset.repeat().batch(batch_size).prefetch(batch_size)\n",
+    "dataset_iter = tfe.Iterator(dataset)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Variables\n",
+    "W = tfe.Variable(tf.zeros([784, 10]), name='weights')\n",
+    "b = tfe.Variable(tf.zeros([10]), name='bias')\n",
+    "\n",
+    "# Logistic regression (Wx + b)\n",
+    "def logistic_regression(inputs):\n",
+    "    return tf.matmul(inputs, W) + b\n",
+    "\n",
+    "# Cross-Entropy loss function\n",
+    "def loss_fn(inference_fn, inputs, labels):\n",
+    "    # Using sparse_softmax cross entropy\n",
+    "    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n",
+    "        logits=inference_fn(inputs), labels=labels))\n",
+    "\n",
+    "# Calculate accuracy\n",
+    "def accuracy_fn(inference_fn, inputs, labels):\n",
+    "    prediction = tf.nn.softmax(inference_fn(inputs))\n",
+    "    correct_pred = tf.equal(tf.argmax(prediction, 1), labels)\n",
+    "    return tf.reduce_mean(tf.cast(correct_pred, tf.float32))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# SGD Optimizer\n",
+    "optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n",
+    "\n",
+    "# Compute gradients\n",
+    "grad = tfe.implicit_gradients(loss_fn)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Initial loss= 2.302584887\n",
+      "Step: 0001  loss= 2.302584887  accuracy= 0.1172\n",
+      "Step: 0100  loss= 0.952338457  accuracy= 0.7955\n",
+      "Step: 0200  loss= 0.535867393  accuracy= 0.8712\n",
+      "Step: 0300  loss= 0.485415280  accuracy= 0.8757\n",
+      "Step: 0400  loss= 0.433947206  accuracy= 0.8843\n",
+      "Step: 0500  loss= 0.381990731  accuracy= 0.8971\n",
+      "Step: 0600  loss= 0.394154936  accuracy= 0.8947\n",
+      "Step: 0700  loss= 0.391497582  accuracy= 0.8905\n",
+      "Step: 0800  loss= 0.386373103  accuracy= 0.8945\n",
+      "Step: 0900  loss= 0.332039326  accuracy= 0.9096\n",
+      "Step: 1000  loss= 0.358993769  accuracy= 0.9002\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Training\n",
+    "average_loss = 0.\n",
+    "average_acc = 0.\n",
+    "for step in range(num_steps):\n",
+    "\n",
+    "    # Iterate through the dataset\n",
+    "    d = dataset_iter.next()\n",
+    "\n",
+    "    # Images\n",
+    "    x_batch = d[0]\n",
+    "    # Labels\n",
+    "    y_batch = tf.cast(d[1], dtype=tf.int64)\n",
+    "\n",
+    "    # Compute the batch loss\n",
+    "    batch_loss = loss_fn(logistic_regression, x_batch, y_batch)\n",
+    "    average_loss += batch_loss\n",
+    "    # Compute the batch accuracy\n",
+    "    batch_accuracy = accuracy_fn(logistic_regression, x_batch, y_batch)\n",
+    "    average_acc += batch_accuracy\n",
+    "\n",
+    "    if step == 0:\n",
+    "        # Display the initial cost, before optimizing\n",
+    "        print(\"Initial loss= {:.9f}\".format(average_loss))\n",
+    "\n",
+    "    # Update the variables following gradients info\n",
+    "    optimizer.apply_gradients(grad(logistic_regression, x_batch, y_batch))\n",
+    "\n",
+    "    # Display info\n",
+    "    if (step + 1) % display_step == 0 or step == 0:\n",
+    "        if step > 0:\n",
+    "            average_loss /= display_step\n",
+    "            average_acc /= display_step\n",
+    "        print(\"Step:\", '%04d' % (step + 1), \" loss=\",\n",
+    "              \"{:.9f}\".format(average_loss), \" accuracy=\",\n",
+    "              \"{:.4f}\".format(average_acc))\n",
+    "        average_loss = 0.\n",
+    "        average_acc = 0."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Testset Accuracy: 0.9083\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Evaluate model on the test image set\n",
+    "testX = mnist.test.images\n",
+    "testY = mnist.test.labels\n",
+    "\n",
+    "test_acc = accuracy_fn(logistic_regression, testX, testY)\n",
+    "print(\"Testset Accuracy: {:.4f}\".format(test_acc))"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.14"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
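
The training cell above drives the update with tfe.implicit_gradients, which differentiates loss_fn with respect to every trainable variable it touches. As a rough equivalent — a minimal sketch, not part of the notebook, assuming a TensorFlow build recent enough to expose tf.GradientTape under eager execution — the same update step can be written with an explicit tape:

    # Hypothetical alternative to tfe.implicit_gradients (assumes tf.GradientTape is available)
    with tf.GradientTape() as tape:
        batch_loss = loss_fn(logistic_regression, x_batch, y_batch)
    # Differentiate the batch loss with respect to the model variables
    gradients = tape.gradient(batch_loss, [W, b])
    # Pair each gradient with its variable and apply the SGD update
    optimizer.apply_gradients(list(zip(gradients, [W, b])))
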

+ 332 - 0
tensorflow_v1/notebooks/2_BasicModels/nearest_neighbor.ipynb

@@ -0,0 +1,332 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
+   "source": [
+    "# Nearest Neighbor Example\n",
+    "\n",
+    "A nearest neighbor learning algorithm example using TensorFlow library.\n",
+    "This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting MNIST_data/train-images-idx3-ubyte.gz\n",
+      "Extracting MNIST_data/train-labels-idx1-ubyte.gz\n",
+      "Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "import numpy as np\n",
+    "import tensorflow as tf\n",
+    "\n",
+    "# Import MINST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# In this example, we limit mnist data\n",
+    "Xtr, Ytr = mnist.train.next_batch(5000) #5000 for training (nn candidates)\n",
+    "Xte, Yte = mnist.test.next_batch(200) #200 for testing\n",
+    "\n",
+    "# tf Graph Input\n",
+    "xtr = tf.placeholder(\"float\", [None, 784])\n",
+    "xte = tf.placeholder(\"float\", [784])\n",
+    "\n",
+    "# Nearest Neighbor calculation using L1 Distance\n",
+    "# Calculate L1 Distance\n",
+    "distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1)\n",
+    "# Prediction: Get min distance index (Nearest neighbor)\n",
+    "pred = tf.argmin(distance, 0)\n",
+    "\n",
+    "accuracy = 0.\n",
+    "\n",
+    "# Initialize the variables (i.e. assign their default value)\n",
+    "init = tf.global_variables_initializer()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": false,
+    "scrolled": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Test 0 Prediction: 7 True Class: 7\n",
+      "Test 1 Prediction: 2 True Class: 2\n",
+      "Test 2 Prediction: 1 True Class: 1\n",
+      "Test 3 Prediction: 0 True Class: 0\n",
+      "Test 4 Prediction: 4 True Class: 4\n",
+      "Test 5 Prediction: 1 True Class: 1\n",
+      "Test 6 Prediction: 4 True Class: 4\n",
+      "Test 7 Prediction: 9 True Class: 9\n",
+      "Test 8 Prediction: 8 True Class: 5\n",
+      "Test 9 Prediction: 9 True Class: 9\n",
+      "Test 10 Prediction: 0 True Class: 0\n",
+      "Test 11 Prediction: 0 True Class: 6\n",
+      "Test 12 Prediction: 9 True Class: 9\n",
+      "Test 13 Prediction: 0 True Class: 0\n",
+      "Test 14 Prediction: 1 True Class: 1\n",
+      "Test 15 Prediction: 5 True Class: 5\n",
+      "Test 16 Prediction: 4 True Class: 9\n",
+      "Test 17 Prediction: 7 True Class: 7\n",
+      "Test 18 Prediction: 3 True Class: 3\n",
+      "Test 19 Prediction: 4 True Class: 4\n",
+      "Test 20 Prediction: 9 True Class: 9\n",
+      "Test 21 Prediction: 6 True Class: 6\n",
+      "Test 22 Prediction: 6 True Class: 6\n",
+      "Test 23 Prediction: 5 True Class: 5\n",
+      "Test 24 Prediction: 4 True Class: 4\n",
+      "Test 25 Prediction: 0 True Class: 0\n",
+      "Test 26 Prediction: 7 True Class: 7\n",
+      "Test 27 Prediction: 4 True Class: 4\n",
+      "Test 28 Prediction: 0 True Class: 0\n",
+      "Test 29 Prediction: 1 True Class: 1\n",
+      "Test 30 Prediction: 3 True Class: 3\n",
+      "Test 31 Prediction: 1 True Class: 1\n",
+      "Test 32 Prediction: 3 True Class: 3\n",
+      "Test 33 Prediction: 4 True Class: 4\n",
+      "Test 34 Prediction: 7 True Class: 7\n",
+      "Test 35 Prediction: 2 True Class: 2\n",
+      "Test 36 Prediction: 7 True Class: 7\n",
+      "Test 37 Prediction: 1 True Class: 1\n",
+      "Test 38 Prediction: 2 True Class: 2\n",
+      "Test 39 Prediction: 1 True Class: 1\n",
+      "Test 40 Prediction: 1 True Class: 1\n",
+      "Test 41 Prediction: 7 True Class: 7\n",
+      "Test 42 Prediction: 4 True Class: 4\n",
+      "Test 43 Prediction: 1 True Class: 2\n",
+      "Test 44 Prediction: 3 True Class: 3\n",
+      "Test 45 Prediction: 5 True Class: 5\n",
+      "Test 46 Prediction: 1 True Class: 1\n",
+      "Test 47 Prediction: 2 True Class: 2\n",
+      "Test 48 Prediction: 4 True Class: 4\n",
+      "Test 49 Prediction: 4 True Class: 4\n",
+      "Test 50 Prediction: 6 True Class: 6\n",
+      "Test 51 Prediction: 3 True Class: 3\n",
+      "Test 52 Prediction: 5 True Class: 5\n",
+      "Test 53 Prediction: 5 True Class: 5\n",
+      "Test 54 Prediction: 6 True Class: 6\n",
+      "Test 55 Prediction: 0 True Class: 0\n",
+      "Test 56 Prediction: 4 True Class: 4\n",
+      "Test 57 Prediction: 1 True Class: 1\n",
+      "Test 58 Prediction: 9 True Class: 9\n",
+      "Test 59 Prediction: 5 True Class: 5\n",
+      "Test 60 Prediction: 7 True Class: 7\n",
+      "Test 61 Prediction: 8 True Class: 8\n",
+      "Test 62 Prediction: 9 True Class: 9\n",
+      "Test 63 Prediction: 3 True Class: 3\n",
+      "Test 64 Prediction: 7 True Class: 7\n",
+      "Test 65 Prediction: 4 True Class: 4\n",
+      "Test 66 Prediction: 6 True Class: 6\n",
+      "Test 67 Prediction: 4 True Class: 4\n",
+      "Test 68 Prediction: 3 True Class: 3\n",
+      "Test 69 Prediction: 0 True Class: 0\n",
+      "Test 70 Prediction: 7 True Class: 7\n",
+      "Test 71 Prediction: 0 True Class: 0\n",
+      "Test 72 Prediction: 2 True Class: 2\n",
+      "Test 73 Prediction: 7 True Class: 9\n",
+      "Test 74 Prediction: 1 True Class: 1\n",
+      "Test 75 Prediction: 7 True Class: 7\n",
+      "Test 76 Prediction: 3 True Class: 3\n",
+      "Test 77 Prediction: 7 True Class: 2\n",
+      "Test 78 Prediction: 9 True Class: 9\n",
+      "Test 79 Prediction: 7 True Class: 7\n",
+      "Test 80 Prediction: 7 True Class: 7\n",
+      "Test 81 Prediction: 6 True Class: 6\n",
+      "Test 82 Prediction: 2 True Class: 2\n",
+      "Test 83 Prediction: 7 True Class: 7\n",
+      "Test 84 Prediction: 8 True Class: 8\n",
+      "Test 85 Prediction: 4 True Class: 4\n",
+      "Test 86 Prediction: 7 True Class: 7\n",
+      "Test 87 Prediction: 3 True Class: 3\n",
+      "Test 88 Prediction: 6 True Class: 6\n",
+      "Test 89 Prediction: 1 True Class: 1\n",
+      "Test 90 Prediction: 3 True Class: 3\n",
+      "Test 91 Prediction: 6 True Class: 6\n",
+      "Test 92 Prediction: 9 True Class: 9\n",
+      "Test 93 Prediction: 3 True Class: 3\n",
+      "Test 94 Prediction: 1 True Class: 1\n",
+      "Test 95 Prediction: 4 True Class: 4\n",
+      "Test 96 Prediction: 1 True Class: 1\n",
+      "Test 97 Prediction: 7 True Class: 7\n",
+      "Test 98 Prediction: 6 True Class: 6\n",
+      "Test 99 Prediction: 9 True Class: 9\n",
+      "Test 100 Prediction: 6 True Class: 6\n",
+      "Test 101 Prediction: 0 True Class: 0\n",
+      "Test 102 Prediction: 5 True Class: 5\n",
+      "Test 103 Prediction: 4 True Class: 4\n",
+      "Test 104 Prediction: 9 True Class: 9\n",
+      "Test 105 Prediction: 9 True Class: 9\n",
+      "Test 106 Prediction: 2 True Class: 2\n",
+      "Test 107 Prediction: 1 True Class: 1\n",
+      "Test 108 Prediction: 9 True Class: 9\n",
+      "Test 109 Prediction: 4 True Class: 4\n",
+      "Test 110 Prediction: 8 True Class: 8\n",
+      "Test 111 Prediction: 7 True Class: 7\n",
+      "Test 112 Prediction: 3 True Class: 3\n",
+      "Test 113 Prediction: 9 True Class: 9\n",
+      "Test 114 Prediction: 7 True Class: 7\n",
+      "Test 115 Prediction: 9 True Class: 4\n",
+      "Test 116 Prediction: 9 True Class: 4\n",
+      "Test 117 Prediction: 4 True Class: 4\n",
+      "Test 118 Prediction: 9 True Class: 9\n",
+      "Test 119 Prediction: 7 True Class: 2\n",
+      "Test 120 Prediction: 5 True Class: 5\n",
+      "Test 121 Prediction: 4 True Class: 4\n",
+      "Test 122 Prediction: 7 True Class: 7\n",
+      "Test 123 Prediction: 6 True Class: 6\n",
+      "Test 124 Prediction: 7 True Class: 7\n",
+      "Test 125 Prediction: 9 True Class: 9\n",
+      "Test 126 Prediction: 0 True Class: 0\n",
+      "Test 127 Prediction: 5 True Class: 5\n",
+      "Test 128 Prediction: 8 True Class: 8\n",
+      "Test 129 Prediction: 5 True Class: 5\n",
+      "Test 130 Prediction: 6 True Class: 6\n",
+      "Test 131 Prediction: 6 True Class: 6\n",
+      "Test 132 Prediction: 5 True Class: 5\n",
+      "Test 133 Prediction: 7 True Class: 7\n",
+      "Test 134 Prediction: 8 True Class: 8\n",
+      "Test 135 Prediction: 1 True Class: 1\n",
+      "Test 136 Prediction: 0 True Class: 0\n",
+      "Test 137 Prediction: 1 True Class: 1\n",
+      "Test 138 Prediction: 6 True Class: 6\n",
+      "Test 139 Prediction: 4 True Class: 4\n",
+      "Test 140 Prediction: 6 True Class: 6\n",
+      "Test 141 Prediction: 7 True Class: 7\n",
+      "Test 142 Prediction: 2 True Class: 3\n",
+      "Test 143 Prediction: 1 True Class: 1\n",
+      "Test 144 Prediction: 7 True Class: 7\n",
+      "Test 145 Prediction: 1 True Class: 1\n",
+      "Test 146 Prediction: 8 True Class: 8\n",
+      "Test 147 Prediction: 2 True Class: 2\n",
+      "Test 148 Prediction: 0 True Class: 0\n",
+      "Test 149 Prediction: 1 True Class: 2\n",
+      "Test 150 Prediction: 9 True Class: 9\n",
+      "Test 151 Prediction: 9 True Class: 9\n",
+      "Test 152 Prediction: 5 True Class: 5\n",
+      "Test 153 Prediction: 5 True Class: 5\n",
+      "Test 154 Prediction: 1 True Class: 1\n",
+      "Test 155 Prediction: 5 True Class: 5\n",
+      "Test 156 Prediction: 6 True Class: 6\n",
+      "Test 157 Prediction: 0 True Class: 0\n",
+      "Test 158 Prediction: 3 True Class: 3\n",
+      "Test 159 Prediction: 4 True Class: 4\n",
+      "Test 160 Prediction: 4 True Class: 4\n",
+      "Test 161 Prediction: 6 True Class: 6\n",
+      "Test 162 Prediction: 5 True Class: 5\n",
+      "Test 163 Prediction: 4 True Class: 4\n",
+      "Test 164 Prediction: 6 True Class: 6\n",
+      "Test 165 Prediction: 5 True Class: 5\n",
+      "Test 166 Prediction: 4 True Class: 4\n",
+      "Test 167 Prediction: 5 True Class: 5\n",
+      "Test 168 Prediction: 1 True Class: 1\n",
+      "Test 169 Prediction: 4 True Class: 4\n",
+      "Test 170 Prediction: 9 True Class: 4\n",
+      "Test 171 Prediction: 7 True Class: 7\n",
+      "Test 172 Prediction: 2 True Class: 2\n",
+      "Test 173 Prediction: 3 True Class: 3\n",
+      "Test 174 Prediction: 2 True Class: 2\n",
+      "Test 175 Prediction: 1 True Class: 7\n",
+      "Test 176 Prediction: 1 True Class: 1\n",
+      "Test 177 Prediction: 8 True Class: 8\n",
+      "Test 178 Prediction: 1 True Class: 1\n",
+      "Test 179 Prediction: 8 True Class: 8\n",
+      "Test 180 Prediction: 1 True Class: 1\n",
+      "Test 181 Prediction: 8 True Class: 8\n",
+      "Test 182 Prediction: 5 True Class: 5\n",
+      "Test 183 Prediction: 0 True Class: 0\n",
+      "Test 184 Prediction: 2 True Class: 8\n",
+      "Test 185 Prediction: 9 True Class: 9\n",
+      "Test 186 Prediction: 2 True Class: 2\n",
+      "Test 187 Prediction: 5 True Class: 5\n",
+      "Test 188 Prediction: 0 True Class: 0\n",
+      "Test 189 Prediction: 1 True Class: 1\n",
+      "Test 190 Prediction: 1 True Class: 1\n",
+      "Test 191 Prediction: 1 True Class: 1\n",
+      "Test 192 Prediction: 0 True Class: 0\n",
+      "Test 193 Prediction: 4 True Class: 9\n",
+      "Test 194 Prediction: 0 True Class: 0\n",
+      "Test 195 Prediction: 1 True Class: 3\n",
+      "Test 196 Prediction: 1 True Class: 1\n",
+      "Test 197 Prediction: 6 True Class: 6\n",
+      "Test 198 Prediction: 4 True Class: 4\n",
+      "Test 199 Prediction: 2 True Class: 2\n",
+      "Done!\n",
+      "Accuracy: 0.92\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Start training\n",
+    "with tf.Session() as sess:\n",
+    "    sess.run(init)\n",
+    "\n",
+    "    # loop over test data\n",
+    "    for i in range(len(Xte)):\n",
+    "        # Get nearest neighbor\n",
+    "        nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]})\n",
+    "        # Get nearest neighbor class label and compare it to its true label\n",
+    "        print \"Test\", i, \"Prediction:\", np.argmax(Ytr[nn_index]), \\\n",
+    "            \"True Class:\", np.argmax(Yte[i])\n",
+    "        # Calculate accuracy\n",
+    "        if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):\n",
+    "            accuracy += 1./len(Xte)\n",
+    "    print \"Done!\"\n",
+    "    print \"Accuracy:\", accuracy"
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python [default]",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
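
The nearest-neighbor rule above is simply "argmin of the L1 distance to every training image", evaluated inside a TF graph. A minimal NumPy sketch of the same rule (the helper name predict_l1_nn is hypothetical, not from the notebook), assuming Xtr, Ytr, Xte as loaded in the first code cell:

    import numpy as np

    def predict_l1_nn(Xtr, Ytr, x):
        # L1 distance: sum of absolute pixel differences to every training image
        distances = np.sum(np.abs(Xtr - x), axis=1)
        # Index of the closest training image
        nn_index = np.argmin(distances)
        # Convert its one-hot label to a class id
        return np.argmax(Ytr[nn_index])

    # Example: predicted class for the first test image
    # predict_l1_nn(Xtr, Ytr, Xte[0])
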

The diff for this file is too large to display.
+ 229 - 0
tensorflow_v1/notebooks/2_BasicModels/random_forest.ipynb


+ 724 - 0
tensorflow_v1/notebooks/2_BasicModels/word2vec.ipynb

@@ -0,0 +1,724 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Word2Vec (Word Embedding)\n",
+    "\n",
+    "Implement Word2Vec algorithm to compute vector representations of words.\n",
+    "This example is using a small chunk of Wikipedia articles to train from.\n",
+    "\n",
+    "More info: [Mikolov, Tomas et al. \"Efficient Estimation of Word Representations in Vector Space.\", 2013](https://arxiv.org/pdf/1301.3781.pdf)\n",
+    "\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "from __future__ import division, print_function, absolute_import\n",
+    "\n",
+    "import collections\n",
+    "import os\n",
+    "import random\n",
+    "import urllib\n",
+    "import zipfile\n",
+    "\n",
+    "import numpy as np\n",
+    "import tensorflow as tf"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Training Parameters\n",
+    "learning_rate = 0.1\n",
+    "batch_size = 128\n",
+    "num_steps = 3000000\n",
+    "display_step = 10000\n",
+    "eval_step = 200000\n",
+    "\n",
+    "# Evaluation Parameters\n",
+    "eval_words = ['five', 'of', 'going', 'hardware', 'american', 'britain']\n",
+    "\n",
+    "# Word2Vec Parameters\n",
+    "embedding_size = 200 # Dimension of the embedding vector\n",
+    "max_vocabulary_size = 50000 # Total number of different words in the vocabulary\n",
+    "min_occurrence = 10 # Remove all words that does not appears at least n times\n",
+    "skip_window = 3 # How many words to consider left and right\n",
+    "num_skips = 2 # How many times to reuse an input to generate a label\n",
+    "num_sampled = 64 # Number of negative examples to sample"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Downloading the dataset... (It may take some time)\n",
+      "Done!\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Download a small chunk of Wikipedia articles collection\n",
+    "url = 'http://mattmahoney.net/dc/text8.zip'\n",
+    "data_path = 'text8.zip'\n",
+    "if not os.path.exists(data_path):\n",
+    "    print(\"Downloading the dataset... (It may take some time)\")\n",
+    "    filename, _ = urllib.urlretrieve(url, data_path)\n",
+    "    print(\"Done!\")\n",
+    "# Unzip the dataset file. Text has already been processed\n",
+    "with zipfile.ZipFile(data_path) as f:\n",
+    "    text_words = f.read(f.namelist()[0]).lower().split()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Words count: 17005207\n",
+      "Unique words: 253854\n",
+      "Vocabulary size: 50000\n",
+      "Most common words: [('UNK', 418391), ('the', 1061396), ('of', 593677), ('and', 416629), ('one', 411764), ('in', 372201), ('a', 325873), ('to', 316376), ('zero', 264975), ('nine', 250430)]\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Build the dictionary and replace rare words with UNK token\n",
+    "count = [('UNK', -1)]\n",
+    "# Retrieve the most common words\n",
+    "count.extend(collections.Counter(text_words).most_common(max_vocabulary_size - 1))\n",
+    "# Remove samples with less than 'min_occurrence' occurrences\n",
+    "for i in range(len(count) - 1, -1, -1):\n",
+    "    if count[i][1] < min_occurrence:\n",
+    "        count.pop(i)\n",
+    "    else:\n",
+    "        # The collection is ordered, so stop when 'min_occurrence' is reached\n",
+    "        break\n",
+    "# Compute the vocabulary size\n",
+    "vocabulary_size = len(count)\n",
+    "# Assign an id to each word\n",
+    "word2id = dict()\n",
+    "for i, (word, _)in enumerate(count):\n",
+    "    word2id[word] = i\n",
+    "\n",
+    "data = list()\n",
+    "unk_count = 0\n",
+    "for word in text_words:\n",
+    "    # Retrieve a word id, or assign it index 0 ('UNK') if not in dictionary\n",
+    "    index = word2id.get(word, 0)\n",
+    "    if index == 0:\n",
+    "        unk_count += 1\n",
+    "    data.append(index)\n",
+    "count[0] = ('UNK', unk_count)\n",
+    "id2word = dict(zip(word2id.values(), word2id.keys()))\n",
+    "\n",
+    "print(\"Words count:\", len(text_words))\n",
+    "print(\"Unique words:\", len(set(text_words)))\n",
+    "print(\"Vocabulary size:\", vocabulary_size)\n",
+    "print(\"Most common words:\", count[:10])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "data_index = 0\n",
+    "# Generate training batch for the skip-gram model\n",
+    "def next_batch(batch_size, num_skips, skip_window):\n",
+    "    global data_index\n",
+    "    assert batch_size % num_skips == 0\n",
+    "    assert num_skips <= 2 * skip_window\n",
+    "    batch = np.ndarray(shape=(batch_size), dtype=np.int32)\n",
+    "    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n",
+    "    # get window size (words left and right + current one)\n",
+    "    span = 2 * skip_window + 1\n",
+    "    buffer = collections.deque(maxlen=span)\n",
+    "    if data_index + span > len(data):\n",
+    "        data_index = 0\n",
+    "    buffer.extend(data[data_index:data_index + span])\n",
+    "    data_index += span\n",
+    "    for i in range(batch_size // num_skips):\n",
+    "        context_words = [w for w in range(span) if w != skip_window]\n",
+    "        words_to_use = random.sample(context_words, num_skips)\n",
+    "        for j, context_word in enumerate(words_to_use):\n",
+    "            batch[i * num_skips + j] = buffer[skip_window]\n",
+    "            labels[i * num_skips + j, 0] = buffer[context_word]\n",
+    "        if data_index == len(data):\n",
+    "            buffer.extend(data[0:span])\n",
+    "            data_index = span\n",
+    "        else:\n",
+    "            buffer.append(data[data_index])\n",
+    "            data_index += 1\n",
+    "    # Backtrack a little bit to avoid skipping words in the end of a batch\n",
+    "    data_index = (data_index + len(data) - span) % len(data)\n",
+    "    return batch, labels"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Input data\n",
+    "X = tf.placeholder(tf.int32, shape=[None])\n",
+    "# Input label\n",
+    "Y = tf.placeholder(tf.int32, shape=[None, 1])\n",
+    "\n",
+    "# Ensure the following ops & var are assigned on CPU\n",
+    "# (some ops are not compatible on GPU)\n",
+    "with tf.device('/cpu:0'):\n",
+    "    # Create the embedding variable (each row represent a word embedding vector)\n",
+    "    embedding = tf.Variable(tf.random_normal([vocabulary_size, embedding_size]))\n",
+    "    # Lookup the corresponding embedding vectors for each sample in X\n",
+    "    X_embed = tf.nn.embedding_lookup(embedding, X)\n",
+    "\n",
+    "    # Construct the variables for the NCE loss\n",
+    "    nce_weights = tf.Variable(tf.random_normal([vocabulary_size, embedding_size]))\n",
+    "    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))\n",
+    "\n",
+    "# Compute the average NCE loss for the batch\n",
+    "loss_op = tf.reduce_mean(\n",
+    "    tf.nn.nce_loss(weights=nce_weights,\n",
+    "                   biases=nce_biases,\n",
+    "                   labels=Y,\n",
+    "                   inputs=X_embed,\n",
+    "                   num_sampled=num_sampled,\n",
+    "                   num_classes=vocabulary_size))\n",
+    "\n",
+    "# Define the optimizer\n",
+    "optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n",
+    "train_op = optimizer.minimize(loss_op)\n",
+    "\n",
+    "# Evaluation\n",
+    "# Compute the cosine similarity between input data embedding and every embedding vectors\n",
+    "X_embed_norm = X_embed / tf.sqrt(tf.reduce_sum(tf.square(X_embed)))\n",
+    "embedding_norm = embedding / tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keepdims=True))\n",
+    "cosine_sim_op = tf.matmul(X_embed_norm, embedding_norm, transpose_b=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {
+    "collapsed": false,
+    "scrolled": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Step 1, Average Loss= 520.3188\n",
+      "Evaluation...\n",
+      "\"five\" nearest neighbors: brothers, swinging, dissemination, fruitful, trichloride, dll, timur, torre,\n",
+      "\"of\" nearest neighbors: malting, vaginal, cecil, xiaoping, arrangers, hydras, exhibits, splits,\n",
+      "\"going\" nearest neighbors: besht, xps, sdtv, mississippi, frequencies, tora, reciprocating, tursiops,\n",
+      "\"hardware\" nearest neighbors: burgh, residences, mares, attested, whirlwind, isomerism, admiration, ties,\n",
+      "\"american\" nearest neighbors: tensile, months, baffling, cricket, kodak, risky, nicomedia, jura,\n",
+      "\"britain\" nearest neighbors: superstring, interpretations, genealogical, munition, boer, occasional, psychologists, turbofan,\n",
+      "Step 10000, Average Loss= 202.2640\n",
+      "Step 20000, Average Loss= 96.5149\n",
+      "Step 30000, Average Loss= 67.2858\n",
+      "Step 40000, Average Loss= 52.5055\n",
+      "Step 50000, Average Loss= 42.6301\n",
+      "Step 60000, Average Loss= 37.3644\n",
+      "Step 70000, Average Loss= 33.1220\n",
+      "Step 80000, Average Loss= 30.5835\n",
+      "Step 90000, Average Loss= 28.2243\n",
+      "Step 100000, Average Loss= 25.5532\n",
+      "Step 110000, Average Loss= 24.0891\n",
+      "Step 120000, Average Loss= 21.8576\n",
+      "Step 130000, Average Loss= 21.2192\n",
+      "Step 140000, Average Loss= 19.8834\n",
+      "Step 150000, Average Loss= 19.3362\n",
+      "Step 160000, Average Loss= 18.3129\n",
+      "Step 170000, Average Loss= 17.4952\n",
+      "Step 180000, Average Loss= 16.8531\n",
+      "Step 190000, Average Loss= 15.9615\n",
+      "Step 200000, Average Loss= 15.0718\n",
+      "Evaluation...\n",
+      "\"five\" nearest neighbors: three, four, eight, six, seven, two, nine, one,\n",
+      "\"of\" nearest neighbors: the, is, a, was, with, in, and, on,\n",
+      "\"going\" nearest neighbors: time, military, called, with, used, state, most, new,\n",
+      "\"hardware\" nearest neighbors: deaths, system, three, at, zero, two, s, UNK,\n",
+      "\"american\" nearest neighbors: UNK, and, s, about, in, when, from, after,\n",
+      "\"britain\" nearest neighbors: years, were, from, both, of, these, is, many,\n",
+      "Step 210000, Average Loss= 14.9267\n",
+      "Step 220000, Average Loss= 15.4700\n",
+      "Step 230000, Average Loss= 14.0867\n",
+      "Step 240000, Average Loss= 14.5337\n",
+      "Step 250000, Average Loss= 13.2458\n",
+      "Step 260000, Average Loss= 13.2944\n",
+      "Step 270000, Average Loss= 13.0396\n",
+      "Step 280000, Average Loss= 12.1902\n",
+      "Step 290000, Average Loss= 11.7444\n",
+      "Step 300000, Average Loss= 11.8473\n",
+      "Step 310000, Average Loss= 11.1306\n",
+      "Step 320000, Average Loss= 11.1699\n",
+      "Step 330000, Average Loss= 10.8638\n",
+      "Step 340000, Average Loss= 10.7910\n",
+      "Step 350000, Average Loss= 11.0721\n",
+      "Step 360000, Average Loss= 10.6309\n",
+      "Step 370000, Average Loss= 10.4836\n",
+      "Step 380000, Average Loss= 10.3482\n",
+      "Step 390000, Average Loss= 10.0679\n",
+      "Step 400000, Average Loss= 10.0070\n",
+      "Evaluation...\n",
+      "\"five\" nearest neighbors: four, three, six, seven, eight, two, one, zero,\n",
+      "\"of\" nearest neighbors: and, in, the, a, for, by, is, while,\n",
+      "\"going\" nearest neighbors: name, called, made, military, music, people, city, was,\n",
+      "\"hardware\" nearest neighbors: power, a, john, the, has, see, and, system,\n",
+      "\"american\" nearest neighbors: s, british, UNK, john, in, during, and, from,\n",
+      "\"britain\" nearest neighbors: from, general, are, before, first, after, history, was,\n",
+      "Step 410000, Average Loss= 10.1151\n",
+      "Step 420000, Average Loss= 9.5719\n",
+      "Step 430000, Average Loss= 9.8267\n",
+      "Step 440000, Average Loss= 9.4704\n",
+      "Step 450000, Average Loss= 9.5561\n",
+      "Step 460000, Average Loss= 9.1479\n",
+      "Step 470000, Average Loss= 8.8914\n",
+      "Step 480000, Average Loss= 9.0281\n",
+      "Step 490000, Average Loss= 9.3139\n",
+      "Step 500000, Average Loss= 9.1559\n",
+      "Step 510000, Average Loss= 8.8257\n",
+      "Step 520000, Average Loss= 8.9081\n",
+      "Step 530000, Average Loss= 8.8572\n",
+      "Step 540000, Average Loss= 8.5835\n",
+      "Step 550000, Average Loss= 8.4495\n",
+      "Step 560000, Average Loss= 8.4193\n",
+      "Step 570000, Average Loss= 8.3399\n",
+      "Step 580000, Average Loss= 8.1633\n",
+      "Step 590000, Average Loss= 8.2914\n",
+      "Step 600000, Average Loss= 8.0268\n",
+      "Evaluation...\n",
+      "\"five\" nearest neighbors: three, four, six, two, seven, eight, one, zero,\n",
+      "\"of\" nearest neighbors: and, the, in, including, with, for, on, or,\n",
+      "\"going\" nearest neighbors: popular, king, his, music, and, time, name, being,\n",
+      "\"hardware\" nearest neighbors: power, over, then, than, became, at, less, for,\n",
+      "\"american\" nearest neighbors: english, s, german, in, french, since, john, between,\n",
+      "\"britain\" nearest neighbors: however, were, state, first, group, general, from, second,\n",
+      "Step 610000, Average Loss= 8.1733\n",
+      "Step 620000, Average Loss= 8.2522\n",
+      "Step 630000, Average Loss= 8.0434\n",
+      "Step 640000, Average Loss= 8.0930\n",
+      "Step 650000, Average Loss= 7.8770\n",
+      "Step 660000, Average Loss= 7.9221\n",
+      "Step 670000, Average Loss= 7.7645\n",
+      "Step 680000, Average Loss= 7.9534\n",
+      "Step 690000, Average Loss= 7.7507\n",
+      "Step 700000, Average Loss= 7.7499\n",
+      "Step 710000, Average Loss= 7.6629\n",
+      "Step 720000, Average Loss= 7.6055\n",
+      "Step 730000, Average Loss= 7.4779\n",
+      "Step 740000, Average Loss= 7.3182\n",
+      "Step 750000, Average Loss= 7.6399\n",
+      "Step 760000, Average Loss= 7.4364\n",
+      "Step 770000, Average Loss= 7.6509\n",
+      "Step 780000, Average Loss= 7.3204\n",
+      "Step 790000, Average Loss= 7.4101\n",
+      "Step 800000, Average Loss= 7.4354\n",
+      "Evaluation...\n",
+      "\"five\" nearest neighbors: three, four, six, seven, eight, two, one, nine,\n",
+      "\"of\" nearest neighbors: and, the, its, a, with, at, in, for,\n",
+      "\"going\" nearest neighbors: were, man, music, now, great, support, popular, her,\n",
+      "\"hardware\" nearest neighbors: power, system, then, military, high, against, since, international,\n",
+      "\"american\" nearest neighbors: english, british, born, b, john, french, d, german,\n",
+      "\"britain\" nearest neighbors: government, second, before, from, state, several, the, at,\n",
+      "Step 810000, Average Loss= 7.2603\n",
+      "Step 820000, Average Loss= 7.1646\n",
+      "Step 830000, Average Loss= 7.3155\n",
+      "Step 840000, Average Loss= 7.1274\n",
+      "Step 850000, Average Loss= 7.1237\n",
+      "Step 860000, Average Loss= 7.1528\n",
+      "Step 870000, Average Loss= 7.0673\n",
+      "Step 880000, Average Loss= 7.2167\n",
+      "Step 890000, Average Loss= 7.1359\n",
+      "Step 900000, Average Loss= 7.0940\n",
+      "Step 910000, Average Loss= 7.1114\n",
+      "Step 920000, Average Loss= 6.9328\n",
+      "Step 930000, Average Loss= 7.0108\n",
+      "Step 940000, Average Loss= 7.0630\n",
+      "Step 950000, Average Loss= 6.8371\n",
+      "Step 960000, Average Loss= 7.0466\n",
+      "Step 970000, Average Loss= 6.8331\n",
+      "Step 980000, Average Loss= 6.9670\n",
+      "Step 990000, Average Loss= 6.7357\n",
+      "Step 1000000, Average Loss= 6.6453\n",
+      "Evaluation...\n",
+      "\"five\" nearest neighbors: four, three, six, eight, seven, two, nine, zero,\n",
+      "\"of\" nearest neighbors: the, became, including, first, second, from, following, and,\n",
+      "\"going\" nearest neighbors: near, music, popular, made, while, his, works, most,\n",
+      "\"hardware\" nearest neighbors: power, system, before, its, using, for, thus, an,\n",
+      "\"american\" nearest neighbors: b, born, d, UNK, nine, john, english, seven,\n",
+      "\"britain\" nearest neighbors: of, following, government, home, from, state, end, several,\n",
+      "Step 1010000, Average Loss= 6.7193\n",
+      "Step 1020000, Average Loss= 6.9297\n",
+      "Step 1030000, Average Loss= 6.7905\n",
+      "Step 1040000, Average Loss= 6.7709\n",
+      "Step 1050000, Average Loss= 6.7337\n",
+      "Step 1060000, Average Loss= 6.7617\n",
+      "Step 1070000, Average Loss= 6.7489\n",
+      "Step 1080000, Average Loss= 6.6259\n",
+      "Step 1090000, Average Loss= 6.6415\n",
+      "Step 1100000, Average Loss= 6.7209\n",
+      "Step 1110000, Average Loss= 6.5471\n",
+      "Step 1120000, Average Loss= 6.6508\n",
+      "Step 1130000, Average Loss= 6.5184\n",
+      "Step 1140000, Average Loss= 6.6202\n",
+      "Step 1150000, Average Loss= 6.7205\n",
+      "Step 1160000, Average Loss= 6.5821\n",
+      "Step 1170000, Average Loss= 6.6200\n",
+      "Step 1180000, Average Loss= 6.5089\n",
+      "Step 1190000, Average Loss= 6.5587\n",
+      "Step 1200000, Average Loss= 6.4930\n",
+      "Evaluation...\n",
+      "\"five\" nearest neighbors: three, four, six, seven, eight, two, nine, zero,\n",
+      "\"of\" nearest neighbors: the, and, including, in, first, with, following, from,\n",
+      "\"going\" nearest neighbors: near, popular, works, today, large, now, when, both,\n",
+      "\"hardware\" nearest neighbors: power, system, computer, its, both, for, using, which,\n",
+      "\"american\" nearest neighbors: born, d, john, german, b, UNK, english, s,\n",
+      "\"britain\" nearest neighbors: state, following, government, home, became, people, were, the,\n",
+      "Step 1210000, Average Loss= 6.5985\n",
+      "Step 1220000, Average Loss= 6.4534\n",
+      "Step 1230000, Average Loss= 6.5083\n",
+      "Step 1240000, Average Loss= 6.4913\n",
+      "Step 1250000, Average Loss= 6.4326\n",
+      "Step 1260000, Average Loss= 6.3891\n",
+      "Step 1270000, Average Loss= 6.1601\n",
+      "Step 1280000, Average Loss= 6.4479\n",
+      "Step 1290000, Average Loss= 6.3813\n",
+      "Step 1300000, Average Loss= 6.5335\n",
+      "Step 1310000, Average Loss= 6.2971\n",
+      "Step 1320000, Average Loss= 6.3723\n",
+      "Step 1330000, Average Loss= 6.4234\n",
+      "Step 1340000, Average Loss= 6.3130\n",
+      "Step 1350000, Average Loss= 6.2867\n",
+      "Step 1360000, Average Loss= 6.3505\n",
+      "Step 1370000, Average Loss= 6.2990\n",
+      "Step 1380000, Average Loss= 6.3012\n",
+      "Step 1390000, Average Loss= 6.3112\n",
+      "Step 1400000, Average Loss= 6.2680\n",
+      "Evaluation...\n",
+      "\"five\" nearest neighbors: four, three, six, two, seven, eight, one, zero,\n",
+      "\"of\" nearest neighbors: the, its, and, including, in, with, see, for,\n",
+      "\"going\" nearest neighbors: near, great, like, today, began, called, an, another,\n",
+      "\"hardware\" nearest neighbors: power, computer, system, for, program, high, control, small,\n",
+      "\"american\" nearest neighbors: english, german, french, born, john, british, s, references,\n",
+      "\"britain\" nearest neighbors: state, great, government, people, following, became, along, home,\n",
+      "Step 1410000, Average Loss= 6.3157\n",
+      "Step 1420000, Average Loss= 6.3466\n",
+      "Step 1430000, Average Loss= 6.3090\n",
+      "Step 1440000, Average Loss= 6.3330\n",
+      "Step 1450000, Average Loss= 6.2072\n",
+      "Step 1460000, Average Loss= 6.2363\n",
+      "Step 1470000, Average Loss= 6.2736\n",
+      "Step 1480000, Average Loss= 6.1793\n",
+      "Step 1490000, Average Loss= 6.2977\n",
+      "Step 1500000, Average Loss= 6.1899\n",
+      "Step 1510000, Average Loss= 6.2381\n",
+      "Step 1520000, Average Loss= 6.1027\n",
+      "Step 1530000, Average Loss= 6.0046\n",
+      "Step 1540000, Average Loss= 6.0747\n",
+      "Step 1550000, Average Loss= 6.2524\n",
+      "Step 1560000, Average Loss= 6.1247\n",
+      "Step 1570000, Average Loss= 6.1937\n",
+      "Step 1580000, Average Loss= 6.0450\n",
+      "Step 1590000, Average Loss= 6.1556\n",
+      "Step 1600000, Average Loss= 6.1765\n",
+      "Evaluation...\n",
+      "\"five\" nearest neighbors: three, four, six, two, seven, eight, one, zero,\n",
+      "\"of\" nearest neighbors: the, and, its, for, from, modern, in, part,\n",
+      "\"going\" nearest neighbors: great, today, once, now, while, her, like, by,\n",
+      "\"hardware\" nearest neighbors: power, system, high, program, control, computer, typically, making,\n",
+      "\"american\" nearest neighbors: born, english, british, german, john, french, b, d,\n",
+      "\"britain\" nearest neighbors: country, state, home, government, first, following, during, from,\n",
+      "Step 1610000, Average Loss= 6.1029\n",
+      "Step 1620000, Average Loss= 6.0501\n",
+      "Step 1630000, Average Loss= 6.1536\n",
+      "Step 1640000, Average Loss= 6.0483\n",
+      "Step 1650000, Average Loss= 6.1197\n",
+      "Step 1660000, Average Loss= 6.0261\n",
+      "Step 1670000, Average Loss= 6.1012\n",
+      "Step 1680000, Average Loss= 6.1795\n",
+      "Step 1690000, Average Loss= 6.1224\n",
+      "Step 1700000, Average Loss= 6.0896\n",
+      "Step 1710000, Average Loss= 6.0418\n",
+      "Step 1720000, Average Loss= 6.0626\n",
+      "Step 1730000, Average Loss= 6.0214\n",
+      "Step 1740000, Average Loss= 6.1206\n",
+      "Step 1750000, Average Loss= 5.9721\n",
+      "Step 1760000, Average Loss= 6.0782\n",
+      "Step 1770000, Average Loss= 6.0291\n",
+      "Step 1780000, Average Loss= 6.0187\n",
+      "Step 1790000, Average Loss= 5.9761\n",
+      "Step 1800000, Average Loss= 5.7518\n",
+      "Evaluation...\n",
+      "\"five\" nearest neighbors: four, three, six, seven, eight, nine, two, zero,\n",
+      "\"of\" nearest neighbors: the, from, in, became, and, second, first, including,\n",
+      "\"going\" nearest neighbors: today, which, once, little, made, before, now, etc,\n",
+      "\"hardware\" nearest neighbors: computer, power, program, system, high, typically, current, eventually,\n",
+      "\"american\" nearest neighbors: b, d, born, actor, UNK, robert, william, english,\n",
+      "\"britain\" nearest neighbors: government, state, country, from, world, great, of, in,\n",
+      "Step 1810000, Average Loss= 5.9839\n",
+      "Step 1820000, Average Loss= 5.9931\n",
+      "Step 1830000, Average Loss= 6.0794\n",
+      "Step 1840000, Average Loss= 5.9072\n",
+      "Step 1850000, Average Loss= 5.9831\n",
+      "Step 1860000, Average Loss= 6.0023\n",
+      "Step 1870000, Average Loss= 5.9375\n",
+      "Step 1880000, Average Loss= 5.9250\n",
+      "Step 1890000, Average Loss= 5.9422\n",
+      "Step 1900000, Average Loss= 5.9339\n",
+      "Step 1910000, Average Loss= 5.9235\n",
+      "Step 1920000, Average Loss= 5.9692\n",
+      "Step 1930000, Average Loss= 5.9022\n",
+      "Step 1940000, Average Loss= 5.9599\n",
+      "Step 1950000, Average Loss= 6.0174\n",
+      "Step 1960000, Average Loss= 5.9530\n",
+      "Step 1970000, Average Loss= 5.9479\n",
+      "Step 1980000, Average Loss= 5.8870\n",
+      "Step 1990000, Average Loss= 5.9271\n",
+      "Step 2000000, Average Loss= 5.8774\n",
+      "Evaluation...\n",
+      "\"five\" nearest neighbors: four, three, six, seven, eight, two, nine, zero,\n",
+      "\"of\" nearest neighbors: and, the, from, in, within, first, including, with,\n",
+      "\"going\" nearest neighbors: today, before, another, little, work, etc, now, him,\n",
+      "\"hardware\" nearest neighbors: computer, program, system, both, making, designed, power, simple,\n",
+      "\"american\" nearest neighbors: actor, born, d, robert, john, b, german, writer,\n",
+      "\"britain\" nearest neighbors: government, state, following, great, england, became, country, from,\n",
+      "Step 2010000, Average Loss= 5.9373\n",
+      "Step 2020000, Average Loss= 5.9113\n",
+      "Step 2030000, Average Loss= 5.9158\n",
+      "Step 2040000, Average Loss= 5.9020\n",
+      "Step 2050000, Average Loss= 5.8608\n",
+      "Step 2060000, Average Loss= 5.7379\n",
+      "Step 2070000, Average Loss= 5.7143\n",
+      "Step 2080000, Average Loss= 5.9379\n",
+      "Step 2090000, Average Loss= 5.8201\n",
+      "Step 2100000, Average Loss= 5.9390\n",
+      "Step 2110000, Average Loss= 5.7295\n",
+      "Step 2120000, Average Loss= 5.8290\n",
+      "Step 2130000, Average Loss= 5.9042\n",
+      "Step 2140000, Average Loss= 5.8367\n",
+      "Step 2150000, Average Loss= 5.7760\n",
+      "Step 2160000, Average Loss= 5.8664\n",
+      "Step 2170000, Average Loss= 5.7974\n",
+      "Step 2180000, Average Loss= 5.8523\n",
+      "Step 2190000, Average Loss= 5.8047\n",
+      "Step 2200000, Average Loss= 5.8172\n",
+      "Evaluation...\n",
+      "\"five\" nearest neighbors: three, four, six, eight, two, seven, one, zero,\n",
+      "\"of\" nearest neighbors: the, with, group, in, its, and, from, including,\n",
+      "\"going\" nearest neighbors: produced, when, today, while, little, before, had, like,\n",
+      "\"hardware\" nearest neighbors: computer, system, power, technology, program, simple, for, designed,\n",
+      "\"american\" nearest neighbors: english, canadian, german, french, author, british, film, born,\n",
+      "\"britain\" nearest neighbors: government, great, state, established, british, england, country, army,\n",
+      "Step 2210000, Average Loss= 5.8847\n",
+      "Step 2220000, Average Loss= 5.8622\n",
+      "Step 2230000, Average Loss= 5.8295\n",
+      "Step 2240000, Average Loss= 5.8484\n",
+      "Step 2250000, Average Loss= 5.7917\n",
+      "Step 2260000, Average Loss= 5.7846\n",
+      "Step 2270000, Average Loss= 5.8307\n",
+      "Step 2280000, Average Loss= 5.7341\n",
+      "Step 2290000, Average Loss= 5.8519\n",
+      "Step 2300000, Average Loss= 5.7792\n",
+      "Step 2310000, Average Loss= 5.8277\n",
+      "Step 2320000, Average Loss= 5.7196\n",
+      "Step 2330000, Average Loss= 5.5469\n",
+      "Step 2340000, Average Loss= 5.7177\n",
+      "Step 2350000, Average Loss= 5.8139\n",
+      "Step 2360000, Average Loss= 5.7849\n",
+      "Step 2370000, Average Loss= 5.7022\n",
+      "Step 2380000, Average Loss= 5.7447\n",
+      "Step 2390000, Average Loss= 5.7667\n",
+      "Step 2400000, Average Loss= 5.7625\n",
+      "Evaluation...\n",
+      "\"five\" nearest neighbors: three, four, six, seven, two, eight, zero, nine,\n",
+      "\"of\" nearest neighbors: the, and, from, part, in, following, within, including,\n",
+      "\"going\" nearest neighbors: where, once, little, now, again, while, off, produced,\n",
+      "\"hardware\" nearest neighbors: system, computer, high, power, using, designed, systems, simple,\n",
+      "\"american\" nearest neighbors: author, actor, english, born, writer, british, b, d,\n",
+      "\"britain\" nearest neighbors: great, established, government, england, country, state, army, former,\n",
+      "Step 2410000, Average Loss= 5.6953\n",
+      "Step 2420000, Average Loss= 5.7413\n",
+      "Step 2430000, Average Loss= 5.7242\n",
+      "Step 2440000, Average Loss= 5.7397\n",
+      "Step 2450000, Average Loss= 5.7755\n",
+      "Step 2460000, Average Loss= 5.6881\n",
+      "Step 2470000, Average Loss= 5.7471\n",
+      "Step 2480000, Average Loss= 5.8159\n",
+      "Step 2490000, Average Loss= 5.7452\n",
+      "Step 2500000, Average Loss= 5.7547\n",
+      "Step 2510000, Average Loss= 5.6945\n",
+      "Step 2520000, Average Loss= 5.7318\n",
+      "Step 2530000, Average Loss= 5.6682\n",
+      "Step 2540000, Average Loss= 5.7660\n",
+      "Step 2550000, Average Loss= 5.6956\n",
+      "Step 2560000, Average Loss= 5.7307\n",
+      "Step 2570000, Average Loss= 5.7015\n",
+      "Step 2580000, Average Loss= 5.6932\n",
+      "Step 2590000, Average Loss= 5.6386\n",
+      "Step 2600000, Average Loss= 5.4734\n",
+      "Evaluation...\n",
+      "\"five\" nearest neighbors: four, three, six, seven, eight, nine, two, zero,\n",
+      "\"of\" nearest neighbors: the, and, in, from, became, including, for, with,\n",
+      "\"going\" nearest neighbors: little, again, just, a, now, where, to, for,\n",
+      "\"hardware\" nearest neighbors: computer, program, system, software, designed, systems, technology, current,\n",
+      "\"american\" nearest neighbors: actor, d, writer, b, born, singer, author, robert,\n",
+      "\"britain\" nearest neighbors: great, established, government, england, country, in, from, state,\n",
+      "Step 2610000, Average Loss= 5.7291\n",
+      "Step 2620000, Average Loss= 5.6412\n",
+      "Step 2630000, Average Loss= 5.7485\n",
+      "Step 2640000, Average Loss= 5.5833\n",
+      "Step 2650000, Average Loss= 5.6548\n",
+      "Step 2660000, Average Loss= 5.7159\n",
+      "Step 2670000, Average Loss= 5.6569\n",
+      "Step 2680000, Average Loss= 5.6080\n",
+      "Step 2690000, Average Loss= 5.7037\n",
+      "Step 2700000, Average Loss= 5.6360\n",
+      "Step 2710000, Average Loss= 5.6707\n",
+      "Step 2720000, Average Loss= 5.6811\n",
+      "Step 2730000, Average Loss= 5.6237\n",
+      "Step 2740000, Average Loss= 5.7050\n",
+      "Step 2750000, Average Loss= 5.6991\n",
+      "Step 2760000, Average Loss= 5.6691\n",
+      "Step 2770000, Average Loss= 5.7057\n",
+      "Step 2780000, Average Loss= 5.6162\n",
+      "Step 2790000, Average Loss= 5.6484\n",
+      "Step 2800000, Average Loss= 5.6627\n",
+      "Evaluation...\n",
+      "\"five\" nearest neighbors: four, six, three, seven, eight, nine, two, one,\n",
+      "\"of\" nearest neighbors: the, in, following, including, part, and, from, under,\n",
+      "\"going\" nearest neighbors: again, before, little, away, once, when, eventually, then,\n",
+      "\"hardware\" nearest neighbors: computer, system, software, program, systems, designed, for, design,\n",
+      "\"american\" nearest neighbors: actor, writer, singer, author, born, robert, d, john,\n",
+      "\"britain\" nearest neighbors: established, england, great, government, france, army, the, throughout,\n",
+      "Step 2810000, Average Loss= 5.5900\n",
+      "Step 2820000, Average Loss= 5.7053\n",
+      "Step 2830000, Average Loss= 5.6064\n",
+      "Step 2840000, Average Loss= 5.6891\n",
+      "Step 2850000, Average Loss= 5.5571\n",
+      "Step 2860000, Average Loss= 5.4490\n",
+      "Step 2870000, Average Loss= 5.5428\n",
+      "Step 2880000, Average Loss= 5.6832\n",
+      "Step 2890000, Average Loss= 5.5973\n",
+      "Step 2900000, Average Loss= 5.5816\n",
+      "Step 2910000, Average Loss= 5.5647\n",
+      "Step 2920000, Average Loss= 5.6001\n",
+      "Step 2930000, Average Loss= 5.6459\n",
+      "Step 2940000, Average Loss= 5.5622\n",
+      "Step 2950000, Average Loss= 5.5707\n",
+      "Step 2960000, Average Loss= 5.6492\n",
+      "Step 2970000, Average Loss= 5.5633\n",
+      "Step 2980000, Average Loss= 5.6323\n",
+      "Step 2990000, Average Loss= 5.5440\n",
+      "Step 3000000, Average Loss= 5.6209\n",
+      "Evaluation...\n",
+      "\"five\" nearest neighbors: four, three, six, eight, seven, two, zero, one,\n",
+      "\"of\" nearest neighbors: the, in, and, including, group, includes, part, from,\n",
+      "\"going\" nearest neighbors: once, again, when, quickly, before, eventually, little, had,\n",
+      "\"hardware\" nearest neighbors: computer, system, software, designed, program, simple, systems, sound,\n",
+      "\"american\" nearest neighbors: canadian, english, author, german, french, british, irish, australian,\n",
+      "\"britain\" nearest neighbors: established, england, great, government, throughout, france, british, northern,\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Initialize the variables (i.e. assign their default value)\n",
+    "init = tf.global_variables_initializer()\n",
+    "\n",
+    "with tf.Session() as sess:\n",
+    "\n",
+    "    # Run the initializer\n",
+    "    sess.run(init)\n",
+    "\n",
+    "    # Testing data\n",
+    "    x_test = np.array([word2id[w] for w in eval_words])\n",
+    "\n",
+    "    average_loss = 0\n",
+    "    for step in xrange(1, num_steps + 1):\n",
+    "        # Get a new batch of data\n",
+    "        batch_x, batch_y = next_batch(batch_size, num_skips, skip_window)\n",
+    "        # Run training op\n",
+    "        _, loss = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y})\n",
+    "        average_loss += loss\n",
+    "\n",
+    "        if step % display_step == 0 or step == 1:\n",
+    "            if step > 1:\n",
+    "                average_loss /= display_step\n",
+    "            print(\"Step \" + str(step) + \", Average Loss= \" + \\\n",
+    "                  \"{:.4f}\".format(average_loss))\n",
+    "            average_loss = 0\n",
+    "\n",
+    "        # Evaluation\n",
+    "        if step % eval_step == 0 or step == 1:\n",
+    "            print(\"Evaluation...\")\n",
+    "            sim = sess.run(cosine_sim_op, feed_dict={X: x_test})\n",
+    "            for i in xrange(len(eval_words)):\n",
+    "                top_k = 8  # number of nearest neighbors\n",
+    "                nearest = (-sim[i, :]).argsort()[1:top_k + 1]\n",
+    "                log_str = '\"%s\" nearest neighbors:' % eval_words[i]\n",
+    "                for k in xrange(top_k):\n",
+    "                    log_str = '%s %s,' % (log_str, id2word[nearest[k]])\n",
+    "                print(log_str)\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python [default]",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
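
The evaluation cell above ranks words by cosine similarity after L2-normalizing the embedding matrix. A minimal NumPy sketch of that lookup (the helper name nearest_words and the variable final_embedding are hypothetical), assuming final_embedding is the trained embedding matrix fetched from the session (for example via sess.run(embedding)) and word2id / id2word as built earlier:

    import numpy as np

    def nearest_words(final_embedding, word2id, id2word, word, top_k=8):
        # L2-normalize each row so that dot products become cosine similarities
        norm = np.sqrt(np.sum(np.square(final_embedding), axis=1, keepdims=True))
        normalized = final_embedding / norm
        # Cosine similarity between the query word and the whole vocabulary
        sim = normalized.dot(normalized[word2id[word]])
        # Highest-similarity indices, skipping the query word itself
        nearest = (-sim).argsort()[1:top_k + 1]
        return [id2word[i] for i in nearest]

    # Example: nearest_words(final_embedding, word2id, id2word, 'five')
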

The diff for this file is too large to display.
+ 310 - 0
tensorflow_v1/notebooks/3_NeuralNetworks/autoencoder.ipynb


+ 301 - 0
tensorflow_v1/notebooks/3_NeuralNetworks/bidirectional_rnn.ipynb

@@ -0,0 +1,301 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
+   "source": [
+    "# Bi-directional Recurrent Neural Network Example\n",
+    "\n",
+    "Build a bi-directional recurrent neural network (LSTM) with TensorFlow.\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## BiRNN Overview\n",
+    "\n",
+    "<img src=\"https://ai2-s2-public.s3.amazonaws.com/figures/2016-11-08/191dd7df9cb91ac22f56ed0dfa4a5651e8767a51/1-Figure2-1.png\" alt=\"nn\" style=\"width: 600px;\"/>\n",
+    "\n",
+    "References:\n",
+    "- [Long Short Term Memory](http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf), Sepp Hochreiter & Jurgen Schmidhuber, Neural Computation 9(8): 1735-1780, 1997.\n",
+    "\n",
+    "## MNIST Dataset Overview\n",
+    "\n",
+    "This example is using MNIST handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28x28 pixels) with values from 0 to 1. For simplicity, each image has been flattened and converted to a 1-D numpy array of 784 features (28*28).\n",
+    "\n",
+    "![MNIST Dataset](http://neuralnetworksanddeeplearning.com/images/mnist_100_digits.png)\n",
+    "\n",
+    "To classify images using a recurrent neural network, we consider every image row as a sequence of pixels. Because MNIST image shape is 28*28px, we will then handle 28 sequences of 28 timesteps for every sample.\n",
+    "\n",
+    "More info: http://yann.lecun.com/exdb/mnist/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "from __future__ import print_function\n",
+    "\n",
+    "import tensorflow as tf\n",
+    "from tensorflow.contrib import rnn\n",
+    "import numpy as np\n",
+    "\n",
+    "# Import MNIST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Training Parameters\n",
+    "learning_rate = 0.001\n",
+    "training_steps = 10000\n",
+    "batch_size = 128\n",
+    "display_step = 200\n",
+    "\n",
+    "# Network Parameters\n",
+    "num_input = 28 # MNIST data input (img shape: 28*28)\n",
+    "timesteps = 28 # timesteps\n",
+    "num_hidden = 128 # hidden layer num of features\n",
+    "num_classes = 10 # MNIST total classes (0-9 digits)\n",
+    "\n",
+    "# tf Graph input\n",
+    "X = tf.placeholder(\"float\", [None, timesteps, num_input])\n",
+    "Y = tf.placeholder(\"float\", [None, num_classes])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Define weights\n",
+    "weights = {\n",
+    "    # Hidden layer weights => 2*n_hidden because of forward + backward cells\n",
+    "    'out': tf.Variable(tf.random_normal([2*num_hidden, num_classes]))\n",
+    "}\n",
+    "biases = {\n",
+    "    'out': tf.Variable(tf.random_normal([num_classes]))\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "def BiRNN(x, weights, biases):\n",
+    "\n",
+    "    # Prepare data shape to match `rnn` function requirements\n",
+    "    # Current data input shape: (batch_size, timesteps, n_input)\n",
+    "    # Required shape: 'timesteps' tensors list of shape (batch_size, num_input)\n",
+    "\n",
+    "    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, num_input)\n",
+    "    x = tf.unstack(x, timesteps, 1)\n",
+    "\n",
+    "    # Define lstm cells with tensorflow\n",
+    "    # Forward direction cell\n",
+    "    lstm_fw_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)\n",
+    "    # Backward direction cell\n",
+    "    lstm_bw_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)\n",
+    "\n",
+    "    # Get lstm cell output\n",
+    "    try:\n",
+    "        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,\n",
+    "                                              dtype=tf.float32)\n",
+    "    except Exception: # Old TensorFlow version only returns outputs not states\n",
+    "        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,\n",
+    "                                        dtype=tf.float32)\n",
+    "\n",
+    "    # Linear activation, using rnn inner loop last output\n",
+    "    return tf.matmul(outputs[-1], weights['out']) + biases['out']"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "logits = BiRNN(X, weights, biases)\n",
+    "prediction = tf.nn.softmax(logits)\n",
+    "\n",
+    "# Define loss and optimizer\n",
+    "loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n",
+    "    logits=logits, labels=Y))\n",
+    "optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n",
+    "train_op = optimizer.minimize(loss_op)\n",
+    "\n",
+    "# Evaluate model (with test logits, for dropout to be disabled)\n",
+    "correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))\n",
+    "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
+    "\n",
+    "# Initialize the variables (i.e. assign their default value)\n",
+    "init = tf.global_variables_initializer()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Step 1, Minibatch Loss= 2.6218, Training Accuracy= 0.086\n",
+      "Step 200, Minibatch Loss= 2.1900, Training Accuracy= 0.211\n",
+      "Step 400, Minibatch Loss= 2.0144, Training Accuracy= 0.375\n",
+      "Step 600, Minibatch Loss= 1.8729, Training Accuracy= 0.445\n",
+      "Step 800, Minibatch Loss= 1.8000, Training Accuracy= 0.469\n",
+      "Step 1000, Minibatch Loss= 1.7244, Training Accuracy= 0.453\n",
+      "Step 1200, Minibatch Loss= 1.5657, Training Accuracy= 0.523\n",
+      "Step 1400, Minibatch Loss= 1.5473, Training Accuracy= 0.547\n",
+      "Step 1600, Minibatch Loss= 1.5288, Training Accuracy= 0.500\n",
+      "Step 1800, Minibatch Loss= 1.4203, Training Accuracy= 0.555\n",
+      "Step 2000, Minibatch Loss= 1.2525, Training Accuracy= 0.641\n",
+      "Step 2200, Minibatch Loss= 1.2696, Training Accuracy= 0.594\n",
+      "Step 2400, Minibatch Loss= 1.2000, Training Accuracy= 0.664\n",
+      "Step 2600, Minibatch Loss= 1.1017, Training Accuracy= 0.625\n",
+      "Step 2800, Minibatch Loss= 1.2656, Training Accuracy= 0.578\n",
+      "Step 3000, Minibatch Loss= 1.0830, Training Accuracy= 0.656\n",
+      "Step 3200, Minibatch Loss= 1.1522, Training Accuracy= 0.633\n",
+      "Step 3400, Minibatch Loss= 0.9484, Training Accuracy= 0.680\n",
+      "Step 3600, Minibatch Loss= 1.0470, Training Accuracy= 0.641\n",
+      "Step 3800, Minibatch Loss= 1.0609, Training Accuracy= 0.586\n",
+      "Step 4000, Minibatch Loss= 1.1853, Training Accuracy= 0.648\n",
+      "Step 4200, Minibatch Loss= 0.9438, Training Accuracy= 0.750\n",
+      "Step 4400, Minibatch Loss= 0.7986, Training Accuracy= 0.766\n",
+      "Step 4600, Minibatch Loss= 0.8070, Training Accuracy= 0.750\n",
+      "Step 4800, Minibatch Loss= 0.8382, Training Accuracy= 0.734\n",
+      "Step 5000, Minibatch Loss= 0.7397, Training Accuracy= 0.766\n",
+      "Step 5200, Minibatch Loss= 0.7870, Training Accuracy= 0.727\n",
+      "Step 5400, Minibatch Loss= 0.6380, Training Accuracy= 0.828\n",
+      "Step 5600, Minibatch Loss= 0.7975, Training Accuracy= 0.719\n",
+      "Step 5800, Minibatch Loss= 0.7934, Training Accuracy= 0.766\n",
+      "Step 6000, Minibatch Loss= 0.6628, Training Accuracy= 0.805\n",
+      "Step 6200, Minibatch Loss= 0.7958, Training Accuracy= 0.672\n",
+      "Step 6400, Minibatch Loss= 0.6582, Training Accuracy= 0.773\n",
+      "Step 6600, Minibatch Loss= 0.5908, Training Accuracy= 0.812\n",
+      "Step 6800, Minibatch Loss= 0.6182, Training Accuracy= 0.820\n",
+      "Step 7000, Minibatch Loss= 0.5513, Training Accuracy= 0.812\n",
+      "Step 7200, Minibatch Loss= 0.6683, Training Accuracy= 0.789\n",
+      "Step 7400, Minibatch Loss= 0.5337, Training Accuracy= 0.828\n",
+      "Step 7600, Minibatch Loss= 0.6428, Training Accuracy= 0.805\n",
+      "Step 7800, Minibatch Loss= 0.6708, Training Accuracy= 0.797\n",
+      "Step 8000, Minibatch Loss= 0.4664, Training Accuracy= 0.852\n",
+      "Step 8200, Minibatch Loss= 0.4249, Training Accuracy= 0.859\n",
+      "Step 8400, Minibatch Loss= 0.7723, Training Accuracy= 0.773\n",
+      "Step 8600, Minibatch Loss= 0.4706, Training Accuracy= 0.859\n",
+      "Step 8800, Minibatch Loss= 0.4800, Training Accuracy= 0.867\n",
+      "Step 9000, Minibatch Loss= 0.4636, Training Accuracy= 0.891\n",
+      "Step 9200, Minibatch Loss= 0.5734, Training Accuracy= 0.828\n",
+      "Step 9400, Minibatch Loss= 0.5548, Training Accuracy= 0.875\n",
+      "Step 9600, Minibatch Loss= 0.3575, Training Accuracy= 0.922\n",
+      "Step 9800, Minibatch Loss= 0.4566, Training Accuracy= 0.844\n",
+      "Step 10000, Minibatch Loss= 0.5125, Training Accuracy= 0.844\n",
+      "Optimization Finished!\n",
+      "Testing Accuracy: 0.890625\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Start training\n",
+    "with tf.Session() as sess:\n",
+    "\n",
+    "    # Run the initializer\n",
+    "    sess.run(init)\n",
+    "\n",
+    "    for step in range(1, training_steps+1):\n",
+    "        batch_x, batch_y = mnist.train.next_batch(batch_size)\n",
+    "        # Reshape data to get 28 seq of 28 elements\n",
+    "        batch_x = batch_x.reshape((batch_size, timesteps, num_input))\n",
+    "        # Run optimization op (backprop)\n",
+    "        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})\n",
+    "        if step % display_step == 0 or step == 1:\n",
+    "            # Calculate batch loss and accuracy\n",
+    "            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,\n",
+    "                                                                 Y: batch_y})\n",
+    "            print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\n",
+    "                  \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \\\n",
+    "                  \"{:.3f}\".format(acc))\n",
+    "\n",
+    "    print(\"Optimization Finished!\")\n",
+    "\n",
+    "    # Calculate accuracy for 128 mnist test images\n",
+    "    test_len = 128\n",
+    "    test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input))\n",
+    "    test_label = mnist.test.labels[:test_len]\n",
+    "    print(\"Testing Accuracy:\", \\\n",
+    "        sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
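
A quick sketch of why the output weight matrix in the notebook above is sized [2*num_hidden, num_classes]: the static bidirectional RNN concatenates the forward and backward cell outputs at each timestep, so the last output carries 2*num_hidden features before the final linear layer. The NumPy snippet below is a minimal illustration under assumed toy shapes; the array names are hypothetical and not part of the notebook.

import numpy as np

batch_size, num_hidden, num_classes = 4, 128, 10

# Hypothetical last-timestep outputs of the forward and backward LSTM cells.
fw_last = np.random.randn(batch_size, num_hidden)
bw_last = np.random.randn(batch_size, num_hidden)

# The bidirectional RNN concatenates both directions along the feature axis,
# which is why weights['out'] needs 2*num_hidden rows.
concat_last = np.concatenate([fw_last, bw_last], axis=1)   # shape (4, 256)

W_out = np.random.randn(2 * num_hidden, num_classes)
b_out = np.random.randn(num_classes)
logits = concat_last @ W_out + b_out                        # shape (4, 10)
print(logits.shape)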

Diff file is too large
+ 423 - 0
tensorflow_v1/notebooks/3_NeuralNetworks/convolutional_network.ipynb


+ 303 - 0
tensorflow_v1/notebooks/3_NeuralNetworks/convolutional_network_raw.ipynb

@@ -0,0 +1,303 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
+   "source": [
+    "# Convolutional Neural Network Example\n",
+    "\n",
+    "Build a convolutional neural network with TensorFlow.\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## CNN Overview\n",
+    "\n",
+    "![CNN](http://personal.ie.cuhk.edu.hk/~ccloy/project_target_code/images/fig3.png)\n",
+    "\n",
+    "## MNIST Dataset Overview\n",
+    "\n",
+    "This example is using MNIST handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28x28 pixels) with values from 0 to 1. For simplicity, each image has been flattened and converted to a 1-D numpy array of 784 features (28*28).\n",
+    "\n",
+    "![MNIST Dataset](http://neuralnetworksanddeeplearning.com/images/mnist_100_digits.png)\n",
+    "\n",
+    "More info: http://yann.lecun.com/exdb/mnist/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "from __future__ import division, print_function, absolute_import\n",
+    "\n",
+    "import tensorflow as tf\n",
+    "\n",
+    "# Import MNIST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Training Parameters\n",
+    "learning_rate = 0.001\n",
+    "num_steps = 500\n",
+    "batch_size = 128\n",
+    "display_step = 10\n",
+    "\n",
+    "# Network Parameters\n",
+    "num_input = 784 # MNIST data input (img shape: 28*28)\n",
+    "num_classes = 10 # MNIST total classes (0-9 digits)\n",
+    "dropout = 0.75 # Dropout, probability to keep units\n",
+    "\n",
+    "# tf Graph input\n",
+    "X = tf.placeholder(tf.float32, [None, num_input])\n",
+    "Y = tf.placeholder(tf.float32, [None, num_classes])\n",
+    "keep_prob = tf.placeholder(tf.float32) # dropout (keep probability)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Create some wrappers for simplicity\n",
+    "def conv2d(x, W, b, strides=1):\n",
+    "    # Conv2D wrapper, with bias and relu activation\n",
+    "    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')\n",
+    "    x = tf.nn.bias_add(x, b)\n",
+    "    return tf.nn.relu(x)\n",
+    "\n",
+    "\n",
+    "def maxpool2d(x, k=2):\n",
+    "    # MaxPool2D wrapper\n",
+    "    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],\n",
+    "                          padding='SAME')\n",
+    "\n",
+    "\n",
+    "# Create model\n",
+    "def conv_net(x, weights, biases, dropout):\n",
+    "    # MNIST data input is a 1-D vector of 784 features (28*28 pixels)\n",
+    "    # Reshape to match picture format [Height x Width x Channel]\n",
+    "    # Tensor input become 4-D: [Batch Size, Height, Width, Channel]\n",
+    "    x = tf.reshape(x, shape=[-1, 28, 28, 1])\n",
+    "\n",
+    "    # Convolution Layer\n",
+    "    conv1 = conv2d(x, weights['wc1'], biases['bc1'])\n",
+    "    # Max Pooling (down-sampling)\n",
+    "    conv1 = maxpool2d(conv1, k=2)\n",
+    "\n",
+    "    # Convolution Layer\n",
+    "    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])\n",
+    "    # Max Pooling (down-sampling)\n",
+    "    conv2 = maxpool2d(conv2, k=2)\n",
+    "\n",
+    "    # Fully connected layer\n",
+    "    # Reshape conv2 output to fit fully connected layer input\n",
+    "    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])\n",
+    "    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])\n",
+    "    fc1 = tf.nn.relu(fc1)\n",
+    "    # Apply Dropout\n",
+    "    fc1 = tf.nn.dropout(fc1, dropout)\n",
+    "\n",
+    "    # Output, class prediction\n",
+    "    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])\n",
+    "    return out"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Store layers weight & bias\n",
+    "weights = {\n",
+    "    # 5x5 conv, 1 input, 32 outputs\n",
+    "    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),\n",
+    "    # 5x5 conv, 32 inputs, 64 outputs\n",
+    "    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),\n",
+    "    # fully connected, 7*7*64 inputs, 1024 outputs\n",
+    "    'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),\n",
+    "    # 1024 inputs, 10 outputs (class prediction)\n",
+    "    'out': tf.Variable(tf.random_normal([1024, num_classes]))\n",
+    "}\n",
+    "\n",
+    "biases = {\n",
+    "    'bc1': tf.Variable(tf.random_normal([32])),\n",
+    "    'bc2': tf.Variable(tf.random_normal([64])),\n",
+    "    'bd1': tf.Variable(tf.random_normal([1024])),\n",
+    "    'out': tf.Variable(tf.random_normal([num_classes]))\n",
+    "}\n",
+    "\n",
+    "# Construct model\n",
+    "logits = conv_net(X, weights, biases, keep_prob)\n",
+    "prediction = tf.nn.softmax(logits)\n",
+    "\n",
+    "# Define loss and optimizer\n",
+    "loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n",
+    "    logits=logits, labels=Y))\n",
+    "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n",
+    "train_op = optimizer.minimize(loss_op)\n",
+    "\n",
+    "\n",
+    "# Evaluate model\n",
+    "correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))\n",
+    "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
+    "\n",
+    "# Initialize the variables (i.e. assign their default value)\n",
+    "init = tf.global_variables_initializer()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Step 1, Minibatch Loss= 63763.3047, Training Accuracy= 0.141\n",
+      "Step 10, Minibatch Loss= 26429.6680, Training Accuracy= 0.242\n",
+      "Step 20, Minibatch Loss= 12171.8584, Training Accuracy= 0.586\n",
+      "Step 30, Minibatch Loss= 6306.6318, Training Accuracy= 0.734\n",
+      "Step 40, Minibatch Loss= 5113.7583, Training Accuracy= 0.711\n",
+      "Step 50, Minibatch Loss= 4022.2131, Training Accuracy= 0.805\n",
+      "Step 60, Minibatch Loss= 3125.4949, Training Accuracy= 0.867\n",
+      "Step 70, Minibatch Loss= 2225.4875, Training Accuracy= 0.875\n",
+      "Step 80, Minibatch Loss= 1843.3540, Training Accuracy= 0.867\n",
+      "Step 90, Minibatch Loss= 1715.7744, Training Accuracy= 0.875\n",
+      "Step 100, Minibatch Loss= 2611.2708, Training Accuracy= 0.906\n",
+      "Step 110, Minibatch Loss= 4804.0913, Training Accuracy= 0.875\n",
+      "Step 120, Minibatch Loss= 1067.5258, Training Accuracy= 0.938\n",
+      "Step 130, Minibatch Loss= 2519.1514, Training Accuracy= 0.898\n",
+      "Step 140, Minibatch Loss= 2687.9292, Training Accuracy= 0.906\n",
+      "Step 150, Minibatch Loss= 1983.4077, Training Accuracy= 0.938\n",
+      "Step 160, Minibatch Loss= 2844.6553, Training Accuracy= 0.930\n",
+      "Step 170, Minibatch Loss= 3602.2524, Training Accuracy= 0.914\n",
+      "Step 180, Minibatch Loss= 175.3922, Training Accuracy= 0.961\n",
+      "Step 190, Minibatch Loss= 645.1918, Training Accuracy= 0.945\n",
+      "Step 200, Minibatch Loss= 1147.6567, Training Accuracy= 0.938\n",
+      "Step 210, Minibatch Loss= 1140.4148, Training Accuracy= 0.914\n",
+      "Step 220, Minibatch Loss= 1572.8756, Training Accuracy= 0.906\n",
+      "Step 230, Minibatch Loss= 1292.9274, Training Accuracy= 0.898\n",
+      "Step 240, Minibatch Loss= 1501.4623, Training Accuracy= 0.953\n",
+      "Step 250, Minibatch Loss= 1908.2997, Training Accuracy= 0.898\n",
+      "Step 260, Minibatch Loss= 2182.2380, Training Accuracy= 0.898\n",
+      "Step 270, Minibatch Loss= 487.5807, Training Accuracy= 0.961\n",
+      "Step 280, Minibatch Loss= 1284.1130, Training Accuracy= 0.945\n",
+      "Step 290, Minibatch Loss= 1232.4919, Training Accuracy= 0.891\n",
+      "Step 300, Minibatch Loss= 1198.8336, Training Accuracy= 0.945\n",
+      "Step 310, Minibatch Loss= 2010.5345, Training Accuracy= 0.906\n",
+      "Step 320, Minibatch Loss= 786.3917, Training Accuracy= 0.945\n",
+      "Step 330, Minibatch Loss= 1408.3556, Training Accuracy= 0.898\n",
+      "Step 340, Minibatch Loss= 1453.7538, Training Accuracy= 0.953\n",
+      "Step 350, Minibatch Loss= 999.8901, Training Accuracy= 0.906\n",
+      "Step 360, Minibatch Loss= 914.3958, Training Accuracy= 0.961\n",
+      "Step 370, Minibatch Loss= 488.0052, Training Accuracy= 0.938\n",
+      "Step 380, Minibatch Loss= 1070.8710, Training Accuracy= 0.922\n",
+      "Step 390, Minibatch Loss= 151.4658, Training Accuracy= 0.961\n",
+      "Step 400, Minibatch Loss= 555.3539, Training Accuracy= 0.953\n",
+      "Step 410, Minibatch Loss= 765.5746, Training Accuracy= 0.945\n",
+      "Step 420, Minibatch Loss= 326.9393, Training Accuracy= 0.969\n",
+      "Step 430, Minibatch Loss= 530.8968, Training Accuracy= 0.977\n",
+      "Step 440, Minibatch Loss= 463.3909, Training Accuracy= 0.977\n",
+      "Step 450, Minibatch Loss= 362.2226, Training Accuracy= 0.977\n",
+      "Step 460, Minibatch Loss= 414.0034, Training Accuracy= 0.953\n",
+      "Step 470, Minibatch Loss= 583.4587, Training Accuracy= 0.945\n",
+      "Step 480, Minibatch Loss= 566.1262, Training Accuracy= 0.969\n",
+      "Step 490, Minibatch Loss= 691.1143, Training Accuracy= 0.961\n",
+      "Step 500, Minibatch Loss= 282.8893, Training Accuracy= 0.984\n",
+      "Optimization Finished!\n",
+      "Testing Accuracy: 0.976562\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Start training\n",
+    "with tf.Session() as sess:\n",
+    "\n",
+    "    # Run the initializer\n",
+    "    sess.run(init)\n",
+    "\n",
+    "    for step in range(1, num_steps+1):\n",
+    "        batch_x, batch_y = mnist.train.next_batch(batch_size)\n",
+    "        # Run optimization op (backprop)\n",
+    "        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y, keep_prob: dropout})\n",
+    "        if step % display_step == 0 or step == 1:\n",
+    "            # Calculate batch loss and accuracy\n",
+    "            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,\n",
+    "                                                                 Y: batch_y,\n",
+    "                                                                 keep_prob: 1.0})\n",
+    "            print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\n",
+    "                  \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \\\n",
+    "                  \"{:.3f}\".format(acc))\n",
+    "\n",
+    "    print(\"Optimization Finished!\")\n",
+    "\n",
+    "    # Calculate accuracy for 256 MNIST test images\n",
+    "    print(\"Testing Accuracy:\", \\\n",
+    "        sess.run(accuracy, feed_dict={X: mnist.test.images[:256],\n",
+    "                                      Y: mnist.test.labels[:256],\n",
+    "                                      keep_prob: 1.0}))\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
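
The fully connected weight 'wd1' in the notebook above expects 7*7*64 inputs. That number follows from the two k=2 max-pool layers: with 'SAME' padding and stride 2, each pool halves the spatial size with ceiling rounding, so 28 -> 14 -> 7, and the second convolution has 64 filters. A small sketch of that arithmetic, with hypothetical helper names:

import math

def pooled_size(size, k=2, n_pools=2):
    # 'SAME'-padded max pooling with stride k rounds up: ceil(size / k).
    for _ in range(n_pools):
        size = math.ceil(size / k)
    return size

spatial = pooled_size(28)          # 28 -> 14 -> 7
channels = 64                      # filters in the second conv layer
flat_features = spatial * spatial * channels
print(flat_features)               # 3136 == 7*7*64, the 'wd1' input size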

Diff file is too large
+ 333 - 0
tensorflow_v1/notebooks/3_NeuralNetworks/dcgan.ipynb


+ 352 - 0
tensorflow_v1/notebooks/3_NeuralNetworks/dynamic_rnn.ipynb

@@ -0,0 +1,352 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Dynamic Recurrent Neural Network.\n",
+    "\n",
+    "TensorFlow implementation of a Recurrent Neural Network (LSTM) that performs dynamic computation over sequences with variable length. This example is using a toy dataset to classify linear sequences. The generated sequences have variable length.\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## RNN Overview\n",
+    "\n",
+    "<img src=\"http://colah.github.io/posts/2015-08-Understanding-LSTMs/img/RNN-unrolled.png\" alt=\"nn\" style=\"width: 600px;\"/>\n",
+    "\n",
+    "References:\n",
+    "- [Long Short Term Memory](http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf), Sepp Hochreiter & Jurgen Schmidhuber, Neural Computation 9(8): 1735-1780, 1997."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "from __future__ import print_function\n",
+    "\n",
+    "import tensorflow as tf\n",
+    "import random"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# ====================\n",
+    "#  TOY DATA GENERATOR\n",
+    "# ====================\n",
+    "\n",
+    "class ToySequenceData(object):\n",
+    "    \"\"\" Generate sequence of data with dynamic length.\n",
+    "    This class generate samples for training:\n",
+    "    - Class 0: linear sequences (i.e. [0, 1, 2, 3,...])\n",
+    "    - Class 1: random sequences (i.e. [1, 3, 10, 7,...])\n",
+    "\n",
+    "    NOTICE:\n",
+    "    We have to pad each sequence to reach 'max_seq_len' for TensorFlow\n",
+    "    consistency (we cannot feed a numpy array with inconsistent\n",
+    "    dimensions). The dynamic calculation will then be perform thanks to\n",
+    "    'seqlen' attribute that records every actual sequence length.\n",
+    "    \"\"\"\n",
+    "    def __init__(self, n_samples=1000, max_seq_len=20, min_seq_len=3,\n",
+    "                 max_value=1000):\n",
+    "        self.data = []\n",
+    "        self.labels = []\n",
+    "        self.seqlen = []\n",
+    "        for i in range(n_samples):\n",
+    "            # Random sequence length\n",
+    "            len = random.randint(min_seq_len, max_seq_len)\n",
+    "            # Monitor sequence length for TensorFlow dynamic calculation\n",
+    "            self.seqlen.append(len)\n",
+    "            # Add a random or linear int sequence (50% prob)\n",
+    "            if random.random() < .5:\n",
+    "                # Generate a linear sequence\n",
+    "                rand_start = random.randint(0, max_value - len)\n",
+    "                s = [[float(i)/max_value] for i in\n",
+    "                     range(rand_start, rand_start + len)]\n",
+    "                # Pad sequence for dimension consistency\n",
+    "                s += [[0.] for i in range(max_seq_len - len)]\n",
+    "                self.data.append(s)\n",
+    "                self.labels.append([1., 0.])\n",
+    "            else:\n",
+    "                # Generate a random sequence\n",
+    "                s = [[float(random.randint(0, max_value))/max_value]\n",
+    "                     for i in range(len)]\n",
+    "                # Pad sequence for dimension consistency\n",
+    "                s += [[0.] for i in range(max_seq_len - len)]\n",
+    "                self.data.append(s)\n",
+    "                self.labels.append([0., 1.])\n",
+    "        self.batch_id = 0\n",
+    "\n",
+    "    def next(self, batch_size):\n",
+    "        \"\"\" Return a batch of data. When dataset end is reached, start over.\n",
+    "        \"\"\"\n",
+    "        if self.batch_id == len(self.data):\n",
+    "            self.batch_id = 0\n",
+    "        batch_data = (self.data[self.batch_id:min(self.batch_id +\n",
+    "                                                  batch_size, len(self.data))])\n",
+    "        batch_labels = (self.labels[self.batch_id:min(self.batch_id +\n",
+    "                                                  batch_size, len(self.data))])\n",
+    "        batch_seqlen = (self.seqlen[self.batch_id:min(self.batch_id +\n",
+    "                                                  batch_size, len(self.data))])\n",
+    "        self.batch_id = min(self.batch_id + batch_size, len(self.data))\n",
+    "        return batch_data, batch_labels, batch_seqlen"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# ==========\n",
+    "#   MODEL\n",
+    "# ==========\n",
+    "\n",
+    "# Parameters\n",
+    "learning_rate = 0.01\n",
+    "training_steps = 10000\n",
+    "batch_size = 128\n",
+    "display_step = 200\n",
+    "\n",
+    "# Network Parameters\n",
+    "seq_max_len = 20 # Sequence max length\n",
+    "n_hidden = 64 # hidden layer num of features\n",
+    "n_classes = 2 # linear sequence or not\n",
+    "\n",
+    "trainset = ToySequenceData(n_samples=1000, max_seq_len=seq_max_len)\n",
+    "testset = ToySequenceData(n_samples=500, max_seq_len=seq_max_len)\n",
+    "\n",
+    "# tf Graph input\n",
+    "x = tf.placeholder(\"float\", [None, seq_max_len, 1])\n",
+    "y = tf.placeholder(\"float\", [None, n_classes])\n",
+    "# A placeholder for indicating each sequence length\n",
+    "seqlen = tf.placeholder(tf.int32, [None])\n",
+    "\n",
+    "# Define weights\n",
+    "weights = {\n",
+    "    'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))\n",
+    "}\n",
+    "biases = {\n",
+    "    'out': tf.Variable(tf.random_normal([n_classes]))\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def dynamicRNN(x, seqlen, weights, biases):\n",
+    "\n",
+    "    # Prepare data shape to match `rnn` function requirements\n",
+    "    # Current data input shape: (batch_size, n_steps, n_input)\n",
+    "    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)\n",
+    "    \n",
+    "    # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)\n",
+    "    x = tf.unstack(x, seq_max_len, 1)\n",
+    "\n",
+    "    # Define a lstm cell with tensorflow\n",
+    "    lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden)\n",
+    "\n",
+    "    # Get lstm cell output, providing 'sequence_length' will perform dynamic\n",
+    "    # calculation.\n",
+    "    outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x, dtype=tf.float32,\n",
+    "                                sequence_length=seqlen)\n",
+    "\n",
+    "    # When performing dynamic calculation, we must retrieve the last\n",
+    "    # dynamically computed output, i.e., if a sequence length is 10, we need\n",
+    "    # to retrieve the 10th output.\n",
+    "    # However TensorFlow doesn't support advanced indexing yet, so we build\n",
+    "    # a custom op that for each sample in batch size, get its length and\n",
+    "    # get the corresponding relevant output.\n",
+    "\n",
+    "    # 'outputs' is a list of output at every timestep, we pack them in a Tensor\n",
+    "    # and change back dimension to [batch_size, n_step, n_input]\n",
+    "    outputs = tf.stack(outputs)\n",
+    "    outputs = tf.transpose(outputs, [1, 0, 2])\n",
+    "\n",
+    "    # Hack to build the indexing and retrieve the right output.\n",
+    "    batch_size = tf.shape(outputs)[0]\n",
+    "    # Start indices for each sample\n",
+    "    index = tf.range(0, batch_size) * seq_max_len + (seqlen - 1)\n",
+    "    # Indexing\n",
+    "    outputs = tf.gather(tf.reshape(outputs, [-1, n_hidden]), index)\n",
+    "\n",
+    "    # Linear activation, using outputs computed above\n",
+    "    return tf.matmul(outputs, weights['out']) + biases['out']"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/Users/aymeric.damien/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/gradients_impl.py:93: UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.\n",
+      "  \"Converting sparse IndexedSlices to a dense Tensor of unknown shape. \"\n"
+     ]
+    }
+   ],
+   "source": [
+    "pred = dynamicRNN(x, seqlen, weights, biases)\n",
+    "\n",
+    "# Define loss and optimizer\n",
+    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n",
+    "optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)\n",
+    "\n",
+    "# Evaluate model\n",
+    "correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\n",
+    "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
+    "\n",
+    "# Initialize the variables (i.e. assign their default value)\n",
+    "init = tf.global_variables_initializer()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "collapsed": false,
+    "scrolled": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Step 1, Minibatch Loss= 0.864517, Training Accuracy= 0.42188\n",
+      "Step 200, Minibatch Loss= 0.686012, Training Accuracy= 0.43269\n",
+      "Step 400, Minibatch Loss= 0.682970, Training Accuracy= 0.48077\n",
+      "Step 600, Minibatch Loss= 0.679640, Training Accuracy= 0.50962\n",
+      "Step 800, Minibatch Loss= 0.675208, Training Accuracy= 0.53846\n",
+      "Step 1000, Minibatch Loss= 0.668636, Training Accuracy= 0.56731\n",
+      "Step 1200, Minibatch Loss= 0.657525, Training Accuracy= 0.62500\n",
+      "Step 1400, Minibatch Loss= 0.635423, Training Accuracy= 0.67308\n",
+      "Step 1600, Minibatch Loss= 0.580433, Training Accuracy= 0.75962\n",
+      "Step 1800, Minibatch Loss= 0.475599, Training Accuracy= 0.81731\n",
+      "Step 2000, Minibatch Loss= 0.434865, Training Accuracy= 0.83654\n",
+      "Step 2200, Minibatch Loss= 0.423690, Training Accuracy= 0.85577\n",
+      "Step 2400, Minibatch Loss= 0.417472, Training Accuracy= 0.85577\n",
+      "Step 2600, Minibatch Loss= 0.412906, Training Accuracy= 0.85577\n",
+      "Step 2800, Minibatch Loss= 0.409193, Training Accuracy= 0.85577\n",
+      "Step 3000, Minibatch Loss= 0.406035, Training Accuracy= 0.86538\n",
+      "Step 3200, Minibatch Loss= 0.403287, Training Accuracy= 0.87500\n",
+      "Step 3400, Minibatch Loss= 0.400862, Training Accuracy= 0.87500\n",
+      "Step 3600, Minibatch Loss= 0.398704, Training Accuracy= 0.86538\n",
+      "Step 3800, Minibatch Loss= 0.396768, Training Accuracy= 0.86538\n",
+      "Step 4000, Minibatch Loss= 0.395017, Training Accuracy= 0.86538\n",
+      "Step 4200, Minibatch Loss= 0.393422, Training Accuracy= 0.86538\n",
+      "Step 4400, Minibatch Loss= 0.391957, Training Accuracy= 0.85577\n",
+      "Step 4600, Minibatch Loss= 0.390600, Training Accuracy= 0.85577\n",
+      "Step 4800, Minibatch Loss= 0.389334, Training Accuracy= 0.86538\n",
+      "Step 5000, Minibatch Loss= 0.388143, Training Accuracy= 0.86538\n",
+      "Step 5200, Minibatch Loss= 0.387015, Training Accuracy= 0.86538\n",
+      "Step 5400, Minibatch Loss= 0.385940, Training Accuracy= 0.86538\n",
+      "Step 5600, Minibatch Loss= 0.384907, Training Accuracy= 0.86538\n",
+      "Step 5800, Minibatch Loss= 0.383904, Training Accuracy= 0.85577\n",
+      "Step 6000, Minibatch Loss= 0.382921, Training Accuracy= 0.86538\n",
+      "Step 6200, Minibatch Loss= 0.381941, Training Accuracy= 0.86538\n",
+      "Step 6400, Minibatch Loss= 0.380947, Training Accuracy= 0.86538\n",
+      "Step 6600, Minibatch Loss= 0.379912, Training Accuracy= 0.86538\n",
+      "Step 6800, Minibatch Loss= 0.378796, Training Accuracy= 0.86538\n",
+      "Step 7000, Minibatch Loss= 0.377540, Training Accuracy= 0.86538\n",
+      "Step 7200, Minibatch Loss= 0.376041, Training Accuracy= 0.86538\n",
+      "Step 7400, Minibatch Loss= 0.374130, Training Accuracy= 0.85577\n",
+      "Step 7600, Minibatch Loss= 0.371514, Training Accuracy= 0.85577\n",
+      "Step 7800, Minibatch Loss= 0.367723, Training Accuracy= 0.85577\n",
+      "Step 8000, Minibatch Loss= 0.362049, Training Accuracy= 0.85577\n",
+      "Step 8200, Minibatch Loss= 0.353558, Training Accuracy= 0.85577\n",
+      "Step 8400, Minibatch Loss= 0.341072, Training Accuracy= 0.86538\n",
+      "Step 8600, Minibatch Loss= 0.323062, Training Accuracy= 0.87500\n",
+      "Step 8800, Minibatch Loss= 0.299278, Training Accuracy= 0.89423\n",
+      "Step 9000, Minibatch Loss= 0.273857, Training Accuracy= 0.90385\n",
+      "Step 9200, Minibatch Loss= 0.248392, Training Accuracy= 0.91346\n",
+      "Step 9400, Minibatch Loss= 0.221348, Training Accuracy= 0.92308\n",
+      "Step 9600, Minibatch Loss= 0.191947, Training Accuracy= 0.92308\n",
+      "Step 9800, Minibatch Loss= 0.159308, Training Accuracy= 0.93269\n",
+      "Step 10000, Minibatch Loss= 0.136938, Training Accuracy= 0.96154\n",
+      "Optimization Finished!\n",
+      "Testing Accuracy: 0.952\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Start training\n",
+    "with tf.Session() as sess:\n",
+    "\n",
+    "    # Run the initializer\n",
+    "    sess.run(init)\n",
+    "\n",
+    "    for step in range(1, training_steps+1):\n",
+    "        batch_x, batch_y, batch_seqlen = trainset.next(batch_size)\n",
+    "        # Run optimization op (backprop)\n",
+    "        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,\n",
+    "                                       seqlen: batch_seqlen})\n",
+    "        if step % display_step == 0 or step == 1:\n",
+    "            # Calculate batch accuracy & loss\n",
+    "            acc, loss = sess.run([accuracy, cost], feed_dict={x: batch_x, y: batch_y,\n",
+    "                                                seqlen: batch_seqlen})\n",
+    "            print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\n",
+    "                  \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \\\n",
+    "                  \"{:.5f}\".format(acc))\n",
+    "\n",
+    "    print(\"Optimization Finished!\")\n",
+    "\n",
+    "    # Calculate accuracy\n",
+    "    test_data = testset.data\n",
+    "    test_label = testset.labels\n",
+    "    test_seqlen = testset.seqlen\n",
+    "    print(\"Testing Accuracy:\", \\\n",
+    "        sess.run(accuracy, feed_dict={x: test_data, y: test_label,\n",
+    "                                      seqlen: test_seqlen}))"
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python [default]",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
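
The indexing hack inside dynamicRNN above flattens the (batch_size, seq_max_len, n_hidden) outputs and picks row batch_index * seq_max_len + (seqlen - 1) for each sample, i.e. the last valid timestep. A minimal NumPy sketch of the same arithmetic under toy shapes (names are illustrative, not from the notebook):

import numpy as np

batch_size, seq_max_len, n_hidden = 3, 5, 4
outputs = np.arange(batch_size * seq_max_len * n_hidden,
                    dtype=float).reshape(batch_size, seq_max_len, n_hidden)
seqlen = np.array([2, 5, 3])                  # true length of each sequence

# Same trick as the tf.gather on the flattened outputs:
index = np.arange(batch_size) * seq_max_len + (seqlen - 1)
flat = outputs.reshape(-1, n_hidden)
last_valid = flat[index]                      # row t = seqlen-1 of each sample

# Equivalent direct indexing, as a sanity check.
assert (last_valid == outputs[np.arange(batch_size), seqlen - 1]).all()
print(last_valid.shape)                       # (3, 4)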

Diff file is too large
+ 323 - 0
tensorflow_v1/notebooks/3_NeuralNetworks/gan.ipynb


Diff file is too large
+ 390 - 0
tensorflow_v1/notebooks/3_NeuralNetworks/neural_network.ipynb


+ 287 - 0
tensorflow_v1/notebooks/3_NeuralNetworks/neural_network_eager_api.ipynb

@@ -0,0 +1,287 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Neural Network with Eager API\n",
+    "\n",
+    "Build a 2-hidden layers fully connected neural network (a.k.a multilayer perceptron) with TensorFlow's Eager API.\n",
+    "\n",
+    "This example is using some of TensorFlow higher-level wrappers (tf.estimators, tf.layers, tf.metrics, ...), you can check 'neural_network_raw' example for a raw, and more detailed TensorFlow implementation.\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Neural Network Overview\n",
+    "\n",
+    "<img src=\"http://cs231n.github.io/assets/nn1/neural_net2.jpeg\" alt=\"nn\" style=\"width: 400px;\"/>\n",
+    "\n",
+    "## MNIST Dataset Overview\n",
+    "\n",
+    "This example is using MNIST handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28x28 pixels) with values from 0 to 1. For simplicity, each image has been flattened and converted to a 1-D numpy array of 784 features (28*28).\n",
+    "\n",
+    "![MNIST Dataset](http://neuralnetworksanddeeplearning.com/images/mnist_100_digits.png)\n",
+    "\n",
+    "More info: http://yann.lecun.com/exdb/mnist/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "from __future__ import print_function\n",
+    "\n",
+    "import tensorflow as tf"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Set Eager API\n",
+    "tf.enable_eager_execution()\n",
+    "tfe = tf.contrib.eager"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Import MNIST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=False)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Parameters\n",
+    "learning_rate = 0.001\n",
+    "num_steps = 1000\n",
+    "batch_size = 128\n",
+    "display_step = 100\n",
+    "\n",
+    "# Network Parameters\n",
+    "n_hidden_1 = 256 # 1st layer number of neurons\n",
+    "n_hidden_2 = 256 # 2nd layer number of neurons\n",
+    "num_input = 784 # MNIST data input (img shape: 28*28)\n",
+    "num_classes = 10 # MNIST total classes (0-9 digits)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Using TF Dataset to split data into batches\n",
+    "dataset = tf.data.Dataset.from_tensor_slices(\n",
+    "    (mnist.train.images, mnist.train.labels))\n",
+    "dataset = dataset.repeat().batch(batch_size).prefetch(batch_size)\n",
+    "dataset_iter = tfe.Iterator(dataset)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Define the neural network. To use eager API and tf.layers API together,\n",
+    "# we must instantiate a tfe.Network class as follow:\n",
+    "class NeuralNet(tfe.Network):\n",
+    "    def __init__(self):\n",
+    "        # Define each layer\n",
+    "        super(NeuralNet, self).__init__()\n",
+    "        # Hidden fully connected layer with 256 neurons\n",
+    "        self.layer1 = self.track_layer(\n",
+    "            tf.layers.Dense(n_hidden_1, activation=tf.nn.relu))\n",
+    "        # Hidden fully connected layer with 256 neurons\n",
+    "        self.layer2 = self.track_layer(\n",
+    "            tf.layers.Dense(n_hidden_2, activation=tf.nn.relu))\n",
+    "        # Output fully connected layer with a neuron for each class\n",
+    "        self.out_layer = self.track_layer(tf.layers.Dense(num_classes))\n",
+    "\n",
+    "    def call(self, x):\n",
+    "        x = self.layer1(x)\n",
+    "        x = self.layer2(x)\n",
+    "        return self.out_layer(x)\n",
+    "\n",
+    "\n",
+    "neural_net = NeuralNet()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Cross-Entropy loss function\n",
+    "def loss_fn(inference_fn, inputs, labels):\n",
+    "    # Using sparse_softmax cross entropy\n",
+    "    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n",
+    "        logits=inference_fn(inputs), labels=labels))\n",
+    "\n",
+    "\n",
+    "# Calculate accuracy\n",
+    "def accuracy_fn(inference_fn, inputs, labels):\n",
+    "    prediction = tf.nn.softmax(inference_fn(inputs))\n",
+    "    correct_pred = tf.equal(tf.argmax(prediction, 1), labels)\n",
+    "    return tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
+    "\n",
+    "\n",
+    "# SGD Optimizer\n",
+    "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n",
+    "\n",
+    "# Compute gradients\n",
+    "grad = tfe.implicit_gradients(loss_fn)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Initial loss= 2.340397596\n",
+      "Step: 0001  loss= 2.340397596  accuracy= 0.0703\n",
+      "Step: 0100  loss= 0.586046159  accuracy= 0.8305\n",
+      "Step: 0200  loss= 0.253318846  accuracy= 0.9282\n",
+      "Step: 0300  loss= 0.214748293  accuracy= 0.9377\n",
+      "Step: 0400  loss= 0.180644721  accuracy= 0.9466\n",
+      "Step: 0500  loss= 0.137285724  accuracy= 0.9591\n",
+      "Step: 0600  loss= 0.119845696  accuracy= 0.9636\n",
+      "Step: 0700  loss= 0.113618039  accuracy= 0.9665\n",
+      "Step: 0800  loss= 0.109642141  accuracy= 0.9676\n",
+      "Step: 0900  loss= 0.085067607  accuracy= 0.9746\n",
+      "Step: 1000  loss= 0.079819344  accuracy= 0.9754\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Training\n",
+    "average_loss = 0.\n",
+    "average_acc = 0.\n",
+    "for step in range(num_steps):\n",
+    "\n",
+    "    # Iterate through the dataset\n",
+    "    d = dataset_iter.next()\n",
+    "    \n",
+    "    # Images\n",
+    "    x_batch = d[0]\n",
+    "    # Labels\n",
+    "    y_batch = tf.cast(d[1], dtype=tf.int64)\n",
+    "\n",
+    "    # Compute the batch loss\n",
+    "    batch_loss = loss_fn(neural_net, x_batch, y_batch)\n",
+    "    average_loss += batch_loss\n",
+    "    # Compute the batch accuracy\n",
+    "    batch_accuracy = accuracy_fn(neural_net, x_batch, y_batch)\n",
+    "    average_acc += batch_accuracy\n",
+    "\n",
+    "    if step == 0:\n",
+    "        # Display the initial cost, before optimizing\n",
+    "        print(\"Initial loss= {:.9f}\".format(average_loss))\n",
+    "\n",
+    "    # Update the variables following gradients info\n",
+    "    optimizer.apply_gradients(grad(neural_net, x_batch, y_batch))\n",
+    "\n",
+    "    # Display info\n",
+    "    if (step + 1) % display_step == 0 or step == 0:\n",
+    "        if step > 0:\n",
+    "            average_loss /= display_step\n",
+    "            average_acc /= display_step\n",
+    "        print(\"Step:\", '%04d' % (step + 1), \" loss=\",\n",
+    "              \"{:.9f}\".format(average_loss), \" accuracy=\",\n",
+    "              \"{:.4f}\".format(average_acc))\n",
+    "        average_loss = 0.\n",
+    "        average_acc = 0."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Testset Accuracy: 0.9719\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Evaluate model on the test image set\n",
+    "testX = mnist.test.images\n",
+    "testY = mnist.test.labels\n",
+    "\n",
+    "test_acc = accuracy_fn(neural_net, testX, testY)\n",
+    "print(\"Testset Accuracy: {:.4f}\".format(test_acc))"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.14"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
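
Because the dataset in the notebook above is loaded with one_hot=False, the labels are integer class ids, and accuracy_fn compares tf.argmax of the softmax output directly against them. A minimal NumPy version of that comparison, using toy values and hypothetical names:

import numpy as np

logits = np.array([[2.0, 0.5, 0.1],
                   [0.2, 0.1, 3.0]])
labels = np.array([0, 2])                     # integer class ids (one_hot=False)

# Softmax is monotonic, so argmax of the probabilities equals argmax of the logits.
exp = np.exp(logits - logits.max(axis=1, keepdims=True))
probs = exp / exp.sum(axis=1, keepdims=True)
pred = probs.argmax(axis=1)

accuracy = (pred == labels).mean()
print(accuracy)                               # 1.0 for this toy batch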

+ 224 - 0
tensorflow_v1/notebooks/3_NeuralNetworks/neural_network_raw.ipynb

@@ -0,0 +1,224 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
+   "source": [
+    "# Neural Network Example\n",
+    "\n",
+    "Build a 2-hidden layers fully connected neural network (a.k.a multilayer perceptron) with TensorFlow.\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Neural Network Overview\n",
+    "\n",
+    "<img src=\"http://cs231n.github.io/assets/nn1/neural_net2.jpeg\" alt=\"nn\" style=\"width: 400px;\"/>\n",
+    "\n",
+    "## MNIST Dataset Overview\n",
+    "\n",
+    "This example is using MNIST handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28x28 pixels) with values from 0 to 1. For simplicity, each image has been flattened and converted to a 1-D numpy array of 784 features (28*28).\n",
+    "\n",
+    "![MNIST Dataset](http://neuralnetworksanddeeplearning.com/images/mnist_100_digits.png)\n",
+    "\n",
+    "More info: http://yann.lecun.com/exdb/mnist/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "from __future__ import print_function\n",
+    "\n",
+    "# Import MNIST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n",
+    "\n",
+    "import tensorflow as tf"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Parameters\n",
+    "learning_rate = 0.1\n",
+    "num_steps = 500\n",
+    "batch_size = 128\n",
+    "display_step = 100\n",
+    "\n",
+    "# Network Parameters\n",
+    "n_hidden_1 = 256 # 1st layer number of neurons\n",
+    "n_hidden_2 = 256 # 2nd layer number of neurons\n",
+    "num_input = 784 # MNIST data input (img shape: 28*28)\n",
+    "num_classes = 10 # MNIST total classes (0-9 digits)\n",
+    "\n",
+    "# tf Graph input\n",
+    "X = tf.placeholder(\"float\", [None, num_input])\n",
+    "Y = tf.placeholder(\"float\", [None, num_classes])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Store layers weight & bias\n",
+    "weights = {\n",
+    "    'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),\n",
+    "    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n",
+    "    'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))\n",
+    "}\n",
+    "biases = {\n",
+    "    'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n",
+    "    'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n",
+    "    'out': tf.Variable(tf.random_normal([num_classes]))\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# Create model\n",
+    "def neural_net(x):\n",
+    "    # Hidden fully connected layer with 256 neurons\n",
+    "    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n",
+    "    # Hidden fully connected layer with 256 neurons\n",
+    "    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n",
+    "    # Output fully connected layer with a neuron for each class\n",
+    "    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n",
+    "    return out_layer"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# Construct model\n",
+    "logits = neural_net(X)\n",
+    "\n",
+    "# Define loss and optimizer\n",
+    "loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n",
+    "    logits=logits, labels=Y))\n",
+    "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n",
+    "train_op = optimizer.minimize(loss_op)\n",
+    "\n",
+    "# Evaluate model (with test logits, for dropout to be disabled)\n",
+    "correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))\n",
+    "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
+    "\n",
+    "# Initialize the variables (i.e. assign their default value)\n",
+    "init = tf.global_variables_initializer()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Step 1, Minibatch Loss= 13208.1406, Training Accuracy= 0.266\n",
+      "Step 100, Minibatch Loss= 462.8610, Training Accuracy= 0.867\n",
+      "Step 200, Minibatch Loss= 232.8298, Training Accuracy= 0.844\n",
+      "Step 300, Minibatch Loss= 85.2141, Training Accuracy= 0.891\n",
+      "Step 400, Minibatch Loss= 38.0552, Training Accuracy= 0.883\n",
+      "Step 500, Minibatch Loss= 55.3689, Training Accuracy= 0.867\n",
+      "Optimization Finished!\n",
+      "Testing Accuracy: 0.8729\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Start training\n",
+    "with tf.Session() as sess:\n",
+    "\n",
+    "    # Run the initializer\n",
+    "    sess.run(init)\n",
+    "\n",
+    "    for step in range(1, num_steps+1):\n",
+    "        batch_x, batch_y = mnist.train.next_batch(batch_size)\n",
+    "        # Run optimization op (backprop)\n",
+    "        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})\n",
+    "        if step % display_step == 0 or step == 1:\n",
+    "            # Calculate batch loss and accuracy\n",
+    "            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,\n",
+    "                                                                 Y: batch_y})\n",
+    "            print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\n",
+    "                  \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \\\n",
+    "                  \"{:.3f}\".format(acc))\n",
+    "\n",
+    "    print(\"Optimization Finished!\")\n",
+    "\n",
+    "    # Calculate accuracy for MNIST test images\n",
+    "    print(\"Testing Accuracy:\", \\\n",
+    "        sess.run(accuracy, feed_dict={X: mnist.test.images,\n",
+    "                                      Y: mnist.test.labels}))"
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
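
The neural_net function in the notebook above is plain matrix algebra: each layer is a matmul plus a bias, the raw logits are returned, and softmax is applied only inside the cross-entropy loss. A NumPy sketch of that forward pass under the same dimensions (variable names here are illustrative only):

import numpy as np

rng = np.random.RandomState(0)
num_input, n_hidden_1, n_hidden_2, num_classes = 784, 256, 256, 10

W = {'h1': rng.randn(num_input, n_hidden_1),
     'h2': rng.randn(n_hidden_1, n_hidden_2),
     'out': rng.randn(n_hidden_2, num_classes)}
b = {'b1': rng.randn(n_hidden_1),
     'b2': rng.randn(n_hidden_2),
     'out': rng.randn(num_classes)}

def neural_net(x):
    # Mirrors the notebook: matmul + bias per layer, logits returned as-is.
    layer_1 = x @ W['h1'] + b['b1']
    layer_2 = layer_1 @ W['h2'] + b['b2']
    return layer_2 @ W['out'] + b['out']

x = rng.randn(128, num_input)                 # a toy batch of flattened images
print(neural_net(x).shape)                    # (128, 10) logits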

+ 292 - 0
tensorflow_v1/notebooks/3_NeuralNetworks/recurrent_network.ipynb

@@ -0,0 +1,292 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
+   "source": [
+    "# Recurrent Neural Network Example\n",
+    "\n",
+    "Build a recurrent neural network (LSTM) with TensorFlow.\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## RNN Overview\n",
+    "\n",
+    "<img src=\"http://colah.github.io/posts/2015-08-Understanding-LSTMs/img/RNN-unrolled.png\" alt=\"nn\" style=\"width: 600px;\"/>\n",
+    "\n",
+    "References:\n",
+    "- [Long Short Term Memory](http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf), Sepp Hochreiter & Jurgen Schmidhuber, Neural Computation 9(8): 1735-1780, 1997.\n",
+    "\n",
+    "## MNIST Dataset Overview\n",
+    "\n",
+    "This example is using MNIST handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28x28 pixels) with values from 0 to 1. For simplicity, each image has been flattened and converted to a 1-D numpy array of 784 features (28*28).\n",
+    "\n",
+    "![MNIST Dataset](http://neuralnetworksanddeeplearning.com/images/mnist_100_digits.png)\n",
+    "\n",
+    "To classify images using a recurrent neural network, we consider every image row as a sequence of pixels. Because MNIST image shape is 28*28px, we will then handle 28 sequences of 28 timesteps for every sample.\n",
+    "\n",
+    "More info: http://yann.lecun.com/exdb/mnist/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "from __future__ import print_function\n",
+    "\n",
+    "import tensorflow as tf\n",
+    "from tensorflow.contrib import rnn\n",
+    "\n",
+    "# Import MNIST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# Training Parameters\n",
+    "learning_rate = 0.001\n",
+    "training_steps = 10000\n",
+    "batch_size = 128\n",
+    "display_step = 200\n",
+    "\n",
+    "# Network Parameters\n",
+    "num_input = 28 # MNIST data input (img shape: 28*28)\n",
+    "timesteps = 28 # timesteps\n",
+    "num_hidden = 128 # hidden layer num of features\n",
+    "num_classes = 10 # MNIST total classes (0-9 digits)\n",
+    "\n",
+    "# tf Graph input\n",
+    "X = tf.placeholder(\"float\", [None, timesteps, num_input])\n",
+    "Y = tf.placeholder(\"float\", [None, num_classes])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Define weights\n",
+    "weights = {\n",
+    "    'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))\n",
+    "}\n",
+    "biases = {\n",
+    "    'out': tf.Variable(tf.random_normal([num_classes]))\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "def RNN(x, weights, biases):\n",
+    "\n",
+    "    # Prepare data shape to match `rnn` function requirements\n",
+    "    # Current data input shape: (batch_size, timesteps, n_input)\n",
+    "    # Required shape: 'timesteps' tensors list of shape (batch_size, n_input)\n",
+    "\n",
+    "    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input)\n",
+    "    x = tf.unstack(x, timesteps, 1)\n",
+    "\n",
+    "    # Define a lstm cell with tensorflow\n",
+    "    lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)\n",
+    "\n",
+    "    # Get lstm cell output\n",
+    "    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)\n",
+    "\n",
+    "    # Linear activation, using rnn inner loop last output\n",
+    "    return tf.matmul(outputs[-1], weights['out']) + biases['out']"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "logits = RNN(X, weights, biases)\n",
+    "prediction = tf.nn.softmax(logits)\n",
+    "\n",
+    "# Define loss and optimizer\n",
+    "loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n",
+    "    logits=logits, labels=Y))\n",
+    "optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n",
+    "train_op = optimizer.minimize(loss_op)\n",
+    "\n",
+    "# Evaluate model (with test logits, for dropout to be disabled)\n",
+    "correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))\n",
+    "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
+    "\n",
+    "# Initialize the variables (i.e. assign their default value)\n",
+    "init = tf.global_variables_initializer()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Step 1, Minibatch Loss= 2.6268, Training Accuracy= 0.102\n",
+      "Step 200, Minibatch Loss= 2.0722, Training Accuracy= 0.328\n",
+      "Step 400, Minibatch Loss= 1.9181, Training Accuracy= 0.336\n",
+      "Step 600, Minibatch Loss= 1.8858, Training Accuracy= 0.336\n",
+      "Step 800, Minibatch Loss= 1.7022, Training Accuracy= 0.422\n",
+      "Step 1000, Minibatch Loss= 1.6365, Training Accuracy= 0.477\n",
+      "Step 1200, Minibatch Loss= 1.6691, Training Accuracy= 0.516\n",
+      "Step 1400, Minibatch Loss= 1.4626, Training Accuracy= 0.547\n",
+      "Step 1600, Minibatch Loss= 1.4707, Training Accuracy= 0.539\n",
+      "Step 1800, Minibatch Loss= 1.4087, Training Accuracy= 0.570\n",
+      "Step 2000, Minibatch Loss= 1.3033, Training Accuracy= 0.570\n",
+      "Step 2200, Minibatch Loss= 1.3773, Training Accuracy= 0.508\n",
+      "Step 2400, Minibatch Loss= 1.3092, Training Accuracy= 0.570\n",
+      "Step 2600, Minibatch Loss= 1.2272, Training Accuracy= 0.609\n",
+      "Step 2800, Minibatch Loss= 1.1827, Training Accuracy= 0.633\n",
+      "Step 3000, Minibatch Loss= 1.0453, Training Accuracy= 0.641\n",
+      "Step 3200, Minibatch Loss= 1.0400, Training Accuracy= 0.648\n",
+      "Step 3400, Minibatch Loss= 1.1145, Training Accuracy= 0.656\n",
+      "Step 3600, Minibatch Loss= 0.9884, Training Accuracy= 0.688\n",
+      "Step 3800, Minibatch Loss= 1.0395, Training Accuracy= 0.703\n",
+      "Step 4000, Minibatch Loss= 1.0096, Training Accuracy= 0.664\n",
+      "Step 4200, Minibatch Loss= 0.8806, Training Accuracy= 0.758\n",
+      "Step 4400, Minibatch Loss= 0.9090, Training Accuracy= 0.766\n",
+      "Step 4600, Minibatch Loss= 1.0060, Training Accuracy= 0.703\n",
+      "Step 4800, Minibatch Loss= 0.8954, Training Accuracy= 0.703\n",
+      "Step 5000, Minibatch Loss= 0.8163, Training Accuracy= 0.750\n",
+      "Step 5200, Minibatch Loss= 0.7620, Training Accuracy= 0.773\n",
+      "Step 5400, Minibatch Loss= 0.7388, Training Accuracy= 0.758\n",
+      "Step 5600, Minibatch Loss= 0.7604, Training Accuracy= 0.695\n",
+      "Step 5800, Minibatch Loss= 0.7459, Training Accuracy= 0.734\n",
+      "Step 6000, Minibatch Loss= 0.7448, Training Accuracy= 0.734\n",
+      "Step 6200, Minibatch Loss= 0.7208, Training Accuracy= 0.773\n",
+      "Step 6400, Minibatch Loss= 0.6557, Training Accuracy= 0.773\n",
+      "Step 6600, Minibatch Loss= 0.8616, Training Accuracy= 0.758\n",
+      "Step 6800, Minibatch Loss= 0.6089, Training Accuracy= 0.773\n",
+      "Step 7000, Minibatch Loss= 0.5020, Training Accuracy= 0.844\n",
+      "Step 7200, Minibatch Loss= 0.5980, Training Accuracy= 0.812\n",
+      "Step 7400, Minibatch Loss= 0.6786, Training Accuracy= 0.766\n",
+      "Step 7600, Minibatch Loss= 0.4891, Training Accuracy= 0.859\n",
+      "Step 7800, Minibatch Loss= 0.7042, Training Accuracy= 0.797\n",
+      "Step 8000, Minibatch Loss= 0.4200, Training Accuracy= 0.859\n",
+      "Step 8200, Minibatch Loss= 0.6442, Training Accuracy= 0.742\n",
+      "Step 8400, Minibatch Loss= 0.5569, Training Accuracy= 0.828\n",
+      "Step 8600, Minibatch Loss= 0.5838, Training Accuracy= 0.836\n",
+      "Step 8800, Minibatch Loss= 0.5579, Training Accuracy= 0.812\n",
+      "Step 9000, Minibatch Loss= 0.4337, Training Accuracy= 0.867\n",
+      "Step 9200, Minibatch Loss= 0.4366, Training Accuracy= 0.844\n",
+      "Step 9400, Minibatch Loss= 0.5051, Training Accuracy= 0.844\n",
+      "Step 9600, Minibatch Loss= 0.5244, Training Accuracy= 0.805\n",
+      "Step 9800, Minibatch Loss= 0.4932, Training Accuracy= 0.805\n",
+      "Step 10000, Minibatch Loss= 0.4833, Training Accuracy= 0.852\n",
+      "Optimization Finished!\n",
+      "Testing Accuracy: 0.882812\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Start training\n",
+    "with tf.Session() as sess:\n",
+    "\n",
+    "    # Run the initializer\n",
+    "    sess.run(init)\n",
+    "\n",
+    "    for step in range(1, training_steps+1):\n",
+    "        batch_x, batch_y = mnist.train.next_batch(batch_size)\n",
+    "        # Reshape data to get 28 seq of 28 elements\n",
+    "        batch_x = batch_x.reshape((batch_size, timesteps, num_input))\n",
+    "        # Run optimization op (backprop)\n",
+    "        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})\n",
+    "        if step % display_step == 0 or step == 1:\n",
+    "            # Calculate batch loss and accuracy\n",
+    "            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,\n",
+    "                                                                 Y: batch_y})\n",
+    "            print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\n",
+    "                  \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \\\n",
+    "                  \"{:.3f}\".format(acc))\n",
+    "\n",
+    "    print(\"Optimization Finished!\")\n",
+    "\n",
+    "    # Calculate accuracy for 128 mnist test images\n",
+    "    test_len = 128\n",
+    "    test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input))\n",
+    "    test_label = mnist.test.labels[:test_len]\n",
+    "    print(\"Testing Accuracy:\", \\\n",
+    "        sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python [default]",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

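Side note (not part of the diff above): recurrent_network.ipynb unstacks the `(batch, timesteps, num_input)` tensor into a Python list because `rnn.static_rnn` requires it. A minimal sketch of the same idea with `tf.nn.dynamic_rnn`, which consumes the 3-D tensor directly; the shape constants mirror the notebook, but the snippet itself is an illustration, not code from this commit.

```python
import tensorflow as tf

# Same shapes as the notebook: 28 timesteps of 28 features,
# 128 hidden units, 10 output classes.
timesteps, num_input, num_hidden, num_classes = 28, 28, 128, 10

X = tf.placeholder(tf.float32, [None, timesteps, num_input])

# dynamic_rnn accepts the (batch, timesteps, num_input) tensor as-is,
# so no tf.unstack is needed.
cell = tf.nn.rnn_cell.BasicLSTMCell(num_hidden, forget_bias=1.0)
outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)

# Classify from the last timestep's output, like `outputs[-1]`
# in the static_rnn version of the notebook.
last_output = outputs[:, -1, :]
W = tf.Variable(tf.random_normal([num_hidden, num_classes]))
b = tf.Variable(tf.random_normal([num_classes]))
logits = tf.matmul(last_output, W) + b
```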
Diff file is too large
+ 316 - 0
tensorflow_v1/notebooks/3_NeuralNetworks/variational_autoencoder.ipynb


+ 252 - 0
tensorflow_v1/notebooks/4_Utils/save_restore_model.ipynb

@@ -0,0 +1,252 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
+   "source": [
+    "# Save & Restore a Model\n",
+    "\n",
+    "Save and Restore a model using TensorFlow.\n",
+    "This example is using the MNIST database of handwritten digits\n",
+    "(http://yann.lecun.com/exdb/mnist/).\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting MNIST_data/train-images-idx3-ubyte.gz\n",
+      "Extracting MNIST_data/train-labels-idx1-ubyte.gz\n",
+      "Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "from __future__ import print_function\n",
+    "\n",
+    "# Import MINST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n",
+    "\n",
+    "import tensorflow as tf"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Parameters\n",
+    "learning_rate = 0.001\n",
+    "batch_size = 100\n",
+    "display_step = 1\n",
+    "model_path = \"/tmp/model.ckpt\"\n",
+    "\n",
+    "# Network Parameters\n",
+    "n_hidden_1 = 256 # 1st layer number of features\n",
+    "n_hidden_2 = 256 # 2nd layer number of features\n",
+    "n_input = 784 # MNIST data input (img shape: 28*28)\n",
+    "n_classes = 10 # MNIST total classes (0-9 digits)\n",
+    "\n",
+    "# tf Graph input\n",
+    "x = tf.placeholder(\"float\", [None, n_input])\n",
+    "y = tf.placeholder(\"float\", [None, n_classes])\n",
+    "\n",
+    "\n",
+    "# Create model\n",
+    "def multilayer_perceptron(x, weights, biases):\n",
+    "    # Hidden layer with RELU activation\n",
+    "    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n",
+    "    layer_1 = tf.nn.relu(layer_1)\n",
+    "    # Hidden layer with RELU activation\n",
+    "    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n",
+    "    layer_2 = tf.nn.relu(layer_2)\n",
+    "    # Output layer with linear activation\n",
+    "    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n",
+    "    return out_layer\n",
+    "\n",
+    "# Store layers weight & bias\n",
+    "weights = {\n",
+    "    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n",
+    "    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n",
+    "    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))\n",
+    "}\n",
+    "biases = {\n",
+    "    'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n",
+    "    'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n",
+    "    'out': tf.Variable(tf.random_normal([n_classes]))\n",
+    "}\n",
+    "\n",
+    "# Construct model\n",
+    "pred = multilayer_perceptron(x, weights, biases)\n",
+    "\n",
+    "# Define loss and optimizer\n",
+    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n",
+    "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n",
+    "\n",
+    "# Initializing the variables\n",
+    "init = tf.global_variables_initializer()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# 'Saver' op to save and restore all the variables\n",
+    "saver = tf.train.Saver()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Starting 1st session...\n",
+      "Epoch: 0001 cost= 187.778896380\n",
+      "Epoch: 0002 cost= 42.367902536\n",
+      "Epoch: 0003 cost= 26.488964058\n",
+      "First Optimization Finished!\n",
+      "Accuracy: 0.9075\n",
+      "Model saved in file: /tmp/model.ckpt\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Running first session\n",
+    "print(\"Starting 1st session...\")\n",
+    "with tf.Session() as sess:\n",
+    "    # Initialize variables\n",
+    "    sess.run(init)\n",
+    "\n",
+    "    # Training cycle\n",
+    "    for epoch in range(3):\n",
+    "        avg_cost = 0.\n",
+    "        total_batch = int(mnist.train.num_examples/batch_size)\n",
+    "        # Loop over all batches\n",
+    "        for i in range(total_batch):\n",
+    "            batch_x, batch_y = mnist.train.next_batch(batch_size)\n",
+    "            # Run optimization op (backprop) and cost op (to get loss value)\n",
+    "            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,\n",
+    "                                                          y: batch_y})\n",
+    "            # Compute average loss\n",
+    "            avg_cost += c / total_batch\n",
+    "        # Display logs per epoch step\n",
+    "        if epoch % display_step == 0:\n",
+    "            print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \\\n",
+    "                \"{:.9f}\".format(avg_cost)\n",
+    "    print(\"First Optimization Finished!\")\n",
+    "\n",
+    "    # Test model\n",
+    "    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n",
+    "    # Calculate accuracy\n",
+    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n",
+    "    print(\"Accuracy:\", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))\n",
+    "\n",
+    "    # Save model weights to disk\n",
+    "    save_path = saver.save(sess, model_path)\n",
+    "    print(\"Model saved in file: %s\" % save_path)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Starting 2nd session...\n",
+      "Model restored from file: /tmp/model.ckpt\n",
+      "Epoch: 0001 cost= 18.292712951\n",
+      "Epoch: 0002 cost= 13.404136196\n",
+      "Epoch: 0003 cost= 9.855191723\n",
+      "Epoch: 0004 cost= 7.276933088\n",
+      "Epoch: 0005 cost= 5.564581285\n",
+      "Epoch: 0006 cost= 4.165259939\n",
+      "Epoch: 0007 cost= 3.139393926\n",
+      "Second Optimization Finished!\n",
+      "Accuracy: 0.9385\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Running a new session\n",
+    "print(\"Starting 2nd session...\")\n",
+    "with tf.Session() as sess:\n",
+    "    # Initialize variables\n",
+    "    sess.run(init)\n",
+    "\n",
+    "    # Restore model weights from previously saved model\n",
+    "    load_path = saver.restore(sess, model_path)\n",
+    "    print(\"Model restored from file: %s\" % save_path)\n",
+    "\n",
+    "    # Resume training\n",
+    "    for epoch in range(7):\n",
+    "        avg_cost = 0.\n",
+    "        total_batch = int(mnist.train.num_examples / batch_size)\n",
+    "        # Loop over all batches\n",
+    "        for i in range(total_batch):\n",
+    "            batch_x, batch_y = mnist.train.next_batch(batch_size)\n",
+    "            # Run optimization op (backprop) and cost op (to get loss value)\n",
+    "            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,\n",
+    "                                                          y: batch_y})\n",
+    "            # Compute average loss\n",
+    "            avg_cost += c / total_batch\n",
+    "        # Display logs per epoch step\n",
+    "        if epoch % display_step == 0:\n",
+    "            print(\"Epoch:\", '%04d' % (epoch + 1), \"cost=\", \\\n",
+    "                \"{:.9f}\".format(avg_cost))\n",
+    "    print(\"Second Optimization Finished!\")\n",
+    "\n",
+    "    # Test model\n",
+    "    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n",
+    "    # Calculate accuracy\n",
+    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n",
+    "    print(\"Accuracy:\", accuracy.eval(\n",
+    "        {x: mnist.test.images, y: mnist.test.labels}))"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python [default]",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}

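A small follow-up sketch to save_restore_model.ipynb: the notebook writes a single checkpoint path, but `tf.train.Saver` can also write numbered checkpoints during training and restore the newest one later. The checkpoint directory and the dummy counter variable below are assumptions for illustration only, not part of this commit.

```python
from __future__ import print_function
import os
import tensorflow as tf

# Assumed path for this sketch, not taken from the notebook.
checkpoint_dir = "/tmp/model_ckpts"
if not os.path.exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)

# Stand-in variable so the Saver has something to checkpoint.
step_var = tf.Variable(0, name="global_step", trainable=False)
increment_step = tf.assign_add(step_var, 1)

saver = tf.train.Saver(max_to_keep=3)  # keep only the 3 newest checkpoints

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1, 1001):
        sess.run(increment_step)       # stand-in for a real training op
        if step % 200 == 0:
            # Produces files such as model.ckpt-200, model.ckpt-400, ...
            saver.save(sess, os.path.join(checkpoint_dir, "model.ckpt"),
                       global_step=step)

# Later (or in a separate process): restore whatever checkpoint is newest.
with tf.Session() as sess:
    latest = tf.train.latest_checkpoint(checkpoint_dir)
    if latest is not None:
        saver.restore(sess, latest)
        print("Restored", latest, "step =", sess.run(step_var))
```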
+ 307 - 0
tensorflow_v1/notebooks/4_Utils/tensorboard_advanced.ipynb

@@ -0,0 +1,307 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Tensorboard Advanced\n",
+    "\n",
+    "Advanced visualization using Tensorboard (weights, gradient, ...). This example is using the MNIST database of handwritten digits\n",
+    "(http://yann.lecun.com/exdb/mnist/).\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "from __future__ import print_function\n",
+    "\n",
+    "import tensorflow as tf\n",
+    "\n",
+    "# Import MNIST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Parameters\n",
+    "learning_rate = 0.01\n",
+    "training_epochs = 25\n",
+    "batch_size = 100\n",
+    "display_step = 1\n",
+    "logs_path = '/tmp/tensorflow_logs/example/'\n",
+    "\n",
+    "# Network Parameters\n",
+    "n_hidden_1 = 256 # 1st layer number of features\n",
+    "n_hidden_2 = 256 # 2nd layer number of features\n",
+    "n_input = 784 # MNIST data input (img shape: 28*28)\n",
+    "n_classes = 10 # MNIST total classes (0-9 digits)\n",
+    "\n",
+    "# tf Graph Input\n",
+    "# mnist data image of shape 28*28=784\n",
+    "x = tf.placeholder(tf.float32, [None, 784], name='InputData')\n",
+    "# 0-9 digits recognition => 10 classes\n",
+    "y = tf.placeholder(tf.float32, [None, 10], name='LabelData')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Create model\n",
+    "def multilayer_perceptron(x, weights, biases):\n",
+    "    # Hidden layer with RELU activation\n",
+    "    layer_1 = tf.add(tf.matmul(x, weights['w1']), biases['b1'])\n",
+    "    layer_1 = tf.nn.relu(layer_1)\n",
+    "    # Create a summary to visualize the first layer ReLU activation\n",
+    "    tf.summary.histogram(\"relu1\", layer_1)\n",
+    "    # Hidden layer with RELU activation\n",
+    "    layer_2 = tf.add(tf.matmul(layer_1, weights['w2']), biases['b2'])\n",
+    "    layer_2 = tf.nn.relu(layer_2)\n",
+    "    # Create another summary to visualize the second layer ReLU activation\n",
+    "    tf.summary.histogram(\"relu2\", layer_2)\n",
+    "    # Output layer\n",
+    "    out_layer = tf.add(tf.matmul(layer_2, weights['w3']), biases['b3'])\n",
+    "    return out_layer\n",
+    "\n",
+    "# Store layers weight & bias\n",
+    "weights = {\n",
+    "    'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1]), name='W1'),\n",
+    "    'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]), name='W2'),\n",
+    "    'w3': tf.Variable(tf.random_normal([n_hidden_2, n_classes]), name='W3')\n",
+    "}\n",
+    "biases = {\n",
+    "    'b1': tf.Variable(tf.random_normal([n_hidden_1]), name='b1'),\n",
+    "    'b2': tf.Variable(tf.random_normal([n_hidden_2]), name='b2'),\n",
+    "    'b3': tf.Variable(tf.random_normal([n_classes]), name='b3')\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Encapsulating all ops into scopes, making Tensorboard's Graph\n",
+    "# Visualization more convenient\n",
+    "with tf.name_scope('Model'):\n",
+    "    # Build model\n",
+    "    pred = multilayer_perceptron(x, weights, biases)\n",
+    "\n",
+    "with tf.name_scope('Loss'):\n",
+    "    # Softmax Cross entropy (cost function)\n",
+    "    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n",
+    "\n",
+    "with tf.name_scope('SGD'):\n",
+    "    # Gradient Descent\n",
+    "    optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n",
+    "    # Op to calculate every variable gradient\n",
+    "    grads = tf.gradients(loss, tf.trainable_variables())\n",
+    "    grads = list(zip(grads, tf.trainable_variables()))\n",
+    "    # Op to update all variables according to their gradient\n",
+    "    apply_grads = optimizer.apply_gradients(grads_and_vars=grads)\n",
+    "\n",
+    "with tf.name_scope('Accuracy'):\n",
+    "    # Accuracy\n",
+    "    acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n",
+    "    acc = tf.reduce_mean(tf.cast(acc, tf.float32))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# Initialize the variables (i.e. assign their default value)\n",
+    "init = tf.global_variables_initializer()\n",
+    "\n",
+    "# Create a summary to monitor cost tensor\n",
+    "tf.summary.scalar(\"loss\", loss)\n",
+    "# Create a summary to monitor accuracy tensor\n",
+    "tf.summary.scalar(\"accuracy\", acc)\n",
+    "# Create summaries to visualize weights\n",
+    "for var in tf.trainable_variables():\n",
+    "    tf.summary.histogram(var.name, var)\n",
+    "# Summarize all gradients\n",
+    "for grad, var in grads:\n",
+    "    tf.summary.histogram(var.name + '/gradient', grad)\n",
+    "# Merge all summaries into a single op\n",
+    "merged_summary_op = tf.summary.merge_all()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch: 0001 cost= 59.570364205\n",
+      "Epoch: 0002 cost= 13.585465186\n",
+      "Epoch: 0003 cost= 8.379069252\n",
+      "Epoch: 0004 cost= 6.005265894\n",
+      "Epoch: 0005 cost= 4.498054792\n",
+      "Epoch: 0006 cost= 3.503682522\n",
+      "Epoch: 0007 cost= 2.822272765\n",
+      "Epoch: 0008 cost= 2.306899852\n",
+      "Epoch: 0009 cost= 1.912765543\n",
+      "Epoch: 0010 cost= 1.597006118\n",
+      "Epoch: 0011 cost= 1.330172869\n",
+      "Epoch: 0012 cost= 1.142490618\n",
+      "Epoch: 0013 cost= 0.939443911\n",
+      "Epoch: 0014 cost= 0.820920588\n",
+      "Epoch: 0015 cost= 0.702543302\n",
+      "Epoch: 0016 cost= 0.604815631\n",
+      "Epoch: 0017 cost= 0.505682561\n",
+      "Epoch: 0018 cost= 0.439700446\n",
+      "Epoch: 0019 cost= 0.378268929\n",
+      "Epoch: 0020 cost= 0.299557848\n",
+      "Epoch: 0021 cost= 0.269859066\n",
+      "Epoch: 0022 cost= 0.230899029\n",
+      "Epoch: 0023 cost= 0.183722090\n",
+      "Epoch: 0024 cost= 0.164173368\n",
+      "Epoch: 0025 cost= 0.142141250\n",
+      "Optimization Finished!\n",
+      "Accuracy: 0.9336\n",
+      "Run the command line:\n",
+      "--> tensorboard --logdir=/tmp/tensorflow_logs \n",
+      "Then open http://0.0.0.0:6006/ into your web browser\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Start training\n",
+    "with tf.Session() as sess:\n",
+    "\n",
+    "    # Run the initializer\n",
+    "    sess.run(init)\n",
+    "\n",
+    "    # op to write logs to Tensorboard\n",
+    "    summary_writer = tf.summary.FileWriter(logs_path,\n",
+    "                                            graph=tf.get_default_graph())\n",
+    "\n",
+    "    # Training cycle\n",
+    "    for epoch in range(training_epochs):\n",
+    "        avg_cost = 0.\n",
+    "        total_batch = int(mnist.train.num_examples/batch_size)\n",
+    "        # Loop over all batches\n",
+    "        for i in range(total_batch):\n",
+    "            batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n",
+    "            # Run optimization op (backprop), cost op (to get loss value)\n",
+    "            # and summary nodes\n",
+    "            _, c, summary = sess.run([apply_grads, loss, merged_summary_op],\n",
+    "                                     feed_dict={x: batch_xs, y: batch_ys})\n",
+    "            # Write logs at every iteration\n",
+    "            summary_writer.add_summary(summary, epoch * total_batch + i)\n",
+    "            # Compute average loss\n",
+    "            avg_cost += c / total_batch\n",
+    "        # Display logs per epoch step\n",
+    "        if (epoch+1) % display_step == 0:\n",
+    "            print(\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(avg_cost))\n",
+    "\n",
+    "    print(\"Optimization Finished!\")\n",
+    "\n",
+    "    # Test model\n",
+    "    # Calculate accuracy\n",
+    "    print(\"Accuracy:\", acc.eval({x: mnist.test.images, y: mnist.test.labels}))\n",
+    "\n",
+    "    print(\"Run the command line:\\n\" \\\n",
+    "          \"--> tensorboard --logdir=/tmp/tensorflow_logs \" \\\n",
+    "          \"\\nThen open http://0.0.0.0:6006/ into your web browser\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Loss and Accuracy Visualization\n",
+    "<img src=\"../../resources/img/tensorboard_advanced_1.png\"/>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Computation Graph Visualization\n",
+    "<img src=\"../../resources/img/tensorboard_advanced_2.png\"/>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Weights and Gradients Visualization\n",
+    "<img src=\"../../resources/img/tensorboard_advanced_3.png\"/>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Activations Visualization\n",
+    "<img src=\"../../resources/img/tensorboard_advanced_4.png\"/>"
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python [default]",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}

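A note on tensorboard_advanced.ipynb: the histogram loop passes `var.name` as the summary name, and TF1 variable names carry an output index such as `'W1:0'`, which is not a legal summary name, so TensorFlow rewrites it and logs a warning. A hedged sketch of the usual workaround, with a hypothetical variable just to make it self-contained:

```python
import tensorflow as tf

# Hypothetical variable for illustration only.
W1 = tf.Variable(tf.random_normal([784, 256]), name='W1')

for var in tf.trainable_variables():
    clean_name = var.name.replace(':', '_')   # e.g. 'W1:0' -> 'W1_0'
    tf.summary.histogram(clean_name, var)
```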
+ 217 - 0
tensorflow_v1/notebooks/4_Utils/tensorboard_basic.ipynb

@@ -0,0 +1,217 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
+   "source": [
+    "# Tensorboard Basics\n",
+    "\n",
+    "Graph and Loss visualization using Tensorboard. This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/).\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "from __future__ import print_function\n",
+    "\n",
+    "import tensorflow as tf\n",
+    "\n",
+    "# Import MINST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Parameters\n",
+    "learning_rate = 0.01\n",
+    "training_epochs = 25\n",
+    "batch_size = 100\n",
+    "display_epoch = 1\n",
+    "logs_path = '/tmp/tensorflow_logs/example/'\n",
+    "\n",
+    "# tf Graph Input\n",
+    "# mnist data image of shape 28*28=784\n",
+    "x = tf.placeholder(tf.float32, [None, 784], name='InputData')\n",
+    "# 0-9 digits recognition => 10 classes\n",
+    "y = tf.placeholder(tf.float32, [None, 10], name='LabelData')\n",
+    "\n",
+    "# Set model weights\n",
+    "W = tf.Variable(tf.zeros([784, 10]), name='Weights')\n",
+    "b = tf.Variable(tf.zeros([10]), name='Bias')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Construct model and encapsulating all ops into scopes, making\n",
+    "# Tensorboard's Graph visualization more convenient\n",
+    "with tf.name_scope('Model'):\n",
+    "    # Model\n",
+    "    pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax\n",
+    "with tf.name_scope('Loss'):\n",
+    "    # Minimize error using cross entropy\n",
+    "    cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))\n",
+    "with tf.name_scope('SGD'):\n",
+    "    # Gradient Descent\n",
+    "    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n",
+    "with tf.name_scope('Accuracy'):\n",
+    "    # Accuracy\n",
+    "    acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n",
+    "    acc = tf.reduce_mean(tf.cast(acc, tf.float32))\n",
+    "\n",
+    "# Initializing the variables\n",
+    "init = tf.global_variables_initializer()\n",
+    "\n",
+    "# Create a summary to monitor cost tensor\n",
+    "tf.summary.scalar(\"loss\", cost)\n",
+    "# Create a summary to monitor accuracy tensor\n",
+    "tf.summary.scalar(\"accuracy\", acc)\n",
+    "# Merge all summaries into a single op\n",
+    "merged_summary_op = tf.summary.merge_all()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch: 0001 cost= 1.182138961\n",
+      "Epoch: 0002 cost= 0.664609327\n",
+      "Epoch: 0003 cost= 0.552565036\n",
+      "Epoch: 0004 cost= 0.498541865\n",
+      "Epoch: 0005 cost= 0.465393374\n",
+      "Epoch: 0006 cost= 0.442491178\n",
+      "Epoch: 0007 cost= 0.425474149\n",
+      "Epoch: 0008 cost= 0.412152022\n",
+      "Epoch: 0009 cost= 0.401320939\n",
+      "Epoch: 0010 cost= 0.392305281\n",
+      "Epoch: 0011 cost= 0.384732356\n",
+      "Epoch: 0012 cost= 0.378109478\n",
+      "Epoch: 0013 cost= 0.372409370\n",
+      "Epoch: 0014 cost= 0.367236996\n",
+      "Epoch: 0015 cost= 0.362727492\n",
+      "Epoch: 0016 cost= 0.358627345\n",
+      "Epoch: 0017 cost= 0.354815522\n",
+      "Epoch: 0018 cost= 0.351413656\n",
+      "Epoch: 0019 cost= 0.348314827\n",
+      "Epoch: 0020 cost= 0.345429416\n",
+      "Epoch: 0021 cost= 0.342749324\n",
+      "Epoch: 0022 cost= 0.340224642\n",
+      "Epoch: 0023 cost= 0.337897302\n",
+      "Epoch: 0024 cost= 0.335720168\n",
+      "Epoch: 0025 cost= 0.333691911\n",
+      "Optimization Finished!\n",
+      "Accuracy: 0.9143\n",
+      "Run the command line:\n",
+      "--> tensorboard --logdir=/tmp/tensorflow_logs \n",
+      "Then open http://0.0.0.0:6006/ into your web browser\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Start Training\n",
+    "with tf.Session() as sess:\n",
+    "    sess.run(init)\n",
+    "\n",
+    "    # op to write logs to Tensorboard\n",
+    "    summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())\n",
+    "\n",
+    "    # Training cycle\n",
+    "    for epoch in range(training_epochs):\n",
+    "        avg_cost = 0.\n",
+    "        total_batch = int(mnist.train.num_examples / batch_size)\n",
+    "        # Loop over all batches\n",
+    "        for i in range(total_batch):\n",
+    "            batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n",
+    "            # Run optimization op (backprop), cost op (to get loss value)\n",
+    "            # and summary nodes\n",
+    "            _, c, summary = sess.run([optimizer, cost, merged_summary_op],\n",
+    "                                     feed_dict={x: batch_xs, y: batch_ys})\n",
+    "            # Write logs at every iteration\n",
+    "            summary_writer.add_summary(summary, epoch * total_batch + i)\n",
+    "            # Compute average loss\n",
+    "            avg_cost += c / total_batch\n",
+    "        # Display logs per epoch step\n",
+    "        if (epoch+1) % display_epoch == 0:\n",
+    "            print(\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(avg_cost))\n",
+    "\n",
+    "    print(\"Optimization Finished!\")\n",
+    "\n",
+    "    # Test model\n",
+    "    # Calculate accuracy\n",
+    "    print(\"Accuracy:\", acc.eval({x: mnist.test.images, y: mnist.test.labels}))\n",
+    "\n",
+    "    print(\"Run the command line:\\n\" \\\n",
+    "          \"--> tensorboard --logdir=/tmp/tensorflow_logs \" \\\n",
+    "          \"\\nThen open http://0.0.0.0:6006/ into your web browser\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Loss and Accuracy Visualization\n",
+    "\n",
+    "<img src=\"../../resources/img/tensorboard_basic_1.png\"/>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Graph Visualization\n",
+    "\n",
+    "<img src=\"../../resources/img/tensorboard_basic_2.png\"/>"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python [default]",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}

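One caveat worth noting for tensorboard_basic.ipynb: its loss takes `tf.log` of an explicit softmax, which can underflow to `log(0)` for very confident predictions. A hedged sketch of the numerically safer formulation on raw logits; the placeholder and variable names mirror the notebook, but the snippet is an illustration rather than part of the commit.

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784], name='InputData')
y = tf.placeholder(tf.float32, [None, 10], name='LabelData')
W = tf.Variable(tf.zeros([784, 10]), name='Weights')
b = tf.Variable(tf.zeros([10]), name='Bias')

with tf.name_scope('Model'):
    logits = tf.matmul(x, W) + b     # raw scores, no softmax yet
    pred = tf.nn.softmax(logits)     # only needed for predictions
with tf.name_scope('Loss'):
    # Softmax and cross-entropy fused in one numerically stable op.
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
```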
+ 291 - 0
tensorflow_v1/notebooks/5_DataManagement/build_an_image_dataset.ipynb

@@ -0,0 +1,291 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": false
+   },
+   "source": [
+    "# Build an Image Dataset in TensorFlow.\n",
+    "\n",
+    "For this example, you need to make your own set of images (JPEG).\n",
+    "We will show 2 different ways to build that dataset:\n",
+    "\n",
+    "- From a root folder, that will have a sub-folder containing images for each class\n",
+    "\n",
+    "```\n",
+    "    ROOT_FOLDER\n",
+    "       |-------- SUBFOLDER (CLASS 0)\n",
+    "       |             |\n",
+    "       |             | ----- image1.jpg\n",
+    "       |             | ----- image2.jpg\n",
+    "       |             | ----- etc...\n",
+    "       |             \n",
+    "       |-------- SUBFOLDER (CLASS 1)\n",
+    "       |             |\n",
+    "       |             | ----- image1.jpg\n",
+    "       |             | ----- image2.jpg\n",
+    "       |             | ----- etc...\n",
+    "\n",
+    "```\n",
+    "\n",
+    "- From a plain text file, that will list all images with their class ID:\n",
+    "\n",
+    "```\n",
+    "    /path/to/image/1.jpg CLASS_ID\n",
+    "    /path/to/image/2.jpg CLASS_ID\n",
+    "    /path/to/image/3.jpg CLASS_ID\n",
+    "    /path/to/image/4.jpg CLASS_ID\n",
+    "    etc...\n",
+    "```\n",
+    "\n",
+    "Below, there are some parameters that you need to change (Marked 'CHANGE HERE'), \n",
+    "such as the dataset path.\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "from __future__ import print_function\n",
+    "\n",
+    "import tensorflow as tf\n",
+    "import os\n",
+    "\n",
+    "# Dataset Parameters - CHANGE HERE\n",
+    "MODE = 'folder' # or 'file', if you choose a plain text file (see above).\n",
+    "DATASET_PATH = '/path/to/dataset/' # the dataset file or root folder path.\n",
+    "\n",
+    "# Image Parameters\n",
+    "N_CLASSES = 2 # CHANGE HERE, total number of classes\n",
+    "IMG_HEIGHT = 64 # CHANGE HERE, the image height to be resized to\n",
+    "IMG_WIDTH = 64 # CHANGE HERE, the image width to be resized to\n",
+    "CHANNELS = 3 # The 3 color channels, change to 1 if grayscale"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Reading the dataset\n",
+    "# 2 modes: 'file' or 'folder'\n",
+    "def read_images(dataset_path, mode, batch_size):\n",
+    "    imagepaths, labels = list(), list()\n",
+    "    if mode == 'file':\n",
+    "        # Read dataset file\n",
+    "        with open(dataset_path) as f:\n",
+    "            data = f.read().splitlines()\n",
+    "        for d in data:\n",
+    "            imagepaths.append(d.split(' ')[0])\n",
+    "            labels.append(int(d.split(' ')[1]))\n",
+    "    elif mode == 'folder':\n",
+    "        # An ID will be affected to each sub-folders by alphabetical order\n",
+    "        label = 0\n",
+    "        # List the directory\n",
+    "        try:  # Python 2\n",
+    "            classes = sorted(os.walk(dataset_path).next()[1])\n",
+    "        except Exception:  # Python 3\n",
+    "            classes = sorted(os.walk(dataset_path).__next__()[1])\n",
+    "        # List each sub-directory (the classes)\n",
+    "        for c in classes:\n",
+    "            c_dir = os.path.join(dataset_path, c)\n",
+    "            try:  # Python 2\n",
+    "                walk = os.walk(c_dir).next()\n",
+    "            except Exception:  # Python 3\n",
+    "                walk = os.walk(c_dir).__next__()\n",
+    "            # Add each image to the training set\n",
+    "            for sample in walk[2]:\n",
+    "                # Only keeps jpeg images\n",
+    "                if sample.endswith('.jpg') or sample.endswith('.jpeg'):\n",
+    "                    imagepaths.append(os.path.join(c_dir, sample))\n",
+    "                    labels.append(label)\n",
+    "            label += 1\n",
+    "    else:\n",
+    "        raise Exception(\"Unknown mode.\")\n",
+    "\n",
+    "    # Convert to Tensor\n",
+    "    imagepaths = tf.convert_to_tensor(imagepaths, dtype=tf.string)\n",
+    "    labels = tf.convert_to_tensor(labels, dtype=tf.int32)\n",
+    "    # Build a TF Queue, shuffle data\n",
+    "    image, label = tf.train.slice_input_producer([imagepaths, labels],\n",
+    "                                                 shuffle=True)\n",
+    "\n",
+    "    # Read images from disk\n",
+    "    image = tf.read_file(image)\n",
+    "    image = tf.image.decode_jpeg(image, channels=CHANNELS)\n",
+    "\n",
+    "    # Resize images to a common size\n",
+    "    image = tf.image.resize_images(image, [IMG_HEIGHT, IMG_WIDTH])\n",
+    "\n",
+    "    # Normalize\n",
+    "    image = image * 1.0/127.5 - 1.0\n",
+    "\n",
+    "    # Create batches\n",
+    "    X, Y = tf.train.batch([image, label], batch_size=batch_size,\n",
+    "                          capacity=batch_size * 8,\n",
+    "                          num_threads=4)\n",
+    "\n",
+    "    return X, Y"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# -----------------------------------------------\n",
+    "# THIS IS A CLASSIC CNN (see examples, section 3)\n",
+    "# -----------------------------------------------\n",
+    "# Note that a few elements have changed (usage of queues).\n",
+    "\n",
+    "# Parameters\n",
+    "learning_rate = 0.001\n",
+    "num_steps = 10000\n",
+    "batch_size = 128\n",
+    "display_step = 100\n",
+    "\n",
+    "# Network Parameters\n",
+    "dropout = 0.75 # Dropout, probability to keep units\n",
+    "\n",
+    "# Build the data input\n",
+    "X, Y = read_images(DATASET_PATH, MODE, batch_size)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Create model\n",
+    "def conv_net(x, n_classes, dropout, reuse, is_training):\n",
+    "    # Define a scope for reusing the variables\n",
+    "    with tf.variable_scope('ConvNet', reuse=reuse):\n",
+    "\n",
+    "        # Convolution Layer with 32 filters and a kernel size of 5\n",
+    "        conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)\n",
+    "        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2\n",
+    "        conv1 = tf.layers.max_pooling2d(conv1, 2, 2)\n",
+    "\n",
+    "        # Convolution Layer with 32 filters and a kernel size of 5\n",
+    "        conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)\n",
+    "        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2\n",
+    "        conv2 = tf.layers.max_pooling2d(conv2, 2, 2)\n",
+    "\n",
+    "        # Flatten the data to a 1-D vector for the fully connected layer\n",
+    "        fc1 = tf.contrib.layers.flatten(conv2)\n",
+    "\n",
+    "        # Fully connected layer (in contrib folder for now)\n",
+    "        fc1 = tf.layers.dense(fc1, 1024)\n",
+    "        # Apply Dropout (if is_training is False, dropout is not applied)\n",
+    "        fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)\n",
+    "\n",
+    "        # Output layer, class prediction\n",
+    "        out = tf.layers.dense(fc1, n_classes)\n",
+    "        # Because 'softmax_cross_entropy_with_logits' already apply softmax,\n",
+    "        # we only apply softmax to testing network\n",
+    "        out = tf.nn.softmax(out) if not is_training else out\n",
+    "\n",
+    "    return out"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Because Dropout have different behavior at training and prediction time, we\n",
+    "# need to create 2 distinct computation graphs that share the same weights.\n",
+    "\n",
+    "# Create a graph for training\n",
+    "logits_train = conv_net(X, N_CLASSES, dropout, reuse=False, is_training=True)\n",
+    "# Create another graph for testing that reuse the same weights\n",
+    "logits_test = conv_net(X, N_CLASSES, dropout, reuse=True, is_training=False)\n",
+    "\n",
+    "# Define loss and optimizer (with train logits, for dropout to take effect)\n",
+    "loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n",
+    "    logits=logits_train, labels=Y))\n",
+    "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n",
+    "train_op = optimizer.minimize(loss_op)\n",
+    "\n",
+    "# Evaluate model (with test logits, for dropout to be disabled)\n",
+    "correct_pred = tf.equal(tf.argmax(logits_test, 1), tf.cast(Y, tf.int64))\n",
+    "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
+    "\n",
+    "# Initialize the variables (i.e. assign their default value)\n",
+    "init = tf.global_variables_initializer()\n",
+    "\n",
+    "# Saver object\n",
+    "saver = tf.train.Saver()\n",
+    "\n",
+    "# Start training\n",
+    "with tf.Session() as sess:\n",
+    "\n",
+    "    # Run the initializer\n",
+    "    sess.run(init)\n",
+    "\n",
+    "    # Start the data queue\n",
+    "    tf.train.start_queue_runners()\n",
+    "\n",
+    "    # Training cycle\n",
+    "    for step in range(1, num_steps+1):\n",
+    "\n",
+    "        if step % display_step == 0:\n",
+    "            # Run optimization and calculate batch loss and accuracy\n",
+    "            _, loss, acc = sess.run([train_op, loss_op, accuracy])\n",
+    "            print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\n",
+    "                  \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \\\n",
+    "                  \"{:.3f}\".format(acc))\n",
+    "        else:\n",
+    "            # Only run the optimization op (backprop)\n",
+    "            sess.run(train_op)\n",
+    "\n",
+    "    print(\"Optimization Finished!\")\n",
+    "\n",
+    "    # Save your model\n",
+    "    saver.save(sess, 'my_tf_model')"
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python [default]",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

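A related sketch for build_an_image_dataset.ipynb: the notebook starts the input threads with `tf.train.start_queue_runners()` but never stops them, so the queue threads keep running until the session dies. A minimal, self-contained sketch of the usual Coordinator pattern (the toy arrays below are assumptions; in the notebook the queue is fed by `read_images` instead):

```python
from __future__ import print_function
import numpy as np
import tensorflow as tf

# Toy data standing in for the image paths/labels of the notebook.
features = np.arange(20, dtype=np.float32).reshape(10, 2)
labels = np.arange(10, dtype=np.int32)

# Same queue-based input ops as the notebook (slice_input_producer + batch).
feat, lab = tf.train.slice_input_producer(
    [tf.constant(features), tf.constant(labels)], shuffle=True)
batch_feat, batch_lab = tf.train.batch([feat, lab], batch_size=4)

with tf.Session() as sess:
    # A Coordinator lets the queue threads be stopped and joined cleanly.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for _ in range(3):
            f, l = sess.run([batch_feat, batch_lab])
            print(f.shape, l)
    finally:
        coord.request_stop()
        coord.join(threads)
```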
Diff file is too large
+ 418 - 0
tensorflow_v1/notebooks/5_DataManagement/image_transformation.ipynb


+ 577 - 0
tensorflow_v1/notebooks/5_DataManagement/load_data.ipynb

@@ -0,0 +1,577 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Load and parse data with TensorFlow\n",
+    "\n",
+    "A TensorFlow example to build input pipelines for loading data efficiently.\n",
+    "\n",
+    "\n",
+    "- Numpy Arrays\n",
+    "- Images\n",
+    "- CSV file\n",
+    "- Custom data from a Generator\n",
+    "\n",
+    "For more information about creating and loading TensorFlow's `TFRecords` data format, see: [tfrecords.ipynb](tfrecords.ipynb)\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from __future__ import absolute_import, division, print_function\n",
+    "\n",
+    "import numpy as np\n",
+    "import random\n",
+    "import requests\n",
+    "import string\n",
+    "import tarfile\n",
+    "import tensorflow as tf"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Load Numpy Arrays\n",
+    "\n",
+    "Build a data pipeline over numpy arrays."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Create a toy dataset (even and odd numbers, with respective labels of 0 and 1).\n",
+    "evens = np.arange(0, 100, step=2, dtype=np.int32)\n",
+    "evens_label = np.zeros(50, dtype=np.int32)\n",
+    "odds = np.arange(1, 100, step=2, dtype=np.int32)\n",
+    "odds_label = np.ones(50, dtype=np.int32)\n",
+    "# Concatenate arrays\n",
+    "features = np.concatenate([evens, odds])\n",
+    "labels = np.concatenate([evens_label, odds_label])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "with tf.Graph().as_default():\n",
+    "    # Create TF session.\n",
+    "    sess = tf.Session()\n",
+    "    \n",
+    "    # Slice the numpy arrays (each row becoming a record).\n",
+    "    data = tf.data.Dataset.from_tensor_slices((features, labels))\n",
+    "    # Refill data indefinitely.  \n",
+    "    data = data.repeat()\n",
+    "    # Shuffle data.\n",
+    "    data = data.shuffle(buffer_size=100)\n",
+    "    # Batch data (aggregate records together).\n",
+    "    data = data.batch(batch_size=4)\n",
+    "    # Prefetch batch (pre-load batch for faster consumption).\n",
+    "    data = data.prefetch(buffer_size=1)\n",
+    "    \n",
+    "    # Create an iterator over the dataset.\n",
+    "    iterator = data.make_initializable_iterator()\n",
+    "    # Initialize the iterator.\n",
+    "    sess.run(iterator.initializer)\n",
+    "\n",
+    "    # Get next data batch.\n",
+    "    d = iterator.get_next()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[82 58 80 23] [0 0 0 1]\n",
+      "[16 91 74 96] [0 1 0 0]\n",
+      "[ 4 17 32 34] [0 1 0 0]\n",
+      "[16  8 77 21] [0 0 1 1]\n",
+      "[20 99 48 18] [0 1 0 0]\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Display data.\n",
+    "for i in range(5):\n",
+    "    x, y = sess.run(d)\n",
+    "    print(x, y)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Load CSV files\n",
+    "\n",
+    "Build a data pipeline from features stored in a CSV file. For this example, Titanic dataset will be used as a toy dataset stored in CSV format."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Titanic Dataset\n",
+    "\n",
+    "\n",
+    "\n",
+    "survived|pclass|name|sex|age|sibsp|parch|ticket|fare\n",
+    "--------|------|----|---|---|-----|-----|------|----\n",
+    "1|1|\"Allen, Miss. Elisabeth Walton\"|female|29|0|0|24160|211.3375\n",
+    "1|1|\"Allison, Master. Hudson Trevor\"|male|0.9167|1|2|113781|151.5500\n",
+    "0|1|\"Allison, Miss. Helen Loraine\"|female|2|1|2|113781|151.5500\n",
+    "0|1|\"Allison, Mr. Hudson Joshua Creighton\"|male|30|1|2|113781|151.5500\n",
+    "...|...|...|...|...|...|...|...|..."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Download Titanic dataset (in csv format).\n",
+    "d = requests.get(\"https://raw.githubusercontent.com/tflearn/tflearn.github.io/master/resources/titanic_dataset.csv\")\n",
+    "with open(\"titanic_dataset.csv\", \"wb\") as f:\n",
+    "    f.write(d.content)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Load Titanic dataset.\n",
+    "# Original features: survived,pclass,name,sex,age,sibsp,parch,ticket,fare\n",
+    "# Select specific columns: survived,pclass,name,sex,age,fare\n",
+    "column_to_use = [0, 1, 2, 3, 4, 8]\n",
+    "record_defaults = [tf.int32, tf.int32, tf.string, tf.string, tf.float32, tf.float32]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "with tf.Graph().as_default():\n",
+    "    # Create TF session.\n",
+    "    sess = tf.Session()\n",
+    "    \n",
+    "    # Load the whole dataset file, and slice each line.\n",
+    "    data = tf.data.experimental.CsvDataset(\"titanic_dataset.csv\", record_defaults, header=True, select_cols=column_to_use)\n",
+    "    # Refill data indefinitely.  \n",
+    "    data = data.repeat()\n",
+    "    # Shuffle data.\n",
+    "    data = data.shuffle(buffer_size=1000)\n",
+    "    # Batch data (aggregate records together).\n",
+    "    data = data.batch(batch_size=2)\n",
+    "    # Prefetch batch (pre-load batch for faster consumption).\n",
+    "    data = data.prefetch(buffer_size=1)\n",
+    "    \n",
+    "    # Create an iterator over the dataset.\n",
+    "    iterator = data.make_initializable_iterator()\n",
+    "    # Initialize the iterator.\n",
+    "    sess.run(iterator.initializer)\n",
+    "\n",
+    "    # Get next data batch.\n",
+    "    d = iterator.get_next()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[1 0]\n",
+      "[3 1]\n",
+      "['Lam, Mr. Ali' 'Widener, Mr. Harry Elkins']\n",
+      "['male' 'male']\n",
+      "[ 0. 27.]\n",
+      "[ 56.4958 211.5   ]\n",
+      "\n",
+      "[0 1]\n",
+      "[1 1]\n",
+      "['Baumann, Mr. John D' 'Daly, Mr. Peter Denis ']\n",
+      "['male' 'male']\n",
+      "[ 0. 51.]\n",
+      "[25.925 26.55 ]\n",
+      "\n",
+      "[0 1]\n",
+      "[3 1]\n",
+      "['Assam, Mr. Ali' 'Newell, Miss. Madeleine']\n",
+      "['male' 'female']\n",
+      "[23. 31.]\n",
+      "[  7.05  113.275]\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Display data.\n",
+    "for i in range(3):\n",
+    "    survived, pclass, name, sex, age, fare = sess.run(d)\n",
+    "    print(survived)\n",
+    "    print(pclass)\n",
+    "    print(name)\n",
+    "    print(sex)\n",
+    "    print(age)\n",
+    "    print(fare)\n",
+    "    print(\"\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Load Images\n",
+    "\n",
+    "Build a data pipeline by loading images from disk. For this example, Oxford Flowers dataset will be used."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Download Oxford 17 flowers dataset.\n",
+    "d = requests.get(\"http://www.robots.ox.ac.uk/~vgg/data/flowers/17/17flowers.tgz\")\n",
+    "with open(\"17flowers.tgz\", \"wb\") as f:\n",
+    "    f.write(d.content)\n",
+    "# Extract archive.\n",
+    "with tarfile.open(\"17flowers.tgz\") as t:\n",
+    "    t.extractall()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Create a file to list all images path and their corresponding label.\n",
+    "with open('jpg/dataset.csv', 'w') as f:\n",
+    "    c = 0\n",
+    "    for i in range(1360):\n",
+    "        f.write(\"jpg/image_%04i.jpg,%i\\n\" % (i+1, c))\n",
+    "        if (i+1) % 80 == 0:\n",
+    "            c += 1"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "with tf.Graph().as_default():\n",
+    "    \n",
+    "    # Load Images.\n",
+    "    with open(\"jpg/dataset.csv\") as f:\n",
+    "        dataset_file = f.read().splitlines()\n",
+    "    \n",
+    "    # Create TF session.\n",
+    "    sess = tf.Session()\n",
+    "\n",
+    "    # Load the whole dataset file, and slice each line.\n",
+    "    data = tf.data.Dataset.from_tensor_slices(dataset_file)\n",
+    "    # Refill data indefinitely.\n",
+    "    data = data.repeat()\n",
+    "    # Shuffle data.\n",
+    "    data = data.shuffle(buffer_size=1000)\n",
+    "\n",
+    "    # Load and pre-process images.\n",
+    "    def load_image(path):\n",
+    "        # Read image from path.\n",
+    "        image = tf.io.read_file(path)\n",
+    "        # Decode the jpeg image to array [0, 255].\n",
+    "        image = tf.image.decode_jpeg(image)\n",
+    "        # Resize images to a common size of 256x256.\n",
+    "        image = tf.image.resize(image, [256, 256])\n",
+    "        # Rescale values to [-1, 1].\n",
+    "        image = 1. - image / 127.5\n",
+    "        return image\n",
+    "    # Decode each line from the dataset file.\n",
+    "    def parse_records(line):\n",
+    "        # File is in csv format: \"image_path,label_id\".\n",
+    "        # TensorFlow requires a default value, but it will never be used.\n",
+    "        image_path, image_label = tf.io.decode_csv(line, [\"\", 0])\n",
+    "        # Apply the function to load images.\n",
+    "        image = load_image(image_path)\n",
+    "        return image, image_label\n",
+    "    # Use 'map' to apply the above functions in parallel.\n",
+    "    data = data.map(parse_records, num_parallel_calls=4)\n",
+    "\n",
+    "    # Batch data (aggregate images-array together).\n",
+    "    data = data.batch(batch_size=2)\n",
+    "    # Prefetch batch (pre-load batch for faster consumption).\n",
+    "    data = data.prefetch(buffer_size=1)\n",
+    "    \n",
+    "    # Create an iterator over the dataset.\n",
+    "    iterator = data.make_initializable_iterator()\n",
+    "    # Initialize the iterator.\n",
+    "    sess.run(iterator.initializer)\n",
+    "\n",
+    "    # Get next data batch.\n",
+    "    d = iterator.get_next()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[[[[ 0.1294117   0.05098033  0.46666664]\n",
+      "   [ 0.1368872   0.05098033  0.48909312]\n",
+      "   [ 0.0931372   0.0068627   0.46029407]\n",
+      "   ...\n",
+      "   [ 0.23480386  0.0522058   0.6102941 ]\n",
+      "   [ 0.12696075 -0.05416667  0.38063723]\n",
+      "   [-0.10024512 -0.28848052  0.10367644]]\n",
+      "\n",
+      "  [[ 0.04120708 -0.06118262  0.36256123]\n",
+      "   [ 0.08009624 -0.02229345  0.41640145]\n",
+      "   [ 0.06797445 -0.04132879  0.41923058]\n",
+      "   ...\n",
+      "   [ 0.2495715   0.06697345  0.6251221 ]\n",
+      "   [ 0.12058818 -0.06094813  0.37577546]\n",
+      "   [-0.05184889 -0.24009418  0.16777915]]\n",
+      "\n",
+      "  [[-0.09234071 -0.22738981  0.20484066]\n",
+      "   [-0.03100491 -0.17312062  0.2811274 ]\n",
+      "   [ 0.01051998 -0.13237214  0.3376838 ]\n",
+      "   ...\n",
+      "   [ 0.27787983  0.07494056  0.64203525]\n",
+      "   [ 0.11533964 -0.09005249  0.3869906 ]\n",
+      "   [-0.02704227 -0.23958337  0.19454747]]\n",
+      "\n",
+      "  ...\n",
+      "\n",
+      "  [[ 0.07913595 -0.13069856  0.29874384]\n",
+      "   [ 0.10140878 -0.09445572  0.35912937]\n",
+      "   [ 0.08869672 -0.08415675  0.41446364]\n",
+      "   ...\n",
+      "   [ 0.25821072  0.22463232  0.69197303]\n",
+      "   [ 0.31636214  0.25750512  0.79362744]\n",
+      "   [ 0.09552741  0.01709598  0.57395875]]\n",
+      "\n",
+      "  [[ 0.09019601 -0.12156868  0.3098039 ]\n",
+      "   [ 0.17446858 -0.02271283  0.43218917]\n",
+      "   [ 0.06583172 -0.10818791  0.39230233]\n",
+      "   ...\n",
+      "   [ 0.27021956  0.23664117  0.70269513]\n",
+      "   [ 0.19560927  0.1385014   0.6740407 ]\n",
+      "   [ 0.04364848 -0.03478289  0.5220798 ]]\n",
+      "\n",
+      "  [[ 0.02830875 -0.18345594  0.24791664]\n",
+      "   [ 0.12937105 -0.06781042  0.38709164]\n",
+      "   [ 0.01120263 -0.162817    0.33767325]\n",
+      "   ...\n",
+      "   [ 0.25989532  0.22631687  0.69237083]\n",
+      "   [ 0.1200884   0.06298059  0.5985198 ]\n",
+      "   [ 0.05961001 -0.01882136  0.53804135]]]\n",
+      "\n",
+      "\n",
+      " [[[ 0.3333333   0.25490195  0.05882347]\n",
+      "   [ 0.3333333   0.25490195  0.05882347]\n",
+      "   [ 0.3340686   0.24705875  0.03039211]\n",
+      "   ...\n",
+      "   [-0.5215688  -0.4599266  -0.14632356]\n",
+      "   [-0.5100491  -0.47083342 -0.03725493]\n",
+      "   [-0.43419123 -0.39497554  0.05992639]]\n",
+      "\n",
+      "  [[ 0.34117645  0.26274508  0.0666666 ]\n",
+      "   [ 0.35646445  0.2630821   0.0744791 ]\n",
+      "   [ 0.3632046   0.2548713   0.04384762]\n",
+      "   ...\n",
+      "   [-0.9210479  -0.84267783 -0.4540485 ]\n",
+      "   [-0.9017464  -0.8390626  -0.3507018 ]\n",
+      "   [-0.83339334 -0.7632048  -0.2534927 ]]\n",
+      "\n",
+      "  [[ 0.3646446   0.2706495   0.06678915]\n",
+      "   [ 0.37248772  0.27837008  0.07445425]\n",
+      "   [ 0.38033658  0.27053267  0.05950326]\n",
+      "   ...\n",
+      "   [-0.94302344 -0.84222686 -0.30278325]\n",
+      "   [-0.91017747 -0.8090074  -0.18615782]\n",
+      "   [-0.83437514 -0.7402575  -0.08192408]]\n",
+      "\n",
+      "  ...\n",
+      "\n",
+      "  [[ 0.64705884  0.654902    0.67058825]\n",
+      "   [ 0.6318321   0.63967526  0.65536153]\n",
+      "   [ 0.63128924  0.6391324   0.65481865]\n",
+      "   ...\n",
+      "   [ 0.6313726   0.57647055  0.51372546]\n",
+      "   [ 0.6078431   0.53725487  0.4823529 ]\n",
+      "   [ 0.6078431   0.53725487  0.4823529 ]]\n",
+      "\n",
+      "  [[ 0.654902    0.654902    0.6704657 ]\n",
+      "   [ 0.654902    0.654902    0.6704657 ]\n",
+      "   [ 0.64778835  0.64778835  0.6492474 ]\n",
+      "   ...\n",
+      "   [ 0.6392157   0.5843137   0.5215686 ]\n",
+      "   [ 0.6393325   0.56874424  0.5138422 ]\n",
+      "   [ 0.63106614  0.5604779   0.50557595]]\n",
+      "\n",
+      "  [[ 0.654902    0.64705884  0.6313726 ]\n",
+      "   [ 0.6548728   0.64702964  0.63134336]\n",
+      "   [ 0.64705884  0.63210785  0.6377451 ]\n",
+      "   ...\n",
+      "   [ 0.63244915  0.5775472   0.5148021 ]\n",
+      "   [ 0.6698529   0.5992647   0.5443627 ]\n",
+      "   [ 0.6545358   0.5839475   0.5290455 ]]]] [5 9]\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Display data.\n",
+    "for i in range(1):\n",
+    "    batch_x, batch_y = sess.run(d)\n",
+    "    print(batch_x, batch_y)"
+   ]
+  },
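+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The images above were rescaled to the [-1, 1] range, so they cannot be displayed as-is. The cell below is a minimal, optional sketch (assuming `matplotlib` is installed) that maps the first image of the batch back to [0, 1] and plots it."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional sanity check (assumes matplotlib is available).\n",
+    "import matplotlib.pyplot as plt\n",
+    "%matplotlib inline\n",
+    "\n",
+    "# Invert the rescaling: pixel values go from [-1, 1] back to [0, 1].\n",
+    "img = ((1. - batch_x[0]) / 2.).clip(0., 1.)\n",
+    "plt.imshow(img)\n",
+    "plt.title(\"Label id: %i\" % batch_y[0])\n",
+    "plt.show()"
+   ]
+  },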
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Load data from a Generator\n",
+    "\n",
+    "Build a data pipeline from a custom generator. For this example, a toy generator yielding random string, vector and it is used."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Create a dummy generator.\n",
+    "def generate_features():\n",
+    "    # Function to generate a random string.\n",
+    "    def random_string(length):\n",
+    "        return ''.join(random.choice(string.ascii_letters) for m in xrange(length))\n",
+    "    # Return a random string, a random vector, and a random int.\n",
+    "    yield random_string(4), np.random.uniform(size=4), random.randint(0, 10)"
+   ]
+  },
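+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a quick, optional sanity check, the generator can be called directly (outside of TensorFlow) to look at one sample record."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Peek at a single record produced by the generator (illustrative only).\n",
+    "print(next(generate_features()))"
+   ]
+  },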
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "with tf.Graph().as_default():\n",
+    "\n",
+    "    # Create TF session.\n",
+    "    sess = tf.Session()\n",
+    "\n",
+    "    # Create TF dataset from the generator.\n",
+    "    data = tf.data.Dataset.from_generator(generate_features, output_types=(tf.string, tf.float32, tf.int32))\n",
+    "    # Refill data indefinitely.\n",
+    "    data = data.repeat()\n",
+    "    # Shuffle data.\n",
+    "    data = data.shuffle(buffer_size=100)\n",
+    "    # Batch data (aggregate records together).\n",
+    "    data = data.batch(batch_size=4)\n",
+    "    # Prefetch batch (pre-load batch for faster consumption).\n",
+    "    data = data.prefetch(buffer_size=1)\n",
+    "\n",
+    "    # Create an iterator over the dataset.\n",
+    "    iterator = data.make_initializable_iterator()\n",
+    "    # Initialize the iterator.\n",
+    "    sess.run(iterator.initializer)\n",
+    "\n",
+    "    # Get next data batch.\n",
+    "    d = iterator.get_next()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "['AvCS' 'kAaI' 'QwGX' 'IWOI'] [[0.6096093  0.32192084 0.26622605 0.70250475]\n",
+      " [0.72534287 0.7637426  0.19977213 0.74121326]\n",
+      " [0.6930984  0.09409562 0.4063325  0.5002103 ]\n",
+      " [0.05160935 0.59411395 0.276416   0.98264974]] [1 3 5 6]\n",
+      "['EXjS' 'brvx' 'kwNz' 'eFOb'] [[0.34355283 0.26881003 0.70575935 0.7503411 ]\n",
+      " [0.9584373  0.27466875 0.27802315 0.9563204 ]\n",
+      " [0.19129485 0.07014314 0.0932724  0.20726128]\n",
+      " [0.28744072 0.81736153 0.37507302 0.8984588 ]] [1 9 7 0]\n",
+      "['vpSa' 'UuqW' 'xaTO' 'milw'] [[0.2942028  0.8228986  0.5793326  0.16651365]\n",
+      " [0.28259405 0.599063   0.2922477  0.95071274]\n",
+      " [0.23645316 0.00258607 0.06772221 0.7291911 ]\n",
+      " [0.12861755 0.31435087 0.576638   0.7333119 ]] [3 5 8 4]\n",
+      "['UBBb' 'MUXs' 'nLJB' 'OBGl'] [[0.2677402  0.17931737 0.02607645 0.85898155]\n",
+      " [0.58647937 0.727203   0.13329858 0.8898983 ]\n",
+      " [0.13872191 0.47390288 0.7061665  0.08478573]\n",
+      " [0.3786016  0.22002582 0.91989636 0.45837343]] [ 5  8  0 10]\n",
+      "['kiiz' 'bQYG' 'WpUU' 'AuIY'] [[0.74781317 0.13744462 0.9236441  0.63558507]\n",
+      " [0.23649399 0.35303807 0.0951511  0.03541444]\n",
+      " [0.33599988 0.6906629  0.97166294 0.55850506]\n",
+      " [0.90997607 0.5545979  0.43635726 0.9127501 ]] [8 1 4 4]\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Display data.\n",
+    "for i in range(5):\n",
+    "    batch_str, batch_vector, batch_int = sess.run(d)\n",
+    "    print(batch_str, batch_vector, batch_int)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "tf1",
+   "language": "python",
+   "name": "tf1"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.15+"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

+ 222 - 0
tensorflow_v1/notebooks/5_DataManagement/tensorflow_dataset_api.ipynb

@@ -0,0 +1,222 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# TensorFlow Dataset API\n",
+    "\n",
+    "In this example, we will show how to load numpy array data into the new \n",
+    "TensorFlow 'Dataset' API. The Dataset API implements an optimized data pipeline\n",
+    "with queues, that make data processing and training faster (especially on GPU).\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "import tensorflow as tf\n",
+    "\n",
+    "# Import MNIST data (Numpy format)\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Parameters\n",
+    "learning_rate = 0.01\n",
+    "num_steps = 1000\n",
+    "batch_size = 128\n",
+    "display_step = 100\n",
+    "\n",
+    "# Network Parameters\n",
+    "n_input = 784 # MNIST data input (img shape: 28*28)\n",
+    "n_classes = 10 # MNIST total classes (0-9 digits)\n",
+    "dropout = 0.75 # Dropout, probability to keep units\n",
+    "\n",
+    "sess = tf.Session()\n",
+    "\n",
+    "# Create a dataset tensor from the images and the labels\n",
+    "dataset = tf.data.Dataset.from_tensor_slices(\n",
+    "    (mnist.train.images, mnist.train.labels))\n",
+    "# Automatically refill the data queue when empty\n",
+    "dataset = dataset.repeat()\n",
+    "# Create batches of data\n",
+    "dataset = dataset.batch(batch_size)\n",
+    "# Prefetch data for faster consumption\n",
+    "dataset = dataset.prefetch(batch_size)\n",
+    "\n",
+    "# Create an iterator over the dataset\n",
+    "iterator = dataset.make_initializable_iterator()\n",
+    "# Initialize the iterator\n",
+    "sess.run(iterator.initializer)\n",
+    "\n",
+    "# Neural Net Input (images, labels)\n",
+    "X, Y = iterator.get_next()"
+   ]
+  },
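+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Optionally, the pipeline can be checked by pulling a single batch from the iterator before building the model. Note that this consumes one batch from the (repeated) dataset."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional: fetch one batch to verify shapes (consumes a batch from the pipeline).\n",
+    "x_batch, y_batch = sess.run([X, Y])\n",
+    "print(\"images: \" + str(x_batch.shape) + \", labels: \" + str(y_batch.shape))"
+   ]
+  },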
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# -----------------------------------------------\n",
+    "# THIS IS A CLASSIC CNN (see examples, section 3)\n",
+    "# -----------------------------------------------\n",
+    "# Note that a few elements have changed (usage of sess run).\n",
+    "\n",
+    "# Create model\n",
+    "def conv_net(x, n_classes, dropout, reuse, is_training):\n",
+    "    # Define a scope for reusing the variables\n",
+    "    with tf.variable_scope('ConvNet', reuse=reuse):\n",
+    "        # MNIST data input is a 1-D vector of 784 features (28*28 pixels)\n",
+    "        # Reshape to match picture format [Height x Width x Channel]\n",
+    "        # Tensor input become 4-D: [Batch Size, Height, Width, Channel]\n",
+    "        x = tf.reshape(x, shape=[-1, 28, 28, 1])\n",
+    "\n",
+    "        # Convolution Layer with 32 filters and a kernel size of 5\n",
+    "        conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)\n",
+    "        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2\n",
+    "        conv1 = tf.layers.max_pooling2d(conv1, 2, 2)\n",
+    "\n",
+    "        # Convolution Layer with 32 filters and a kernel size of 5\n",
+    "        conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)\n",
+    "        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2\n",
+    "        conv2 = tf.layers.max_pooling2d(conv2, 2, 2)\n",
+    "\n",
+    "        # Flatten the data to a 1-D vector for the fully connected layer\n",
+    "        fc1 = tf.contrib.layers.flatten(conv2)\n",
+    "\n",
+    "        # Fully connected layer (in contrib folder for now)\n",
+    "        fc1 = tf.layers.dense(fc1, 1024)\n",
+    "        # Apply Dropout (if is_training is False, dropout is not applied)\n",
+    "        fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)\n",
+    "\n",
+    "        # Output layer, class prediction\n",
+    "        out = tf.layers.dense(fc1, n_classes)\n",
+    "        # Because 'softmax_cross_entropy_with_logits' already apply softmax,\n",
+    "        # we only apply softmax to testing network\n",
+    "        out = tf.nn.softmax(out) if not is_training else out\n",
+    "\n",
+    "    return out\n",
+    "\n",
+    "\n",
+    "# Because Dropout have different behavior at training and prediction time, we\n",
+    "# need to create 2 distinct computation graphs that share the same weights.\n",
+    "\n",
+    "# Create a graph for training\n",
+    "logits_train = conv_net(X, n_classes, dropout, reuse=False, is_training=True)\n",
+    "# Create another graph for testing that reuse the same weights, but has\n",
+    "# different behavior for 'dropout' (not applied).\n",
+    "logits_test = conv_net(X, n_classes, dropout, reuse=True, is_training=False)\n",
+    "\n",
+    "# Define loss and optimizer (with train logits, for dropout to take effect)\n",
+    "loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n",
+    "    logits=logits_train, labels=Y))\n",
+    "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n",
+    "train_op = optimizer.minimize(loss_op)\n",
+    "\n",
+    "# Evaluate model (with test logits, for dropout to be disabled)\n",
+    "correct_pred = tf.equal(tf.argmax(logits_test, 1), tf.argmax(Y, 1))\n",
+    "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Step 1, Minibatch Loss= 7.9429, Training Accuracy= 0.070\n",
+      "Step 100, Minibatch Loss= 0.3491, Training Accuracy= 0.922\n",
+      "Step 200, Minibatch Loss= 0.2343, Training Accuracy= 0.922\n",
+      "Step 300, Minibatch Loss= 0.1838, Training Accuracy= 0.969\n",
+      "Step 400, Minibatch Loss= 0.1715, Training Accuracy= 0.953\n",
+      "Step 500, Minibatch Loss= 0.2730, Training Accuracy= 0.938\n",
+      "Step 600, Minibatch Loss= 0.3427, Training Accuracy= 0.953\n",
+      "Step 700, Minibatch Loss= 0.2261, Training Accuracy= 0.961\n",
+      "Step 800, Minibatch Loss= 0.1487, Training Accuracy= 0.953\n",
+      "Step 900, Minibatch Loss= 0.1438, Training Accuracy= 0.945\n",
+      "Step 1000, Minibatch Loss= 0.1786, Training Accuracy= 0.961\n",
+      "Optimization Finished!\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Initialize the variables (i.e. assign their default value)\n",
+    "init = tf.global_variables_initializer()\n",
+    "\n",
+    "# Run the initializer\n",
+    "sess.run(init)\n",
+    "\n",
+    "# Training cycle\n",
+    "for step in range(1, num_steps + 1):\n",
+    "    \n",
+    "    # Run optimization\n",
+    "    sess.run(train_op)\n",
+    "        \n",
+    "    if step % display_step == 0 or step == 1:\n",
+    "        # Calculate batch loss and accuracy\n",
+    "        # (note that this consume a new batch of data)\n",
+    "        loss, acc = sess.run([loss_op, accuracy])\n",
+    "        print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\n",
+    "              \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \\\n",
+    "              \"{:.3f}\".format(acc))\n",
+    "\n",
+    "print(\"Optimization Finished!\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.14"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

+ 261 - 0
tensorflow_v1/notebooks/5_DataManagement/tfrecords.ipynb

@@ -0,0 +1,261 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Create and Load TFRecords\n",
+    "\n",
+    "A simple TensorFlow example to parse a dataset into TFRecord format, and then read that dataset.\n",
+    "\n",
+    "In this example, the Titanic Dataset (in CSV format) will be used as a toy dataset, for parsing all the dataset features into TFRecord format, and then building an input pipeline that can be used for training models.\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Titanic Dataset\n",
+    "\n",
+    "The titanic dataset is a popular dataset for ML that provides a list of all passengers onboard the Titanic, along with various features such as their age, sex, class (1st, 2nd, 3rd)... And if the passenger survived the disaster or not.\n",
+    "\n",
+    "It can be used to see that even though some luck was involved in surviving the sinking, some groups of people were more likely to survive than others, such as women, children, and the upper-class...\n",
+    "\n",
+    "#### Overview\n",
+    "survived|pclass|name|sex|age|sibsp|parch|ticket|fare\n",
+    "--------|------|----|---|---|-----|-----|------|----\n",
+    "1|1|\"Allen, Miss. Elisabeth Walton\"|female|29|0|0|24160|211.3375\n",
+    "1|1|\"Allison, Master. Hudson Trevor\"|male|0.9167|1|2|113781|151.5500\n",
+    "0|1|\"Allison, Miss. Helen Loraine\"|female|2|1|2|113781|151.5500\n",
+    "0|1|\"Allison, Mr. Hudson Joshua Creighton\"|male|30|1|2|113781|151.5500\n",
+    "...|...|...|...|...|...|...|...|...\n",
+    "\n",
+    "\n",
+    "#### Variable Descriptions\n",
+    "```\n",
+    "survived        Survived\n",
+    "                (0 = No; 1 = Yes)\n",
+    "pclass          Passenger Class\n",
+    "                (1 = 1st; 2 = 2nd; 3 = 3rd)\n",
+    "name            Name\n",
+    "sex             Sex\n",
+    "age             Age\n",
+    "sibsp           Number of Siblings/Spouses Aboard\n",
+    "parch           Number of Parents/Children Aboard\n",
+    "ticket          Ticket Number\n",
+    "fare            Passenger Fare\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from __future__ import absolute_import, division, print_function\n",
+    "\n",
+    "import csv\n",
+    "import requests\n",
+    "import tensorflow as tf"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Download Titanic dataset (in csv format).\n",
+    "d = requests.get(\"https://raw.githubusercontent.com/tflearn/tflearn.github.io/master/resources/titanic_dataset.csv\")\n",
+    "with open(\"titanic_dataset.csv\", \"wb\") as f:\n",
+    "    f.write(d.content)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Create TFRecords"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Generate Integer Features.\n",
+    "def build_int64_feature(data):\n",
+    "    return tf.train.Feature(int64_list=tf.train.Int64List(value=[data]))\n",
+    "\n",
+    "# Generate Float Features.\n",
+    "def build_float_feature(data):\n",
+    "    return tf.train.Feature(float_list=tf.train.FloatList(value=[data]))\n",
+    "\n",
+    "# Generate String Features.\n",
+    "def build_string_feature(data):\n",
+    "    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[data]))\n",
+    "\n",
+    "# Generate a TF `Example`, parsing all features of the dataset.\n",
+    "def convert_to_tfexample(survived, pclass, name, sex, age, sibsp, parch, ticket, fare):\n",
+    "    return tf.train.Example(\n",
+    "        features=tf.train.Features(\n",
+    "            feature={\n",
+    "                'survived': build_int64_feature(survived),\n",
+    "                'pclass': build_int64_feature(pclass),\n",
+    "                'name': build_string_feature(name),\n",
+    "                'sex': build_string_feature(sex),\n",
+    "                'age': build_float_feature(age),\n",
+    "                'sibsp': build_int64_feature(sibsp),\n",
+    "                'parch': build_int64_feature(parch),\n",
+    "                'ticket': build_string_feature(ticket),\n",
+    "                'fare': build_float_feature(fare),\n",
+    "            })\n",
+    "    )"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Open dataset file.\n",
+    "with open(\"titanic_dataset.csv\") as f:\n",
+    "    # Output TFRecord file.\n",
+    "    with tf.io.TFRecordWriter(\"titanic_dataset.tfrecord\") as w:\n",
+    "        # Generate a TF Example for all row in our dataset.\n",
+    "        # CSV reader will read and parse all rows.\n",
+    "        reader = csv.reader(f, skipinitialspace=True)\n",
+    "        for i, record in enumerate(reader):\n",
+    "            # Skip header.\n",
+    "            if i == 0:\n",
+    "                continue\n",
+    "            survived, pclass, name, sex, age, sibsp, parch, ticket, fare = record\n",
+    "            # Parse each csv row to TF Example using the above functions.\n",
+    "            example = convert_to_tfexample(int(survived), int(pclass), name, sex, float(age), int(sibsp), int(parch), ticket, float(fare))\n",
+    "            # Serialize each TF Example to string, and write to TFRecord file.\n",
+    "            w.write(example.SerializeToString())"
+   ]
+  },
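+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Optionally, the file can be verified by reading back the first serialized record. This short sketch uses `tf.python_io.tf_record_iterator`, a TensorFlow 1.x utility (deprecated in favor of `tf.data`)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional: read back the first serialized record and parse it as a TF Example.\n",
+    "record_iterator = tf.python_io.tf_record_iterator(\"titanic_dataset.tfrecord\")\n",
+    "first_record = next(record_iterator)\n",
+    "print(tf.train.Example.FromString(first_record))"
+   ]
+  },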
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Load TFRecords"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Build features template, with types.\n",
+    "features = {\n",
+    "    'survived': tf.io.FixedLenFeature([], tf.int64),\n",
+    "    'pclass': tf.io.FixedLenFeature([], tf.int64),\n",
+    "    'name': tf.io.FixedLenFeature([], tf.string),\n",
+    "    'sex': tf.io.FixedLenFeature([], tf.string),\n",
+    "    'age': tf.io.FixedLenFeature([], tf.float32),\n",
+    "    'sibsp': tf.io.FixedLenFeature([], tf.int64),\n",
+    "    'parch': tf.io.FixedLenFeature([], tf.int64),\n",
+    "    'ticket': tf.io.FixedLenFeature([], tf.string),\n",
+    "    'fare': tf.io.FixedLenFeature([], tf.float32),\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Create TensorFlow session.\n",
+    "sess = tf.Session()\n",
+    "\n",
+    "# Load TFRecord data.\n",
+    "filenames = [\"titanic_dataset.tfrecord\"]\n",
+    "data = tf.data.TFRecordDataset(filenames)\n",
+    "\n",
+    "# Parse features, using the above template.\n",
+    "def parse_record(record):\n",
+    "    return tf.io.parse_single_example(record, features=features)\n",
+    "# Apply the parsing to each record from the dataset.\n",
+    "data = data.map(parse_record)\n",
+    "\n",
+    "# Refill data indefinitely.\n",
+    "data = data.repeat()\n",
+    "# Shuffle data.\n",
+    "data = data.shuffle(buffer_size=1000)\n",
+    "# Batch data (aggregate records together).\n",
+    "data = data.batch(batch_size=4)\n",
+    "# Prefetch batch (pre-load batch for faster consumption).\n",
+    "data = data.prefetch(buffer_size=1)\n",
+    "\n",
+    "# Create an iterator over the dataset.\n",
+    "iterator = data.make_initializable_iterator()\n",
+    "# Initialize the iterator.\n",
+    "sess.run(iterator.initializer)\n",
+    "\n",
+    "# Get next data batch.\n",
+    "x = iterator.get_next()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{'fare': array([ 35.5   ,  73.5   , 133.65  ,  19.2583], dtype=float32), 'name': array(['Sloper, Mr. William Thompson', 'Davies, Mr. Charles Henry',\n",
+      "       'Frauenthal, Dr. Henry William', 'Baclini, Miss. Marie Catherine'],\n",
+      "      dtype=object), 'age': array([28., 18., 50.,  5.], dtype=float32), 'parch': array([0, 0, 0, 1]), 'pclass': array([1, 2, 1, 3]), 'sex': array(['male', 'male', 'male', 'female'], dtype=object), 'survived': array([1, 0, 1, 1]), 'sibsp': array([0, 0, 2, 2]), 'ticket': array(['113788', 'S.O.C. 14879', 'PC 17611', '2666'], dtype=object)}\n",
+      "\n",
+      "{'fare': array([ 18.75 , 106.425,  78.85 ,  90.   ], dtype=float32), 'name': array(['Richards, Mrs. Sidney (Emily Hocking)', 'LeRoy, Miss. Bertha',\n",
+      "       'Cavendish, Mrs. Tyrell William (Julia Florence Siegel)',\n",
+      "       'Hoyt, Mrs. Frederick Maxfield (Jane Anne Forby)'], dtype=object), 'age': array([24., 30., 76., 35.], dtype=float32), 'parch': array([3, 0, 0, 0]), 'pclass': array([2, 1, 1, 1]), 'sex': array(['female', 'female', 'female', 'female'], dtype=object), 'survived': array([1, 1, 1, 1]), 'sibsp': array([2, 0, 1, 1]), 'ticket': array(['29106', 'PC 17761', '19877', '19943'], dtype=object)}\n",
+      "\n",
+      "{'fare': array([19.9667, 15.5   , 15.0458, 66.6   ], dtype=float32), 'name': array(['Hagland, Mr. Konrad Mathias Reiersen', 'Lennon, Miss. Mary',\n",
+      "       'Richard, Mr. Emile', 'Pears, Mr. Thomas Clinton'], dtype=object), 'age': array([ 0.,  0., 23., 29.], dtype=float32), 'parch': array([0, 0, 0, 0]), 'pclass': array([3, 3, 2, 1]), 'sex': array(['male', 'female', 'male', 'male'], dtype=object), 'survived': array([0, 0, 0, 0]), 'sibsp': array([1, 1, 0, 1]), 'ticket': array(['65304', '370371', 'SC/PARIS 2133', '113776'], dtype=object)}\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Dequeue data and display.\n",
+    "for i in range(3):\n",
+    "    print(sess.run(x))\n",
+    "    print(\"\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "tf1",
+   "language": "python",
+   "name": "tf1"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.15+"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

+ 179 - 0
tensorflow_v1/notebooks/6_MultiGPU/multigpu_basics.ipynb

@@ -0,0 +1,179 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
+   "source": [
+    "# Multi-GPU Basics\n",
+    "\n",
+    "Basic Multi-GPU computation example using TensorFlow library.\n",
+    "\n",
+    "This tutorial requires your machine to have 2 GPUs\n",
+    "\"/cpu:0\": The CPU of your machine.\n",
+    "\"/gpu:0\": The first GPU of your machine\n",
+    "\"/gpu:1\": The second GPU of your machine\n",
+    "For this example, we are using 2 GTX-980\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import tensorflow as tf\n",
+    "import datetime"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "#Processing Units logs\n",
+    "log_device_placement = True\n",
+    "\n",
+    "#num of multiplications to perform\n",
+    "n = 10"
+   ]
+  },
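+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Since this example assumes two GPUs, the devices visible to TensorFlow can be listed first. This is a small optional check using `device_lib` from TensorFlow's Python client."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# List the devices visible to TensorFlow (expects '/gpu:0' and '/gpu:1' here).\n",
+    "from tensorflow.python.client import device_lib\n",
+    "for device in device_lib.list_local_devices():\n",
+    "    print(device.name)"
+   ]
+  },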
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# Example: compute A^n + B^n on 2 GPUs\n",
+    "\n",
+    "# Create random large matrix\n",
+    "A = np.random.rand(1e4, 1e4).astype('float32')\n",
+    "B = np.random.rand(1e4, 1e4).astype('float32')\n",
+    "\n",
+    "# Creates a graph to store results\n",
+    "c1 = []\n",
+    "c2 = []\n",
+    "\n",
+    "# Define matrix power\n",
+    "def matpow(M, n):\n",
+    "    if n < 1: #Abstract cases where n < 1\n",
+    "        return M\n",
+    "    else:\n",
+    "        return tf.matmul(M, matpow(M, n-1))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Single GPU computing\n",
+    "\n",
+    "with tf.device('/gpu:0'):\n",
+    "    a = tf.constant(A)\n",
+    "    b = tf.constant(B)\n",
+    "    #compute A^n and B^n and store results in c1\n",
+    "    c1.append(matpow(a, n))\n",
+    "    c1.append(matpow(b, n))\n",
+    "\n",
+    "with tf.device('/cpu:0'):\n",
+    "  sum = tf.add_n(c1) #Addition of all elements in c1, i.e. A^n + B^n\n",
+    "\n",
+    "t1_1 = datetime.datetime.now()\n",
+    "with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:\n",
+    "    # Runs the op.\n",
+    "    sess.run(sum)\n",
+    "t2_1 = datetime.datetime.now()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Multi GPU computing\n",
+    "# GPU:0 computes A^n\n",
+    "with tf.device('/gpu:0'):\n",
+    "    #compute A^n and store result in c2\n",
+    "    a = tf.constant(A)\n",
+    "    c2.append(matpow(a, n))\n",
+    "\n",
+    "#GPU:1 computes B^n\n",
+    "with tf.device('/gpu:1'):\n",
+    "    #compute B^n and store result in c2\n",
+    "    b = tf.constant(B)\n",
+    "    c2.append(matpow(b, n))\n",
+    "\n",
+    "with tf.device('/cpu:0'):\n",
+    "  sum = tf.add_n(c2) #Addition of all elements in c2, i.e. A^n + B^n\n",
+    "\n",
+    "t1_2 = datetime.datetime.now()\n",
+    "with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:\n",
+    "    # Runs the op.\n",
+    "    sess.run(sum)\n",
+    "t2_2 = datetime.datetime.now()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Single GPU computation time: 0:00:11.833497\n",
+      "Multi GPU computation time: 0:00:07.085913\n"
+     ]
+    }
+   ],
+   "source": [
+    "print \"Single GPU computation time: \" + str(t2_1-t1_1)\n",
+    "print \"Multi GPU computation time: \" + str(t2_2-t1_2)"
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python [default]",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

+ 328 - 0
tensorflow_v1/notebooks/6_MultiGPU/multigpu_cnn.ipynb

@@ -0,0 +1,328 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Multi-GPU Training Example\n",
+    "\n",
+    "Train a convolutional neural network on multiple GPU with TensorFlow.\n",
+    "\n",
+    "This example is using TensorFlow layers, see 'convolutional_network_raw' example\n",
+    "for a raw TensorFlow implementation with variables.\n",
+    "\n",
+    "- Author: Aymeric Damien\n",
+    "- Project: https://github.com/aymericdamien/TensorFlow-Examples/"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Training with multiple GPU cards\n",
+    "\n",
+    "In this example, we are using data parallelism to split the training accross multiple GPUs. Each GPU has a full replica of the neural network model, and the weights (i.e. variables) are updated synchronously by waiting that each GPU process its batch of data.\n",
+    "\n",
+    "First, each GPU process a distinct batch of data and compute the corresponding gradients, then, all gradients are accumulated in the CPU and averaged. The model weights are finally updated with the gradients averaged, and the new model weights are sent back to each GPU, to repeat the training process.\n",
+    "\n",
+    "<img src=\"https://www.tensorflow.org/images/Parallelism.png\" alt=\"Parallelism\" style=\"width: 400px;\"/>\n",
+    "\n",
+    "## MNIST Dataset Overview\n",
+    "\n",
+    "This example is using MNIST handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28x28 pixels) with values from 0 to 1. For simplicity, each image has been flatten and converted to a 1-D numpy array of 784 features (28*28).\n",
+    "\n",
+    "![MNIST Dataset](http://neuralnetworksanddeeplearning.com/images/mnist_100_digits.png)\n",
+    "\n",
+    "More info: http://yann.lecun.com/exdb/mnist/"
+   ]
+  },
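+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a toy illustration of the averaging step described above (independent of TensorFlow, with made-up gradient values), the sketch below averages the gradients of a single variable computed by two hypothetical towers. The actual TensorFlow implementation is the `average_gradients` function defined further down."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Toy sketch of synchronous gradient averaging across 2 towers (illustrative only).\n",
+    "import numpy as np\n",
+    "\n",
+    "# Hypothetical gradients of a single variable, computed on two different batches.\n",
+    "grad_gpu0 = np.array([0.2, -0.4, 0.1])\n",
+    "grad_gpu1 = np.array([0.4, -0.2, 0.3])\n",
+    "\n",
+    "# The weight update uses the mean of the per-tower gradients.\n",
+    "avg_grad = np.mean([grad_gpu0, grad_gpu1], axis=0)\n",
+    "print(avg_grad)"
+   ]
+  },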
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "from __future__ import print_function\n",
+    "\n",
+    "import numpy as np\n",
+    "import tensorflow as tf\n",
+    "import time\n",
+    "\n",
+    "# Import MNIST data\n",
+    "from tensorflow.examples.tutorials.mnist import input_data\n",
+    "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n",
+    "\n",
+    "# Parameters\n",
+    "num_gpus = 2\n",
+    "num_steps = 200\n",
+    "learning_rate = 0.001\n",
+    "batch_size = 1024\n",
+    "display_step = 10\n",
+    "\n",
+    "# Network Parameters\n",
+    "num_input = 784 # MNIST data input (img shape: 28*28)\n",
+    "num_classes = 10 # MNIST total classes (0-9 digits)\n",
+    "dropout = 0.75 # Dropout, probability to keep units"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Build a convolutional neural network\n",
+    "def conv_net(x, n_classes, dropout, reuse, is_training):\n",
+    "    # Define a scope for reusing the variables\n",
+    "    with tf.variable_scope('ConvNet', reuse=reuse):\n",
+    "        # MNIST data input is a 1-D vector of 784 features (28*28 pixels)\n",
+    "        # Reshape to match picture format [Height x Width x Channel]\n",
+    "        # Tensor input become 4-D: [Batch Size, Height, Width, Channel]\n",
+    "        x = tf.reshape(x, shape=[-1, 28, 28, 1])\n",
+    "\n",
+    "        # Convolution Layer with 64 filters and a kernel size of 5\n",
+    "        x = tf.layers.conv2d(x, 64, 5, activation=tf.nn.relu)\n",
+    "        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2\n",
+    "        x = tf.layers.max_pooling2d(x, 2, 2)\n",
+    "\n",
+    "        # Convolution Layer with 256 filters and a kernel size of 5\n",
+    "        x = tf.layers.conv2d(x, 256, 3, activation=tf.nn.relu)\n",
+    "        # Convolution Layer with 512 filters and a kernel size of 5\n",
+    "        x = tf.layers.conv2d(x, 512, 3, activation=tf.nn.relu)\n",
+    "        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2\n",
+    "        x = tf.layers.max_pooling2d(x, 2, 2)\n",
+    "\n",
+    "        # Flatten the data to a 1-D vector for the fully connected layer\n",
+    "        x = tf.contrib.layers.flatten(x)\n",
+    "\n",
+    "        # Fully connected layer (in contrib folder for now)\n",
+    "        x = tf.layers.dense(x, 2048)\n",
+    "        # Apply Dropout (if is_training is False, dropout is not applied)\n",
+    "        x = tf.layers.dropout(x, rate=dropout, training=is_training)\n",
+    "\n",
+    "        # Fully connected layer (in contrib folder for now)\n",
+    "        x = tf.layers.dense(x, 1024)\n",
+    "        # Apply Dropout (if is_training is False, dropout is not applied)\n",
+    "        x = tf.layers.dropout(x, rate=dropout, training=is_training)\n",
+    "\n",
+    "        # Output layer, class prediction\n",
+    "        out = tf.layers.dense(x, n_classes)\n",
+    "        # Because 'softmax_cross_entropy_with_logits' loss already apply\n",
+    "        # softmax, we only apply softmax to testing network\n",
+    "        out = tf.nn.softmax(out) if not is_training else out\n",
+    "\n",
+    "    return out"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Build the function to average the gradients\n",
+    "def average_gradients(tower_grads):\n",
+    "    average_grads = []\n",
+    "    for grad_and_vars in zip(*tower_grads):\n",
+    "        # Note that each grad_and_vars looks like the following:\n",
+    "        #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n",
+    "        grads = []\n",
+    "        for g, _ in grad_and_vars:\n",
+    "            # Add 0 dimension to the gradients to represent the tower.\n",
+    "            expanded_g = tf.expand_dims(g, 0)\n",
+    "\n",
+    "            # Append on a 'tower' dimension which we will average over below.\n",
+    "            grads.append(expanded_g)\n",
+    "\n",
+    "        # Average over the 'tower' dimension.\n",
+    "        grad = tf.concat(grads, 0)\n",
+    "        grad = tf.reduce_mean(grad, 0)\n",
+    "\n",
+    "        # Keep in mind that the Variables are redundant because they are shared\n",
+    "        # across towers. So .. we will just return the first tower's pointer to\n",
+    "        # the Variable.\n",
+    "        v = grad_and_vars[0][1]\n",
+    "        grad_and_var = (grad, v)\n",
+    "        average_grads.append(grad_and_var)\n",
+    "    return average_grads"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# By default, all variables will be placed on '/gpu:0'\n",
+    "# So we need a custom device function, to assign all variables to '/cpu:0'\n",
+    "# Note: If GPUs are peered, '/gpu:0' can be a faster option\n",
+    "PS_OPS = ['Variable', 'VariableV2', 'AutoReloadVariable']\n",
+    "\n",
+    "def assign_to_device(device, ps_device='/cpu:0'):\n",
+    "    def _assign(op):\n",
+    "        node_def = op if isinstance(op, tf.NodeDef) else op.node_def\n",
+    "        if node_def.op in PS_OPS:\n",
+    "            return \"/\" + ps_device\n",
+    "        else:\n",
+    "            return device\n",
+    "\n",
+    "    return _assign"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": false,
+    "scrolled": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Step 1: Minibatch Loss= 2.4077, Training Accuracy= 0.123, 682 Examples/sec\n",
+      "Step 10: Minibatch Loss= 1.0067, Training Accuracy= 0.765, 6528 Examples/sec\n",
+      "Step 20: Minibatch Loss= 0.2442, Training Accuracy= 0.945, 6803 Examples/sec\n",
+      "Step 30: Minibatch Loss= 0.2013, Training Accuracy= 0.951, 6741 Examples/sec\n",
+      "Step 40: Minibatch Loss= 0.1445, Training Accuracy= 0.962, 6700 Examples/sec\n",
+      "Step 50: Minibatch Loss= 0.0940, Training Accuracy= 0.971, 6746 Examples/sec\n",
+      "Step 60: Minibatch Loss= 0.0792, Training Accuracy= 0.977, 6627 Examples/sec\n",
+      "Step 70: Minibatch Loss= 0.0593, Training Accuracy= 0.979, 6749 Examples/sec\n",
+      "Step 80: Minibatch Loss= 0.0799, Training Accuracy= 0.984, 6368 Examples/sec\n",
+      "Step 90: Minibatch Loss= 0.0614, Training Accuracy= 0.988, 6762 Examples/sec\n",
+      "Step 100: Minibatch Loss= 0.0716, Training Accuracy= 0.983, 6338 Examples/sec\n",
+      "Step 110: Minibatch Loss= 0.0531, Training Accuracy= 0.986, 6504 Examples/sec\n",
+      "Step 120: Minibatch Loss= 0.0425, Training Accuracy= 0.990, 6721 Examples/sec\n",
+      "Step 130: Minibatch Loss= 0.0473, Training Accuracy= 0.986, 6735 Examples/sec\n",
+      "Step 140: Minibatch Loss= 0.0345, Training Accuracy= 0.991, 6636 Examples/sec\n",
+      "Step 150: Minibatch Loss= 0.0419, Training Accuracy= 0.993, 6777 Examples/sec\n",
+      "Step 160: Minibatch Loss= 0.0602, Training Accuracy= 0.984, 6392 Examples/sec\n",
+      "Step 170: Minibatch Loss= 0.0425, Training Accuracy= 0.990, 6855 Examples/sec\n",
+      "Step 180: Minibatch Loss= 0.0107, Training Accuracy= 0.998, 6804 Examples/sec\n",
+      "Step 190: Minibatch Loss= 0.0204, Training Accuracy= 0.995, 6645 Examples/sec\n",
+      "Step 200: Minibatch Loss= 0.0296, Training Accuracy= 0.993, 6747 Examples/sec\n",
+      "Optimization Finished!\n",
+      "Testing Accuracy: 0.990671\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Place all ops on CPU by default\n",
+    "with tf.device('/cpu:0'):\n",
+    "    tower_grads = []\n",
+    "    reuse_vars = False\n",
+    "\n",
+    "    # tf Graph input\n",
+    "    X = tf.placeholder(tf.float32, [None, num_input])\n",
+    "    Y = tf.placeholder(tf.float32, [None, num_classes])\n",
+    "\n",
+    "    # Loop over all GPUs and construct their own computation graph\n",
+    "    for i in range(num_gpus):\n",
+    "        with tf.device(assign_to_device('/gpu:{}'.format(i), ps_device='/cpu:0')):\n",
+    "\n",
+    "            # Split data between GPUs\n",
+    "            _x = X[i * batch_size: (i+1) * batch_size]\n",
+    "            _y = Y[i * batch_size: (i+1) * batch_size]\n",
+    "\n",
+    "            # Because Dropout have different behavior at training and prediction time, we\n",
+    "            # need to create 2 distinct computation graphs that share the same weights.\n",
+    "\n",
+    "            # Create a graph for training\n",
+    "            logits_train = conv_net(_x, num_classes, dropout,\n",
+    "                                    reuse=reuse_vars, is_training=True)\n",
+    "            # Create another graph for testing that reuse the same weights\n",
+    "            logits_test = conv_net(_x, num_classes, dropout,\n",
+    "                                   reuse=True, is_training=False)\n",
+    "\n",
+    "            # Define loss and optimizer (with train logits, for dropout to take effect)\n",
+    "            loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n",
+    "                logits=logits_train, labels=_y))\n",
+    "            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n",
+    "            grads = optimizer.compute_gradients(loss_op)\n",
+    "\n",
+    "            # Only first GPU compute accuracy\n",
+    "            if i == 0:\n",
+    "                # Evaluate model (with test logits, for dropout to be disabled)\n",
+    "                correct_pred = tf.equal(tf.argmax(logits_test, 1), tf.argmax(_y, 1))\n",
+    "                accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
+    "\n",
+    "            reuse_vars = True\n",
+    "            tower_grads.append(grads)\n",
+    "\n",
+    "    tower_grads = average_gradients(tower_grads)\n",
+    "    train_op = optimizer.apply_gradients(tower_grads)\n",
+    "\n",
+    "    # Initializing the variables\n",
+    "    init = tf.global_variables_initializer()\n",
+    "\n",
+    "    # Launch the graph\n",
+    "    with tf.Session() as sess:\n",
+    "        sess.run(init)\n",
+    "        step = 1\n",
+    "        # Keep training until reach max iterations\n",
+    "        for step in range(1, num_steps + 1):\n",
+    "            # Get a batch for each GPU\n",
+    "            batch_x, batch_y = mnist.train.next_batch(batch_size * num_gpus)\n",
+    "            # Run optimization op (backprop)\n",
+    "            ts = time.time()\n",
+    "            sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})\n",
+    "            te = time.time() - ts\n",
+    "            if step % display_step == 0 or step == 1:\n",
+    "                # Calculate batch loss and accuracy\n",
+    "                loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,\n",
+    "                                                                     Y: batch_y})\n",
+    "                print(\"Step \" + str(step) + \": Minibatch Loss= \" + \\\n",
+    "                      \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \\\n",
+    "                      \"{:.3f}\".format(acc) + \", %i Examples/sec\" % int(len(batch_x)/te))\n",
+    "            step += 1\n",
+    "        print(\"Optimization Finished!\")\n",
+    "\n",
+    "        # Calculate accuracy for 1000 mnist test images\n",
+    "        print(\"Testing Accuracy:\", \\\n",
+    "            np.mean([sess.run(accuracy, feed_dict={X: mnist.test.images[i:i+batch_size],\n",
+    "            Y: mnist.test.labels[i:i+batch_size]}) for i in range(0, len(mnist.test.images), batch_size)]))"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}