{ "cells": [ { "cell_type": "markdown", "metadata": { "collapsed": false }, "source": [ "# TensorFlow Dataset API\n", "\n", "In this example, we will show how to load numpy array data into the new \n", "TensorFlow 'Dataset' API. The Dataset API implements an optimized data pipeline\n", "with queues, that make data processing and training faster (especially on GPU).\n", "\n", "- Author: Aymeric Damien\n", "- Project: https://github.com/aymericdamien/TensorFlow-Examples/" ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Extracting /tmp/data/train-images-idx3-ubyte.gz\n", "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n", "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n", "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n" ] } ], "source": [ "import tensorflow as tf\n", "\n", "# Import MNIST data (Numpy format)\n", "from tensorflow.examples.tutorials.mnist import input_data\n", "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# Parameters\n", "learning_rate = 0.01\n", "num_steps = 1000\n", "batch_size = 128\n", "display_step = 100\n", "\n", "# Network Parameters\n", "n_input = 784 # MNIST data input (img shape: 28*28)\n", "n_classes = 10 # MNIST total classes (0-9 digits)\n", "dropout = 0.75 # Dropout, probability to keep units\n", "\n", "sess = tf.Session()\n", "\n", "# Create a dataset tensor from the images and the labels\n", "dataset = tf.contrib.data.Dataset.from_tensor_slices(\n", " (mnist.train.images, mnist.train.labels))\n", "# Create batches of data\n", "dataset = dataset.batch(batch_size)\n", "# Create an iterator, to go over the dataset\n", "iterator = dataset.make_initializable_iterator()\n", "# It is better to use 2 placeholders, to avoid to load all data into memory,\n", "# and avoid the 2Gb restriction length of a tensor.\n", "_data = tf.placeholder(tf.float32, [None, n_input])\n", "_labels = tf.placeholder(tf.float32, [None, n_classes])\n", "# Initialize the iterator\n", "sess.run(iterator.initializer, feed_dict={_data: mnist.train.images,\n", " _labels: mnist.train.labels})\n", "\n", "# Neural Net Input\n", "X, Y = iterator.get_next()" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# -----------------------------------------------\n", "# THIS IS A CLASSIC CNN (see examples, section 3)\n", "# -----------------------------------------------\n", "# Note that a few elements have changed (usage of sess run).\n", "\n", "# Create model\n", "def conv_net(x, n_classes, dropout, reuse, is_training):\n", " # Define a scope for reusing the variables\n", " with tf.variable_scope('ConvNet', reuse=reuse):\n", " # MNIST data input is a 1-D vector of 784 features (28*28 pixels)\n", " # Reshape to match picture format [Height x Width x Channel]\n", " # Tensor input become 4-D: [Batch Size, Height, Width, Channel]\n", " x = tf.reshape(x, shape=[-1, 28, 28, 1])\n", "\n", " # Convolution Layer with 32 filters and a kernel size of 5\n", " conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)\n", " # Max Pooling (down-sampling) with strides of 2 and kernel size of 2\n", " conv1 = tf.layers.max_pooling2d(conv1, 2, 2)\n", "\n", " # Convolution Layer with 32 filters and a kernel size of 5\n", " conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)\n", " # Max Pooling (down-sampling) with strides of 2 and kernel size of 2\n", " conv2 = tf.layers.max_pooling2d(conv2, 2, 2)\n", "\n", " # Flatten the data to a 1-D vector for the fully connected layer\n", " fc1 = tf.contrib.layers.flatten(conv2)\n", "\n", " # Fully connected layer (in contrib folder for now)\n", " fc1 = tf.layers.dense(fc1, 1024)\n", " # Apply Dropout (if is_training is False, dropout is not applied)\n", " fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)\n", "\n", " # Output layer, class prediction\n", " out = tf.layers.dense(fc1, n_classes)\n", " # Because 'softmax_cross_entropy_with_logits' already apply softmax,\n", " # we only apply softmax to testing network\n", " out = tf.nn.softmax(out) if not is_training else out\n", "\n", " return out\n", "\n", "\n", "# Because Dropout have different behavior at training and prediction time, we\n", "# need to create 2 distinct computation graphs that share the same weights.\n", "\n", "# Create a graph for training\n", "logits_train = conv_net(X, n_classes, dropout, reuse=False, is_training=True)\n", "# Create another graph for testing that reuse the same weights, but has\n", "# different behavior for 'dropout' (not applied).\n", "logits_test = conv_net(X, n_classes, dropout, reuse=True, is_training=False)\n", "\n", "# Define loss and optimizer (with train logits, for dropout to take effect)\n", "loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n", " logits=logits_train, labels=Y))\n", "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n", "train_op = optimizer.minimize(loss_op)\n", "\n", "# Evaluate model (with test logits, for dropout to be disabled)\n", "correct_pred = tf.equal(tf.argmax(logits_test, 1), tf.argmax(Y, 1))\n", "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "collapsed": false, "scrolled": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Step 1, Minibatch Loss= 7.9429, Training Accuracy= 0.070\n", "Step 100, Minibatch Loss= 0.3491, Training Accuracy= 0.922\n", "Step 200, Minibatch Loss= 0.2343, Training Accuracy= 0.922\n", "Step 300, Minibatch Loss= 0.1838, Training Accuracy= 0.969\n", "Step 400, Minibatch Loss= 0.1715, Training Accuracy= 0.953\n", "Step 500, Minibatch Loss= 0.2730, Training Accuracy= 0.938\n", "Step 600, Minibatch Loss= 0.3427, Training Accuracy= 0.953\n", "Step 700, Minibatch Loss= 0.2261, Training Accuracy= 0.961\n", "Step 800, Minibatch Loss= 0.1487, Training Accuracy= 0.953\n", "Step 900, Minibatch Loss= 0.1438, Training Accuracy= 0.945\n", "Step 1000, Minibatch Loss= 0.1786, Training Accuracy= 0.961\n", "Optimization Finished!\n" ] } ], "source": [ "# Initialize the variables (i.e. assign their default value)\n", "init = tf.global_variables_initializer()\n", "\n", "# Run the initializer\n", "sess.run(init)\n", "\n", "# Training cycle\n", "for step in range(1, num_steps + 1):\n", " \n", " try:\n", " # Run optimization\n", " sess.run(train_op)\n", " except tf.errors.OutOfRangeError:\n", " # Reload the iterator when it reaches the end of the dataset\n", " sess.run(iterator.initializer, \n", " feed_dict={_data: mnist.train.images,\n", " _labels: mnist.train.labels})\n", " sess.run(train_op)\n", " \n", " if step % display_step == 0 or step == 1:\n", " # Calculate batch loss and accuracy\n", " # (note that this consume a new batch of data)\n", " loss, acc = sess.run([loss_op, accuracy])\n", " print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\n", " \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \\\n", " \"{:.3f}\".format(acc))\n", "\n", "print(\"Optimization Finished!\")" ] } ], "metadata": { "kernelspec": { "display_name": "Python [default]", "language": "python", "name": "python2" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 2 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", "version": "2.7.12" } }, "nbformat": 4, "nbformat_minor": 2 }