{ "cells": [ { "cell_type": "markdown", "metadata": { "collapsed": false }, "source": [ "# Build an Image Dataset in TensorFlow.\n", "\n", "For this example, you need to make your own set of images (JPEG).\n", "We will show 2 different ways to build that dataset:\n", "\n", "- From a root folder, that will have a sub-folder containing images for each class\n", "\n", "```\n", " ROOT_FOLDER\n", " |-------- SUBFOLDER (CLASS 0)\n", " | |\n", " | | ----- image1.jpg\n", " | | ----- image2.jpg\n", " | | ----- etc...\n", " | \n", " |-------- SUBFOLDER (CLASS 1)\n", " | |\n", " | | ----- image1.jpg\n", " | | ----- image2.jpg\n", " | | ----- etc...\n", "\n", "```\n", "\n", "- From a plain text file, that will list all images with their class ID:\n", "\n", "```\n", " /path/to/image/1.jpg CLASS_ID\n", " /path/to/image/2.jpg CLASS_ID\n", " /path/to/image/3.jpg CLASS_ID\n", " /path/to/image/4.jpg CLASS_ID\n", " etc...\n", "```\n", "\n", "Below, there are some parameters that you need to change (Marked 'CHANGE HERE'), \n", "such as the dataset path.\n", "\n", "- Author: Aymeric Damien\n", "- Project: https://github.com/aymericdamien/TensorFlow-Examples/" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "from __future__ import print_function\n", "\n", "import tensorflow as tf\n", "import os\n", "\n", "# Dataset Parameters - CHANGE HERE\n", "MODE = 'folder' # or 'file', if you choose a plain text file (see above).\n", "DATASET_PATH = '/path/to/dataset/' # the dataset file or root folder path.\n", "\n", "# Image Parameters\n", "N_CLASSES = 2 # CHANGE HERE, total number of classes\n", "IMG_HEIGHT = 64 # CHANGE HERE, the image height to be resized to\n", "IMG_WIDTH = 64 # CHANGE HERE, the image width to be resized to\n", "CHANNELS = 3 # The 3 color channels, change to 1 if grayscale" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# Reading the dataset\n", "# 2 modes: 'file' or 'folder'\n", "def read_images(dataset_path, mode, batch_size):\n", " imagepaths, labels = list(), list()\n", " if mode == 'file':\n", " # Read dataset file\n", " data = open(dataset_path, 'r').read().splitlines()\n", " for d in data:\n", " imagepaths.append(d.split(' ')[0])\n", " labels.append(int(d.split(' ')[1]))\n", " elif mode == 'folder':\n", " # An ID will be affected to each sub-folders by alphabetical order\n", " label = 0\n", " # List the directory\n", " try: # Python 2\n", " classes = sorted(os.walk(dataset_path).next()[1])\n", " except Exception: # Python 3\n", " classes = sorted(os.walk(dataset_path).__next__()[1])\n", " # List each sub-directory (the classes)\n", " for c in classes:\n", " c_dir = os.path.join(dataset_path, c)\n", " try: # Python 2\n", " walk = os.walk(c_dir).next()\n", " except Exception: # Python 3\n", " walk = os.walk(c_dir).__next__()\n", " # Add each image to the training set\n", " for sample in walk[2]:\n", " # Only keeps jpeg images\n", " if sample.endswith('.jpg') or sample.endswith('.jpeg'):\n", " imagepaths.append(os.path.join(c_dir, sample))\n", " labels.append(label)\n", " label += 1\n", " else:\n", " raise Exception(\"Unknown mode.\")\n", "\n", " # Convert to Tensor\n", " imagepaths = tf.convert_to_tensor(imagepaths, dtype=tf.string)\n", " labels = tf.convert_to_tensor(labels, dtype=tf.int32)\n", " # Build a TF Queue, shuffle data\n", " image, label = tf.train.slice_input_producer([imagepaths, labels],\n", " shuffle=True)\n", "\n", " # Read images from disk\n", " image = tf.read_file(image)\n", " image = tf.image.decode_jpeg(image, channels=CHANNELS)\n", "\n", " # Resize images to a common size\n", " image = tf.image.resize_images(image, [IMG_HEIGHT, IMG_WIDTH])\n", "\n", " # Normalize\n", " image = image * 1.0/127.5 - 1.0\n", "\n", " # Create batches\n", " X, Y = tf.train.batch([image, label], batch_size=batch_size,\n", " capacity=batch_size * 8,\n", " num_threads=4)\n", "\n", " return X, Y" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# -----------------------------------------------\n", "# THIS IS A CLASSIC CNN (see examples, section 3)\n", "# -----------------------------------------------\n", "# Note that a few elements have changed (usage of queues).\n", "\n", "# Parameters\n", "learning_rate = 0.001\n", "num_steps = 10000\n", "batch_size = 128\n", "display_step = 100\n", "\n", "# Network Parameters\n", "dropout = 0.75 # Dropout, probability to keep units\n", "\n", "# Build the data input\n", "X, Y = read_images(DATASET_PATH, MODE, batch_size)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# Create model\n", "def conv_net(x, n_classes, dropout, reuse, is_training):\n", " # Define a scope for reusing the variables\n", " with tf.variable_scope('ConvNet', reuse=reuse):\n", "\n", " # Convolution Layer with 32 filters and a kernel size of 5\n", " conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)\n", " # Max Pooling (down-sampling) with strides of 2 and kernel size of 2\n", " conv1 = tf.layers.max_pooling2d(conv1, 2, 2)\n", "\n", " # Convolution Layer with 32 filters and a kernel size of 5\n", " conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)\n", " # Max Pooling (down-sampling) with strides of 2 and kernel size of 2\n", " conv2 = tf.layers.max_pooling2d(conv2, 2, 2)\n", "\n", " # Flatten the data to a 1-D vector for the fully connected layer\n", " fc1 = tf.contrib.layers.flatten(conv2)\n", "\n", " # Fully connected layer (in contrib folder for now)\n", " fc1 = tf.layers.dense(fc1, 1024)\n", " # Apply Dropout (if is_training is False, dropout is not applied)\n", " fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)\n", "\n", " # Output layer, class prediction\n", " out = tf.layers.dense(fc1, n_classes)\n", " # Because 'softmax_cross_entropy_with_logits' already apply softmax,\n", " # we only apply softmax to testing network\n", " out = tf.nn.softmax(out) if not is_training else out\n", "\n", " return out" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# Because Dropout have different behavior at training and prediction time, we\n", "# need to create 2 distinct computation graphs that share the same weights.\n", "\n", "# Create a graph for training\n", "logits_train = conv_net(X, N_CLASSES, dropout, reuse=False, is_training=True)\n", "# Create another graph for testing that reuse the same weights\n", "logits_test = conv_net(X, N_CLASSES, dropout, reuse=True, is_training=False)\n", "\n", "# Define loss and optimizer (with train logits, for dropout to take effect)\n", "loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n", " logits=logits_train, labels=Y))\n", "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n", "train_op = optimizer.minimize(loss_op)\n", "\n", "# Evaluate model (with test logits, for dropout to be disabled)\n", "correct_pred = tf.equal(tf.argmax(logits_test, 1), tf.cast(Y, tf.int64))\n", "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n", "\n", "# Initialize the variables (i.e. assign their default value)\n", "init = tf.global_variables_initializer()\n", "\n", "# Saver object\n", "saver = tf.train.Saver()\n", "\n", "# Start training\n", "with tf.Session() as sess:\n", "\n", " # Run the initializer\n", " sess.run(init)\n", "\n", " # Start the data queue\n", " tf.train.start_queue_runners()\n", "\n", " # Training cycle\n", " for step in range(1, num_steps+1):\n", "\n", " if step % display_step == 0:\n", " # Run optimization and calculate batch loss and accuracy\n", " _, loss, acc = sess.run([train_op, loss_op, accuracy])\n", " print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\n", " \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \\\n", " \"{:.3f}\".format(acc))\n", " else:\n", " # Only run the optimization op (backprop)\n", " sess.run(train_op)\n", "\n", " print(\"Optimization Finished!\")\n", "\n", " # Save your model\n", " saver.save(sess, 'my_tf_model')" ] } ], "metadata": { "anaconda-cloud": {}, "kernelspec": { "display_name": "Python [default]", "language": "python", "name": "python2" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 2 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", "version": "2.7.12" } }, "nbformat": 4, "nbformat_minor": 2 }