
added inception model

Martin Wicke 9 years ago
parent
commit
83ee52cc77
41 changed files with 80116 additions and 0 deletions
  1. .gitmodules (+3 -0)
  2. WORKSPACE (+37 -0)
  3. inception/BUILD (+178 -0)
  4. inception/README.md (+701 -0)
  5. inception/data/build_image_data.py (+430 -0)
  6. inception/data/build_imagenet_data.py (+702 -0)
  7. inception/data/download_and_preprocess_flowers.sh (+96 -0)
  8. inception/data/download_and_preprocess_imagenet.sh (+101 -0)
  9. inception/data/download_imagenet.sh (+100 -0)
  10. inception/data/imagenet_2012_validation_synset_labels.txt (+50000 -0)
  11. inception/data/imagenet_lsvrc_2015_synsets.txt (+1000 -0)
  12. inception/data/imagenet_metadata.txt (+21842 -0)
  13. inception/data/preprocess_imagenet_validation_data.py (+82 -0)
  14. inception/data/process_bounding_boxes.py (+252 -0)
  15. inception/dataset.py (+103 -0)
  16. inception/flowers_data.py (+52 -0)
  17. inception/flowers_eval.py (+40 -0)
  18. inception/flowers_train.py (+41 -0)
  19. inception/g3doc/inception_v3_architecture.png (BIN)
  20. inception/image_processing.py (+479 -0)
  21. inception/imagenet_data.py (+59 -0)
  22. inception/imagenet_eval.py (+46 -0)
  23. inception/imagenet_train.py (+41 -0)
  24. inception/inception_eval.py (+171 -0)
  25. inception/inception_model.py (+160 -0)
  26. inception/inception_train.py (+351 -0)
  27. inception/slim/BUILD (+112 -0)
  28. inception/slim/README.md (+650 -0)
  29. inception/slim/inception_model.py (+329 -0)
  30. inception/slim/inception_test.py (+119 -0)
  31. inception/slim/losses.py (+110 -0)
  32. inception/slim/losses_test.py (+89 -0)
  33. inception/slim/ops.py (+418 -0)
  34. inception/slim/ops_test.py (+510 -0)
  35. inception/slim/scopes.py (+144 -0)
  36. inception/slim/scopes_test.py (+118 -0)
  37. inception/slim/slim.py (+24 -0)
  38. inception/slim/variables.py (+224 -0)
  39. inception/slim/variables_test.py (+200 -0)
  40. third_party (+1 -0)
  41. tools (+1 -0)

+ 3 - 0
.gitmodules

@@ -0,0 +1,3 @@
+[submodule "tensorflow"]
+	path = tensorflow
+	url = https://github.com/tensorflow/tensorflow.git

+ 37 - 0
WORKSPACE

@@ -0,0 +1,37 @@
+local_repository(
+  name = "tf",
+  path = __workspace_dir__ + "/tensorflow",
+)
+
+load('//tensorflow/tensorflow:workspace.bzl', 'tf_workspace')
+tf_workspace("tensorflow/")
+# grpc expects //external:protobuf_clib and //external:protobuf_compiler
+# to point to the protobuf's compiler library.
+bind(
+    name = "protobuf_clib",
+    actual = "@tf//google/protobuf:protoc_lib",
+)
+
+bind(
+    name = "protobuf_compiler",
+    actual = "@tf//google/protobuf:protoc_lib",
+)
+
+git_repository(
+    name = "grpc",
+    commit = "73979f4",
+    init_submodules = True,
+    remote = "https://github.com/grpc/grpc.git",
+)
+
+# protobuf expects //external:grpc_cpp_plugin to point to grpc's
+# C++ plugin code generator.
+bind(
+    name = "grpc_cpp_plugin",
+    actual = "@grpc//:grpc_cpp_plugin",
+)
+
+bind(
+    name = "grpc_lib",
+    actual = "@grpc//:grpc++_unsecure",
+)

+ 178 - 0
inception/BUILD

@@ -0,0 +1,178 @@
+# Description:
+# Example TensorFlow models for ImageNet.
+
+package(default_visibility = [":internal"])
+
+licenses(["notice"])  # Apache 2.0
+
+exports_files(["LICENSE"])
+
+package_group(
+    name = "internal",
+    packages = ["//inception/..."],
+)
+
+py_library(
+    name = "dataset",
+    srcs = [
+        "dataset.py",
+    ],
+    deps = [
+        "@tf//tensorflow:tensorflow_py",
+    ],
+)
+
+py_library(
+    name = "imagenet_data",
+    srcs = [
+        "imagenet_data.py",
+    ],
+    deps = [
+        ":dataset",
+    ],
+)
+
+py_library(
+    name = "flowers_data",
+    srcs = [
+        "flowers_data.py",
+    ],
+    deps = [
+        ":dataset",
+    ],
+)
+
+py_library(
+    name = "image_processing",
+    srcs = [
+        "image_processing.py",
+    ],
+)
+
+py_library(
+    name = "inception",
+    srcs = [
+        "inception_model.py",
+    ],
+    deps = [
+        "@tf//tensorflow:tensorflow_py",
+        ":dataset",
+        "//inception/slim",
+    ],
+)
+
+py_binary(
+    name = "imagenet_eval",
+    srcs = [
+        "imagenet_eval.py",
+    ],
+    deps = [
+        ":imagenet_data",
+        ":inception_eval",
+    ],
+)
+
+py_binary(
+    name = "flowers_eval",
+    srcs = [
+        "flowers_eval.py",
+    ],
+    deps = [
+        ":flowers_data",
+        ":inception_eval",
+    ],
+)
+
+py_library(
+    name = "inception_eval",
+    srcs = [
+        "inception_eval.py",
+    ],
+    deps = [
+        "@tf//tensorflow:tensorflow_py",
+        ":image_processing",
+        ":inception",
+    ],
+)
+
+py_binary(
+    name = "imagenet_train",
+    srcs = [
+        "imagenet_train.py",
+    ],
+    deps = [
+        ":imagenet_data",
+        ":inception_train",
+    ],
+)
+
+py_binary(
+    name = "flowers_train",
+    srcs = [
+        "flowers_train.py",
+    ],
+    deps = [
+        ":flowers_data",
+        ":inception_train",
+    ],
+)
+
+py_library(
+    name = "inception_train",
+    srcs = [
+        "inception_train.py",
+    ],
+    deps = [
+        "@tf//tensorflow:tensorflow_py",
+        ":image_processing",
+        ":inception",
+    ],
+)
+
+py_binary(
+    name = "build_image_data",
+    srcs = ["data/build_image_data.py"],
+    deps = [
+        "@tf//tensorflow:tensorflow_py",
+    ],
+)
+
+sh_binary(
+    name = "download_and_preprocess_flowers",
+    srcs = ["data/download_and_preprocess_flowers.sh"],
+    data = [
+        ":build_image_data",
+    ],
+)
+
+sh_binary(
+    name = "download_and_preprocess_imagenet",
+    srcs = ["data/download_and_preprocess_imagenet.sh"],
+    data = [
+        "data/download_imagenet.sh",
+        "data/imagenet_2012_validation_synset_labels.txt",
+        "data/imagenet_lsvrc_2015_synsets.txt",
+        "data/imagenet_metadata.txt",
+        "data/preprocess_imagenet_validation_data.py",
+        "data/process_bounding_boxes.py",
+        ":build_imagenet_data",
+    ],
+)
+
+py_binary(
+    name = "build_imagenet_data",
+    srcs = ["data/build_imagenet_data.py"],
+    deps = [
+        "@tf//tensorflow:tensorflow_py",
+    ],
+)
+
+filegroup(
+    name = "srcs",
+    srcs = glob(
+        [
+            "**/*.py",
+            "BUILD",
+        ],
+    ),
+)

+ 701 - 0
inception/README.md

@@ -0,0 +1,701 @@
+# Inception in TensorFlow
+[TOC]
+
+[ImageNet](http://www.image-net.org/) is a common academic data set in machine
+learning for training an image recognition system. Code in this directory
+demonstrates how to use TensorFlow to train and evaluate
+a type of convolutional neural network (CNN) on this academic data set.
+In particular, we demonstrate how to train the Inception v3 architecture
+as specified in:
+
+_Rethinking the Inception Architecture for Computer Vision_
+
+Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
+Zbigniew Wojna
+
+http://arxiv.org/abs/1512.00567
+
+This network achieves 21.2% top-1 and 5.6% top-5 error for single frame
+evaluation with a computational cost of 5 billion multiply-adds per inference
+and using fewer than 25 million parameters. Below is a visualization
+of the model architecture.
+
+<center>
+![Inception-v3 Architecture](g3doc/inception_v3_architecture.png)
+</center>
+
+## Description of Code
+
+The code base provides three core binaries for:
+
+* Training an Inception v3 network from scratch across multiple GPUs and/or
+multiple machines using the ImageNet 2012 Challenge training data set.
+*  Evaluating an Inception v3 network using the ImageNet 2012 Challenge
+validation data set.
+*  Retraining an Inception v3 network on a novel task and back-propagating the
+errors to fine tune the network weights.
+
+The training procedure employs synchronous stochastic gradient descent across
+multiple GPUs. The user may specify the number of GPUs they wish to harness.
+The synchronous training performs *batch-splitting* by dividing a given batch
+across multiple GPUs.
+
+The training setup is nearly identical to the section [Training a Model
+Using Multiple GPU Cards](https://www.tensorflow.org/tutorials/deep_cnn/index.html#training-a-model-using-multiple-gpu-cards)
+where we have substituted the CIFAR-10 model architecture
+with Inception v3. The primary differences with that setup are:
+
+* Calculate and update the batch-norm statistics during training so that they
+may be substituted in during evaluation.
+* Specify the model architecture using a (still experimental) higher
+level language called TensorFlow-Slim.
+
+For more details about TensorFlow-Slim, please see the
+[Slim README](slim/README.md). Please
+note that this higher-level language is still *experimental* and the API may
+change over time depending on usage and subsequent research.
+
+## Getting Started
+
+**NOTE** Before doing anything, we first need to build TensorFlow from source.
+Please follow the instructions at
+[Installing From Source](https://www.tensorflow.org/versions/r0.7/get_started/os_setup.html#installing-from-sources).
+
+Before you run the training script for the first time, you will need to
+download and convert the ImageNet data to native TFRecord format. The TFRecord
+format consists of a set of sharded files where each entry is a serialized
+`tf.Example` proto. Each `tf.Example` proto contains the ImageNet image (JPEG
+encoded) as well as metadata such as label and bounding box information. See
+[`parse_example_proto`](image_processing.py) for details.
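+
+For a rough sense of what reading one of these records involves, here is a
+minimal sketch (not the exact `parse_example_proto` implementation) that pulls
+the JPEG bytes and the integer label back out of a serialized `tf.Example`:
+
+```python
+import tensorflow as tf
+
+def parse_example(serialized_example):
+  # Declare only the two features this sketch needs; the real proto also
+  # carries bounding boxes, the synset and other metadata.
+  features = tf.parse_single_example(
+      serialized_example,
+      features={
+          'image/encoded': tf.FixedLenFeature([], tf.string),
+          'image/class/label': tf.FixedLenFeature([], tf.int64),
+      })
+  image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
+  label = tf.cast(features['image/class/label'], tf.int32)
+  return image, label
+```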
+
+We provide a single
+[script](data/download_and_preprocess_imagenet.sh)
+for downloading and converting ImageNet data to TFRecord format. Downloading
+and preprocessing the data may take several hours (up to half a day) depending
+on your network and computer speed. Please be patient.
+
+To begin, you will need to sign up for an account with
+[ImageNet](http://image-net.org) to gain access to the data. Look for the
+sign up page, create an account and request an access key to download the data.
+
+After you have `USERNAME` and `PASSWORD`, you are ready to run our script.
+Make sure that your hard disk has at least 500 GB of free space for downloading
+and storing the data. Here we select `DATA_DIR=$HOME/imagenet-data` as such a
+location but feel free to edit accordingly.
+
+When you run the below script, please enter *USERNAME* and *PASSWORD*
+when prompted. This will occur at the very beginning. Once these values are
+entered, you will not need to interact with the script again.
+
+```shell
+# location of where to place the ImageNet data
+DATA_DIR=$HOME/imagenet-data
+
+# build the preprocessing script.
+bazel build -c opt inception/download_and_preprocess_imagenet
+
+# run it
+bazel-bin/inception/download_and_preprocess_imagenet "${DATA_DIR}"
+```
+
+The final line of the script's output should read:
+
+```shell
+2016-02-17 14:30:17.287989: Finished writing all 1281167 images in data set.
+```
+
+When the script finishes, you will find 1024 training files and 128 validation
+files in the `DATA_DIR`. The files will match the patterns
+`train-?????-of-01024` and `validation-?????-of-00128`, respectively.
+
+[Congratulations!](https://www.youtube.com/watch?v=9bZkp7q19f0)
+You are now ready to train or evaluate with the ImageNet data set.
+
+## How to Train from Scratch
+
+**WARNING** Training an Inception v3 network from scratch is a computationally
+intensive task and depending on your compute setup may take several days or
+even weeks.
+
+*Before proceeding* please read the
+[Convolutional Neural Networks](https://www.tensorflow.org/tutorials/deep_cnn/index.html)
+tutorial; in particular, focus on
+[Training a Model Using Multiple GPU Cards](https://www.tensorflow.org/tutorials/deep_cnn/index.html#training-a-model-using-multiple-gpu-cards).
+The model training method is nearly identical to that
+described in the CIFAR-10 multi-GPU model training. Briefly, the model training:
+
+* Places an individual model replica on each GPU and splits the batch
+across the GPUs.
+* Updates model parameters synchronously by waiting for all GPUs to finish
+processing a batch of data.
+
+The training procedure is encapsulated by this diagram of how operations and
+variables are placed on CPU and GPUs respectively.
+
+<div style="width:40%; margin:auto; margin-bottom:10px; margin-top:20px;">
+  <img style="width:100%" src="https://www.tensorflow.org/images/Parallelism.png">
+</div>
+
+Each tower computes the gradients for a portion of the batch and the gradients
+are combined and averaged across the multiple towers in order to provide a
+single update of the Variables stored on the CPU.
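+
+As a rough sketch of how that combination step can be written (this follows the
+pattern from the CIFAR-10 multi-GPU tutorial rather than quoting
+`inception_train.py` verbatim), the per-tower gradients are stacked and averaged
+before a single update is applied:
+
+```python
+import tensorflow as tf
+
+def average_gradients(tower_grads):
+  """tower_grads: list over GPUs of lists of (gradient, variable) pairs."""
+  average_grads = []
+  for grad_and_vars in zip(*tower_grads):
+    # grad_and_vars is ((grad_gpu0, var), (grad_gpu1, var), ...) for one variable.
+    grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
+    grad = tf.reduce_mean(tf.concat(0, grads), 0)
+    average_grads.append((grad, grad_and_vars[0][1]))
+  return average_grads
+```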
+
+A crucial aspect of training a network of this size is *training speed* in terms
+of wall-clock time. The training speed is dictated by many factors -- most
+importantly the batch size and the learning rate schedule. Both of these
+parameters are heavily coupled to the hardware set up.
+
+Generally speaking, the batch size is a difficult parameter to tune as it requires
+balancing memory demands of the model, memory available on the GPU and speed
+of computation. Generally speaking, employing larger batch sizes leads to
+more efficient computation and potentially more efficient training steps.
+
+We have tested several hardware setups for training this model from scratch but
+we emphasize that depending on your hardware setup, you may need to adapt the
+batch size and learning rate schedule.
+
+Please see the comments in `inception_train.py` for a few learning rate
+plans based on selected hardware setups.
+
+To train this model, you simply need to specify the following:
+
+```shell
+# Build the training binary to run on a GPU. If you do not have a GPU,
+# then exclude '--config=cuda'
+bazel build -c opt --config=cuda inception/imagenet_train
+
+# run it
+bazel-bin/inception/imagenet_train --num_gpus=1 --batch_size=32 --train_dir=/tmp/imagenet_train --data_dir=/tmp/imagenet_data
+```
+
+The model reads in the ImageNet training data from `--data_dir`. If you followed
+the instructions in [Getting Started](#getting-started), then set
+`--data_dir="${DATA_DIR}"`. The script assumes that there exists a set of
+sharded TFRecord files containing the ImageNet data. If you have not created
+TFRecord files, please refer to [Getting Started](#getting-started).
+
+Here is the output of the above command line when running on a Tesla K40c:
+
+```shell
+2016-03-07 12:24:59.922898: step 0, loss = 13.11 (5.3 examples/sec; 6.064 sec/batch)
+2016-03-07 12:25:55.206783: step 10, loss = 13.71 (9.4 examples/sec; 3.394 sec/batch)
+2016-03-07 12:26:28.905231: step 20, loss = 14.81 (9.5 examples/sec; 3.380 sec/batch)
+2016-03-07 12:27:02.699719: step 30, loss = 14.45 (9.5 examples/sec; 3.378 sec/batch)
+2016-03-07 12:27:36.515699: step 40, loss = 13.98 (9.5 examples/sec; 3.376 sec/batch)
+2016-03-07 12:28:10.220956: step 50, loss = 13.92 (9.6 examples/sec; 3.327 sec/batch)
+2016-03-07 12:28:43.658223: step 60, loss = 13.28 (9.6 examples/sec; 3.350 sec/batch)
+...
+```
+
+This example highlights several important points:
+
+* A log entry is printed every 10 steps and the line includes the
+total loss (starts around 13.0-14.0) and the speed of processing in terms
+of throughput (examples / sec) and batch speed (sec/batch).
+
+* The first step in training is always slow. The primary reason for the slow
+speed is that during the first step of training, the preprocessing queue must
+first fill up with several thousand example images in order to reach its minimum
+capacity before training starts.
+
+The number of GPU devices is specified by `--num_gpus` (which defaults to 1).
+Specifying `--num_gpus` greater than 1 splits the batch evenly across
+the GPU cards.
+
+```shell
+# Build the training binary to run on a GPU. If you do not have a GPU,
+# then exclude '--config=cuda'
+bazel build -c opt --config=cuda inception/imagenet_train
+
+# run it
+bazel-bin/inception/imagenet_train --num_gpus=2 --batch_size=64 --train_dir=/tmp/imagenet_train
+```
+
+This model splits the batch of 64 images across 2 GPUs and calculates
+the average gradient by waiting for both GPUs to finish calculating the
+gradients from their respective data (See diagram above). Generally speaking,
+using larger numbers of GPUs leads to higher throughput as well as the
+opportunity to use larger batch sizes. In turn, larger batch sizes imply
+better estimates of the gradient, enabling the use of higher learning rates.
+In summary, using more GPUs results in faster training.
+
+Note that the batch size is a difficult parameter to tune as it requires
+balancing memory demands of the model, memory available on the GPU and speed
+of computation. Generally speaking, employing larger batch sizes leads to
+more efficient computation and potentially more efficient training steps.
+
+Note that there is considerable noise in the loss function on individual steps
+in the previous log. Because of this noise, it is difficult to discern how well
+a model is learning. The solution is to launch TensorBoard
+pointing to the directory containing the events log.
+
+```shell
+tensorboard --logdir=/tmp/imagenet_train
+```
+
+TensorBoard has access to the many summaries produced by the model that
+track statistics of the model behavior and the quality
+of the learned model. In particular, TensorBoard tracks an exponentially smoothed
+version of the loss. In practice, it is far easier to judge how well a model
+learns by monitoring the smoothed version of the loss.
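+
+As a minimal sketch of where that smoothed curve comes from (assuming a loss
+tensor named `total_loss`; this is not the verbatim `inception_train.py` code),
+an exponential moving average of the loss is exported alongside the raw value:
+
+```python
+import tensorflow as tf
+
+total_loss = tf.reduce_mean(tf.square(tf.random_normal([32])))  # stand-in loss
+
+# Track an exponentially decayed average of the loss and export both curves.
+loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
+loss_averages_op = loss_averages.apply([total_loss])
+tf.scalar_summary('loss (raw)', total_loss)
+tf.scalar_summary('loss (smoothed)', loss_averages.average(total_loss))
+```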
+
+## How to Evaluate
+
+Evaluating an Inception v3 model on the ImageNet 2012 validation data set
+requires running a separate binary.
+
+The evaluation procedure is nearly identical to
+[Evaluating a Model](https://www.tensorflow.org/tutorials/deep_cnn/index.html#evaluating-a-model)
+described in the [Convolutional Neural Network](https://www.tensorflow.org/tutorials/deep_cnn/index.html)
+tutorial.
+
+**WARNING** Be careful not to run the evaluation and training binary on the
+same GPU or else you might run out of memory. Consider running the evaluation on
+a separate GPU if available or suspending the training binary while running
+the evaluation on the same GPU.
+
+Briefly, one can evaluate the model by running:
+
+```shell
+# Build the evaluation binary to run on a GPU. If you do not have a GPU,
+# then exclude '--config=cuda'
+bazel build -c opt --config=cuda inception/imagenet_eval
+
+# run it
+bazel-bin/inception/imagenet_eval --checkpoint_dir=/tmp/imagenet_train --eval_dir=/tmp/imagenet_eval
+```
+
+Note that we point ``--checkpoint_dir`` to the location of the checkpoints
+saved by `inception_train.py` above. Running the above command results in the
+following output:
+
+```shell
+2016-02-17 22:32:50.391206: precision @ 1 = 0.735
+...
+```
+
+The script calculates the precision @ 1 over the entire validation data
+periodically. The precision @ 1 measures how often the highest scoring
+prediction from the model matched the ImageNet label -- in this case, 73.5%.
+If you wish to run the eval just once and not periodically, append the
+`--run_once` option.
+
+Much like the training script, `imagenet_eval.py` also
+exports summaries that may be visualized in TensorBoard. These summaries
+calculate additional statistics on the predictions (e.g. recall @ 5) as well
+as monitor the statistics of the model activations and weights during
+evaluation.
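+
+For reference, precision @ 1 boils down to comparing the top-scoring logit
+against the true label. A minimal sketch with stand-in tensors (not the
+verbatim `imagenet_eval.py` code):
+
+```python
+import tensorflow as tf
+
+logits = tf.random_normal([8, 1001])             # stand-in model outputs
+labels = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])   # stand-in ground-truth labels
+
+# True where the highest-scoring class equals the label, then averaged.
+top_1_correct = tf.nn.in_top_k(logits, labels, 1)
+precision_at_1 = tf.reduce_mean(tf.cast(top_1_correct, tf.float32))
+```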
+
+## How to Fine-Tune a Pre-Trained Model on a New Task
+
+### Getting Started
+Much like training the ImageNet model, we must first convert a new data set to
+the sharded TFRecord format in which each entry is a serialized `tf.Example` proto.
+
+We have provided a script demonstrating how to do this for a small data set
+of a few thousand flower images spread across 5 labels:
+
+```shell
+daisy, dandelion, roses, sunflowers, tulips
+```
+There is a single automated script that downloads the data set and converts
+it to the TFRecord format. Much like the ImageNet data set, each record in the
+TFRecord format is a serialized `tf.Example` proto whose entries include
+a JPEG-encoded string and an integer label. Please see
+[`parse_example_proto`](image_processing.py) for details.
+
+The script takes just a few minutes to run, depending on your network connection
+speed for downloading and processing the images. Your hard disk requires 200MB
+of free storage. Here we select `FLOWERS_DATA_DIR=$HOME/flowers-data` as such a
+location but feel free to edit accordingly.
+
+```shell
+# location of where to place the flowers data
+FLOWERS_DATA_DIR=$HOME/flowers-data
+
+# build the preprocessing script.
+bazel build -c opt inception/download_and_preprocess_flowers
+
+# run it
+bazel-bin/inception/download_and_preprocess_flowers "${FLOWERS_DATA_DIR}"
+```
+
+If the script runs successfully, the final line of the terminal output should
+look like:
+
+```shell
+2016-02-24 20:42:25.067551: Finished writing all 3170 images in data set.
+```
+
+When the script finishes, you will find 2 shards each for the training and
+validation files in the `FLOWERS_DATA_DIR`. The files will match the patterns
+`train-?????-of-00002` and `validation-?????-of-00002`, respectively.
+
+**NOTE** If you wish to prepare a custom image data set for transfer learning,
+you will need to invoke [`build_image_data.py`](data/build_image_data.py)
+on your custom data set.
+Please see the associated options and assumptions behind this script by reading
+the comments section of  [`build_image_data.py`](data/build_image_data.py).
+
+The second piece you will need is a trained Inception v3 image model. You have
+the option of either training one yourself (See
+[How to Train from Scratch](#how-to-train-from-scratch) for details) or you can
+download a pre-trained model like so:
+
+```shell
+# location of where to place the Inception v3 model
+DATA_DIR=$HOME/inception-v3-model
+cd ${DATA_DIR}
+
+# download the Inception v3 model
+curl -O http://download.tensorflow.org/models/image/imagenet/inception-v3-2016-03-01.tar.gz
+tar xzf inception-v3-2016-03-01.tar.gz
+
+# this will create a directory called inception-v3 which contains the following files.
+> ls inception-v3
+README.txt
+checkpoint
+model.ckpt-157585
+```
+
+[Congratulations!](https://www.youtube.com/watch?v=9bZkp7q19f0)
+You are now ready to fine-tune your pre-trained Inception v3 model
+with the flower data set.
+
+### How to Retrain a Trained Model on the Flowers Data
+
+We are now ready to fine-tune a pre-trained Inception-v3 model on
+the flowers data set. This requires two distinct changes to our training
+procedure:
+
+1. Build the exact same model as previously except we change the number
+of labels in the final classification layer.
+
+2. Restore all weights from the pre-trained Inception-v3 except for the
+final classification layer; this will get randomly initialized instead.
+
+
+
+We can perform these two operations by specifying two flags:
+`--pretrained_model_checkpoint_path` and `--fine_tune`.
+The first flag is a string that points to the path of a pre-trained Inception-v3
+model. If this flag is specified, it will load the entire model from the
+checkpoint before the script begins training.
+
+The second flag `--fine_tune` is a boolean that indicates whether the last
+classification layer should be randomly initialized or restored.
+You may set this flag to false
+if you wish to continue training a pre-trained model from a checkpoint. If you
+set this flag to true, you can train a new classification layer from scratch.
+
+In order to understand how `--fine_tune` works, please see the discussion
+on `Variables` in the TensorFlow-Slim [`README.md`](slim/README.md).
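+
+As a minimal sketch of what this amounts to (not the verbatim
+`inception_train.py` logic, and the scope name `'logits'` is only an
+illustrative assumption), fine-tuning restores every variable except those in
+the final classification layer:
+
+```python
+import tensorflow as tf
+
+def restore_for_fine_tuning(sess, checkpoint_path, exclude_scope='logits'):
+  # Keep the randomly initialized final layer; restore everything else.
+  variables_to_restore = [v for v in tf.all_variables()
+                          if exclude_scope not in v.op.name]
+  saver = tf.train.Saver(variables_to_restore)
+  saver.restore(sess, checkpoint_path)
+```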
+
+Putting this all together you can retrain a pre-trained Inception-v3 model
+on the flowers data set with the following command.
+
+```shell
+# Build the training binary to run on a GPU. If you do not have a GPU,
+# then exclude '--config=cuda'
+bazel build -c opt --config=cuda inception/flowers_train
+
+# Path to the downloaded Inception-v3 model.
+MODEL_PATH="${INCEPTION_MODEL_DIR}/model.ckpt-157585"
+
+# Directory where the flowers data resides.
+FLOWERS_DATA_DIR=/tmp/flowers-data/
+
+# Directory where to save the checkpoint and events files.
+TRAIN_DIR=/tmp/flowers_train/
+
+# Run the fine-tuning on the flowers data set starting from the pre-trained
+# Inception-v3 model.
+bazel-bin/inception/flowers_train \
+  --train_dir="${TRAIN_DIR}" \
+  --data_dir="${FLOWERS_DATA_DIR}" \
+  --pretrained_model_checkpoint_path="${MODEL_PATH}" \
+  --fine_tune=True \
+  --initial_learning_rate=0.001 \
+  --input_queue_memory_factor=1
+```
+
+We have added a few extra options to the training procedure.
+
+* Fine-tuning a model on a separate data set requires significantly lowering the
+initial learning rate. We set the initial learning rate to 0.001.
+* The flowers data set is quite small so we shrink the size of the shuffling
+queue of examples. See [Adjusting Memory Demands](#adjusting-memory-demands)
+for more details.
+
+The training script will only report the loss. To evaluate the quality of the
+fine-tuned model, you will need to run `flowers_eval`:
+
+
+```shell
+# Build the evaluation binary to run on a GPU. If you do not have a GPU,
+# then exclude '--config=cuda'
+bazel build -c opt --config=cuda inception/flowers_eval
+
+# Directory where we saved the fine-tuned checkpoint and events files.
+TRAIN_DIR=/tmp/flowers_train/
+
+# Directory where the flowers data resides.
+FLOWERS_DATA_DIR=/tmp/flowers-data/
+
+# Directory where to save the evaluation events files.
+EVAL_DIR=/tmp/flowers_eval/
+
+# Evaluate the fine-tuned model on a hold-out of the flower data set.
+bazel-bin/inception/flowers_eval \
+  --eval_dir="${EVAL_DIR}" \
+  --data_dir="${FLOWERS_DATA_DIR}" \
+  --subset=validation \
+  --num_examples=500 \
+  --checkpoint_dir="${TRAIN_DIR}" \
+  --input_queue_memory_factor=1 \
+  --run_once
+```
+
+We find that the evaluation arrives at roughly 93.4% precision@1 after the
+model has been running for 2000 steps.
+
+```shell
+Succesfully loaded model from /tmp/flowers/model.ckpt-1999 at step=1999.
+2016-03-01 16:52:51.761219: starting evaluation on (validation).
+2016-03-01 16:53:05.450419: [20 batches out of 20] (36.5 examples/sec; 0.684sec/batch)
+2016-03-01 16:53:05.450471: precision @ 1 = 0.9340 recall @ 5 = 0.9960 [500 examples]
+```
+
+
+## How to Construct a New Dataset for Retraining
+
+One can use the existing scripts supplied with this model to build a new
+dataset for training or fine-tuning. The main script to employ is
+[`build_image_data.py`](data/build_image_data.py). Briefly,
+this script takes a structured
+directory of images and converts it to a sharded `TFRecord` that can be read
+by the Inception model.
+
+In particular, you will need to create a directory of training images
+that reside within `$TRAIN_DIR` and `$VALIDATION_DIR` arranged as such:
+
+```shell
+  $TRAIN_DIR/dog/image0.jpeg
+  $TRAIN_DIR/dog/image1.jpg
+  $TRAIN_DIR/dog/image2.png
+  ...
+  $TRAIN_DIR/cat/weird-image.jpeg
+  $TRAIN_DIR/cat/my-image.jpeg
+  $TRAIN_DIR/cat/my-image.JPG
+  ...
+  $VALIDATION_DIR/dog/imageA.jpeg
+  $VALIDATION_DIR/dog/imageB.jpg
+  $VALIDATION_DIR/dog/imageC.png
+  ...
+  $VALIDATION_DIR/cat/weird-image.PNG
+  $VALIDATION_DIR/cat/that-image.jpg
+  $VALIDATION_DIR/cat/cat.JPG
+  ...
+```
+Each sub-directory in `$TRAIN_DIR` and `$VALIDATION_DIR` corresponds to a
+unique label for the images that reside within that sub-directory. The images
+may be JPEG or PNG images. We do not support other image types currently.
+
+Once the data is arranged in this directory structure, we can run
+`build_image_data.py` on the data to generate the sharded `TFRecord` dataset.
+Each entry of the `TFRecord` is a serialized `tf.Example` protocol buffer.
+A complete list of information contained in the `tf.Example` is described
+in the comments of `build_image_data.py`.
+
+To run `build_image_data.py`, you can run the following command line:
+
+```shell
+# location to where to save the TFRecord data.
+OUTPUT_DIRECTORY=$HOME/my-custom-data/
+
+# build the preprocessing script.
+bazel build -c opt inception/build_image_data
+
+# convert the data.
+bazel-bin/inception/build_image_data \
+  --train_directory="${TRAIN_DIR}" \
+  --validation_directory="${VALIDATION_DIR}" \
+  --output_directory="${OUTPUT_DIRECTORY}" \
+  --labels_file="${LABELS_FILE}" \
+  --train_shards=128 \
+  --validation_shards=24 \
+  --num_threads=8
+```
+where `$OUTPUT_DIRECTORY` is the location of the sharded `TFRecords`. The
+`$LABELS_FILE` is a text file read by the script that
+provides a list of all of the labels. For instance, in the case of the flowers
+data set, the `$LABELS_FILE` contained the following data:
+
+```shell
+daisy
+dandelion
+roses
+sunflowers
+tulips
+```
+
+Note that each row of the labels file corresponds to an entry in the final
+classifier in the model. That is, `daisy` corresponds to the classifier
+entry `1`; `dandelion` is entry `2`, etc. We skip label `0` as a
+background class.
+
+Running this script produces files that look like the following:
+
+```shell
+  $TRAIN_DIR/train-00000-of-00128
+  $TRAIN_DIR/train-00001-of-00128
+  ...
+  $TRAIN_DIR/train-00127-of-00128
+
+and
+
+  $VALIDATION_DIR/validation-00000-of-00024
+  $VALIDATION_DIR/validation-00001-of-00024
+  ...
+  $VALIDATION_DIR/validation-00023-of-00024
+```
+where 128 and 24 are the number of shards specified for each
+dataset, respectively. Generally speaking, we aim to select the number
+of shards such that roughly 1024 images reside in each shard. Once this
+data set is built, you are ready to train or fine-tune an Inception model
+on this data set.
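+
+As a back-of-envelope sketch of that rule of thumb (the image counts below are
+hypothetical):
+
+```python
+num_train_images = 25000
+num_validation_images = 8000
+
+# Aim for roughly 1024 images per shard.
+train_shards = max(1, num_train_images // 1024)            # -> 24
+validation_shards = max(1, num_validation_images // 1024)  # -> 7
+```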
+
+## Practical Considerations for Training a Model
+
+The model architecture and training procedure are heavily dependent on the
+hardware used to train the model. If you wish to train or fine-tune this
+model on your machine **you will need to adjust and empirically determine
+a good set of training hyper-parameters for your setup**. What follows are
+some general considerations for novices.
+
+### Finding Good Hyperparameters
+
+Roughly 5-10 hyper-parameters govern the speed at which a network is trained.
+In addition to `--batch_size` and `--num_gpus`, there are several constants
+defined in [inception_train.py](./inception_train.py) which dictate the
+learning schedule.
+
+```python
+RMSPROP_DECAY = 0.9                # Decay term for RMSProp.
+MOMENTUM = 0.9                     # Momentum in RMSProp.
+RMSPROP_EPSILON = 1.0              # Epsilon term for RMSProp.
+INITIAL_LEARNING_RATE = 0.1        # Initial learning rate.
+NUM_EPOCHS_PER_DECAY = 30.0        # Epochs after which learning rate decays.
+LEARNING_RATE_DECAY_FACTOR = 0.16  # Learning rate decay factor.
+```
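+
+A sketch of how these constants typically combine into a step-wise decaying
+learning rate fed to RMSProp (the batch size and epoch size below are
+stand-ins, and this is not the verbatim `inception_train.py` code):
+
+```python
+import tensorflow as tf
+
+batch_size = 32                    # stand-in for FLAGS.batch_size
+num_examples_per_epoch = 1281167   # ImageNet training set size
+global_step = tf.Variable(0, trainable=False)
+
+# Decay the learning rate by LEARNING_RATE_DECAY_FACTOR every
+# NUM_EPOCHS_PER_DECAY epochs, measured in training steps.
+decay_steps = int(num_examples_per_epoch / batch_size * NUM_EPOCHS_PER_DECAY)
+lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step,
+                                decay_steps, LEARNING_RATE_DECAY_FACTOR,
+                                staircase=True)
+opt = tf.train.RMSPropOptimizer(lr, RMSPROP_DECAY, momentum=MOMENTUM,
+                                epsilon=RMSPROP_EPSILON)
+```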
+
+There are many papers that discuss the various tricks and trade-offs associated
+with training a model with stochastic gradient descent. For those new to the
+field, some great references are:
+
+* Y Bengio, [Practical recommendations for gradient-based training of deep architectures](http://arxiv.org/abs/1206.5533)
+* I Goodfellow, Y Bengio and A Courville, [Deep Learning](http://www.deeplearningbook.org/)
+
+What follows is a summary of some general advice for identifying appropriate
+model hyper-parameters in the context of this particular
+model training setup. Namely,
+this library provides *synchronous* updates to model parameters based on
+batch-splitting across multiple GPUs.
+
+* Higher learning rates lead to faster training. Too high a learning rate
+leads to instability and will cause model parameters to diverge to infinity
+or NaN.
+
+* Larger batch sizes lead to higher quality estimates of the gradient and
+permit training the model with higher learning rates.
+
+* Often the GPU memory is a bottleneck that prevents employing larger batch
+sizes. Employing more GPUs allows one to use larger batch sizes because
+this model splits the batch across the GPUs.
+
+**NOTE** If one wishes to train this model with *asynchronous* gradient updates,
+one will need to substantially alter this model and new considerations need to
+be factored into hyperparameter tuning.
+See [Large Scale Distributed Deep Networks](http://research.google.com/archive/large_deep_networks_nips2012.html)
+for a discussion in this domain.
+
+### Adjusting Memory Demands
+
+Training this model has large memory demands in terms of the CPU and GPU. Let's
+discuss each item in turn.
+
+GPU memory is relatively small compared to CPU memory. Two items dictate the
+amount of GPU memory employed -- model architecture and batch size. Assuming
+that you keep the model architecture fixed, the sole parameter governing the
+GPU demand is the batch size. A good rule of thumb is to employ as large
+a batch size as will fit on the GPU.
+
+If you run out of GPU memory, either lower the `--batch_size` or employ more
+GPUs on your desktop. The model performs batch-splitting across GPUs, thus N
+GPUs can handle N times the batch size of 1 GPU.
+
+The model requires a large amount of CPU memory as well. We have tuned the model
+to employ roughly 40GB of CPU memory. Thus, having access to 64 or 128 GB of
+CPU memory would be ideal.
+
+If that is not possible, you can tune down the memory demands of the model
+by lowering `--input_queue_memory_factor`. Images are preprocessed
+asynchronously with respect to the main training across
+`--num_preprocess_threads` threads. The preprocessed images are stored in
+a shuffling queue from which each GPU performs a dequeue operation in order
+to receive a `batch_size` worth of images.
+
+In order to guarantee good shuffling across the data, we maintain a large
+shuffling queue of 1024 x `input_queue_memory_factor` images. For the current
+model architecture, this corresponds to 16GB of CPU memory. You may lower
+`input_queue_memory_factor` in order to decrease the memory footprint. Keep
+in mind though that lowering this value drastically may result in a model
+with slightly lower predictive accuracy when training from scratch. Please
+see comments in [`image_processing.py`](./image_processing.py) for more details.
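+
+The 16GB figure can be reproduced with a quick back-of-envelope calculation (a
+sketch that assumes the default `--input_queue_memory_factor` of 16 and
+299x299x3 float32 images):
+
+```python
+images_in_queue = 1024 * 16            # examples held in the shuffling queue
+bytes_per_image = 299 * 299 * 3 * 4    # float32 pixels, ~1.07 MB per image
+total_gb = images_in_queue * bytes_per_image / 1024.0 ** 3
+print('%.1f GB' % total_gb)            # ~16.4 GB
+```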
+
+## Troubleshooting
+
+#### The model runs out of CPU memory.
+
+In lieu of buying more CPU memory, an easy fix is to
+decrease `--input_queue_memory_factor`. See
+[Adjusting Memory Demands](#adjusting-memory-demands).
+
+
+#### The model runs out of GPU memory.
+
+The data is not able to fit on the GPU card. The simplest solution is to
+decrease the batch size of the model. Otherwise, you will need to think about
+a more sophisticated method for specifying the training which cuts up the model
+across multiple `session.run()` calls or partitions the model across multiple
+GPUs. See [Using GPUs](https://www.tensorflow.org/versions/r0.7/how_tos/using_gpu/index.html)
+and
+[Adjusting Memory Demands](#adjusting-memory-demands)
+for more information.
+
+#### The model training results in NaN's.
+
+The learning rate of the model is too high. Turn down your learning rate.
+
+#### I wish to train a model with a different image size.
+
+The simplest solution is to artificially resize your images to `299x299`
+pixels. See
+[Images](https://www.tensorflow.org/versions/r0.7/api_docs/python/image.html)
+section for many resizing, cropping and padding methods.
+Note that the entire model architecture is predicated on a `299x299` image,
+thus if you wish to change the input image size, then you may need to redesign
+the entire model architecture.
+
+#### What hardware specification are these hyper-parameters targeted for?
+
+We targeted a desktop with 128GB of CPU RAM connected to 8 NVIDIA Tesla K40
+GPU cards, but we have run this on desktops with 32GB of CPU RAM and 1 NVIDIA
+Tesla K40. You can get a sense of the various training configurations we
+tested by reading the comments in [`inception_train.py`](./inception_train.py).
+
+
+
+
+
+

+ 430 - 0
inception/data/build_image_data.py

@@ -0,0 +1,430 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Converts image data to TFRecords file format with Example protos.
+
+The image data set is expected to reside in JPEG files located in the
+following directory structure.
+
+  data_dir/label_0/image0.jpeg
+  data_dir/label_0/image1.jpg
+  ...
+  data_dir/label_1/weird-image.jpeg
+  data_dir/label_1/my-image.jpeg
+  ...
+
+where the sub-directory is the unique label associated with these images.
+
+This TensorFlow script converts the training and evaluation data into
+a sharded data set consisting of TFRecord files
+
+  train_directory/train-00000-of-01024
+  train_directory/train-00001-of-01024
+  ...
+  train_directory/train-01023-of-01024
+
+and
+
+  validation_directory/validation-00000-of-00128
+  validation_directory/validation-00001-of-00128
+  ...
+  validation_directory/validation-00127-of-00128
+
+where we have selected 1024 and 128 shards for each data set. Each record
+within the TFRecord file is a serialized Example proto. The Example proto
+contains the following fields:
+
+  image/encoded: string containing JPEG encoded image in RGB colorspace
+  image/height: integer, image height in pixels
+  image/width: integer, image width in pixels
+  image/colorspace: string, specifying the colorspace, always 'RGB'
+  image/channels: integer, specifying the number of channels, always 3
+  image/format: string, specifying the format, always 'JPEG'
+
+  image/filename: string containing the basename of the image file
+            e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
+  image/class/label: integer specifying the index in a classification layer.
+    The label ranges from [0, num_labels] where 0 is unused and left as
+    the background class.
+  image/class/text: string specifying the human-readable version of the label
+    e.g. 'dog'
+
+If your data set involves bounding boxes, please look at build_imagenet_data.py.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from datetime import datetime
+import os
+import random
+import sys
+import threading
+
+
+import numpy as np
+import tensorflow as tf
+
+tf.app.flags.DEFINE_string('train_directory', '/tmp/',
+                           'Training data directory')
+tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
+                           'Validation data directory')
+tf.app.flags.DEFINE_string('output_directory', '/tmp/',
+                           'Output data directory')
+
+tf.app.flags.DEFINE_integer('train_shards', 2,
+                            'Number of shards in training TFRecord files.')
+tf.app.flags.DEFINE_integer('validation_shards', 2,
+                            'Number of shards in validation TFRecord files.')
+
+tf.app.flags.DEFINE_integer('num_threads', 2,
+                            'Number of threads to preprocess the images.')
+
+# The labels file contains a list of valid labels.
+# Assumes that the file contains entries as such:
+#   dog
+#   cat
+#   flower
+# where each line corresponds to a label. We map each label contained in
+# the file to an integer corresponding to the line number, starting from 1
+# (label 0 is reserved as the background class).
+tf.app.flags.DEFINE_string('labels_file', '', 'Labels file')
+
+
+FLAGS = tf.app.flags.FLAGS
+
+
+def _int64_feature(value):
+  """Wrapper for inserting int64 features into Example proto."""
+  if not isinstance(value, list):
+    value = [value]
+  return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
+
+
+def _bytes_feature(value):
+  """Wrapper for inserting bytes features into Example proto."""
+  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
+
+
+def _convert_to_example(filename, image_buffer, label, text, height, width):
+  """Build an Example proto for an example.
+
+  Args:
+    filename: string, path to an image file, e.g., '/path/to/example.JPG'
+    image_buffer: string, JPEG encoding of RGB image
+    label: integer, identifier for the ground truth for the network
+    text: string, unique human-readable, e.g. 'dog'
+    height: integer, image height in pixels
+    width: integer, image width in pixels
+  Returns:
+    Example proto
+  """
+
+  colorspace = 'RGB'
+  channels = 3
+  image_format = 'JPEG'
+
+  example = tf.train.Example(features=tf.train.Features(feature={
+      'image/height': _int64_feature(height),
+      'image/width': _int64_feature(width),
+      'image/colorspace': _bytes_feature(colorspace),
+      'image/channels': _int64_feature(channels),
+      'image/class/label': _int64_feature(label),
+      'image/class/text': _bytes_feature(text),
+      'image/format': _bytes_feature(image_format),
+      'image/filename': _bytes_feature(os.path.basename(filename)),
+      'image/encoded': _bytes_feature(image_buffer)}))
+  return example
+
+
+class ImageCoder(object):
+  """Helper class that provides TensorFlow image coding utilities."""
+
+  def __init__(self):
+    # Create a single Session to run all image coding calls.
+    self._sess = tf.Session()
+
+    # Initializes function that converts PNG to JPEG data.
+    self._png_data = tf.placeholder(dtype=tf.string)
+    image = tf.image.decode_png(self._png_data, channels=3)
+    self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
+
+    # Initializes function that decodes RGB JPEG data.
+    self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
+    self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
+
+  def png_to_jpeg(self, image_data):
+    return self._sess.run(self._png_to_jpeg,
+                          feed_dict={self._png_data: image_data})
+
+  def decode_jpeg(self, image_data):
+    image = self._sess.run(self._decode_jpeg,
+                           feed_dict={self._decode_jpeg_data: image_data})
+    assert len(image.shape) == 3
+    assert image.shape[2] == 3
+    return image
+
+
+def _is_png(filename):
+  """Determine if a file contains a PNG format image.
+
+  Args:
+    filename: string, path of the image file.
+
+  Returns:
+    boolean indicating if the image is a PNG.
+  """
+  return '.png' in filename.lower()
+
+
+def _process_image(filename, coder):
+  """Process a single image file.
+
+  Args:
+    filename: string, path to an image file e.g., '/path/to/example.JPG'.
+    coder: instance of ImageCoder to provide TensorFlow image coding utils.
+  Returns:
+    image_buffer: string, JPEG encoding of RGB image.
+    height: integer, image height in pixels.
+    width: integer, image width in pixels.
+  """
+  # Read the image file.
+  image_data = tf.gfile.FastGFile(filename, 'r').read()
+
+  # Convert any PNG to JPEG's for consistency.
+  if _is_png(filename):
+    print('Converting PNG to JPEG for %s' % filename)
+    image_data = coder.png_to_jpeg(image_data)
+
+  # Decode the RGB JPEG.
+  image = coder.decode_jpeg(image_data)
+
+  # Check that image converted to RGB
+  assert len(image.shape) == 3
+  height = image.shape[0]
+  width = image.shape[1]
+  assert image.shape[2] == 3
+
+  return image_data, height, width
+
+
+def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
+                               texts, labels, num_shards):
+  """Processes and saves list of images as TFRecord in 1 thread.
+
+  Args:
+    coder: instance of ImageCoder to provide TensorFlow image coding utils.
+    thread_index: integer, unique thread index within [0, len(ranges)).
+    ranges: list of pairs of integers specifying ranges of each batches to
+      analyze in parallel.
+    name: string, unique identifier specifying the data set
+    filenames: list of strings; each string is a path to an image file
+    texts: list of strings; each string is human readable, e.g. 'dog'
+    labels: list of integer; each integer identifies the ground truth
+    num_shards: integer number of shards for this data set.
+  """
+  # Each thread produces N shards where N = int(num_shards / num_threads).
+  # For instance, if num_shards = 128, and the num_threads = 2, then the first
+  # thread would produce shards [0, 64).
+  num_threads = len(ranges)
+  assert not num_shards % num_threads
+  num_shards_per_batch = int(num_shards / num_threads)
+
+  shard_ranges = np.linspace(ranges[thread_index][0],
+                             ranges[thread_index][1],
+                             num_shards_per_batch + 1).astype(int)
+  num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
+
+  counter = 0
+  for s in xrange(num_shards_per_batch):
+    # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
+    shard = thread_index * num_shards_per_batch + s
+    output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
+    output_file = os.path.join(FLAGS.output_directory, output_filename)
+    writer = tf.python_io.TFRecordWriter(output_file)
+
+    shard_counter = 0
+    files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
+    for i in files_in_shard:
+      filename = filenames[i]
+      label = labels[i]
+      text = texts[i]
+
+      image_buffer, height, width = _process_image(filename, coder)
+
+      example = _convert_to_example(filename, image_buffer, label,
+                                    text, height, width)
+      writer.write(example.SerializeToString())
+      shard_counter += 1
+      counter += 1
+
+      if not counter % 1000:
+        print('%s [thread %d]: Processed %d of %d images in thread batch.' %
+              (datetime.now(), thread_index, counter, num_files_in_thread))
+        sys.stdout.flush()
+
+    print('%s [thread %d]: Wrote %d images to %s' %
+          (datetime.now(), thread_index, shard_counter, output_file))
+    sys.stdout.flush()
+    shard_counter = 0
+  print('%s [thread %d]: Wrote %d images to %d shards.' %
+        (datetime.now(), thread_index, counter, num_files_in_thread))
+  sys.stdout.flush()
+
+
+def _process_image_files(name, filenames, texts, labels, num_shards):
+  """Process and save list of images as TFRecord of Example protos.
+
+  Args:
+    name: string, unique identifier specifying the data set
+    filenames: list of strings; each string is a path to an image file
+    texts: list of strings; each string is human readable, e.g. 'dog'
+    labels: list of integer; each integer identifies the ground truth
+    num_shards: integer number of shards for this data set.
+  """
+  assert len(filenames) == len(texts)
+  assert len(filenames) == len(labels)
+
+  # Break all images into batches, each covering the range [ranges[i][0], ranges[i][1]).
+  spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
+  ranges = []
+  threads = []
+  for i in xrange(len(spacing) - 1):
+    ranges.append([spacing[i], spacing[i+1]])
+
+  # Launch a thread for each batch.
+  print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
+  sys.stdout.flush()
+
+  # Create a mechanism for monitoring when all threads are finished.
+  coord = tf.train.Coordinator()
+
+  # Create a generic TensorFlow-based utility for converting all image codings.
+  coder = ImageCoder()
+
+  threads = []
+  for thread_index in xrange(len(ranges)):
+    args = (coder, thread_index, ranges, name, filenames,
+            texts, labels, num_shards)
+    t = threading.Thread(target=_process_image_files_batch, args=args)
+    t.start()
+    threads.append(t)
+
+  # Wait for all the threads to terminate.
+  coord.join(threads)
+  print('%s: Finished writing all %d images in data set.' %
+        (datetime.now(), len(filenames)))
+  sys.stdout.flush()
+
+
+def _find_image_files(data_dir, labels_file):
+  """Build a list of all images files and labels in the data set.
+
+  Args:
+    data_dir: string, path to the root directory of images.
+
+      Assumes that the image data set resides in JPEG files located in
+      the following directory structure.
+
+        data_dir/dog/another-image.JPEG
+        data_dir/dog/my-image.jpg
+
+      where 'dog' is the label associated with these images.
+
+    labels_file: string, path to the labels file.
+
+      The list of valid labels are held in this file. Assumes that the file
+      contains entries as such:
+        dog
+        cat
+        flower
+      where each line corresponds to a label. We map each label contained in
+      the file to an integer starting with the integer 1 corresponding to the
+      label contained in the first line (label 0 is reserved as background).
+
+  Returns:
+    filenames: list of strings; each string is a path to an image file.
+    texts: list of strings; each string is the class, e.g. 'dog'
+    labels: list of integer; each integer identifies the ground truth.
+  """
+  print('Determining list of input files and labels from %s.' % data_dir)
+  unique_labels = [l.strip() for l in tf.gfile.FastGFile(
+      labels_file, 'r').readlines()]
+
+  labels = []
+  filenames = []
+  texts = []
+
+  # Leave label index 0 empty as a background class.
+  label_index = 1
+
+  # Construct the list of JPEG files and labels.
+  for text in unique_labels:
+    jpeg_file_path = '%s/%s/*' % (data_dir, text)
+    matching_files = tf.gfile.Glob(jpeg_file_path)
+
+    labels.extend([label_index] * len(matching_files))
+    texts.extend([text] * len(matching_files))
+    filenames.extend(matching_files)
+
+    if not label_index % 100:
+      print('Finished finding files in %d of %d classes.' % (
+          label_index, len(unique_labels)))
+    label_index += 1
+
+  # Shuffle the ordering of all image files in order to guarantee
+  # random ordering of the images with respect to label in the
+  # saved TFRecord files. Make the randomization repeatable.
+  shuffled_index = range(len(filenames))
+  random.seed(12345)
+  random.shuffle(shuffled_index)
+
+  filenames = [filenames[i] for i in shuffled_index]
+  texts = [texts[i] for i in shuffled_index]
+  labels = [labels[i] for i in shuffled_index]
+
+  print('Found %d JPEG files across %d labels inside %s.' %
+        (len(filenames), len(unique_labels), data_dir))
+  return filenames, texts, labels
+
+
+def _process_dataset(name, directory, num_shards, labels_file):
+  """Process a complete data set and save it as a TFRecord.
+
+  Args:
+    name: string, unique identifier specifying the data set.
+    directory: string, root path to the data set.
+    num_shards: integer number of shards for this data set.
+    labels_file: string, path to the labels file.
+  """
+  filenames, texts, labels = _find_image_files(directory, labels_file)
+  _process_image_files(name, filenames, texts, labels, num_shards)
+
+
+def main(unused_argv):
+  assert not FLAGS.train_shards % FLAGS.num_threads, (
+      'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
+  assert not FLAGS.validation_shards % FLAGS.num_threads, (
+      'Please make the FLAGS.num_threads commensurate with '
+      'FLAGS.validation_shards')
+  print('Saving results to %s' % FLAGS.output_directory)
+
+  # Run it!
+  _process_dataset('validation', FLAGS.validation_directory,
+                   FLAGS.validation_shards, FLAGS.labels_file)
+  _process_dataset('train', FLAGS.train_directory,
+                   FLAGS.train_shards, FLAGS.labels_file)
+
+
+if __name__ == '__main__':
+  tf.app.run()

+ 702 - 0
inception/data/build_imagenet_data.py

@@ -0,0 +1,702 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Converts ImageNet data to TFRecords file format with Example protos.
+
+The raw ImageNet data set is expected to reside in JPEG files located in the
+following directory structure.
+
+  data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
+  data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
+  ...
+
+where 'n01440764' is the unique synset label associated with
+these images.
+
+The training data set consists of 1000 sub-directories (i.e. labels)
+each containing 1200 JPEG images for a total of 1.2M JPEG images.
+
+The evaluation data set consists of 1000 sub-directories (i.e. labels)
+each containing 50 JPEG images for a total of 50K JPEG images.
+
+This TensorFlow script converts the training and evaluation data into
+a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
+
+  train_directory/train-00000-of-01024
+  train_directory/train-00001-of-01024
+  ...
+  train_directory/train-01023-of-01024
+
+and
+
+  validation_directory/validation-00000-of-00128
+  validation_directory/validation-00001-of-00128
+  ...
+  validation_directory/validation-00127-of-00128
+
+Each validation TFRecord file contains ~390 records. Each training TFRecord
+file contains ~1250 records. Each record within the TFRecord file is a
+serialized Example proto. The Example proto contains the following fields:
+
+  image/encoded: string containing JPEG encoded image in RGB colorspace
+  image/height: integer, image height in pixels
+  image/width: integer, image width in pixels
+  image/colorspace: string, specifying the colorspace, always 'RGB'
+  image/channels: integer, specifying the number of channels, always 3
+  image/format: string, specifying the format, always 'JPEG'
+
+  image/filename: string containing the basename of the image file
+            e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
+  image/class/label: integer specifying the index in a classification layer.
+    The label ranges from [1, 1000] where 0 is not used.
+  image/class/synset: string specifying the unique ID of the label,
+    e.g. 'n01440764'
+  image/class/text: string specifying the human-readable version of the label
+    e.g. 'red fox, Vulpes vulpes'
+
+  image/object/bbox/xmin: list of floats specifying the 0+ human annotated
+    bounding boxes
+  image/object/bbox/xmax: list of floats specifying the 0+ human annotated
+    bounding boxes
+  image/object/bbox/ymin: list of floats specifying the 0+ human annotated
+    bounding boxes
+  image/object/bbox/ymax: list of floats specifying the 0+ human annotated
+    bounding boxes
+  image/object/bbox/label: integer specifying the index in a classification
+    layer. The label ranges from [1, 1000] where 0 is not used. Note this is
+    always identical to the image label.
+
+Note that the length of xmin is identical to the length of xmax, ymin and ymax
+for each example.
+
+Running this script using 16 threads may take around 2.5 hours on an HP Z420.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from datetime import datetime
+import os
+import random
+import sys
+import threading
+
+
+import numpy as np
+import tensorflow as tf
+
+tf.app.flags.DEFINE_string('train_directory', '/tmp/',
+                           'Training data directory')
+tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
+                           'Validation data directory')
+tf.app.flags.DEFINE_string('output_directory', '/tmp/',
+                           'Output data directory')
+
+tf.app.flags.DEFINE_integer('train_shards', 1024,
+                            'Number of shards in training TFRecord files.')
+tf.app.flags.DEFINE_integer('validation_shards', 128,
+                            'Number of shards in validation TFRecord files.')
+
+tf.app.flags.DEFINE_integer('num_threads', 8,
+                            'Number of threads to preprocess the images.')
+
+# The labels file contains a list of valid labels.
+# Assumes that the file contains entries as such:
+#   n01440764
+#   n01443537
+#   n01484850
+# where each line corresponds to a label expressed as a synset. We map
+# each synset contained in the file to an integer (based on the alphabetical
+# ordering). See below for details.
+tf.app.flags.DEFINE_string('labels_file',
+                           'imagenet_lsvrc_2015_synsets.txt',
+                           'Labels file')
+
+# This file contains the mapping from synset to human-readable label.
+# Assumes each line of the file looks like:
+#
+#   n02119247    black fox
+#   n02119359    silver fox
+#   n02119477    red fox, Vulpes fulva
+#
+# where each line corresponds to a unique mapping. Note that each line is
+# formatted as <synset>\t<human readable label>.
+tf.app.flags.DEFINE_string('imagenet_metadata_file',
+                           'imagenet_metadata.txt',
+                           'ImageNet metadata file')
+
+# This file is the output of process_bounding_boxes.py.
+# Assumes each line of the file looks like:
+#
+#   n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
+#
+# where each line corresponds to one bounding box annotation associated
+# with an image. Each line can be parsed as:
+#
+#   <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
+#
+# Note that there may be multiple bounding box annotations associated
+# with an image file.
+tf.app.flags.DEFINE_string('bounding_box_file',
+                           './imagenet_2012_bounding_boxes.csv',
+                           'Bounding box file')
+
+FLAGS = tf.app.flags.FLAGS
+
+
+def _int64_feature(value):
+  """Wrapper for inserting int64 features into Example proto."""
+  if not isinstance(value, list):
+    value = [value]
+  return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
+
+
+def _float_feature(value):
+  """Wrapper for inserting float features into Example proto."""
+  if not isinstance(value, list):
+    value = [value]
+  return tf.train.Feature(float_list=tf.train.FloatList(value=value))
+
+
+def _bytes_feature(value):
+  """Wrapper for inserting bytes features into Example proto."""
+  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
+
+
+def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
+                        height, width):
+  """Build an Example proto for an example.
+
+  Args:
+    filename: string, path to an image file, e.g., '/path/to/example.JPG'
+    image_buffer: string, JPEG encoding of RGB image
+    label: integer, identifier for the ground truth for the network
+    synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
+    human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
+    bbox: list of bounding boxes; each box is a list of floats specifying
+      [xmin, ymin, xmax, ymax] in relative coordinates. All boxes are assumed
+      to belong to the same label as the image label.
+    height: integer, image height in pixels
+    width: integer, image width in pixels
+  Returns:
+    Example proto
+  """
+  xmin = []
+  ymin = []
+  xmax = []
+  ymax = []
+  for b in bbox:
+    assert len(b) == 4
+    # pylint: disable=expression-not-assigned
+    [l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
+    # pylint: enable=expression-not-assigned
+
+  colorspace = 'RGB'
+  channels = 3
+  image_format = 'JPEG'
+
+  example = tf.train.Example(features=tf.train.Features(feature={
+      'image/height': _int64_feature(height),
+      'image/width': _int64_feature(width),
+      'image/colorspace': _bytes_feature(colorspace),
+      'image/channels': _int64_feature(channels),
+      'image/class/label': _int64_feature(label),
+      'image/class/synset': _bytes_feature(synset),
+      'image/class/text': _bytes_feature(human),
+      'image/object/bbox/xmin': _float_feature(xmin),
+      'image/object/bbox/xmax': _float_feature(xmax),
+      'image/object/bbox/ymin': _float_feature(ymin),
+      'image/object/bbox/ymax': _float_feature(ymax),
+      'image/object/bbox/label': _int64_feature([label] * len(xmin)),
+      'image/format': _bytes_feature(image_format),
+      'image/filename': _bytes_feature(os.path.basename(filename)),
+      'image/encoded': _bytes_feature(image_buffer)}))
+  return example
+
+
+class ImageCoder(object):
+  """Helper class that provides TensorFlow image coding utilities."""
+
+  def __init__(self):
+    # Create a single Session to run all image coding calls.
+    self._sess = tf.Session()
+
+    # Initializes function that converts PNG to JPEG data.
+    self._png_data = tf.placeholder(dtype=tf.string)
+    image = tf.image.decode_png(self._png_data, channels=3)
+    self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
+
+    # Initializes function that converts CMYK JPEG data to RGB JPEG data.
+    self._cmyk_data = tf.placeholder(dtype=tf.string)
+    image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
+    self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
+
+    # Initializes function that decodes RGB JPEG data.
+    self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
+    self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
+
+  def png_to_jpeg(self, image_data):
+    return self._sess.run(self._png_to_jpeg,
+                          feed_dict={self._png_data: image_data})
+
+  def cmyk_to_rgb(self, image_data):
+    return self._sess.run(self._cmyk_to_rgb,
+                          feed_dict={self._cmyk_data: image_data})
+
+  def decode_jpeg(self, image_data):
+    image = self._sess.run(self._decode_jpeg,
+                           feed_dict={self._decode_jpeg_data: image_data})
+    assert len(image.shape) == 3
+    assert image.shape[2] == 3
+    return image
+
+
+def _is_png(filename):
+  """Determine if a file contains a PNG format image.
+
+  Args:
+    filename: string, path of the image file.
+
+  Returns:
+    boolean indicating if the image is a PNG.
+  """
+  # File list from:
+  # https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
+  return 'n02105855_2933.JPEG' in filename
+
+
+def _is_cmyk(filename):
+  """Determine if file contains a CMYK JPEG format image.
+
+  Args:
+    filename: string, path of the image file.
+
+  Returns:
+    boolean indicating if the image is a JPEG encoded with CMYK color space.
+  """
+  # File list from:
+  # https://github.com/cytsai/ilsvrc-cmyk-image-list
+  blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
+               'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
+               'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
+               'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
+               'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
+               'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
+               'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
+               'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
+               'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
+               'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
+               'n07583066_647.JPEG', 'n13037406_4650.JPEG']
+  return filename.split('/')[-1] in blacklist
+
+
+def _process_image(filename, coder):
+  """Process a single image file.
+
+  Args:
+    filename: string, path to an image file e.g., '/path/to/example.JPG'.
+    coder: instance of ImageCoder to provide TensorFlow image coding utils.
+  Returns:
+    image_buffer: string, JPEG encoding of RGB image.
+    height: integer, image height in pixels.
+    width: integer, image width in pixels.
+  """
+  # Read the image file.
+  image_data = tf.gfile.FastGFile(filename, 'r').read()
+
+  # Clean the dirty data.
+  if _is_png(filename):
+    # 1 image is a PNG.
+    print('Converting PNG to JPEG for %s' % filename)
+    image_data = coder.png_to_jpeg(image_data)
+  elif _is_cmyk(filename):
+    # 22 JPEG images are in CMYK colorspace.
+    print('Converting CMYK to RGB for %s' % filename)
+    image_data = coder.cmyk_to_rgb(image_data)
+
+  # Decode the RGB JPEG.
+  image = coder.decode_jpeg(image_data)
+
+  # Check that image converted to RGB
+  assert len(image.shape) == 3
+  height = image.shape[0]
+  width = image.shape[1]
+  assert image.shape[2] == 3
+
+  return image_data, height, width
+
+
+def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
+                               synsets, labels, humans, bboxes, num_shards):
+  """Processes and saves list of images as TFRecord in 1 thread.
+
+  Args:
+    coder: instance of ImageCoder to provide TensorFlow image coding utils.
+    thread_index: integer, unique thread index within [0, len(ranges)).
+    ranges: list of pairs of integers specifying the ranges of images each
+      thread analyzes in parallel.
+    name: string, unique identifier specifying the data set
+    filenames: list of strings; each string is a path to an image file
+    synsets: list of strings; each string is a unique WordNet ID
+    labels: list of integer; each integer identifies the ground truth
+    humans: list of strings; each string is a human-readable label
+    bboxes: list of bounding boxes for each image. Note that each entry in this
+      list may contain 0 or more bounding box annotations for the image.
+    num_shards: integer number of shards for this data set.
+  """
+  # Each thread produces N shards where N = int(num_shards / num_threads).
+  # For instance, if num_shards = 128 and num_threads = 2, then the first
+  # thread would produce shards [0, 64).
+  num_threads = len(ranges)
+  assert not num_shards % num_threads
+  num_shards_per_batch = int(num_shards / num_threads)
+
+  shard_ranges = np.linspace(ranges[thread_index][0],
+                             ranges[thread_index][1],
+                             num_shards_per_batch + 1).astype(int)
+  num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
+
+  counter = 0
+  for s in xrange(num_shards_per_batch):
+    # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
+    shard = thread_index * num_shards_per_batch + s
+    output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
+    output_file = os.path.join(FLAGS.output_directory, output_filename)
+    writer = tf.python_io.TFRecordWriter(output_file)
+
+    shard_counter = 0
+    files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
+    for i in files_in_shard:
+      filename = filenames[i]
+      label = labels[i]
+      synset = synsets[i]
+      human = humans[i]
+      bbox = bboxes[i]
+
+      image_buffer, height, width = _process_image(filename, coder)
+
+      example = _convert_to_example(filename, image_buffer, label,
+                                    synset, human, bbox,
+                                    height, width)
+      writer.write(example.SerializeToString())
+      shard_counter += 1
+      counter += 1
+
+      if not counter % 1000:
+        print('%s [thread %d]: Processed %d of %d images in thread batch.' %
+              (datetime.now(), thread_index, counter, num_files_in_thread))
+        sys.stdout.flush()
+
+    print('%s [thread %d]: Wrote %d images to %s' %
+          (datetime.now(), thread_index, shard_counter, output_file))
+    sys.stdout.flush()
+    shard_counter = 0
+  print('%s [thread %d]: Wrote %d images to %d shards.' %
+        (datetime.now(), thread_index, counter, num_shards_per_batch))
+  sys.stdout.flush()
+
+
+def _process_image_files(name, filenames, synsets, labels, humans,
+                         bboxes, num_shards):
+  """Process and save list of images as TFRecord of Example protos.
+
+  Args:
+    name: string, unique identifier specifying the data set
+    filenames: list of strings; each string is a path to an image file
+    synsets: list of strings; each string is a unique WordNet ID
+    labels: list of integer; each integer identifies the ground truth
+    humans: list of strings; each string is a human-readable label
+    bboxes: list of bounding boxes for each image. Note that each entry in this
+      list may contain 0 or more bounding box annotations for the image.
+    num_shards: integer number of shards for this data set.
+  """
+  assert len(filenames) == len(synsets)
+  assert len(filenames) == len(labels)
+  assert len(filenames) == len(humans)
+  assert len(filenames) == len(bboxes)
+
+  # Break all images into batches with indices in [ranges[i][0], ranges[i][1]).
+  spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
+  ranges = []
+  threads = []
+  for i in xrange(len(spacing) - 1):
+    ranges.append([spacing[i], spacing[i+1]])
+
+  # Launch a thread for each batch.
+  print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
+  sys.stdout.flush()
+
+  # Create a mechanism for monitoring when all threads are finished.
+  coord = tf.train.Coordinator()
+
+  # Create a generic TensorFlow-based utility for converting all image codings.
+  coder = ImageCoder()
+
+  threads = []
+  for thread_index in xrange(len(ranges)):
+    args = (coder, thread_index, ranges, name, filenames,
+            synsets, labels, humans, bboxes, num_shards)
+    t = threading.Thread(target=_process_image_files_batch, args=args)
+    t.start()
+    threads.append(t)
+
+  # Wait for all the threads to terminate.
+  coord.join(threads)
+  print('%s: Finished writing all %d images in data set.' %
+        (datetime.now(), len(filenames)))
+  sys.stdout.flush()
+
+
+def _find_image_files(data_dir, labels_file):
+  """Build a list of all image files and labels in the data set.
+
+  Args:
+    data_dir: string, path to the root directory of images.
+
+      Assumes that the ImageNet data set resides in JPEG files located in
+      the following directory structure.
+
+        data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
+        data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
+
+      where 'n01440764' is the unique synset label associated with these images.
+
+    labels_file: string, path to the labels file.
+
+      The list of valid labels are held in this file. Assumes that the file
+      contains entries as such:
+        n01440764
+        n01443537
+        n01484850
+      where each line corresponds to a label expressed as a synset. We map
+      each synset contained in the file to an integer (based on the alphabetical
+      ordering) starting with the integer 1 corresponding to the synset
+      contained in the first line.
+
+      The reason we start the integer labels at 1 is to reserve label 0 as an
+      unused background class.
+
+  Returns:
+    filenames: list of strings; each string is a path to an image file.
+    synsets: list of strings; each string is a unique WordNet ID.
+    labels: list of integer; each integer identifies the ground truth.
+  """
+  print('Determining list of input files and labels from %s.' % data_dir)
+  challenge_synsets = [l.strip() for l in
+                       tf.gfile.FastGFile(labels_file, 'r').readlines()]
+
+  labels = []
+  filenames = []
+  synsets = []
+
+  # Leave label index 0 empty as a background class.
+  label_index = 1
+
+  # Construct the list of JPEG files and labels.
+  for synset in challenge_synsets:
+    jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
+    matching_files = tf.gfile.Glob(jpeg_file_path)
+
+    labels.extend([label_index] * len(matching_files))
+    synsets.extend([synset] * len(matching_files))
+    filenames.extend(matching_files)
+
+    if not label_index % 100:
+      print('Finished finding files in %d of %d classes.' % (
+          label_index, len(challenge_synsets)))
+    label_index += 1
+
+  # Shuffle the ordering of all image files in order to guarantee
+  # random ordering of the images with respect to label in the
+  # saved TFRecord files. Make the randomization repeatable.
+  shuffled_index = range(len(filenames))
+  random.seed(12345)
+  random.shuffle(shuffled_index)
+
+  filenames = [filenames[i] for i in shuffled_index]
+  synsets = [synsets[i] for i in shuffled_index]
+  labels = [labels[i] for i in shuffled_index]
+
+  print('Found %d JPEG files across %d labels inside %s.' %
+        (len(filenames), len(challenge_synsets), data_dir))
+  return filenames, synsets, labels
+
+
+def _find_human_readable_labels(synsets, synset_to_human):
+  """Build a list of human-readable labels.
+
+  Args:
+    synsets: list of strings; each string is a unique WordNet ID.
+    synset_to_human: dict of synset to human labels, e.g.,
+      'n02119022' --> 'red fox, Vulpes vulpes'
+
+  Returns:
+    List of human-readable strings corresponding to each synset.
+  """
+  humans = []
+  for s in synsets:
+    assert s in synset_to_human, ('Failed to find: %s' % s)
+    humans.append(synset_to_human[s])
+  return humans
+
+
+def _find_image_bounding_boxes(filenames, image_to_bboxes):
+  """Find the bounding boxes for a given image file.
+
+  Args:
+    filenames: list of strings; each string is a path to an image file.
+    image_to_bboxes: dictionary mapping image file names to a list of
+      bounding boxes. This list contains 0+ bounding boxes.
+  Returns:
+    List of bounding boxes for each image. Note that each entry in this
+    list may contain 0 or more bounding box annotations for the image.
+  """
+  num_image_bbox = 0
+  bboxes = []
+  for f in filenames:
+    basename = os.path.basename(f)
+    if basename in image_to_bboxes:
+      bboxes.append(image_to_bboxes[basename])
+      num_image_bbox += 1
+    else:
+      bboxes.append([])
+  print('Found %d images with bboxes out of %d images' % (
+      num_image_bbox, len(filenames)))
+  return bboxes
+
+
+def _process_dataset(name, directory, num_shards, synset_to_human,
+                     image_to_bboxes):
+  """Process a complete data set and save it as a TFRecord.
+
+  Args:
+    name: string, unique identifier specifying the data set.
+    directory: string, root path to the data set.
+    num_shards: integer number of shards for this data set.
+    synset_to_human: dict of synset to human labels, e.g.,
+      'n02119022' --> 'red fox, Vulpes vulpes'
+    image_to_bboxes: dictionary mapping image file names to a list of
+      bounding boxes. This list contains 0+ bounding boxes.
+  """
+  filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
+  humans = _find_human_readable_labels(synsets, synset_to_human)
+  bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
+  _process_image_files(name, filenames, synsets, labels,
+                       humans, bboxes, num_shards)
+
+
+def _build_synset_lookup(imagenet_metadata_file):
+  """Build lookup for synset to human-readable label.
+
+  Args:
+    imagenet_metadata_file: string, path to file containing mapping from
+      synset to human-readable label.
+
+      Assumes each line of the file looks like:
+
+        n02119247    black fox
+        n02119359    silver fox
+        n02119477    red fox, Vulpes fulva
+
+      where each line corresponds to a unique mapping. Note that each line is
+      formatted as <synset>\t<human readable label>.
+
+  Returns:
+    Dictionary of synset to human labels, such as:
+      'n02119022' --> 'red fox, Vulpes vulpes'
+  """
+  lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
+  synset_to_human = {}
+  for l in lines:
+    if l:
+      parts = l.strip().split('\t')
+      assert len(parts) == 2
+      synset = parts[0]
+      human = parts[1]
+      synset_to_human[synset] = human
+  return synset_to_human
+
+
+def _build_bounding_box_lookup(bounding_box_file):
+  """Build a lookup from image file to bounding boxes.
+
+  Args:
+    bounding_box_file: string, path to file with bounding boxes annotations.
+
+      Assumes each line of the file looks like:
+
+        n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
+
+      where each line corresponds to one bounding box annotation associated
+      with an image. Each line can be parsed as:
+
+        <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
+
+      Note that there might exist mulitple bounding box annotations associated
+      with an image file. This file is the output of process_bounding_boxes.py.
+
+  Returns:
+    Dictionary mapping image file names to a list of bounding boxes. This list
+    contains 0+ bounding boxes.
+  """
+  lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
+  images_to_bboxes = {}
+  num_bbox = 0
+  num_image = 0
+  for l in lines:
+    if l:
+      parts = l.split(',')
+      assert len(parts) == 5, ('Failed to parse: %s' % l)
+      filename = parts[0]
+      xmin = float(parts[1])
+      ymin = float(parts[2])
+      xmax = float(parts[3])
+      ymax = float(parts[4])
+      box = [xmin, ymin, xmax, ymax]
+
+      if filename not in images_to_bboxes:
+        images_to_bboxes[filename] = []
+        num_image += 1
+      images_to_bboxes[filename].append(box)
+      num_bbox += 1
+
+  print('Successfully read %d bounding boxes '
+        'across %d images.' % (num_bbox, num_image))
+  return images_to_bboxes
+
+
+def main(unused_argv):
+  assert not FLAGS.train_shards % FLAGS.num_threads, (
+      'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
+  assert not FLAGS.validation_shards % FLAGS.num_threads, (
+      'Please make the FLAGS.num_threads commensurate with '
+      'FLAGS.validation_shards')
+  print('Saving results to %s' % FLAGS.output_directory)
+
+  # Build a map from synset to human-readable label.
+  synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
+  image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
+
+  # Run it!
+  _process_dataset('validation', FLAGS.validation_directory,
+                   FLAGS.validation_shards, synset_to_human, image_to_bboxes)
+  _process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
+                   synset_to_human, image_to_bboxes)
+
+
+if __name__ == '__main__':
+  tf.app.run()
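
For readers of the shards written by the script above, the Example protos can be decoded back with tf.parse_single_example. The snippet below is an illustrative sketch only (not part of the patch): the shard path is an assumption, and the feature keys mirror a subset of what _convert_to_example writes.

  import tensorflow as tf

  # Assumed location of the shards produced by build_imagenet_data.py.
  filename_queue = tf.train.string_input_producer(
      tf.gfile.Glob('/tmp/train-*-of-01024'))
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)

  # Feature keys mirror _convert_to_example above; only a subset is shown.
  features = tf.parse_single_example(serialized_example, {
      'image/encoded': tf.FixedLenFeature([], dtype=tf.string),
      'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64),
      'image/class/synset': tf.FixedLenFeature([], dtype=tf.string),
      'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
  })
  image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
  label = features['image/class/label']

Evaluating these tensors requires the usual Session plus queue runners; the sketch only covers graph construction.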

+ 96 - 0
inception/data/download_and_preprocess_flowers.sh

@@ -0,0 +1,96 @@
+#!/bin/bash
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+# Script to download and preprocess the flowers data set. This data set
+# provides a demonstration for how to perform fine-tuning (i.e. transfer
+# learning) from one model to a new data set.
+#
+# This script provides a demonstration for how to prepare an arbitrary
+# data set for training an Inception v3 model.
+#
+# We demonstrate this with the flowers data set, which consists of labeled
+# images of flowers from 5 classes:
+#
+# daisy, dandelion, roses, sunflowers, tulips
+#
+# The final output of this script is a set of sharded TFRecord files
+# containing serialized Example protocol buffers. See build_image_data.py for
+# details of how the Example protocol buffer contains image data.
+#
+# usage:
+#  ./download_and_preprocess_flowers.sh [data-dir]
+set -e
+
+if [ -z "$1" ]; then
+  echo "usage: download_and_preprocess_flowers.sh [data-dir]"
+  exit 1
+fi
+
+# Create the output and temporary directories.
+DATA_DIR="${1%/}"
+SCRATCH_DIR="${DATA_DIR}/raw-data/"
+mkdir -p "${DATA_DIR}"
+mkdir -p "${SCRATCH_DIR}"
+WORK_DIR="$0.runfiles/inception"
+
+# Download the flowers data.
+DATA_URL="http://download.tensorflow.org/example_images/flower_photos.tgz"
+CURRENT_DIR=$(pwd)
+cd "${DATA_DIR}"
+TARBALL="flower_photos.tgz"
+if [ ! -f ${TARBALL} ]; then
+  echo "Downloading flower data set."
+  wget -O ${TARBALL} "${DATA_URL}"
+else
+  echo "Skipping download of flower data."
+fi
+
+# Note the locations of the train and validation data.
+TRAIN_DIRECTORY="${SCRATCH_DIR}train/"
+VALIDATION_DIRECTORY="${SCRATCH_DIR}validation/"
+
+# Expand the data into the flower_photos/ directory and rename it as the
+# train directory.
+tar xf flower_photos.tgz
+rm -rf "${TRAIN_DIRECTORY}" "${VALIDATION_DIRECTORY}"
+mv flower_photos "${TRAIN_DIRECTORY}"
+
+# Generate a list of 5 labels: daisy, dandelion, roses, sunflowers, tulips
+LABELS_FILE="${SCRATCH_DIR}/labels.txt"
+ls -1 "${TRAIN_DIRECTORY}" | grep -v 'LICENSE' | sed 's/\///' | sort > "${LABELS_FILE}"
+
+# Generate the validation data set.
+while read LABEL; do
+  VALIDATION_DIR_FOR_LABEL="${VALIDATION_DIRECTORY}${LABEL}"
+  TRAIN_DIR_FOR_LABEL="${TRAIN_DIRECTORY}${LABEL}"
+
+  # Move 100 randomly selected images to the validation set.
+  mkdir -p "${VALIDATION_DIR_FOR_LABEL}"
+  VALIDATION_IMAGES=$(ls -1 "${TRAIN_DIR_FOR_LABEL}" | shuf | head -100)
+  for IMAGE in ${VALIDATION_IMAGES}; do
+    mv -f "${TRAIN_DIRECTORY}${LABEL}/${IMAGE}" "${VALIDATION_DIR_FOR_LABEL}"
+  done
+done < "${LABELS_FILE}"
+
+# Build the TFRecords version of the image data.
+cd "${CURRENT_DIR}"
+BUILD_SCRIPT="${WORK_DIR}/build_image_data"
+OUTPUT_DIRECTORY="${DATA_DIR}"
+"${BUILD_SCRIPT}" \
+  --train_directory="${TRAIN_DIRECTORY}" \
+  --validation_directory="${VALIDATION_DIRECTORY}" \
+  --output_directory="${OUTPUT_DIRECTORY}" \
+  --labels_file="${LABELS_FILE}"
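
After the script above finishes, raw-data/train/ and raw-data/validation/ each hold one sub-directory per label, with 100 images per label moved into the validation split. A minimal Python sketch for sanity-checking the split; the data directory is an assumption standing in for the [data-dir] argument.

  import os

  data_dir = '/tmp/flowers-data/raw-data'  # assumed [data-dir]/raw-data
  for subset in ('train', 'validation'):
    subset_dir = os.path.join(data_dir, subset)
    for label in sorted(os.listdir(subset_dir)):
      label_dir = os.path.join(subset_dir, label)
      if not os.path.isdir(label_dir):
        continue  # skip stray files such as the LICENSE shipped with the tarball
      num_images = len(os.listdir(label_dir))
      print('%-10s %-10s %d images' % (subset, label, num_images))

The validation counts should all be 100 if the split above ran cleanly.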

+ 101 - 0
inception/data/download_and_preprocess_imagenet.sh

@@ -0,0 +1,101 @@
+#!/bin/bash
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+# Script to download and preprocess ImageNet Challenge 2012
+# training and validation data set.
+#
+# The final output of this script is a set of sharded TFRecord files
+# containing serialized Example protocol buffers. See build_imagenet_data.py for
+# details of how the Example protocol buffers contain the ImageNet data.
+#
+# The final output of this script appears as such:
+#
+#   data_dir/train-00000-of-01024
+#   data_dir/train-00001-of-01024
+#    ...
+#   data_dir/train-00127-of-01024
+#
+# and
+#
+#   data_dir/validation-00000-of-00128
+#   data_dir/validation-00001-of-00128
+#   ...
+#   data_dir/validation-00127-of-00128
+#
+# Note that this script may take several hours to run to completion. The
+# conversion of the ImageNet data to TFRecords alone takes 2-3 hours depending
+# on the speed of your machine. Please be patient.
+#
+# **IMPORTANT**
+# To download the raw images, the user must create an account with
+# image-net.org and generate a username and access_key. Both are required
+# for downloading the raw images.
+#
+# usage:
+#  ./download_and_preprocess_imagenet.sh [data-dir]
+set -e
+
+if [ -z "$1" ]; then
+  echo "usage: download_and_preprocess_imagenet.sh [data-dir]"
+  exit 1
+fi
+
+# Create the output and temporary directories.
+DATA_DIR="${1%/}"
+SCRATCH_DIR="${DATA_DIR}/raw-data/"
+mkdir -p "${DATA_DIR}"
+mkdir -p "${SCRATCH_DIR}"
+WORK_DIR="$0.runfiles/inception"
+
+# Download the ImageNet data.
+LABELS_FILE="${WORK_DIR}/data/imagenet_lsvrc_2015_synsets.txt"
+DOWNLOAD_SCRIPT="${WORK_DIR}/data/download_imagenet.sh"
+"${DOWNLOAD_SCRIPT}" "${SCRATCH_DIR}" "${LABELS_FILE}"
+
+# Note the locations of the train and validation data.
+TRAIN_DIRECTORY="${SCRATCH_DIR}train/"
+VALIDATION_DIRECTORY="${SCRATCH_DIR}validation/"
+
+# Preprocess the validation data by moving the images into the appropriate
+# sub-directory based on the label (synset) of the image.
+echo "Organizing the validation data into sub-directories."
+PREPROCESS_VAL_SCRIPT="${WORK_DIR}/data/preprocess_imagenet_validation_data.py"
+VAL_LABELS_FILE="${WORK_DIR}/data/imagenet_2012_validation_synset_labels.txt"
+
+"${PREPROCESS_VAL_SCRIPT}" "${VALIDATION_DIRECTORY}" "${VAL_LABELS_FILE}"
+
+# Convert the XML files for bounding box annotations into a single CSV.
+echo "Extracting bounding box information from XML."
+BOUNDING_BOX_SCRIPT="${WORK_DIR}/data/process_bounding_boxes.py"
+BOUNDING_BOX_FILE="${SCRATCH_DIR}/imagenet_2012_bounding_boxes.csv"
+BOUNDING_BOX_DIR="${SCRATCH_DIR}bounding_boxes/"
+
+"${BOUNDING_BOX_SCRIPT}" "${BOUNDING_BOX_DIR}" "${LABELS_FILE}" \
+ | sort >"${BOUNDING_BOX_FILE}"
+echo "Finished downloading and preprocessing the ImageNet data."
+
+# Build the TFRecords version of the ImageNet data.
+BUILD_SCRIPT="${WORK_DIR}/build_imagenet_data"
+OUTPUT_DIRECTORY="${DATA_DIR}"
+IMAGENET_METADATA_FILE="${WORK_DIR}/data/imagenet_metadata.txt"
+
+"${BUILD_SCRIPT}" \
+  --train_directory="${TRAIN_DIRECTORY}" \
+  --validation_directory="${VALIDATION_DIRECTORY}" \
+  --output_directory="${OUTPUT_DIRECTORY}" \
+  --imagenet_metadata_file="${IMAGENET_METADATA_FILE}" \
+  --labels_file="${LABELS_FILE}" \
+  --bounding_box_file="${BOUNDING_BOX_FILE}"
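
Once the script above completes, [data-dir] should hold exactly 1024 train shards and 128 validation shards. A small sketch for verifying this; the path is an assumption.

  import glob

  data_dir = '/tmp/imagenet-data'  # assumed [data-dir]
  num_train = len(glob.glob('%s/train-?????-of-01024' % data_dir))
  num_validation = len(glob.glob('%s/validation-?????-of-00128' % data_dir))
  print('Found %d/1024 train shards and %d/128 validation shards.' %
        (num_train, num_validation))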

+ 100 - 0
inception/data/download_imagenet.sh

@@ -0,0 +1,100 @@
+#!/bin/bash
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+# Script to download ImageNet Challenge 2012 training and validation data set.
+#
+# Downloads and decompresses raw images and bounding boxes.
+#
+# **IMPORTANT**
+# To download the raw images, the user must create an account with
+# image-net.org and generate a username and access_key. Both are required
+# for downloading the raw images.
+#
+# usage:
+#  ./download_imagenet.sh [dirname]
+set -e
+
+if [ "x$IMAGENET_ACCESS_KEY" == x -o "x$IMAGENET_USERNAME" == x ]; then
+  cat <<END
+In order to download the imagenet data, you have to create an account with
+image-net.org. This will get you a username and an access key. You can set the
+IMAGENET_USERNAME and IMAGENET_ACCESS_KEY environment variables, or you can
+enter the credentials here.
+END
+  read -p "Username: " IMAGENET_USERNAME
+  read -p "Access key: " IMAGENET_ACCESS_KEY
+fi
+
+OUTDIR="${1:-./imagenet-data}"
+OUTDIR="${OUTDIR%/}/"  # Ensure a trailing slash; paths below append to it directly.
+SYNSETS_FILE="${2:-./synsets.txt}"
+SYNSETS_FILE="${PWD}/${SYNSETS_FILE}"
+
+echo "Saving downloaded files to $OUTDIR"
+mkdir -p "${OUTDIR}"
+CURRENT_DIR=$(pwd)
+BBOX_DIR="${OUTDIR}bounding_boxes"
+mkdir -p "${BBOX_DIR}"
+cd "${OUTDIR}"
+
+# Download and process all of the ImageNet bounding boxes.
+BASE_URL="http://www.image-net.org/challenges/LSVRC/2012/nonpub"
+
+# See here for details: http://www.image-net.org/download-bboxes
+BOUNDING_BOX_ANNOTATIONS="${BASE_URL}/ILSVRC2012_bbox_train_v2.tar.gz"
+BBOX_TAR_BALL="${BBOX_DIR}/annotations.tar.gz"
+echo "Downloading bounding box annotations."
+wget "${BOUNDING_BOX_ANNOTATIONS}" -O "${BBOX_TAR_BALL}"
+echo "Uncompressing bounding box annotations ..."
+tar xzf "${BBOX_TAR_BALL}" -C "${BBOX_DIR}"
+
+LABELS_ANNOTATED="${BBOX_DIR}/*"
+NUM_XML=$(ls -1 ${LABELS_ANNOTATED} | wc -l)
+echo "Identified ${NUM_XML} bounding box annotations."
+
+# Download and uncompress all images from the ImageNet 2012 validation dataset.
+VALIDATION_TARBALL="ILSVRC2012_img_val.tar"
+OUTPUT_PATH="${OUTDIR}validation/"
+mkdir -p "${OUTPUT_PATH}"
+cd "${OUTDIR}/.."
+echo "Downloading ${VALIDATION_TARBALL} to ${OUTPUT_PATH}."
+wget -nd -c "${BASE_URL}/${VALIDATION_TARBALL}"
+tar xf "${VALIDATION_TARBALL}" -C "${OUTPUT_PATH}"
+
+# Download all images from the ImageNet 2012 train dataset.
+TRAIN_TARBALL="ILSVRC2012_img_train.tar"
+OUTPUT_PATH="${OUTDIR}train/"
+mkdir -p "${OUTPUT_PATH}"
+cd "${OUTDIR}/.."
+echo "Downloading ${TRAIN_TARBALL} to ${OUTPUT_PATH}."
+wget -nd -c "${BASE_URL}/${TRAIN_TARBALL}"
+
+# Un-compress the individual tar-files within the train tar-file.
+echo "Uncompressing individual train tar-balls in the training data."
+
+while read SYNSET; do
+  echo "Processing: ${SYNSET}"
+
+  # Create a directory and delete anything there.
+  mkdir -p "${OUTPUT_PATH}/${SYNSET}"
+  rm -rf "${OUTPUT_PATH}/${SYNSET}"/*
+
+  # Uncompress into the directory.
+  tar xf "${TRAIN_TARBALL}" "${SYNSET}.tar"
+  tar xf "${SYNSET}.tar" -C "${OUTPUT_PATH}/${SYNSET}/"
+  rm -f "${SYNSET}.tar"
+
+  echo "Finished processing: ${SYNSET}"
+done < "${SYNSETS_FILE}"

Diff file is too large
+ 50000 - 0
inception/data/imagenet_2012_validation_synset_labels.txt


Diff file is too large
+ 1000 - 0
inception/data/imagenet_lsvrc_2015_synsets.txt


Diff file is too large
+ 21842 - 0
inception/data/imagenet_metadata.txt


+ 82 - 0
inception/data/preprocess_imagenet_validation_data.py

@@ -0,0 +1,82 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Associate the ImageNet 2012 Challenge validation data set with labels.
+
+The raw ImageNet validation data set is expected to reside in JPEG files
+located in the following directory structure.
+
+ data_dir/ILSVRC2012_val_00000001.JPEG
+ data_dir/ILSVRC2012_val_00000002.JPEG
+ ...
+ data_dir/ILSVRC2012_val_00050000.JPEG
+
+This script moves the files into the following directory structure:
+ data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
+ data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
+ ...
+where 'n01440764' is the unique synset label associated with
+these images.
+
+This directory reorganization requires a mapping from validation image
+number (i.e. suffix of the original file) to the associated label. This
+is provided in the ImageNet development kit via a Matlab file.
+
+In order to make life easier and divorce ourselves from Matlab, we instead
+supply a custom text file that provides this mapping for us.
+
+Sample usage:
+  ./preprocess_imagenet_validation_data.py ILSVRC2012_img_val \
+  imagenet_2012_validation_synset_labels.txt
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import os.path
+import sys
+
+
+if __name__ == '__main__':
+  if len(sys.argv) < 3:
+    print('Invalid usage\n'
+          'usage: preprocess_imagenet_validation_data.py '
+          '<validation data dir> <validation labels file>')
+    sys.exit(-1)
+  data_dir = sys.argv[1]
+  validation_labels_file = sys.argv[2]
+
+  # Read in the 50000 synsets associated with the validation data set.
+  labels = [l.strip() for l in open(validation_labels_file).readlines()]
+  unique_labels = set(labels)
+
+  # Make all sub-directories in the validation data dir.
+  for label in unique_labels:
+    labeled_data_dir = os.path.join(data_dir, label)
+    os.makedirs(labeled_data_dir)
+
+  # Move all of the images to the appropriate sub-directory.
+  for i in xrange(len(labels)):
+    basename = 'ILSVRC2012_val_000%.5d.JPEG' % (i + 1)
+    original_filename = os.path.join(data_dir, basename)
+    if not os.path.exists(original_filename):
+      print('Failed to find: %s' % original_filename)
+      sys.exit(-1)
+    new_filename = os.path.join(data_dir, labels[i], basename)
+    os.rename(original_filename, new_filename)
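
The mapping above is purely positional: the i-th line of the labels file names the synset for the i-th validation image, and the image is moved into a sub-directory named after that synset. A tiny illustration with made-up label lines:

  import os

  data_dir = 'ILSVRC2012_img_val'          # as in the sample usage above
  labels = ['n01440764', 'n01443537']      # made-up first lines of the labels file
  for i, label in enumerate(labels):
    basename = 'ILSVRC2012_val_000%.5d.JPEG' % (i + 1)
    print('%s -> %s' % (os.path.join(data_dir, basename),
                        os.path.join(data_dir, label, basename)))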

+ 252 - 0
inception/data/process_bounding_boxes.py

@@ -0,0 +1,252 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Process the ImageNet Challenge bounding boxes for TensorFlow model training.
+
+This script is called as
+
+process_bounding_boxes.py <dir> [synsets-file]
+
+Where <dir> is a directory containing the downloaded and unpacked bounding box
+data. If [synsets-file] is supplied, then only the bounding boxes whose
+synsets are contained within this file are returned. Note that the
+[synsets-file] contains synset ids, one per line.
+
+The script dumps out a CSV text file in which each line contains an entry.
+  n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
+
+The entry can be read as:
+  <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
+
+The bounding box for <JPEG file name> contains two points (xmin, ymin) and
+(xmax, ymax) specifying the lower-left corner and upper-right corner of a
+bounding box in *relative* coordinates.
+
+The user supplies a directory where the XML files reside. The directory
+structure in the directory <dir> is assumed to look like this:
+
+<dir>/nXXXXXXXX/nXXXXXXXX_YYYY.xml
+
+Each XML file contains a bounding box annotation. The script:
+
+ (1) Parses the XML file and extracts the filename, label and bounding box info.
+
+ (2) The bounding box is specified in the XML files as integer (xmin, ymin) and
+    (xmax, ymax) *relative* to image size displayed to the human annotator. The
+    size of the image displayed to the human annotator is stored in the XML file
+    as integer (height, width).
+
+    Note that the displayed size will differ from the actual size of the image
+    downloaded from image-net.org. To make the bounding box annotation usable,
+    we convert the bounding box to floating point numbers relative to the
+    displayed height and width of the image.
+
+    Note that each XML file might contain N bounding box annotations.
+
+    Note that the points are all clamped at a range of [0.0, 1.0] because some
+    human annotations extend outside the range of the supplied image.
+
+    See details here: http://image-net.org/download-bboxes
+
+(3) By default, the script outputs all valid bounding boxes. If a
+    [synsets-file] is supplied, only the subset of bounding boxes associated
+    with those synsets are outputted. Importantly, one can supply a list of
+    synsets in the ImageNet Challenge and output the list of bounding boxes
+    associated with the training images of the ILSVRC.
+
+    We use these bounding boxes to inform the random distortion of images
+    supplied to the network.
+
+If you run this script successfully, you will see the following output
+to stderr:
+> Finished processing 544546 XML files.
+> Skipped 0 XML files not in ImageNet Challenge.
+> Skipped 0 bounding boxes not in ImageNet Challenge.
+> Wrote 615299 bounding boxes from 544546 annotated images.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import glob
+import os.path
+import sys
+import xml.etree.ElementTree as ET
+
+
+class BoundingBox(object):
+  pass
+
+
+def GetItem(name, root, index=0):
+  count = 0
+  for item in root.iter(name):
+    if count == index:
+      return item.text
+    count += 1
+  # Failed to find "index" occurrence of item.
+  return -1
+
+
+def GetInt(name, root, index=0):
+  return int(GetItem(name, root, index))
+
+
+def FindNumberBoundingBoxes(root):
+  index = 0
+  while True:
+    if GetInt('xmin', root, index) == -1:
+      break
+    index += 1
+  return index
+
+
+def ProcessXMLAnnotation(xml_file):
+  """Process a single XML file containing a bounding box."""
+  # pylint: disable=broad-except
+  try:
+    tree = ET.parse(xml_file)
+  except Exception:
+    print('Failed to parse: ' + xml_file, file=sys.stderr)
+    return None
+  # pylint: enable=broad-except
+  root = tree.getroot()
+
+  num_boxes = FindNumberBoundingBoxes(root)
+  boxes = []
+
+  for index in xrange(num_boxes):
+    box = BoundingBox()
+    # Grab the 'index' annotation.
+    box.xmin = GetInt('xmin', root, index)
+    box.ymin = GetInt('ymin', root, index)
+    box.xmax = GetInt('xmax', root, index)
+    box.ymax = GetInt('ymax', root, index)
+
+    box.width = GetInt('width', root)
+    box.height = GetInt('height', root)
+    box.filename = GetItem('filename', root) + '.JPEG'
+    box.label = GetItem('name', root)
+
+    xmin = float(box.xmin) / float(box.width)
+    xmax = float(box.xmax) / float(box.width)
+    ymin = float(box.ymin) / float(box.height)
+    ymax = float(box.ymax) / float(box.height)
+
+    # Some images contain bounding box annotations that
+    # extend outside of the supplied image. See, e.g.
+    # n03127925/n03127925_147.xml
+    # Additionally, for some bounding boxes, the min > max
+    # or the box is entirely outside of the image.
+    min_x = min(xmin, xmax)
+    max_x = max(xmin, xmax)
+    box.xmin_scaled = min(max(min_x, 0.0), 1.0)
+    box.xmax_scaled = min(max(max_x, 0.0), 1.0)
+
+    min_y = min(ymin, ymax)
+    max_y = max(ymin, ymax)
+    box.ymin_scaled = min(max(min_y, 0.0), 1.0)
+    box.ymax_scaled = min(max(max_y, 0.0), 1.0)
+
+    boxes.append(box)
+
+  return boxes
+
+if __name__ == '__main__':
+  if len(sys.argv) < 2 or len(sys.argv) > 3:
+    print('Invalid usage\n'
+          'usage: process_bounding_boxes.py <dir> [synsets-file]',
+          file=sys.stderr)
+    sys.exit(-1)
+
+  xml_files = glob.glob(sys.argv[1] + '/*/*.xml')
+  print('Identified %d XML files in %s' % (len(xml_files), sys.argv[1]),
+        file=sys.stderr)
+
+  if len(sys.argv) == 3:
+    labels = set([l.strip() for l in open(sys.argv[2]).readlines()])
+    print('Identified %d synset IDs in %s' % (len(labels), sys.argv[2]),
+          file=sys.stderr)
+  else:
+    labels = None
+
+  skipped_boxes = 0
+  skipped_files = 0
+  saved_boxes = 0
+  saved_files = 0
+  for file_index, one_file in enumerate(xml_files):
+    # Example: <...>/n06470073/n00141669_6790.xml
+    label = os.path.basename(os.path.dirname(one_file))
+
+    # Determine if the annotation is from an ImageNet Challenge label.
+    if labels is not None and label not in labels:
+      skipped_files += 1
+      continue
+
+    bboxes = ProcessXMLAnnotation(one_file)
+    assert bboxes is not None, 'No bounding boxes found in ' + one_file
+
+    found_box = False
+    for bbox in bboxes:
+      if labels is not None:
+        if bbox.label != label:
+          # Note: There is a slight bug in the bounding box annotation data.
+          # Many of the dog labels have the human label 'Scottish_deerhound'
+          # instead of the synset ID 'n02092002' in the bbox.label field. As a
+          # simple hack to overcome this issue, we only exclude bbox labels
+          # *which are synset ID's* that do not match original synset label for
+          # the XML file.
+          if bbox.label in labels:
+            skipped_boxes += 1
+            continue
+
+      # Guard against improperly specified boxes.
+      if (bbox.xmin_scaled >= bbox.xmax_scaled or
+          bbox.ymin_scaled >= bbox.ymax_scaled):
+        skipped_boxes += 1
+        continue
+
+      # Note bbox.filename occasionally contains '%s' in the name. This is
+      # data set noise that is fixed by just using the basename of the XML file.
+      image_filename = os.path.splitext(os.path.basename(one_file))[0]
+      print('%s.JPEG,%.4f,%.4f,%.4f,%.4f' %
+            (image_filename,
+             bbox.xmin_scaled, bbox.ymin_scaled,
+             bbox.xmax_scaled, bbox.ymax_scaled))
+
+      saved_boxes += 1
+      found_box = True
+    if found_box:
+      saved_files += 1
+    else:
+      skipped_files += 1
+
+    if not file_index % 5000:
+      print('--> processed %d of %d XML files.' %
+            (file_index + 1, len(xml_files)),
+            file=sys.stderr)
+      print('--> skipped %d boxes and %d XML files.' %
+            (skipped_boxes, skipped_files), file=sys.stderr)
+
+  print('Finished processing %d XML files.' % len(xml_files), file=sys.stderr)
+  print('Skipped %d XML files not in ImageNet Challenge.' % skipped_files,
+        file=sys.stderr)
+  print('Skipped %d bounding boxes not in ImageNet Challenge.' % skipped_boxes,
+        file=sys.stderr)
+  print('Wrote %d bounding boxes from %d annotated images.' %
+        (saved_boxes, saved_files),
+        file=sys.stderr)
+  print('Finished.', file=sys.stderr)

+ 103 - 0
inception/dataset.py

@@ -0,0 +1,103 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Small library that points to a data set.
+
+Methods of the Dataset class:
+  data_files: Returns a python list of all (sharded) data set files.
+  num_examples_per_epoch: Returns the number of examples in the data set.
+  num_classes: Returns the number of classes in the data set.
+  reader: Return a reader for a single entry from the data set.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from abc import ABCMeta
+from abc import abstractmethod
+import os
+
+
+import tensorflow as tf
+
+FLAGS = tf.app.flags.FLAGS
+
+# Basic model parameters.
+tf.app.flags.DEFINE_string('data_dir', '/tmp/mydata',
+                           """Path to the processed data, i.e. """
+                           """TFRecord of Example protos.""")
+
+
+class Dataset(object):
+  """A simple class for handling data sets."""
+  __metaclass__ = ABCMeta
+
+  def __init__(self, name, subset):
+    """Initialize dataset using a subset and the path to the data."""
+    assert subset in self.available_subsets(), self.available_subsets()
+    self.name = name
+    self.subset = subset
+
+  @abstractmethod
+  def num_classes(self):
+    """Returns the number of classes in the data set."""
+    pass
+    # return 10
+
+  @abstractmethod
+  def num_examples_per_epoch(self):
+    """Returns the number of examples in the data subset."""
+    pass
+    # if self.subset == 'train':
+    #   return 10000
+    # if self.subset == 'validation':
+    #   return 1000
+
+  @abstractmethod
+  def download_message(self):
+    """Prints a download message for the Dataset."""
+    pass
+
+  def available_subsets(self):
+    """Returns the list of available subsets."""
+    return ['train', 'validation']
+
+  def data_files(self):
+    """Returns a python list of all (sharded) data subset files.
+
+    Returns:
+      python list of all (sharded) data set files.
+    Raises:
+      ValueError: if there are no data_files matching the subset.
+    """
+    tf_record_pattern = os.path.join(FLAGS.data_dir, '%s-*' % self.subset)
+    data_files = tf.gfile.Glob(tf_record_pattern)
+    if not data_files:
+      print('No files found for dataset %s/%s at %s' % (self.name,
+                                                        self.subset,
+                                                        FLAGS.data_dir))
+
+      self.download_message()
+      exit(-1)
+    return data_files
+
+  def reader(self):
+    """Return a reader for a single entry from the data set.
+
+    See io_ops.py for details of Reader class.
+
+    Returns:
+      Reader object that reads the data set.
+    """
+    return tf.TFRecordReader()

+ 52 - 0
inception/flowers_data.py

@@ -0,0 +1,52 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Small library that points to the flowers data set.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+
+from inception.dataset import Dataset
+
+
+class FlowersData(Dataset):
+  """Flowers data set."""
+
+  def __init__(self, subset):
+    super(FlowersData, self).__init__('Flowers', subset)
+
+  def num_classes(self):
+    """Returns the number of classes in the data set."""
+    return 5
+
+  def num_examples_per_epoch(self):
+    """Returns the number of examples in the data subset."""
+    if self.subset == 'train':
+      return 3170
+    if self.subset == 'validation':
+      return 500
+
+  def download_message(self):
+    """Instruction to download and extract the tarball from Flowers website."""
+
+    print('Failed to find any Flowers %s files' % self.subset)
+    print('')
+    print('If you have already downloaded and processed the data, then make '
+          'sure to set --data_dir to point to the directory containing the '
+          'location of the sharded TFRecords.\n')
+    print('Please see README.md for instructions on how to build '
+          'the flowers dataset using download_and_preprocess_flowers.\n')

+ 40 - 0
inception/flowers_eval.py

@@ -0,0 +1,40 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""A binary to evaluate Inception on the flowers data set.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+import tensorflow as tf
+
+from inception import inception_eval
+from inception.flowers_data import FlowersData
+
+FLAGS = tf.app.flags.FLAGS
+
+
+def main(unused_argv=None):
+  dataset = FlowersData(subset=FLAGS.subset)
+  assert dataset.data_files()
+  if tf.gfile.Exists(FLAGS.eval_dir):
+    tf.gfile.DeleteRecursively(FLAGS.eval_dir)
+  tf.gfile.MakeDirs(FLAGS.eval_dir)
+  inception_eval.evaluate(dataset)
+
+
+if __name__ == '__main__':
+  tf.app.run()

+ 41 - 0
inception/flowers_train.py

@@ -0,0 +1,41 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""A binary to train Inception on the flowers data set.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+
+import tensorflow as tf
+
+from inception import inception_train
+from inception.flowers_data import FlowersData
+
+FLAGS = tf.app.flags.FLAGS
+
+
+def main(_):
+  dataset = FlowersData(subset=FLAGS.subset)
+  assert dataset.data_files()
+  if tf.gfile.Exists(FLAGS.train_dir):
+    tf.gfile.DeleteRecursively(FLAGS.train_dir)
+  tf.gfile.MakeDirs(FLAGS.train_dir)
+  inception_train.train(dataset)
+
+
+if __name__ == '__main__':
+  tf.app.run()

BIN
inception/g3doc/inception_v3_architecture.png


+ 479 - 0
inception/image_processing.py

@@ -0,0 +1,479 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Read and preprocess image data.
+
+ Image processing occurs on a single image at a time. Images are read and
+ preprocessed in parallel across multiple threads. The resulting images
+ are concatenated together to form a single batch for training or evaluation.
+
+ -- Provide processed image data for a network:
+ inputs: Construct batches of evaluation examples of images.
+ distorted_inputs: Construct batches of training examples of images.
+ batch_inputs: Construct batches of training or evaluation examples of images.
+
+ -- Data processing:
+ parse_example_proto: Parses an Example proto containing a training example
+   of an image.
+
+ -- Image decoding:
+ decode_jpeg: Decode a JPEG encoded string into a 3-D float32 Tensor.
+
+ -- Image preprocessing:
+ image_preprocessing: Decode and preprocess one image for evaluation or training
+ distort_image: Distort one image for training a network.
+ eval_image: Prepare one image for evaluation.
+ distort_color: Distort the color in one image for training.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+import tensorflow as tf
+
+FLAGS = tf.app.flags.FLAGS
+
+tf.app.flags.DEFINE_integer('batch_size', 32,
+                            """Number of images to process in a batch.""")
+tf.app.flags.DEFINE_integer('image_size', 299,
+                            """Provide square images of this size.""")
+tf.app.flags.DEFINE_integer('num_preprocess_threads', 4,
+                            """Number of preprocessing threads per tower. """
+                            """Please make this a multiple of 4.""")
+
+# Images are preprocessed asynchronously using multiple threads specified by
+# --num_preprocess_threads and the resulting processed images are stored in a
+# random shuffling queue. The shuffling queue dequeues --batch_size images
+# for processing on a given Inception tower. A larger shuffling queue guarantees
+# better mixing across examples within a batch and results in slightly higher
+# predictive performance in a trained model. Empirically,
+# --input_queue_memory_factor=16 works well. A value of 16 implies a queue size
+# of 1024*16 images. Assuming RGB 299x299 images, this implies a queue size of
+# roughly 17.6GB of CPU memory. If the machine is memory limited, decrease
+# this factor to decrease the CPU memory footprint accordingly.
+tf.app.flags.DEFINE_integer('input_queue_memory_factor', 16,
+                            """Size of the queue of preprocessed images. """
+                            """Default is ideal but try smaller values, e.g. """
+                            """4, 2 or 1, if host memory is constrained. See """
+                            """comments in code for more details.""")
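+
+# A rough back-of-the-envelope sketch of the memory footprint described above
+# (illustrative arithmetic only; the constants mirror the defaults in this file):
+#   images_in_queue = 1024 * input_queue_memory_factor = 1024 * 16 = 16384
+#   bytes_per_image = 299 * 299 * 3 * 4 (float32 RGB) ~= 1.07MB
+#   queue_memory   ~= 16384 * 1.07MB ~= 17.6GB
+# Halving --input_queue_memory_factor roughly halves this footprint at the cost
+# of slightly worse shuffling.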
+
+
+def inputs(dataset, batch_size=None, num_preprocess_threads=None):
+  """Generate batches of ImageNet images for evaluation.
+
+  Use this function as the inputs for evaluating a network.
+
+  Note that some (minimal) image preprocessing occurs during evaluation
+  including central cropping and resizing of the image to fit the network.
+
+  Args:
+    dataset: instance of Dataset class specifying the dataset.
+    batch_size: integer, number of examples in batch
+    num_preprocess_threads: integer, total number of preprocessing threads;
+      if None, defaults to FLAGS.num_preprocess_threads.
+
+  Returns:
+    images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
+                                       FLAGS.image_size, 3].
+    labels: 1-D integer Tensor of [batch_size].
+  """
+  if not batch_size:
+    batch_size = FLAGS.batch_size
+
+  # Force all input processing onto CPU in order to reserve the GPU for
+  # the forward inference and back-propagation.
+  with tf.device('/cpu:0'):
+    images, labels = batch_inputs(
+        dataset, batch_size, train=False,
+        num_preprocess_threads=num_preprocess_threads)
+
+  return images, labels
+
+
+def distorted_inputs(dataset, batch_size=None, num_preprocess_threads=None):
+  """Generate batches of distorted versions of ImageNet images.
+
+  Use this function as the inputs for training a network.
+
+  Distorting images provides a useful technique for augmenting the data
+  set during training in order to make the network invariant to aspects
+  of the image that do not affect the label.
+
+  Args:
+    dataset: instance of Dataset class specifying the dataset.
+    batch_size: integer, number of examples in batch
+    num_preprocess_threads: integer, total number of preprocessing threads;
+      if None, defaults to FLAGS.num_preprocess_threads.
+
+  Returns:
+    images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
+                                       FLAGS.image_size, 3].
+    labels: 1-D integer Tensor of [batch_size].
+  """
+  if not batch_size:
+    batch_size = FLAGS.batch_size
+
+  # Force all input processing onto CPU in order to reserve the GPU for
+  # the forward inference and back-propagation.
+  with tf.device('/cpu:0'):
+    images, labels = batch_inputs(
+        dataset, batch_size, train=True,
+        num_preprocess_threads=num_preprocess_threads)
+  return images, labels
+
+
+def decode_jpeg(image_buffer, scope=None):
+  """Decode a JPEG string into one 3-D float image Tensor.
+
+  Args:
+    image_buffer: scalar string Tensor.
+    scope: Optional scope for op_scope.
+  Returns:
+    3-D float Tensor with values ranging from [0, 1).
+  """
+  with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
+    # Decode the string as an RGB JPEG.
+    # Note that the resulting image contains an unknown height and width
+    # that is set dynamically by decode_jpeg. In other words, the height
+    # and width of image is unknown at compile-time.
+    image = tf.image.decode_jpeg(image_buffer, channels=3)
+
+    # After this point, all image pixels reside in [0,1)
+    # until the very end, when they're rescaled to (-1, 1).  The various
+    # adjust_* ops all require this range for dtype float.
+    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
+    return image
+
+
+def distort_color(image, thread_id=0, scope=None):
+  """Distort the color of the image.
+
+  Each color distortion is non-commutative and thus ordering of the color ops
+  matters. Ideally we would randomly permute the ordering of the color ops.
+  Rather than adding that level of complication, we select a distinct ordering
+  of color ops for each preprocessing thread.
+
+  Args:
+    image: Tensor containing single image.
+    thread_id: preprocessing thread ID.
+    scope: Optional scope for op_scope.
+  Returns:
+    color-distorted image
+  """
+  with tf.op_scope([image], scope, 'distort_color'):
+    color_ordering = thread_id % 2
+
+    if color_ordering == 0:
+      image = tf.image.random_brightness(image, max_delta=32. / 255.)
+      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
+      image = tf.image.random_hue(image, max_delta=0.2)
+      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
+    elif color_ordering == 1:
+      image = tf.image.random_brightness(image, max_delta=32. / 255.)
+      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
+      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
+      image = tf.image.random_hue(image, max_delta=0.2)
+
+    # The random_* ops do not necessarily clamp.
+    image = tf.clip_by_value(image, 0.0, 1.0)
+    return image
+
+
+def distort_image(image, height, width, bbox, thread_id=0, scope=None):
+  """Distort one image for training a network.
+
+  Distorting images provides a useful technique for augmenting the data
+  set during training in order to make the network invariant to aspects
+  of the image that do not affect the label.
+
+  Args:
+    image: 3-D float Tensor of image
+    height: integer
+    width: integer
+    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
+      where each coordinate is [0, 1) and the coordinates are arranged
+      as [ymin, xmin, ymax, xmax].
+    thread_id: integer indicating the preprocessing thread.
+    scope: Optional scope for op_scope.
+  Returns:
+    3-D float Tensor of distorted image used for training.
+  """
+  with tf.op_scope([image, height, width, bbox], scope, 'distort_image'):
+    # Each bounding box has shape [1, num_boxes, box coords] and
+    # the coordinates are ordered [ymin, xmin, ymax, xmax].
+
+    # Display the bounding box in the first thread only.
+    if not thread_id:
+      image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
+                                                    bbox)
+      tf.image_summary('image_with_bounding_boxes', image_with_box)
+
+    # A large fraction of image datasets contain a human-annotated bounding
+    # box delineating the region of the image containing the object of
+    # interest. We choose to create a new bounding box for the object which
+    # is a randomly distorted version of the human-annotated bounding box
+    # that obeys an allowed range of aspect ratios, sizes and overlap with
+    # the human-annotated bounding box. If no box is supplied, then we assume
+    # the bounding box is the entire image.
+    sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
+        tf.shape(image),
+        bounding_boxes=bbox,
+        min_object_covered=0.1,
+        aspect_ratio_range=[0.75, 1.33],
+        area_range=[0.05, 1.0],
+        max_attempts=100,
+        use_image_if_no_bounding_boxes=True)
+    bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
+    if not thread_id:
+      image_with_distorted_box = tf.image.draw_bounding_boxes(
+          tf.expand_dims(image, 0), distort_bbox)
+      tf.image_summary('images_with_distorted_bounding_box',
+                       image_with_distorted_box)
+
+    # Crop the image to the specified bounding box.
+    distorted_image = tf.slice(image, bbox_begin, bbox_size)
+
+    # This resizing operation may distort the images because the aspect
+    # ratio is not respected. We select a resize method in a round robin
+    # fashion based on the thread number.
+    # Note that ResizeMethod contains 4 enumerated resizing methods.
+    resize_method = thread_id % 4
+    distorted_image = tf.image.resize_images(distorted_image, height, width,
+                                             resize_method)
+    # Restore the shape since the dynamic slice based upon the bbox_size loses
+    # the third dimension.
+    distorted_image.set_shape([height, width, 3])
+    if not thread_id:
+      tf.image_summary('cropped_resized_image',
+                       tf.expand_dims(distorted_image, 0))
+
+    # Randomly flip the image horizontally.
+    distorted_image = tf.image.random_flip_left_right(distorted_image)
+
+    # Randomly distort the colors.
+    distorted_image = distort_color(distorted_image, thread_id)
+
+    if not thread_id:
+      tf.image_summary('final_distorted_image',
+                       tf.expand_dims(distorted_image, 0))
+    return distorted_image
+
+
+def eval_image(image, height, width, scope=None):
+  """Prepare one image for evaluation.
+
+  Args:
+    image: 3-D float Tensor
+    height: integer
+    width: integer
+    scope: Optional scope for op_scope.
+  Returns:
+    3-D float Tensor of prepared image.
+  """
+  with tf.op_scope([image, height, width], scope, 'eval_image'):
+    # Crop the central region of the image with an area containing 87.5% of
+    # the original image.
+    image = tf.image.central_crop(image, central_fraction=0.875)
+
+    # Resize the image to the original height and width.
+    image = tf.expand_dims(image, 0)
+    image = tf.image.resize_bilinear(image, [height, width],
+                                     align_corners=False)
+    image = tf.squeeze(image, [0])
+    return image
+
+
+def image_preprocessing(image_buffer, bbox, train, thread_id=0):
+  """Decode and preprocess one image for evaluation or training.
+
+  Args:
+    image_buffer: JPEG encoded string Tensor
+    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
+      where each coordinate is [0, 1) and the coordinates are arranged as
+      [ymin, xmin, ymax, xmax].
+    train: boolean
+    thread_id: integer indicating preprocessing thread
+
+  Returns:
+    3-D float Tensor containing an appropriately scaled image
+
+  Raises:
+    ValueError: if user does not provide bounding box
+  """
+  if bbox is None:
+    raise ValueError('Please supply a bounding box.')
+
+  image = decode_jpeg(image_buffer)
+  height = FLAGS.image_size
+  width = FLAGS.image_size
+
+  if train:
+    image = distort_image(image, height, width, bbox, thread_id)
+  else:
+    image = eval_image(image, height, width)
+
+  # Finally, rescale to [-1,1] instead of [0, 1)
+  image = tf.sub(image, 0.5)
+  image = tf.mul(image, 2.0)
+  return image
+
+
+def parse_example_proto(example_serialized):
+  """Parses an Example proto containing a training example of an image.
+
+  The output of the build_image_data.py image preprocessing script is a dataset
+  containing serialized Example protocol buffers. Each Example proto contains
+  the following fields:
+
+    image/height: 462
+    image/width: 581
+    image/colorspace: 'RGB'
+    image/channels: 3
+    image/class/label: 615
+    image/class/synset: 'n03623198'
+    image/class/text: 'knee pad'
+    image/object/bbox/xmin: 0.1
+    image/object/bbox/xmax: 0.9
+    image/object/bbox/ymin: 0.2
+    image/object/bbox/ymax: 0.6
+    image/object/bbox/label: 615
+    image/format: 'JPEG'
+    image/filename: 'ILSVRC2012_val_00041207.JPEG'
+    image/encoded: <JPEG encoded string>
+
+  Args:
+    example_serialized: scalar Tensor tf.string containing a serialized
+      Example protocol buffer.
+
+  Returns:
+    image_buffer: Tensor tf.string containing the contents of a JPEG file.
+    label: Tensor tf.int32 containing the label.
+    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
+      where each coordinate is [0, 1) and the coordinates are arranged as
+      [ymin, xmin, ymax, xmax].
+    text: Tensor tf.string containing the human-readable label.
+  """
+  # Dense features in Example proto.
+  feature_map = {
+      'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
+                                          default_value=''),
+      'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
+                                              default_value=-1),
+      'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
+                                             default_value=''),
+  }
+  sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
+  # Sparse features in Example proto.
+  feature_map.update(
+      {k: sparse_float32 for k in ['image/object/bbox/xmin',
+                                   'image/object/bbox/ymin',
+                                   'image/object/bbox/xmax',
+                                   'image/object/bbox/ymax']})
+
+  features = tf.parse_single_example(example_serialized, feature_map)
+  label = tf.cast(features['image/class/label'], dtype=tf.int32)
+
+  xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
+  ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
+  xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
+  ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
+
+  # Note that we impose an ordering of (y, x) to be consistent with the
+  # TensorFlow bounding box ops, which expect [ymin, xmin, ymax, xmax].
+  bbox = tf.concat(0, [ymin, xmin, ymax, xmax])
+
+  # Force the variable number of bounding boxes into the shape
+  # [1, num_boxes, coords].
+  bbox = tf.expand_dims(bbox, 0)
+  bbox = tf.transpose(bbox, [0, 2, 1])
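+  # Illustrative shape trace (comments only): with num_boxes = 2, each of
+  # ymin/xmin/ymax/xmax above has shape [1, 2]; the concat yields [4, 2],
+  # expand_dims yields [1, 4, 2], and the transpose yields [1, 2, 4], i.e.
+  # [1, num_boxes, coords] with coords ordered [ymin, xmin, ymax, xmax].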
+
+  return features['image/encoded'], label, bbox, features['image/class/text']
+
+
+def batch_inputs(dataset, batch_size, train, num_preprocess_threads=None):
+  """Contruct batches of training or evaluation examples from the image dataset.
+
+  Args:
+    dataset: instance of Dataset class specifying the dataset.
+      See dataset.py for details.
+    batch_size: integer
+    train: boolean
+    num_preprocess_threads: integer, total number of preprocessing threads
+
+  Returns:
+    images: 4-D float Tensor of a batch of images
+    labels: 1-D integer Tensor of [batch_size].
+
+  Raises:
+    ValueError: if data is not found
+  """
+  with tf.name_scope('batch_processing'):
+    data_files = dataset.data_files()
+    if data_files is None:
+      raise ValueError('No data files found for this dataset')
+    filename_queue = tf.train.string_input_producer(data_files, capacity=16)
+
+    if num_preprocess_threads is None:
+      num_preprocess_threads = FLAGS.num_preprocess_threads
+
+    if num_preprocess_threads % 4:
+      raise ValueError('Please make num_preprocess_threads a multiple '
+                       'of 4 (%d %% 4 != 0).' % num_preprocess_threads)
+    # Create a subgraph with its own reader (but sharing the
+    # filename_queue) for each preprocessing thread.
+    images_and_labels = []
+    for thread_id in range(num_preprocess_threads):
+      reader = dataset.reader()
+      _, example_serialized = reader.read(filename_queue)
+
+      # Parse a serialized Example proto to extract the image and metadata.
+      image_buffer, label_index, bbox, _ = parse_example_proto(
+          example_serialized)
+      image = image_preprocessing(image_buffer, bbox, train, thread_id)
+      images_and_labels.append([image, label_index])
+
+    # Approximate number of examples per shard.
+    examples_per_shard = 1024
+    # Size the random shuffle queue to balance between good global
+    # mixing (more examples) and memory use (fewer examples).
+    # 1 image uses 299*299*3*4 bytes = 1MB
+    # The default input_queue_memory_factor is 16 implying a shuffling queue
+    # size: examples_per_shard * 16 * 1MB = 17.6GB
+    min_queue_examples = examples_per_shard * FLAGS.input_queue_memory_factor
+
+    # Create a queue that produces the examples in batches after shuffling.
+    if train:
+      images, label_index_batch = tf.train.shuffle_batch_join(
+          images_and_labels,
+          batch_size=batch_size,
+          capacity=min_queue_examples + 3 * batch_size,
+          min_after_dequeue=min_queue_examples)
+    else:
+      images, label_index_batch = tf.train.batch_join(
+          images_and_labels,
+          batch_size=batch_size,
+          capacity=min_queue_examples + 3 * batch_size)
+
+    # Reshape images into these desired dimensions.
+    height = FLAGS.image_size
+    width = FLAGS.image_size
+    depth = 3
+
+    images = tf.cast(images, tf.float32)
+    images = tf.reshape(images, shape=[batch_size, height, width, depth])
+
+    # Display the training images in the visualizer.
+    tf.image_summary('images', images)
+
+    return images, tf.reshape(label_index_batch, [batch_size])

+ 59 - 0
inception/imagenet_data.py

@@ -0,0 +1,59 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Small library that points to the ImageNet data set.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+
+from inception.dataset import Dataset
+
+
+class ImagenetData(Dataset):
+  """ImageNet data set."""
+
+  def __init__(self, subset):
+    super(ImagenetData, self).__init__('ImageNet', subset)
+
+  def num_classes(self):
+    """Returns the number of classes in the data set."""
+    return 1000
+
+  def num_examples_per_epoch(self):
+    """Returns the number of examples in the data set."""
+    # Bounding box data consists of 615299 bounding boxes for 544546 images.
+    if self.subset == 'train':
+      return 1281167
+    if self.subset == 'validation':
+      return 50000
+
+  def download_message(self):
+    """Instruction to download and extract the tarball from Flowers website."""
+
+    print('Failed to find any ImageNet %s files'% self.subset)
+    print('')
+    print('If you have already downloaded and processed the data, then make '
+          'sure to set --data_dir to point to the directory containing the '
+          'sharded TFRecords.\n')
+    print('If you have not downloaded and prepared the ImageNet data in the '
+          'TFRecord format, you will need to do this at least once. This '
+          'process could take several hours depending on the speed of your '
+          'computer and network connection.\n')
+    print('Please see README.md for instructions on how to build '
+          'the ImageNet dataset using download_and_preprocess_imagenet.\n')
+    print('Note that the raw data size is 300 GB and the processed data size '
+          'is 150 GB. Please ensure you have at least 500GB disk space.')

+ 46 - 0
inception/imagenet_eval.py

@@ -0,0 +1,46 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""A binary to evaluate Inception on the flowers data set.
+
+Note that using the supplied pre-trained inception checkpoint, the eval should
+achieve:
+  precision @ 1 = 0.7874 recall @ 5 = 0.9436 [50000 examples]
+
+See the README.md for more details.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+import tensorflow as tf
+
+from inception import inception_eval
+from inception.imagenet_data import ImagenetData
+
+FLAGS = tf.app.flags.FLAGS
+
+
+def main(unused_argv=None):
+  dataset = ImagenetData(subset=FLAGS.subset)
+  assert dataset.data_files()
+  if tf.gfile.Exists(FLAGS.eval_dir):
+    tf.gfile.DeleteRecursively(FLAGS.eval_dir)
+  tf.gfile.MakeDirs(FLAGS.eval_dir)
+  inception_eval.evaluate(dataset)
+
+
+if __name__ == '__main__':
+  tf.app.run()

+ 41 - 0
inception/imagenet_train.py

@@ -0,0 +1,41 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""A binary to train Inception on the ImageNet data set.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+
+import tensorflow as tf
+
+from inception import inception_train
+from inception.imagenet_data import ImagenetData
+
+FLAGS = tf.app.flags.FLAGS
+
+
+def main(_):
+  dataset = ImagenetData(subset=FLAGS.subset)
+  assert dataset.data_files()
+  if tf.gfile.Exists(FLAGS.train_dir):
+    tf.gfile.DeleteRecursively(FLAGS.train_dir)
+  tf.gfile.MakeDirs(FLAGS.train_dir)
+  inception_train.train(dataset)
+
+
+if __name__ == '__main__':
+  tf.app.run()

+ 171 - 0
inception/inception_eval.py

@@ -0,0 +1,171 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""A library to evaluate Inception on a single GPU.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from datetime import datetime
+import math
+import os.path
+import time
+
+
+import numpy as np
+import tensorflow as tf
+
+from inception import image_processing
+from inception import inception_model as inception
+
+
+FLAGS = tf.app.flags.FLAGS
+
+tf.app.flags.DEFINE_string('eval_dir', '/tmp/imagenet_eval',
+                           """Directory where to write event logs.""")
+tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/imagenet_train',
+                           """Directory where to read model checkpoints.""")
+
+# Flags governing the frequency of the eval.
+tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5,
+                            """How often to run the eval.""")
+tf.app.flags.DEFINE_boolean('run_once', False,
+                            """Whether to run eval only once.""")
+
+# Flags governing the data used for the eval.
+tf.app.flags.DEFINE_integer('num_examples', 50000,
+                            """Number of examples to run. Note that the eval """
+                            """ImageNet dataset contains 50000 examples.""")
+tf.app.flags.DEFINE_string('subset', 'validation',
+                           """Either 'validation' or 'train'.""")
+
+
+def _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op):
+  """Runs Eval once.
+
+  Args:
+    saver: Saver.
+    summary_writer: Summary writer.
+    top_1_op: Top 1 op.
+    top_5_op: Top 5 op.
+    summary_op: Summary op.
+  """
+  with tf.Session() as sess:
+    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
+    if ckpt and ckpt.model_checkpoint_path:
+      if os.path.isabs(ckpt.model_checkpoint_path):
+        # Restores from checkpoint with absolute path.
+        saver.restore(sess, ckpt.model_checkpoint_path)
+      else:
+        # Restores from checkpoint with relative path.
+        saver.restore(sess, os.path.join(FLAGS.checkpoint_dir,
+                                         ckpt.model_checkpoint_path))
+
+      # Assuming model_checkpoint_path looks something like:
+      #   /my-favorite-path/imagenet_train/model.ckpt-0,
+      # extract global_step from it.
+      global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
+      print('Successfully loaded model from %s at step=%s.' %
+            (ckpt.model_checkpoint_path, global_step))
+    else:
+      print('No checkpoint file found')
+      return
+
+    # Start the queue runners.
+    coord = tf.train.Coordinator()
+    try:
+      threads = []
+      for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
+        threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
+                                         start=True))
+
+      num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
+      # Counts the number of correct predictions.
+      count_top_1 = 0.0
+      count_top_5 = 0.0
+      total_sample_count = num_iter * FLAGS.batch_size
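+      # Because num_iter is rounded up, the metrics below are averaged over
+      # num_iter * batch_size examples, which can slightly exceed
+      # FLAGS.num_examples when the two are not divisible. With the defaults
+      # (50000 examples, batch size 32), num_iter is 1563 and
+      # total_sample_count is 50016.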
+      step = 0
+
+      print('%s: starting evaluation on (%s).' % (datetime.now(), FLAGS.subset))
+      start_time = time.time()
+      while step < num_iter and not coord.should_stop():
+        top_1, top_5 = sess.run([top_1_op, top_5_op])
+        count_top_1 += np.sum(top_1)
+        count_top_5 += np.sum(top_5)
+        step += 1
+        if step % 20 == 0:
+          duration = time.time() - start_time
+          sec_per_batch = duration / 20.0
+          examples_per_sec = FLAGS.batch_size / sec_per_batch
+          print('%s: [%d batches out of %d] (%.1f examples/sec; %.3f '
+                'sec/batch)' % (datetime.now(), step, num_iter,
+                                examples_per_sec, sec_per_batch))
+          start_time = time.time()
+
+      # Compute precision @ 1.
+      precision_at_1 = count_top_1 / total_sample_count
+      recall_at_5 = count_top_5 / total_sample_count
+      print('%s: precision @ 1 = %.4f recall @ 5 = %.4f [%d examples]' %
+            (datetime.now(), precision_at_1, recall_at_5, total_sample_count))
+
+      summary = tf.Summary()
+      summary.ParseFromString(sess.run(summary_op))
+      summary.value.add(tag='Precision @ 1', simple_value=precision_at_1)
+      summary.value.add(tag='Recall @ 5', simple_value=recall_at_5)
+      summary_writer.add_summary(summary, global_step)
+
+    except Exception as e:  # pylint: disable=broad-except
+      coord.request_stop(e)
+
+    coord.request_stop()
+    coord.join(threads, stop_grace_period_secs=10)
+
+
+def evaluate(dataset):
+  """Evaluate model on Dataset for a number of steps."""
+  with tf.Graph().as_default():
+    # Get images and labels from the dataset.
+    images, labels = image_processing.inputs(dataset)
+
+    # Number of classes in the Dataset label set plus 1.
+    # Label 0 is reserved for an (unused) background class.
+    num_classes = dataset.num_classes() + 1
+
+    # Build a Graph that computes the logits predictions from the
+    # inference model.
+    logits, _ = inception.inference(images, num_classes)
+
+    # Calculate predictions.
+    top_1_op = tf.nn.in_top_k(logits, labels, 1)
+    top_5_op = tf.nn.in_top_k(logits, labels, 5)
+
+    # Restore the moving average version of the learned variables for eval.
+    variable_averages = tf.train.ExponentialMovingAverage(
+        inception.MOVING_AVERAGE_DECAY)
+    variables_to_restore = variable_averages.variables_to_restore()
+    saver = tf.train.Saver(variables_to_restore)
+
+    # Build the summary operation based on the TF collection of Summaries.
+    summary_op = tf.merge_all_summaries()
+
+    graph_def = tf.get_default_graph().as_graph_def()
+    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
+                                            graph_def=graph_def)
+
+    while True:
+      _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op)
+      if FLAGS.run_once:
+        break
+      time.sleep(FLAGS.eval_interval_secs)

+ 160 - 0
inception/inception_model.py

@@ -0,0 +1,160 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Build the Inception v3 network on ImageNet data set.
+
+The Inception v3 architecture is described in http://arxiv.org/abs/1512.00567
+
+Summary of available functions:
+ inference: Compute inference on the model inputs to make a prediction
+ loss: Compute the loss of the prediction with respect to the labels
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import re
+
+
+import tensorflow as tf
+
+from inception.slim import slim
+
+FLAGS = tf.app.flags.FLAGS
+
+# If a model is trained using multiple GPUs, prefix all Op names with tower_name
+# to differentiate the operations. Note that this prefix is removed from the
+# names of the summaries when visualizing a model.
+TOWER_NAME = 'tower'
+
+# Batch normalization. Constant governing the exponential moving average of
+# the 'global' mean and variance for all activations.
+BATCHNORM_MOVING_AVERAGE_DECAY = 0.9997
+
+# The decay to use for the moving average.
+MOVING_AVERAGE_DECAY = 0.9999
+
+
+def inference(images, num_classes, for_training=False, restore_logits=True,
+              scope=None):
+  """Build Inception v3 model architecture.
+
+  See here for reference: http://arxiv.org/abs/1512.00567
+
+  Args:
+    images: Images returned from inputs() or distorted_inputs().
+    num_classes: number of classes
+    for_training: If set to `True`, build the inference model for training.
+      Kernels that operate differently during training and inference,
+      e.g. dropout, are appropriately configured.
+    restore_logits: whether or not the logits layers should be restored.
+      Useful for fine-tuning a model with different num_classes.
+    scope: optional prefix string identifying the ImageNet tower.
+
+  Returns:
+    Logits. 2-D float Tensor.
+    Auxiliary Logits. 2-D float Tensor of side-head. Used for training only.
+  """
+  # Parameters for BatchNorm.
+  batch_norm_params = {
+      # Decay for the moving averages.
+      'decay': BATCHNORM_MOVING_AVERAGE_DECAY,
+      # epsilon to prevent 0s in variance.
+      'epsilon': 0.001,
+  }
+  # Set weight_decay for weights in Conv and FC layers.
+  with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
+    with slim.arg_scope([slim.ops.conv2d],
+                        stddev=0.1,
+                        activation=tf.nn.relu,
+                        batch_norm_params=batch_norm_params):
+      # Force all Variables to reside on the CPU.
+      with slim.arg_scope([slim.variables.variable], device='/cpu:0'):
+        logits, endpoints = slim.inception.inception_v3(
+            images,
+            dropout_keep_prob=0.8,
+            num_classes=num_classes,
+            is_training=for_training,
+            restore_logits=restore_logits,
+            scope=scope)
+
+  # Add summaries for viewing model statistics on TensorBoard.
+  _activation_summaries(endpoints)
+
+  # Grab the logits associated with the side head. Employed during training.
+  auxiliary_logits = endpoints['aux_logits']
+
+  return logits, auxiliary_logits
+
+
+def loss(logits, labels, batch_size=None):
+  """Adds all losses for the model.
+
+  Note the final loss is not returned. Instead, the losses are collected
+  by slim.losses. They are accumulated in _tower_loss() and summed to
+  calculate the total loss.
+
+  Args:
+    logits: List of logits from inference(). Each entry is a 2-D float Tensor.
+    labels: Labels from distorted_inputs or inputs(). 1-D tensor
+            of shape [batch_size]
+    batch_size: integer
+  """
+  if not batch_size:
+    batch_size = FLAGS.batch_size
+
+  # Reshape the labels into a dense Tensor of
+  # shape [FLAGS.batch_size, num_classes].
+  sparse_labels = tf.reshape(labels, [batch_size, 1])
+  indices = tf.reshape(tf.range(batch_size), [batch_size, 1])
+  concated = tf.concat(1, [indices, sparse_labels])
+  num_classes = logits[0].get_shape()[-1].value
+  dense_labels = tf.sparse_to_dense(concated,
+                                    [batch_size, num_classes],
+                                    1.0, 0.0)
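+  # Worked example (comments only): with batch_size=2, num_classes=3 and
+  # labels=[2, 0], `concated` is [[0, 2], [1, 0]] and `dense_labels` is the
+  # one-hot matrix [[0., 0., 1.], [1., 0., 0.]].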
+
+  # Cross entropy loss for the main softmax prediction.
+  slim.losses.cross_entropy_loss(logits[0],
+                                 dense_labels,
+                                 label_smoothing=0.1,
+                                 weight=1.0)
+
+  # Cross entropy loss for the auxiliary softmax head.
+  slim.losses.cross_entropy_loss(logits[1],
+                                 dense_labels,
+                                 label_smoothing=0.1,
+                                 weight=0.4,
+                                 scope='aux_loss')
+
+
+def _activation_summary(x):
+  """Helper to create summaries for activations.
+
+  Creates a summary that provides a histogram of activations.
+  Creates a summary that measure the sparsity of activations.
+
+  Args:
+    x: Tensor
+  """
+  # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
+  # session. This helps the clarity of presentation on tensorboard.
+  tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
+  tf.histogram_summary(tensor_name + '/activations', x)
+  tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
+
+
+def _activation_summaries(endpoints):
+  with tf.name_scope('summaries'):
+    for act in endpoints.values():
+      _activation_summary(act)

+ 351 - 0
inception/inception_train.py

@@ -0,0 +1,351 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""A library to train Inception using multiple GPU's with synchronous updates.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import copy
+from datetime import datetime
+import os.path
+import re
+import time
+
+
+
+import numpy as np
+import tensorflow as tf
+
+from inception import image_processing
+from inception import inception_model as inception
+from inception.slim import slim
+
+FLAGS = tf.app.flags.FLAGS
+
+tf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train',
+                           """Directory where to write event logs """
+                           """and checkpoint.""")
+tf.app.flags.DEFINE_integer('max_steps', 10000000,
+                            """Number of batches to run.""")
+tf.app.flags.DEFINE_string('subset', 'train',
+                           """Either 'train' or 'validation'.""")
+
+# Flags governing the hardware employed for running TensorFlow.
+tf.app.flags.DEFINE_integer('num_gpus', 1,
+                            """How many GPUs to use.""")
+tf.app.flags.DEFINE_boolean('log_device_placement', False,
+                            """Whether to log device placement.""")
+
+# Flags governing the type of training.
+tf.app.flags.DEFINE_boolean('fine_tune', False,
+                            """If set, randomly initialize the final layer """
+                            """of weights in order to train the network on a """
+                            """new task.""")
+tf.app.flags.DEFINE_string('pretrained_model_checkpoint_path', '',
+                           """If specified, restore this pretrained model """
+                           """before beginning any training.""")
+
+# **IMPORTANT**
+# Please note that this learning rate schedule is heavily dependent on the
+# hardware architecture, batch size and any changes to the model architecture
+# specification. Selecting a finely tuned learning rate schedule is an
+# empirical process that requires some experimentation. Please see README.md
+# for more guidance and discussion.
+#
+# With 8 Tesla K40s and a batch size of 256, the following setup achieves
+# precision@1 = 73.5% after 100 hours and 100K steps (20 epochs).
+# Learning rate decay factor selected from http://arxiv.org/abs/1404.5997.
+tf.app.flags.DEFINE_float('initial_learning_rate', 0.1,
+                          """Initial learning rate.""")
+tf.app.flags.DEFINE_float('num_epochs_per_decay', 30.0,
+                          """Epochs after which learning rate decays.""")
+tf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.16,
+                          """Learning rate decay factor.""")
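+
+# For example (illustrative arithmetic only): with the batch size of 256 used
+# in the setup above and the 1,281,167 ImageNet training images, one epoch is
+# roughly 5,000 steps, so the 100K steps quoted above correspond to ~20 epochs
+# and the learning rate is multiplied by 0.16 every ~150K steps (30 epochs).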
+
+# Constants dictating the learning rate schedule.
+RMSPROP_DECAY = 0.9                # Decay term for RMSProp.
+RMSPROP_MOMENTUM = 0.9             # Momentum in RMSProp.
+RMSPROP_EPSILON = 1.0              # Epsilon term for RMSProp.
+
+
+def _tower_loss(images, labels, num_classes, scope):
+  """Calculate the total loss on a single tower running the ImageNet model.
+
+  We perform 'batch splitting'. This means that we cut up a batch across
+  multiple GPUs. For instance, if the batch size = 32 and num_gpus = 2,
+  then each tower will operate on a batch of 16 images.
+
+  Args:
+    images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
+                                       FLAGS.image_size, 3].
+    labels: 1-D integer Tensor of [batch_size].
+    num_classes: number of classes
+    scope: unique prefix string identifying the ImageNet tower, e.g.
+      'tower_0'.
+
+  Returns:
+     Tensor of shape [] containing the total loss for a batch of data
+  """
+  # When fine-tuning a model, we do not restore the logits but instead we
+  # randomly initialize the logits. The number of classes in the output of the
+  # logits is the number of classes in the specified Dataset.
+  restore_logits = not FLAGS.fine_tune
+
+  # Build inference Graph.
+  logits = inception.inference(images, num_classes, for_training=True,
+                               restore_logits=restore_logits,
+                               scope=scope)
+
+  # Build the portion of the Graph calculating the losses. Note that we will
+  # assemble the total_loss using a custom function below.
+  split_batch_size = images.get_shape().as_list()[0]
+  inception.loss(logits, labels, batch_size=split_batch_size)
+
+  # Assemble all of the losses for the current tower only.
+  losses = tf.get_collection(slim.losses.LOSSES_COLLECTION, scope)
+
+  # Calculate the total loss for the current tower.
+  regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
+  total_loss = tf.add_n(losses + regularization_losses, name='total_loss')
+
+  # Compute the moving average of all individual losses and the total loss.
+  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
+  loss_averages_op = loss_averages.apply(losses + [total_loss])
+
+  # Attach a scalar summary to all individual losses and the total loss; do the
+  # same for the averaged version of the losses.
+  for l in losses + [total_loss]:
+    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
+    # session. This helps the clarity of presentation on TensorBoard.
+    loss_name = re.sub('%s_[0-9]*/' % inception.TOWER_NAME, '', l.op.name)
+    # Name each loss as '(raw)' and name the moving average version of the loss
+    # as the original loss name.
+    tf.scalar_summary(loss_name +' (raw)', l)
+    tf.scalar_summary(loss_name, loss_averages.average(l))
+
+  with tf.control_dependencies([loss_averages_op]):
+    total_loss = tf.identity(total_loss)
+  return total_loss
+
+
+def _average_gradients(tower_grads):
+  """Calculate the average gradient for each shared variable across all towers.
+
+  Note that this function provides a synchronization point across all towers.
+
+  Args:
+    tower_grads: List of lists of (gradient, variable) tuples. The outer list
+      is over individual gradients. The inner list is over the gradient
+      calculation for each tower.
+  Returns:
+     List of pairs of (gradient, variable) where the gradient has been averaged
+     across all towers.
+  """
+  average_grads = []
+  for grad_and_vars in zip(*tower_grads):
+    # Note that each grad_and_vars looks like the following:
+    #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
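+    # For example (comments only), with two towers and two variables:
+    #   tower_grads = [[(grad0_gpu0, var0), (grad1_gpu0, var1)],
+    #                  [(grad0_gpu1, var0), (grad1_gpu1, var1)]]
+    # so the first grad_and_vars is ((grad0_gpu0, var0), (grad0_gpu1, var0))
+    # and the second is ((grad1_gpu0, var1), (grad1_gpu1, var1)).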
+    grads = []
+    for g, _ in grad_and_vars:
+      # Add 0 dimension to the gradients to represent the tower.
+      expanded_g = tf.expand_dims(g, 0)
+
+      # Append on a 'tower' dimension which we will average over below.
+      grads.append(expanded_g)
+
+    # Average over the 'tower' dimension.
+    grad = tf.concat(0, grads)
+    grad = tf.reduce_mean(grad, 0)
+
+    # Keep in mind that the Variables are redundant because they are shared
+    # across towers. So .. we will just return the first tower's pointer to
+    # the Variable.
+    v = grad_and_vars[0][1]
+    grad_and_var = (grad, v)
+    average_grads.append(grad_and_var)
+  return average_grads
+
+
+def train(dataset):
+  """Train on dataset for a number of steps."""
+  with tf.Graph().as_default(), tf.device('/cpu:0'):
+    # Create a variable to count the number of train() calls. This equals the
+    # number of batches processed * FLAGS.num_gpus.
+    global_step = tf.get_variable(
+        'global_step', [],
+        initializer=tf.constant_initializer(0), trainable=False)
+
+    # Calculate the learning rate schedule.
+    num_batches_per_epoch = (dataset.num_examples_per_epoch() /
+                             FLAGS.batch_size)
+    decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay)
+
+    # Decay the learning rate exponentially based on the number of steps.
+    lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
+                                    global_step,
+                                    decay_steps,
+                                    FLAGS.learning_rate_decay_factor,
+                                    staircase=True)
+
+    # Create an optimizer that performs gradient descent.
+    opt = tf.train.RMSPropOptimizer(lr, RMSPROP_DECAY,
+                                    momentum=RMSPROP_MOMENTUM,
+                                    epsilon=RMSPROP_EPSILON)
+
+    # Get images and labels for ImageNet and split the batch across GPUs.
+    assert FLAGS.batch_size % FLAGS.num_gpus == 0, (
+        'Batch size must be divisible by number of GPUs')
+    split_batch_size = int(FLAGS.batch_size / FLAGS.num_gpus)
+
+    # Override the number of preprocessing threads to account for the increased
+    # number of GPU towers.
+    num_preprocess_threads = FLAGS.num_preprocess_threads * FLAGS.num_gpus
+    images, labels = image_processing.distorted_inputs(
+        dataset,
+        batch_size=split_batch_size,
+        num_preprocess_threads=num_preprocess_threads)
+
+    input_summaries = copy.copy(tf.get_collection(tf.GraphKeys.SUMMARIES))
+
+    # Number of classes in the Dataset label set plus 1.
+    # Label 0 is reserved for an (unused) background class.
+    num_classes = dataset.num_classes() + 1
+
+    # Calculate the gradients for each model tower.
+    tower_grads = []
+    for i in xrange(FLAGS.num_gpus):
+      with tf.device('/gpu:%d' % i):
+        with tf.name_scope('%s_%d' % (inception.TOWER_NAME, i)) as scope:
+          # Calculate the loss for one tower of the ImageNet model. This
+          # function constructs the entire ImageNet model but shares the
+          # variables across all towers.
+          loss = _tower_loss(images, labels, num_classes, scope)
+
+          # Reuse variables for the next tower.
+          tf.get_variable_scope().reuse_variables()
+
+          # Retain the summaries from the final tower.
+          summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
+
+          # Retain the Batch Normalization update operations only from the
+          # final tower. Ideally, we should grab the updates from all towers
+          # but these stats accumulate extremely fast so we can ignore the
+          # stats from the other towers without significant detriment.
+          batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION,
+                                                scope)
+
+          # Calculate the gradients for the batch of data on this ImageNet
+          # tower.
+          grads = opt.compute_gradients(loss)
+
+          # Keep track of the gradients across all towers.
+          tower_grads.append(grads)
+
+    # We must calculate the mean of each gradient. Note that this is the
+    # synchronization point across all towers.
+    grads = _average_gradients(tower_grads)
+
+    # Add summaries for the input processing and global_step.
+    summaries.extend(input_summaries)
+
+    # Add a summary to track the learning rate.
+    summaries.append(tf.scalar_summary('learning_rate', lr))
+
+    # Add histograms for gradients.
+    for grad, var in grads:
+      if grad is not None:
+        summaries.append(
+            tf.histogram_summary(var.op.name + '/gradients', grad))
+
+    # Apply the gradients to adjust the shared variables.
+    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
+
+    # Add histograms for trainable variables.
+    for var in tf.trainable_variables():
+      summaries.append(tf.histogram_summary(var.op.name, var))
+
+    # Track the moving averages of all trainable variables.
+    # Note that we maintain a "double-average" of the BatchNormalization
+    # global statistics. This is more complicated than need be but we employ
+    # this for backward-compatibility with our previous models.
+    variable_averages = tf.train.ExponentialMovingAverage(
+        inception.MOVING_AVERAGE_DECAY, global_step)
+
+    # Another possibility is to use tf.slim.get_variables().
+    variables_to_average = (tf.trainable_variables() +
+                            tf.moving_average_variables())
+    variables_averages_op = variable_averages.apply(variables_to_average)
+
+    # Group all updates into a single train op.
+    batchnorm_updates_op = tf.group(*batchnorm_updates)
+    train_op = tf.group(apply_gradient_op, variables_averages_op,
+                        batchnorm_updates_op)
+
+    # Create a saver.
+    saver = tf.train.Saver(tf.all_variables())
+
+    # Build the summary operation from the last tower summaries.
+    summary_op = tf.merge_summary(summaries)
+
+    # Build an initialization operation to run below.
+    init = tf.initialize_all_variables()
+
+    # Start running operations on the Graph. allow_soft_placement must be set to
+    # True to build towers on GPU, as some of the ops do not have GPU
+    # implementations.
+    sess = tf.Session(config=tf.ConfigProto(
+        allow_soft_placement=True,
+        log_device_placement=FLAGS.log_device_placement))
+    sess.run(init)
+
+    if FLAGS.pretrained_model_checkpoint_path:
+      assert tf.gfile.Exists(FLAGS.pretrained_model_checkpoint_path)
+      variables_to_restore = tf.get_collection(
+          slim.variables.VARIABLES_TO_RESTORE)
+      restorer = tf.train.Saver(variables_to_restore)
+      restorer.restore(sess, FLAGS.pretrained_model_checkpoint_path)
+      print('%s: Pre-trained model restored from %s' %
+            (datetime.now(), FLAGS.pretrained_model_checkpoint_path))
+
+    # Start the queue runners.
+    tf.train.start_queue_runners(sess=sess)
+
+    summary_writer = tf.train.SummaryWriter(
+        FLAGS.train_dir,
+        graph_def=sess.graph.as_graph_def(add_shapes=True))
+
+    for step in xrange(FLAGS.max_steps):
+      start_time = time.time()
+      _, loss_value = sess.run([train_op, loss])
+      duration = time.time() - start_time
+
+      assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
+
+      if step % 10 == 0:
+        examples_per_sec = FLAGS.batch_size / float(duration)
+        format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
+                      'sec/batch)')
+        print(format_str % (datetime.now(), step, loss_value,
+                            examples_per_sec, duration))
+
+      if step % 100 == 0:
+        summary_str = sess.run(summary_op)
+        summary_writer.add_summary(summary_str, step)
+
+      # Save the model checkpoint periodically.
+      if step % 5000 == 0 or (step + 1) == FLAGS.max_steps:
+        checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
+        saver.save(sess, checkpoint_path, global_step=step)

+ 112 - 0
inception/slim/BUILD

@@ -0,0 +1,112 @@
+# Description:
+#   Contains the operations and nets for building TensorFlow-Slim models.
+
+package(default_visibility = ["//inception:internal"])
+
+licenses(["notice"])  # Apache 2.0
+
+exports_files(["LICENSE"])
+
+py_library(
+    name = "scopes",
+    srcs = ["scopes.py"],
+    deps = [
+        "@tf//tensorflow:tensorflow_py",
+    ],
+)
+
+py_test(
+    name = "scopes_test",
+    size = "small",
+    srcs = ["scopes_test.py"],
+    deps = [
+        ":scopes",
+    ],
+)
+
+py_library(
+    name = "variables",
+    srcs = ["variables.py"],
+    deps = [
+        "@tf//tensorflow:tensorflow_py",
+        ":scopes",
+    ],
+)
+
+py_test(
+    name = "variables_test",
+    size = "small",
+    srcs = ["variables_test.py"],
+    deps = [
+        ":variables",
+    ],
+)
+
+py_library(
+    name = "losses",
+    srcs = ["losses.py"],
+    deps = [
+        "@tf//tensorflow:tensorflow_py",
+    ],
+)
+
+py_test(
+    name = "losses_test",
+    size = "small",
+    srcs = ["losses_test.py"],
+    deps = [
+        ":losses",
+    ],
+)
+
+py_library(
+    name = "ops",
+    srcs = ["ops.py"],
+    deps = [
+        "@tf//tensorflow:tensorflow_py",
+        ":losses",
+        ":scopes",
+        ":variables",
+    ],
+)
+
+py_test(
+    name = "ops_test",
+    size = "small",
+    srcs = ["ops_test.py"],
+    deps = [
+        ":ops",
+        ":variables",
+    ],
+)
+
+py_library(
+    name = "inception",
+    srcs = ["inception_model.py"],
+    deps = [
+        "@tf//tensorflow:tensorflow_py",
+        ":ops",
+        ":scopes",
+    ],
+)
+
+py_test(
+    name = "inception_test",
+    size = "medium",
+    srcs = ["inception_test.py"],
+    deps = [
+        ":inception",
+    ],
+)
+
+py_library(
+    name = "slim",
+    srcs = ["slim.py"],
+    deps = [
+        ":inception",
+        ":losses",
+        ":ops",
+        ":scopes",
+        ":variables",
+    ],
+)

+ 650 - 0
inception/slim/README.md

@@ -0,0 +1,650 @@
+# TensorFlow-Slim
+
+TF-Slim is a lightweight library for defining, training and evaluating models
+in TensorFlow. It enables defining complex networks quickly and concisely while
+keeping a model's architecture transparent and its hyperparameters explicit.
+
+
+[TOC]
+
+## Teaser
+
+As a demonstration of the simplicity of using TF-Slim, compare the code needed
+to define the entire
+[VGG](http://www.robots.ox.ac.uk/~vgg/research/very_deep/) network using TF-Slim
+with the lengthy and verbose code needed to define just the first three layers
+(out of 16) using native TensorFlow:
+
+```python{.good}
+# VGG16 in TF-Slim.
+def vgg16(inputs):
+  with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], stddev=0.01, weight_decay=0.0005):
+    net = slim.ops.repeat_op(2, inputs, slim.ops.conv2d, 64, [3, 3], scope='conv1')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool1')
+    net = slim.ops.repeat_op(2, net, slim.ops.conv2d, 128, [3, 3], scope='conv2')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool2')
+    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 256, [3, 3], scope='conv3')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool3')
+    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv4')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool4')
+    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv5')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool5')
+    net = slim.ops.flatten(net, scope='flatten5')
+    net = slim.ops.fc(net, 4096, scope='fc6')
+    net = slim.ops.dropout(net, 0.5, scope='dropout6')
+    net = slim.ops.fc(net, 4096, scope='fc7')
+    net = slim.ops.dropout(net, 0.5, scope='dropout7')
+    net = slim.ops.fc(net, 1000, activation=None, scope='fc8')
+  return net
+```
+
+```python{.bad}
+# Layers 1-3 (out of 16) of VGG16 in native tensorflow.
+def vgg16(inputs):
+  with tf.name_scope('conv1_1') as scope:
+    kernel = tf.Variable(tf.truncated_normal([3, 3, 3, 64], dtype=tf.float32, stddev=1e-1), name='weights')
+    conv = tf.nn.conv2d(inputs, kernel, [1, 1, 1, 1], padding='SAME')
+    biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases')
+    bias = tf.nn.bias_add(conv, biases)
+    conv1 = tf.nn.relu(bias, name=scope)
+  with tf.name_scope('conv1_2') as scope:
+    kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 64], dtype=tf.float32, stddev=1e-1), name='weights')
+    conv = tf.nn.conv2d(conv1, kernel, [1, 1, 1, 1], padding='SAME')
+    biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases')
+    bias = tf.nn.bias_add(conv, biases)
+    conv1 = tf.nn.relu(bias, name=scope)
+  with tf.name_scope('pool1'):
+    pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool1')
+```
+
+## Why TF-Slim?
+
+TF-Slim offers several advantages over just the built-in tensorflow libraries:
+
+* Allows one to define models much more compactly by eliminating
+boilerplate code. This is accomplished through the use of
+[argument scoping](scopes.py)
+and numerous high level
+[operations](ops.py).
+These tools increase readability and maintainability, reduce the likelihood
+of an error from copy-and-pasting hyperparameter values and simplify
+hyperparameter tuning.
+* Makes developing models simple by providing commonly used
+[loss functions](losses.py)
+* Provides a concise
+[definition](./inception_model.py)
+of [Inception v3](http://arxiv.org/abs/1512.00567) network architecture
+ready to be used out-of-the-box or subsumed into new models.
+
+Additionally TF-Slim was designed with several principles in mind:
+
+* The various modules of TF-Slim (scopes, variables, ops, losses) are
+independent. This flexibility allows users to pick and choose
+components of TF-Slim completely à la carte.
+* TF-Slim is written using a Functional Programming style. That means it's
+super-lightweight and can be used right alongside any of TensorFlow's native
+operations (see the sketch after this list).
+* Makes re-using network architectures easy. This allows users to build new
+networks on top of existing ones as well as fine-tuning pre-trained models on
+new tasks.
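+
+As a minimal sketch of mixing the two (assuming this repository's layout, where
+the library is importable as `from inception.slim import slim`), a TF-Slim
+layer can feed directly into a plain TensorFlow op:
+
+```python
+import tensorflow as tf
+
+from inception.slim import slim
+
+# A TF-Slim layer followed by a native TensorFlow op; the two mix freely.
+images = tf.placeholder(tf.float32, [None, 224, 224, 3])
+net = slim.ops.conv2d(images, 64, [3, 3], scope='conv1')
+net = tf.reduce_mean(net, [1, 2])  # plain TensorFlow op (global average pool)
+```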
+
+## What are the various components of TF-Slim?
+
+TF-Slim is composed of several parts which were designed to exist
+independently (a minimal import sketch follows the list below). These include:
+
+* [scopes.py](./scopes.py):
+provides a new scope named `arg_scope` that allows a user to define default
+arguments for specific operations within that scope.
+* [variables.py](./variables.py):
+provides convenience wrappers for variable creation and manipulation.
+* [ops.py](./ops.py):
+provides high level operations for building models using tensorflow.
+* [losses.py](./losses.py):
+contains commonly used loss functions.
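+
+Because the modules are independent, one can import only the pieces that are
+needed. A minimal sketch, assuming the layout of this repository:
+
+```python
+# Import TF-Slim components a la carte (sketch; module paths as in this repo).
+from inception.slim import losses
+from inception.slim import ops
+from inception.slim import scopes
+from inception.slim import variables
+```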
+
+## Defining Models
+
+Models can be succinctly defined using TF-Slim by combining its variables,
+operations and scopes. Each of these elements is defined below.
+
+### Variables
+
+Creating
+[`Variables`](https://www.tensorflow.org/how_tos/variables/index.html)
+in native tensorflow requires either a predefined value or an initialization
+mechanism (e.g. randomly sampling from a normal distribution). Furthermore,
+if a variable needs to be created
+on a specific device, such as a GPU, the specification must be
+[made explicit](https://www.tensorflow.org/how_tos/using_gpu/index.html).
+To alleviate the code required for variable creation, TF-Slim provides a set
+of thin wrapper functions in [variables.py](./variables.py)
+which allow callers to easily define variables.
+
+For example, to create a `weight` variable, initialize it using a truncated
+normal distribution, regularize it with an `l2_loss` and place it on the `CPU`,
+one need only declare the following:
+
+```python
+weights = variables.variable('weights',
+                             shape=[10, 10, 3, 3],
+                             initializer=tf.truncated_normal_initializer(stddev=0.1),
+                             regularizer=lambda t: losses.l2_loss(t, weight=0.05),
+                             device='/cpu:0')
+```
+
+In addition to the functionality provided by `tf.Variable`, `slim.variables`
+keeps track of the variables created by `slim.ops` to define a model, which
+allows one to distinguish variables that belong to the model versus other
+variables.
+
+```python
+# Get all the variables defined by the model.
+model_variables = slim.variables.get_variables()
+
+# Get all the variables with the same given name, i.e. 'weights', 'biases'.
+weights = slim.variables.get_variables_by_name('weights')
+biases = slim.variables.get_variables_by_name('biases')
+
+# Get all the variables in VARIABLES_TO_RESTORE collection.
+variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE)
+```
+
+### Operations (Layers)
+
+While the set of TensorFlow operations is quite extensive, builders of
+neural networks typically think of models in terms of "layers". A layer,
+such as a Convolutional Layer, a Fully Connected Layer or a BatchNorm Layer,
+is more abstract than a single TensorFlow operation and typically involves
+several such operations. For example, a Convolutional Layer in a neural
+network is built using several steps:
+
+1. Creating the weight variables
+2. Creating the bias variables
+3. Convolving the weights with the input from the previous layer
+4. Adding the biases to the result of the convolution.
+5. Applying an activation function to the output.
+
+In Python code this can be rather laborious:
+
+
+```python
+input = ...
+with tf.name_scope('conv1_1') as scope:
+  kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32,
+                                           stddev=1e-1), name='weights')
+  conv = tf.nn.conv2d(input, kernel, [1, 1, 1, 1], padding='SAME')
+  biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),
+                       trainable=True, name='biases')
+  bias = tf.nn.bias_add(conv, biases)
+  conv1 = tf.nn.relu(bias, name=scope)
+```
+
+To alleviate the need to duplicate this code repeatedly, TF-Slim provides a
+number of convenient operations defined at the (more abstract) level of
+neural network layers. For example, compare the code above to an invocation
+of the TF-Slim code:
+
+```python
+input = ...
+net = slim.ops.conv2d(input, 128, [3, 3], scope='conv1_1')
+```
+
+TF-Slim provides numerous operations used in building neural networks which
+roughly correspond to such layers. These include:
+
+Layer | TF-Slim Op
+------- | --------
+Convolutional Layer | [ops.conv2d](ops.py)
+Fully Connected Layer | [ops.fc](ops.py)
+BatchNorm layer | [ops.batch_norm](ops.py)
+Max Pooling Layer | [ops.max_pool](ops.py)
+Avg Pooling Layer | [ops.avg_pool](ops.py)
+Dropout Layer | [ops.dropout](ops.py)
+
+[ops.py](./ops.py)
+also includes operations that are not really "layers" per se, but are
+often used to manipulate hidden unit representations during inference:
+
+Operation | TF-Slim Op
+------- | --------
+Flatten | [ops.flatten](ops.py)
+
+TF-Slim also provides a meta-operation called `repeat_op` that allows one to
+repeatedly perform the same operation. Consider the following snippet from the
+[VGG](https://www.robots.ox.ac.uk/~vgg/research/very_deep/) network whose layers
+perform several convolutions in a row between pooling layers:
+
+```python
+net = ...
+net = slim.ops.conv2d(net, 256, [3, 3], scope='conv3_1')
+net = slim.ops.conv2d(net, 256, [3, 3], scope='conv3_2')
+net = slim.ops.conv2d(net, 256, [3, 3], scope='conv3_3')
+net = slim.ops.max_pool(net, [2, 2], scope='pool3')
+```
+
+This clear duplication of code can be removed via a standard loop:
+
+```python
+net = ...
+for i in range(3):
+  net = slim.ops.conv2d(net, 256, [3, 3], scope='conv3_%d' % (i + 1))
+net = slim.ops.max_pool(net, [2, 2], scope='pool3')
+```
+
+While this does reduce the amount of duplication, it can be made even cleaner
+by using `repeat_op`:
+
+```python
+net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 256, [3, 3], scope='conv3')
+net = slim.ops.max_pool(net, [2, 2], scope='pool3')
+```
+
+Notice that `repeat_op` not only applies the same arguments in-line, it is
+also smart enough to unroll the scopes: the scope assigned to each subsequent
+call of `ops.conv2d` is appended with an underscore and the iteration number.
+More concretely, the scopes in the example above would be 'conv3_1',
+'conv3_2' and 'conv3_3'.
+
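+For intuition, the behavior described above can be approximated in a few
+lines. The sketch below is illustrative only; see [ops.py](./ops.py) for the
+actual `repeat_op` implementation:
+
+```python
+# Illustrative sketch of a repeat-style helper (not the real slim.ops.repeat_op):
+# apply `op` `repetitions` times, threading the output through and numbering
+# the scopes 'scope_1', 'scope_2', ...
+def repeat_sketch(repetitions, inputs, op, *args, **kwargs):
+  scope = kwargs.pop('scope')  # a scope name is required in this sketch
+  net = inputs
+  for i in range(repetitions):
+    net = op(net, *args, scope='%s_%d' % (scope, i + 1), **kwargs)
+  return net
+```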
+
+### Scopes
+
+In addition to the types of scope mechanisms in TensorFlow
+([name_scope](https://www.tensorflow.org/api_docs/python/framework.html#name_scope),
+[op_scope](https://www.tensorflow.org/api_docs/python/framework.html#op_scope),
+[variable_scope](https://www.tensorflow.org/api_docs/python/state_ops.html#variable_scope),
+[variable_op_scope](https://www.tensorflow.org/api_docs/python/state_ops.html#variable_op_scope)),
+TF-Slim adds a new scoping mechanism called "argument scope" or
+[arg_scope](scopes.py).
+This new scope allows a user to specify one or more operations and a set of
+arguments which will be passed to each of the operations defined in the
+`arg_scope`. This functionality is best illustrated by example. Consider the
+following code snippet:
+
+
+```python
+net = slim.ops.conv2d(inputs, 64, [11, 11], 4, padding='SAME', stddev=0.01, weight_decay=0.0005, scope='conv1')
+net = slim.ops.conv2d(net, 128, [11, 11], padding='VALID', stddev=0.01, weight_decay=0.0005, scope='conv2')
+net = slim.ops.conv2d(net, 256, [11, 11], padding='SAME', stddev=0.01, weight_decay=0.0005, scope='conv3')
+```
+
+It should be clear that these three convolution layers share many of the same
+hyperparameters. Two have the same padding; all three have the same
+weight_decay and weight standard deviation. Not only do the duplicated values
+make the code more difficult to read, they also place an additional burden on
+the writer, who needs to double-check that all of the values are identical in
+each step. One solution would be to specify default values using variables:
+
+```python
+padding='SAME'
+stddev=0.01
+weight_decay=0.0005
+net = slim.ops.conv2d(inputs, 64, [11, 11], 4, padding=padding, stddev=stddev, weight_decay=weight_decay, scope='conv1')
+net = slim.ops.conv2d(net, 128, [11, 11], padding='VALID', stddev=stddev, weight_decay=weight_decay, scope='conv2')
+net = slim.ops.conv2d(net, 256, [11, 11], padding=padding, stddev=stddev, weight_decay=weight_decay, scope='conv3')
+
+```
+
+This solution ensures that all three convolutions share exactly the same
+hyperparameter values but doesn't reduce the code clutter. By using an
+`arg_scope`, we can both ensure that each layer uses the same values and
+simplify the code:
+
+```python
+  with slim.arg_scope([slim.ops.conv2d], padding='SAME', stddev=0.01, weight_decay=0.0005):
+    net = slim.ops.conv2d(inputs, 64, [11, 11], scope='conv1')
+    net = slim.ops.conv2d(net, 128, [11, 11], padding='VALID', scope='conv2')
+    net = slim.ops.conv2d(net, 256, [11, 11], scope='conv3')
+```
+
+As the example illustrates, the use of arg_scope makes the code cleaner,
+simpler and easier to maintain. Notice that while argument values are specified
+in the arg_scope, they can be overridden locally. In particular, while
+the padding argument has been set to 'SAME', the second convolution overrides
+it with the value 'VALID'.
+
+One can also nest `arg_scope`s and use multiple operations in the same scope.
+For example:
+
+```python
+with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], stddev=0.01, weight_decay=0.0005):
+  with slim.arg_scope([slim.ops.conv2d], padding='SAME'), slim.arg_scope([slim.ops.fc], bias=1.0):
+    net = slim.ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
+    net = slim.ops.conv2d(net, 256, [5, 5], stddev=0.03, scope='conv2')
+    net = slim.ops.flatten(net)
+    net = slim.ops.fc(net, 1000, activation=None, scope='fc')
+```
+
+In this example, the first `arg_scope` applies the same `stddev` and `weight_decay`
+arguments to the `conv2d` and `fc` ops in its scope. In the second `arg_scope`,
+additional defaults are specified, namely `padding` for `conv2d` and `bias`
+for `fc`.
+
+In addition to `arg_scope`, TF-Slim provides several decorators that wrap the
+use of TensorFlow scopes. These include `@AddArgScope`, `@AddNameScope`,
+`@AddVariableScope`, `@AddOpScope` and `@AddVariableOpScope`. To illustrate
+their use, consider the following example.
+
+```python
+def MyNewOp(inputs):
+  varA = ...
+  varB = ...
+  outputs = tf.mul(varA, inputs) + varB
+  return outputs
+
+```
+
+In this example, the user has created a new op which creates two variables. To
+ensure that these variables exist within a certain variable scope (to avoid
+collisions with other variables of the same name), in standard TF, the op must
+be called within a variable scope:
+
+```python
+inputs = ...
+with tf.variable_scope('layer1'):
+  outputs = MyNewOp(inputs)
+```
+
+As an alternative, one can use TF-Slim's decorators to decorate the function
+and simplify the call:
+
+```python
+@AddVariableScope
+def MyNewOp(inputs):
+  ...
+  return outputs
+
+
+inputs = ...
+outputs = MyNewOp('layer1')
+```
+
+The `@AddVariableScope` decorator simply applies `tf.variable_scope` scoping
+to the decorated function, here taking "layer1" as the scope name. This allows
+the code to be written more concisely.
+
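+For intuition only, the wrapping mechanism can be sketched as below. This is
+not the actual TF-Slim decorator (which may, for instance, derive the scope
+name differently); it merely illustrates how a decorator can run a function's
+body inside a `tf.variable_scope`:
+
+```python
+# Illustrative sketch, not the TF-Slim implementation: wrap a function so
+# that its variables are created inside a variable scope named after it.
+import functools
+import tensorflow as tf
+
+def add_variable_scope(func):
+  @functools.wraps(func)
+  def wrapper(*args, **kwargs):
+    with tf.variable_scope(func.__name__):
+      return func(*args, **kwargs)
+  return wrapper
+```
+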
+### Losses
+
+The loss function defines a quantity that we want to minimize. For
+classification problems, this is typically the cross entropy between the true
+(one-hot) distribution and the predicted probability distribution across
+classes. For regression problems, this is often the sum-of-squares difference
+between the predicted and true values.
+
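+For the classification case, this maps directly onto the `cross_entropy_loss`
+wrapper in [losses.py](./losses.py). A minimal sketch, where the logits and
+one-hot labels are placeholders:
+
+```python
+# Sketch: classification loss via the cross_entropy_loss wrapper from losses.py.
+logits = ...          # [batch_size, num_classes] outputs of the network.
+one_hot_labels = ...  # [batch_size, num_classes] one-hot targets.
+loss = slim.losses.cross_entropy_loss(logits, one_hot_labels)
+```
+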
+Certain models, such as multi-task
+learning models, require the use of multiple loss functions simultaneously. In
+other words, the loss function ultimately being minimized is the sum of various
+other loss functions. For example, consider a model that predicts both
+the type of scene in an image and the depth of each pixel from the
+camera. This model's loss function would be the sum of the
+classification loss and the depth prediction loss.
+
+TF-Slim provides an easy-to-use mechanism for defining and keeping track of
+loss functions via the
+[losses.py](./losses.py)
+module. Consider the simple case where we want to train the VGG network:
+
+
+```python
+# Load the images and labels.
+images, labels = ...
+
+# Create the model.
+predictions =  ...
+
+# Define the loss functions and get the total loss.
+loss = losses.ClassificationLoss(predictions, labels)
+```
+
+In this example, we start by creating the model (using TF-Slim's VGG
+implementation) and adding the standard classification loss. Now, let's turn
+to the case where we have a multi-task model that produces multiple outputs:
+
+
+```python
+# Load the images and labels.
+images, scene_labels, depth_labels = ...
+
+# Create the model.
+scene_predictions, depth_predictions = CreateMultiTaskModel(images)
+
+# Define the loss functions and get the total loss.
+classification_loss = slim.losses.ClassificationLoss(scene_predictions, scene_labels)
+sum_of_squares_loss = slim.losses.SumOfSquaresLoss(depth_predictions, depth_labels)
+
+# The following two lines have the same effect (assuming no other losses
+# have been added to the losses collection):
+total_loss1 = classification_loss + sum_of_squares_loss
+total_loss2 = tf.add_n(tf.get_collection(slim.losses.LOSSES_COLLECTION))
+```
+
+In this example, we have two losses which we create by calling
+`losses.ClassificationLoss` and `losses.SumOfSquaresLoss`. We can obtain the
+total loss either by adding them together (`total_loss1`) or by summing the
+contents of the losses collection (`total_loss2`). How does this work?
+When you create a loss via TF-Slim, TF-Slim adds the loss to a
+special TensorFlow collection of losses. This enables you to either
+manage the total loss manually, or let TF-Slim manage it for you.
+
+What if you want to let TF-Slim manage the losses for you but have a custom loss
+function?
+[losses.py](./losses.py)
+exposes the key of TF-Slim's losses collection (`LOSSES_COLLECTION`), so a
+custom loss can be added to the same collection. For example:
+
+
+```python
+# Load the images and labels.
+images, scene_labels, depth_labels, pose_labels = ...
+
+# Create the model.
+scene_predictions, depth_predictions, pose_predictions = CreateMultiTaskModel(images)
+
+# Define the loss functions and get the total loss.
+classification_loss = slim.losses.ClassificationLoss(scene_predictions, scene_labels)
+sum_of_squares_loss = slim.losses.SumOfSquaresLoss(depth_predictions, depth_labels)
+pose_loss = MyCustomLossFunction(pose_predictions, pose_labels)
+tf.add_to_collection(slim.losses.LOSSES_COLLECTION, pose_loss) # Letting TF-Slim know about the additional loss.
+
+# The following two lines have the same effect (assuming no other losses
+# have been added to the losses collection):
+total_loss1 = classification_loss + sum_of_squares_loss + pose_loss
+total_loss2 = tf.add_n(tf.get_collection(slim.losses.LOSSES_COLLECTION))
+```
+
+In this example, we can again either compute the total loss manually or let
+TF-Slim know about the additional loss and have it keep track of all losses
+for us.
+
+
+## Putting the Pieces Together
+
+By combining TF-Slim Variables, Operations and Scopes, we can write what is
+normally a very complex network in very few lines of code. For example, the
+entire [VGG16](https://www.robots.ox.ac.uk/~vgg/research/very_deep/)
+architecture can be defined with just the following snippet:
+
+```python
+def vgg16(inputs):
+  with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], stddev=0.01, weight_decay=0.0005):
+    net = slim.ops.repeat_op(2, inputs, slim.ops.conv2d, 64, [3, 3], scope='conv1')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool1')
+    net = slim.ops.repeat_op(2, net, slim.ops.conv2d, 128, [3, 3], scope='conv2')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool2')
+    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 256, [3, 3], scope='conv3')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool3')
+    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv4')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool4')
+    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv5')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool5')
+    net = slim.ops.flatten(net, scope='flatten5')
+    net = slim.ops.fc(net, 4096, scope='fc6')
+    net = slim.ops.dropout(net, 0.5, scope='dropout6')
+    net = slim.ops.fc(net, 4096, scope='fc7')
+    net = slim.ops.dropout(net, 0.5, scope='dropout7')
+    net = slim.ops.fc(net, 1000, activation=None, scope='fc8')
+  return net
+```
+
+## Re-using previously defined network architectures and pre-trained models.
+
+### Brief Recap on Restoring Variables from a Checkpoint
+
+After a model has been trained, it can be restored using `tf.train.Saver()`
+which restores `Variables` from a given checkpoint. For many cases,
+`tf.train.Saver()` provides a simple mechanism to restore all or just a
+few variables.
+
+```python
+# Create some variables.
+v1 = tf.Variable(..., name="v1")
+v2 = tf.Variable(..., name="v2")
+...
+# Add ops to restore all the variables.
+restorer = tf.train.Saver()
+
+# Add ops to restore some variables.
+restorer = tf.train.Saver([v1, v2])
+
+# Later, launch the model, use the saver to restore variables from disk, and
+# do some work with the model.
+with tf.Session() as sess:
+  # Restore variables from disk.
+  restorer.restore(sess, "/tmp/model.ckpt")
+  print("Model restored.")
+  # Do some work with the model
+  ...
+```
+
+See [Restoring Variables](https://www.tensorflow.org/versions/r0.7/how_tos/variables/index.html#restoring-variables)
+and
+[Choosing which Variables to Save and Restore](https://www.tensorflow.org/versions/r0.7/how_tos/variables/index.html#choosing-which-variables-to-save-and-restore)
+sections of the [Variables](https://www.tensorflow.org/versions/r0.7/how_tos/variables/index.html)
+page for more details.
+
+### Using slim.variables to Track which Variables need to be Restored
+
+It is often desirable to fine-tune a pre-trained model on an entirely new
+dataset or even a new task. In these situations, one must specify which layers
+of the model should be reused (and consequently loaded from a checkpoint)
+and which layers are new. Indicating which variables or layers should be
+restored is a process that quickly becomes cumbersome when done manually.
+
+To help keep track of which variables to restore, `slim.variables` provides a
+`restore` argument when creating each Variable. By default, all variables are
+marked as `restore=True`, which results in all variables defined by the model
+being restored.
+
+```python
+# Create some variables.
+v1 = slim.variables.variable(name="v1", ..., restore=False)
+v2 = slim.variables.variable(name="v2", ...) # By default restore=True
+...
+# Get list of variables to restore (which contains only 'v2')
+variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE)
+restorer = tf.train.Saver(variables_to_restore)
+with tf.Session() as sess:
+  # Restore variables from disk.
+  restorer.restore(sess, "/tmp/model.ckpt")
+  print("Model restored.")
+  # Do some work with the model
+  ...
+```
+
+Additionally, every layer in `slim.ops` that creates slim.variables (such as
+`slim.ops.conv2d`, `slim.ops.fc`, `slim.ops.batch_norm`) also has a `restore`
+argument which controls whether the variables created by that layer should be
+restored or not.
+
+
+```python
+# Create a small network.
+net = slim.ops.conv2d(images, 32, [7, 7], stride=2, scope='conv1')
+net = slim.ops.conv2d(net, 64, [3, 3], scope='conv2')
+net = slim.ops.conv2d(net, 128, [3, 3], scope='conv3')
+net = slim.ops.max_pool(net, [3, 3], stride=2, scope='pool3')
+net = slim.ops.flatten(net)
+net = slim.ops.fc(net, 10, scope='logits', restore=False)
+...
+
+# VARIABLES_TO_RESTORE would contain the 'weights' and 'bias' defined by 'conv1',
+# 'conv2' and 'conv3', but not the ones defined by 'logits'.
+variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE)
+
+# Create a restorer that would restore only the needed variables.
+restorer = tf.train.Saver(variables_to_restore)
+
+# Create a saver that would save all the variables (including 'logits').
+saver = tf.train.Saver()
+with tf.Session() as sess:
+  # Restore variables from disk.
+  restorer.restore(sess, "/tmp/model.ckpt")
+  print("Model restored.")
+
+  # Do some work with the model
+  ...
+  saver.save(sess, "/tmp/new_model.ckpt")
+```
+
+Note: When restoring variables from a checkpoint, the `Saver` locates the
+variable names in a checkpoint file and maps them to variables in the current
+graph. Above, we created a saver by passing to it a list of variables. In this
+case, the names of the variables to locate in the checkpoint file were
+implicitly obtained from each provided variable's `var.op.name`.
+
+This works well when the variable names in the checkpoint file match those in
+the graph. However, sometimes we want to restore a model from a checkpoint
+whose variables have different names than those in the current graph. In this
+case, we must provide the `Saver` with a dictionary that maps each checkpoint
+variable name to a graph variable. Consider the following example, where the
+checkpoint variable names are obtained via a simple function:
+
+```python
+# Assuming that 'conv1/weights' should be restored from 'vgg16/conv1/weights'
+def name_in_checkpoint(var):
+  return 'vgg16/' + var.op.name
+
+# Assuming that 'conv1/weights' and 'conv1/bias' should be restored from 'conv1/params1' and 'conv1/params2'
+def name_in_checkpoint(var):
+  if "weights" in var.op.name:
+    return var.op.name.replace("weights", "params1")
+  if "bias" in var.op.name:
+    return var.op.name.replace("bias", "params2")
+
+variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE)
+variables_to_restore = {name_in_checkpoint(var):var for var in variables_to_restore}
+restorer = tf.train.Saver(variables_to_restore)
+with tf.Session() as sess:
+  # Restore variables from disk.
+  restorer.restore(sess, "/tmp/model.ckpt")
+```
+
+### Reusing the VGG16 network defined in TF-Slim on a different task, e.g. PASCAL-VOC.
+
+Assuming one already has a pre-trained VGG16 model, one need only replace
+the last layer `fc8` with a new layer `fc8_pascal` and set `restore=False` on it.
+
+```python
+def vgg16_pascal(inputs):
+  with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], stddev=0.01, weight_decay=0.0005):
+    net = slim.ops.repeat_op(2, inputs, slim.ops.conv2d, 64, [3, 3], scope='conv1')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool1')
+    net = slim.ops.repeat_op(2, net, slim.ops.conv2d, 128, [3, 3], scope='conv2')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool2')
+    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 256, [3, 3], scope='conv3')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool3')
+    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv4')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool4')
+    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv5')
+    net = slim.ops.max_pool(net, [2, 2], scope='pool5')
+    net = slim.ops.flatten(net, scope='flatten5')
+    net = slim.ops.fc(net, 4096, scope='fc6')
+    net = slim.ops.dropout(net, 0.5, scope='dropout6')
+    net = slim.ops.fc(net, 4096, scope='fc7')
+    net = slim.ops.dropout(net, 0.5, scope='dropout7')
+    # To reuse vgg16 on PASCAL-VOC, just change the last layer.
+    net = slim.ops.fc(net, 21, activation=None, scope='fc8_pascal', restore=False)
+  return net
+```
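+
+A sketch of the fine-tuning setup that goes with this definition. The input
+pipeline, loss and checkpoint path below are placeholders:
+
+```python
+# Sketch: restore every pre-trained VGG16 layer except 'fc8_pascal', then
+# fine-tune on PASCAL-VOC. "/tmp/vgg16.ckpt" is a hypothetical checkpoint path.
+images, labels = ...  # PASCAL-VOC input pipeline (placeholder).
+predictions = vgg16_pascal(images)
+
+variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE)
+restorer = tf.train.Saver(variables_to_restore)
+
+with tf.Session() as sess:
+  sess.run(tf.initialize_all_variables())
+  # Overwrite the initialized values with the pre-trained VGG16 weights.
+  restorer.restore(sess, "/tmp/vgg16.ckpt")
+  # Fine-tune on PASCAL-VOC from here.
+  ...
+```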
+
+## Authors
+
+Sergio Guadarrama and Nathan Silberman

+ 329 - 0
inception/slim/inception_model.py

@@ -0,0 +1,329 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Inception-v3 expressed in TensorFlow-Slim.
+
+  Usage:
+
+  # Parameters for BatchNorm.
+  batch_norm_params = {
+      # Decay for the batch_norm moving averages.
+      'decay': BATCHNORM_MOVING_AVERAGE_DECAY,
+      # epsilon to prevent 0s in variance.
+      'epsilon': 0.001,
+  }
+  # Set weight_decay for weights in Conv and FC layers.
+  with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
+    with slim.arg_scope([slim.ops.conv2d],
+                        stddev=0.1,
+                        activation=tf.nn.relu,
+                        batch_norm_params=batch_norm_params):
+      # Force all Variables to reside on the CPU.
+      with slim.arg_scope([slim.variables.variable], device='/cpu:0'):
+        logits, endpoints = slim.inception.inception_v3(
+            images,
+            dropout_keep_prob=0.8,
+            num_classes=num_classes,
+            is_training=for_training,
+            restore_logits=restore_logits,
+            scope=scope)
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+import tensorflow as tf
+
+from inception.slim import ops
+from inception.slim import scopes
+
+
+def inception_v3(inputs,
+                 dropout_keep_prob=0.8,
+                 num_classes=1000,
+                 is_training=True,
+                 restore_logits=True,
+                 scope=''):
+  """Latest Inception from http://arxiv.org/abs/1512.00567.
+
+    "Rethinking the Inception Architecture for Computer Vision"
+
+    Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
+    Zbigniew Wojna
+
+  Args:
+    inputs: a tensor of size [batch_size, height, width, channels].
+    dropout_keep_prob: dropout keep_prob.
+    num_classes: number of predicted classes.
+    is_training: whether the model is being trained or not.
+    restore_logits: whether or not the logits layers should be restored.
+      Useful for fine-tuning a model with different num_classes.
+    scope: Optional scope for op_scope.
+
+  Returns:
+    the 'logits' Tensor and a dictionary of end_points, which includes
+    (among others) the 'aux_logits' and 'predictions' Tensors.
+  """
+  # end_points will collect relevant activations for external use, for example
+  # summaries or losses.
+  end_points = {}
+  with tf.op_scope([inputs], scope, 'inception_v3'):
+    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
+                          is_training=is_training):
+      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
+                            stride=1, padding='VALID'):
+        # 299 x 299 x 3
+        end_points['conv0'] = ops.conv2d(inputs, 32, [3, 3], stride=2,
+                                         scope='conv0')
+        # 149 x 149 x 32
+        end_points['conv1'] = ops.conv2d(end_points['conv0'], 32, [3, 3],
+                                         scope='conv1')
+        # 147 x 147 x 32
+        end_points['conv2'] = ops.conv2d(end_points['conv1'], 64, [3, 3],
+                                         padding='SAME', scope='conv2')
+        # 147 x 147 x 64
+        end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
+                                           stride=2, scope='pool1')
+        # 73 x 73 x 64
+        end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1],
+                                         scope='conv3')
+        # 73 x 73 x 80.
+        end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3],
+                                         scope='conv4')
+        # 71 x 71 x 192.
+        end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
+                                           stride=2, scope='pool2')
+        # 35 x 35 x 192.
+        net = end_points['pool2']
+      # Inception blocks
+      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
+                            stride=1, padding='SAME'):
+        # mixed: 35 x 35 x 256.
+        with tf.variable_scope('mixed_35x35x256a'):
+          with tf.variable_scope('branch1x1'):
+            branch1x1 = ops.conv2d(net, 64, [1, 1])
+          with tf.variable_scope('branch5x5'):
+            branch5x5 = ops.conv2d(net, 48, [1, 1])
+            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
+          with tf.variable_scope('branch3x3dbl'):
+            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
+            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
+            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
+          with tf.variable_scope('branch_pool'):
+            branch_pool = ops.avg_pool(net, [3, 3])
+            branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
+          net = tf.concat(3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
+          end_points['mixed_35x35x256a'] = net
+        # mixed_1: 35 x 35 x 288.
+        with tf.variable_scope('mixed_35x35x288a'):
+          with tf.variable_scope('branch1x1'):
+            branch1x1 = ops.conv2d(net, 64, [1, 1])
+          with tf.variable_scope('branch5x5'):
+            branch5x5 = ops.conv2d(net, 48, [1, 1])
+            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
+          with tf.variable_scope('branch3x3dbl'):
+            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
+            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
+            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
+          with tf.variable_scope('branch_pool'):
+            branch_pool = ops.avg_pool(net, [3, 3])
+            branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
+          net = tf.concat(3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
+          end_points['mixed_35x35x288a'] = net
+        # mixed_2: 35 x 35 x 288.
+        with tf.variable_scope('mixed_35x35x288b'):
+          with tf.variable_scope('branch1x1'):
+            branch1x1 = ops.conv2d(net, 64, [1, 1])
+          with tf.variable_scope('branch5x5'):
+            branch5x5 = ops.conv2d(net, 48, [1, 1])
+            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
+          with tf.variable_scope('branch3x3dbl'):
+            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
+            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
+            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
+          with tf.variable_scope('branch_pool'):
+            branch_pool = ops.avg_pool(net, [3, 3])
+            branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
+          net = tf.concat(3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
+          end_points['mixed_35x35x288b'] = net
+        # mixed_3: 17 x 17 x 768.
+        with tf.variable_scope('mixed_17x17x768a'):
+          with tf.variable_scope('branch3x3'):
+            branch3x3 = ops.conv2d(net, 384, [3, 3], stride=2, padding='VALID')
+          with tf.variable_scope('branch3x3dbl'):
+            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
+            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
+            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3],
+                                      stride=2, padding='VALID')
+          with tf.variable_scope('branch_pool'):
+            branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
+          net = tf.concat(3, [branch3x3, branch3x3dbl, branch_pool])
+          end_points['mixed_17x17x768a'] = net
+        # mixed4: 17 x 17 x 768.
+        with tf.variable_scope('mixed_17x17x768b'):
+          with tf.variable_scope('branch1x1'):
+            branch1x1 = ops.conv2d(net, 192, [1, 1])
+          with tf.variable_scope('branch7x7'):
+            branch7x7 = ops.conv2d(net, 128, [1, 1])
+            branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
+            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
+          with tf.variable_scope('branch7x7dbl'):
+            branch7x7dbl = ops.conv2d(net, 128, [1, 1])
+            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
+            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
+            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
+            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
+          with tf.variable_scope('branch_pool'):
+            branch_pool = ops.avg_pool(net, [3, 3])
+            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
+          net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
+          end_points['mixed_17x17x768b'] = net
+        # mixed_5: 17 x 17 x 768.
+        with tf.variable_scope('mixed_17x17x768c'):
+          with tf.variable_scope('branch1x1'):
+            branch1x1 = ops.conv2d(net, 192, [1, 1])
+          with tf.variable_scope('branch7x7'):
+            branch7x7 = ops.conv2d(net, 160, [1, 1])
+            branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
+            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
+          with tf.variable_scope('branch7x7dbl'):
+            branch7x7dbl = ops.conv2d(net, 160, [1, 1])
+            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
+            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
+            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
+            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
+          with tf.variable_scope('branch_pool'):
+            branch_pool = ops.avg_pool(net, [3, 3])
+            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
+          net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
+          end_points['mixed_17x17x768c'] = net
+        # mixed_6: 17 x 17 x 768.
+        with tf.variable_scope('mixed_17x17x768d'):
+          with tf.variable_scope('branch1x1'):
+            branch1x1 = ops.conv2d(net, 192, [1, 1])
+          with tf.variable_scope('branch7x7'):
+            branch7x7 = ops.conv2d(net, 160, [1, 1])
+            branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
+            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
+          with tf.variable_scope('branch7x7dbl'):
+            branch7x7dbl = ops.conv2d(net, 160, [1, 1])
+            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
+            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
+            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
+            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
+          with tf.variable_scope('branch_pool'):
+            branch_pool = ops.avg_pool(net, [3, 3])
+            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
+          net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
+          end_points['mixed_17x17x768d'] = net
+        # mixed_7: 17 x 17 x 768.
+        with tf.variable_scope('mixed_17x17x768e'):
+          with tf.variable_scope('branch1x1'):
+            branch1x1 = ops.conv2d(net, 192, [1, 1])
+          with tf.variable_scope('branch7x7'):
+            branch7x7 = ops.conv2d(net, 192, [1, 1])
+            branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
+            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
+          with tf.variable_scope('branch7x7dbl'):
+            branch7x7dbl = ops.conv2d(net, 192, [1, 1])
+            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
+            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
+            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
+            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
+          with tf.variable_scope('branch_pool'):
+            branch_pool = ops.avg_pool(net, [3, 3])
+            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
+          net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
+          end_points['mixed_17x17x768e'] = net
+        # Auxiliary Head logits
+        aux_logits = tf.identity(end_points['mixed_17x17x768e'])
+        with tf.variable_scope('aux_logits'):
+          aux_logits = ops.avg_pool(aux_logits, [5, 5], stride=3,
+                                    padding='VALID')
+          aux_logits = ops.conv2d(aux_logits, 128, [1, 1], scope='proj')
+          # Shape of feature map before the final layer.
+          shape = aux_logits.get_shape()
+          aux_logits = ops.conv2d(aux_logits, 768, shape[1:3], stddev=0.01,
+                                  padding='VALID')
+          aux_logits = ops.flatten(aux_logits)
+          aux_logits = ops.fc(aux_logits, num_classes, activation=None,
+                              stddev=0.001, restore=restore_logits)
+          end_points['aux_logits'] = aux_logits
+        # mixed_8: 17 x 17 x 1280.
+        with tf.variable_scope('mixed_17x17x1280a'):
+          with tf.variable_scope('branch3x3'):
+            branch3x3 = ops.conv2d(net, 192, [1, 1])
+            branch3x3 = ops.conv2d(branch3x3, 320, [3, 3], stride=2,
+                                   padding='VALID')
+          with tf.variable_scope('branch7x7x3'):
+            branch7x7x3 = ops.conv2d(net, 192, [1, 1])
+            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
+            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
+            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [3, 3],
+                                     stride=2, padding='VALID')
+          with tf.variable_scope('branch_pool'):
+            branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
+          net = tf.concat(3, [branch3x3, branch7x7x3, branch_pool])
+          end_points['mixed_17x17x1280a'] = net
+        # mixed_9: 8 x 8 x 2048.
+        with tf.variable_scope('mixed_8x8x2048a'):
+          with tf.variable_scope('branch1x1'):
+            branch1x1 = ops.conv2d(net, 320, [1, 1])
+          with tf.variable_scope('branch3x3'):
+            branch3x3 = ops.conv2d(net, 384, [1, 1])
+            branch3x3 = tf.concat(3, [ops.conv2d(branch3x3, 384, [1, 3]),
+                                      ops.conv2d(branch3x3, 384, [3, 1])])
+          with tf.variable_scope('branch3x3dbl'):
+            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
+            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
+            branch3x3dbl = tf.concat(3, [ops.conv2d(branch3x3dbl, 384, [1, 3]),
+                                         ops.conv2d(branch3x3dbl, 384, [3, 1])])
+          with tf.variable_scope('branch_pool'):
+            branch_pool = ops.avg_pool(net, [3, 3])
+            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
+          net = tf.concat(3, [branch1x1, branch3x3, branch3x3dbl, branch_pool])
+          end_points['mixed_8x8x2048a'] = net
+        # mixed_10: 8 x 8 x 2048.
+        with tf.variable_scope('mixed_8x8x2048b'):
+          with tf.variable_scope('branch1x1'):
+            branch1x1 = ops.conv2d(net, 320, [1, 1])
+          with tf.variable_scope('branch3x3'):
+            branch3x3 = ops.conv2d(net, 384, [1, 1])
+            branch3x3 = tf.concat(3, [ops.conv2d(branch3x3, 384, [1, 3]),
+                                      ops.conv2d(branch3x3, 384, [3, 1])])
+          with tf.variable_scope('branch3x3dbl'):
+            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
+            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
+            branch3x3dbl = tf.concat(3, [ops.conv2d(branch3x3dbl, 384, [1, 3]),
+                                         ops.conv2d(branch3x3dbl, 384, [3, 1])])
+          with tf.variable_scope('branch_pool'):
+            branch_pool = ops.avg_pool(net, [3, 3])
+            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
+          net = tf.concat(3, [branch1x1, branch3x3, branch3x3dbl, branch_pool])
+          end_points['mixed_8x8x2048b'] = net
+        # Final pooling and prediction
+        with tf.variable_scope('logits'):
+          shape = net.get_shape()
+          net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool')
+          # 1 x 1 x 2048
+          net = ops.dropout(net, dropout_keep_prob, scope='dropout')
+          net = ops.flatten(net, scope='flatten')
+          # 2048
+          logits = ops.fc(net, num_classes, activation=None, scope='logits',
+                          restore=restore_logits)
+          # 1000
+          end_points['logits'] = logits
+          end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
+      return logits, end_points
+

+ 119 - 0
inception/slim/inception_test.py

@@ -0,0 +1,119 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for slim.inception."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+import tensorflow as tf
+
+from inception.slim import inception_model as inception
+
+
+class InceptionTest(tf.test.TestCase):
+
+  def testBuildLogits(self):
+    batch_size = 5
+    height, width = 299, 299
+    num_classes = 1000
+    with self.test_session():
+      inputs = tf.random_uniform((batch_size, height, width, 3))
+      logits, _ = inception.inception_v3(inputs, num_classes)
+      self.assertTrue(logits.op.name.startswith('logits'))
+      self.assertListEqual(logits.get_shape().as_list(),
+                           [batch_size, num_classes])
+
+  def testBuildEndPoints(self):
+    batch_size = 5
+    height, width = 299, 299
+    num_classes = 1000
+    with self.test_session():
+      inputs = tf.random_uniform((batch_size, height, width, 3))
+      _, end_points = inception.inception_v3(inputs, num_classes)
+      self.assertTrue('logits' in end_points)
+      logits = end_points['logits']
+      self.assertListEqual(logits.get_shape().as_list(),
+                           [batch_size, num_classes])
+      self.assertTrue('aux_logits' in end_points)
+      aux_logits = end_points['aux_logits']
+      self.assertListEqual(aux_logits.get_shape().as_list(),
+                           [batch_size, num_classes])
+      pre_pool = end_points['mixed_8x8x2048b']
+      self.assertListEqual(pre_pool.get_shape().as_list(),
+                           [batch_size, 8, 8, 2048])
+
+  def testHalfSizeImages(self):
+    batch_size = 5
+    height, width = 150, 150
+    num_classes = 1000
+    with self.test_session():
+      inputs = tf.random_uniform((batch_size, height, width, 3))
+      logits, end_points = inception.inception_v3(inputs, num_classes)
+      self.assertTrue(logits.op.name.startswith('logits'))
+      self.assertListEqual(logits.get_shape().as_list(),
+                           [batch_size, num_classes])
+      pre_pool = end_points['mixed_8x8x2048b']
+      self.assertListEqual(pre_pool.get_shape().as_list(),
+                           [batch_size, 3, 3, 2048])
+
+  def testUnknownBatchSize(self):
+    batch_size = 1
+    height, width = 299, 299
+    num_classes = 1000
+    with self.test_session() as sess:
+      inputs = tf.placeholder(tf.float32, (None, height, width, 3))
+      logits, _ = inception.inception_v3(inputs, num_classes)
+      self.assertTrue(logits.op.name.startswith('logits'))
+      self.assertListEqual(logits.get_shape().as_list(),
+                           [None, num_classes])
+      images = tf.random_uniform((batch_size, height, width, 3))
+      sess.run(tf.initialize_all_variables())
+      output = sess.run(logits, {inputs: images.eval()})
+      self.assertEquals(output.shape, (batch_size, num_classes))
+
+  def testEvaluation(self):
+    batch_size = 2
+    height, width = 299, 299
+    num_classes = 1000
+    with self.test_session() as sess:
+      eval_inputs = tf.random_uniform((batch_size, height, width, 3))
+      logits, _ = inception.inception_v3(eval_inputs, num_classes,
+                                         is_training=False)
+      predictions = tf.argmax(logits, 1)
+      sess.run(tf.initialize_all_variables())
+      output = sess.run(predictions)
+      self.assertEquals(output.shape, (batch_size,))
+
+  def testTrainEvalWithReuse(self):
+    train_batch_size = 5
+    eval_batch_size = 2
+    height, width = 150, 150
+    num_classes = 1000
+    with self.test_session() as sess:
+      train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
+      inception.inception_v3(train_inputs, num_classes)
+      tf.get_variable_scope().reuse_variables()
+      eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
+      logits, _ = inception.inception_v3(eval_inputs, num_classes,
+                                         is_training=False)
+      predictions = tf.argmax(logits, 1)
+      sess.run(tf.initialize_all_variables())
+      output = sess.run(predictions)
+      self.assertEquals(output.shape, (eval_batch_size,))
+
+
+if __name__ == '__main__':
+  tf.test.main()

+ 110 - 0
inception/slim/losses.py

@@ -0,0 +1,110 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Contains convenience wrappers for various Neural Network TensorFlow losses.
+
+  All the losses defined here add themselves to the LOSSES_COLLECTION
+  collection.
+
+  l1_loss: Define a L1 Loss, useful for regularization, i.e. lasso.
+  l2_loss: Define a L2 Loss, useful for regularization, i.e. weight decay.
+  cross_entropy_loss: Define a cross entropy loss using
+    softmax_cross_entropy_with_logits. Useful for classification.
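+
+  For example (an illustrative sketch), a weight-decay term created with
+  l2_loss registers itself and can later be collected together with any
+  other registered losses:
+    weight_decay = l2_loss(weights, weight=0.0004)
+    all_losses = tf.get_collection(LOSSES_COLLECTION)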
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+import tensorflow as tf
+
+# In order to gather all losses in a network, the user should use this
+# key for get_collection, i.e:
+#   losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)
+LOSSES_COLLECTION = '_losses'
+
+
+def l1_loss(tensor, weight=1.0, scope=None):
+  """Define a L1Loss, useful for regularize, i.e. lasso.
+
+  Args:
+    tensor: tensor to regularize.
+    weight: scale the loss by this factor.
+    scope: Optional scope for op_scope.
+
+  Returns:
+    the L1 loss op.
+  """
+  with tf.op_scope([tensor], scope, 'L1Loss'):
+    weight = tf.convert_to_tensor(weight,
+                                  dtype=tensor.dtype.base_dtype,
+                                  name='loss_weight')
+    loss = tf.mul(weight, tf.reduce_sum(tf.abs(tensor)), name='value')
+    tf.add_to_collection(LOSSES_COLLECTION, loss)
+    return loss
+
+
+def l2_loss(tensor, weight=1.0, scope=None):
+  """Define a L2Loss, useful for regularize, i.e. weight decay.
+
+  Args:
+    tensor: tensor to regularize.
+    weight: an optional weight to modulate the loss.
+    scope: Optional scope for op_scope.
+
+  Returns:
+    the L2 loss op.
+  """
+  with tf.op_scope([tensor], scope, 'L2Loss'):
+    weight = tf.convert_to_tensor(weight,
+                                  dtype=tensor.dtype.base_dtype,
+                                  name='loss_weight')
+    loss = tf.mul(weight, tf.nn.l2_loss(tensor), name='value')
+    tf.add_to_collection(LOSSES_COLLECTION, loss)
+    return loss
+
+
+def cross_entropy_loss(logits, one_hot_labels, label_smoothing=0,
+                       weight=1.0, scope=None):
+  """Define a Cross Entropy loss using softmax_cross_entropy_with_logits.
+
+  It can scale the loss by weight factor, and smooth the labels.
+
+  Args:
+    logits: [batch_size, num_classes] logits outputs of the network.
+    one_hot_labels: [batch_size, num_classes] target one_hot_encoded labels.
+    label_smoothing: if greater than 0 then smooth the labels.
+    weight: scale the loss by this factor.
+    scope: Optional scope for op_scope.
+
+  Returns:
+    A tensor with the softmax_cross_entropy loss.
+  """
+  logits.get_shape().assert_is_compatible_with(one_hot_labels.get_shape())
+  with tf.op_scope([logits, one_hot_labels], scope, 'CrossEntropyLoss'):
+    num_classes = one_hot_labels.get_shape()[-1].value
+    one_hot_labels = tf.cast(one_hot_labels, logits.dtype)
+    if label_smoothing > 0:
+      smooth_positives = 1.0 - label_smoothing
+      smooth_negatives = label_smoothing / num_classes
+      one_hot_labels = one_hot_labels * smooth_positives + smooth_negatives
+    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
+                                                            one_hot_labels,
+                                                            name='xentropy')
+    weight = tf.convert_to_tensor(weight,
+                                  dtype=logits.dtype.base_dtype,
+                                  name='loss_weight')
+    loss = tf.mul(weight, tf.reduce_mean(cross_entropy), name='value')
+    tf.add_to_collection(LOSSES_COLLECTION, loss)
+    return loss

+ 89 - 0
inception/slim/losses_test.py

@@ -0,0 +1,89 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for slim.losses."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+
+import tensorflow as tf
+
+from inception.slim import losses
+
+
+class LossesTest(tf.test.TestCase):
+
+  def testL1Loss(self):
+    with self.test_session():
+      shape = [5, 5, 5]
+      num_elem = 5 * 5 * 5
+      weights = tf.constant(1.0, shape=shape)
+      wd = 0.01
+      loss = losses.l1_loss(weights, wd)
+      self.assertEquals(loss.op.name, 'L1Loss/value')
+      self.assertAlmostEqual(loss.eval(), num_elem * wd, 5)
+
+  def testL2Loss(self):
+    with self.test_session():
+      shape = [5, 5, 5]
+      num_elem = 5 * 5 * 5
+      weights = tf.constant(1.0, shape=shape)
+      wd = 0.01
+      loss = losses.l2_loss(weights, wd)
+      self.assertEquals(loss.op.name, 'L2Loss/value')
+      self.assertAlmostEqual(loss.eval(), num_elem * wd / 2, 5)
+
+
+class CrossEntropyLossTest(tf.test.TestCase):
+
+  def testCrossEntropyLossAllCorrect(self):
+    with self.test_session():
+      logits = tf.constant([[10.0, 0.0, 0.0],
+                            [0.0, 10.0, 0.0],
+                            [0.0, 0.0, 10.0]])
+      labels = tf.constant([[1, 0, 0],
+                            [0, 1, 0],
+                            [0, 0, 1]])
+      loss = losses.cross_entropy_loss(logits, labels)
+      self.assertEquals(loss.op.name, 'CrossEntropyLoss/value')
+      self.assertAlmostEqual(loss.eval(), 0.0, 3)
+
+  def testCrossEntropyLossAllWrong(self):
+    with self.test_session():
+      logits = tf.constant([[10.0, 0.0, 0.0],
+                            [0.0, 10.0, 0.0],
+                            [0.0, 0.0, 10.0]])
+      labels = tf.constant([[0, 0, 1],
+                            [1, 0, 0],
+                            [0, 1, 0]])
+      loss = losses.cross_entropy_loss(logits, labels)
+      self.assertEquals(loss.op.name, 'CrossEntropyLoss/value')
+      self.assertAlmostEqual(loss.eval(), 10.0, 3)
+
+  def testCrossEntropyLossAllWrongWithWeight(self):
+    with self.test_session():
+      logits = tf.constant([[10.0, 0.0, 0.0],
+                            [0.0, 10.0, 0.0],
+                            [0.0, 0.0, 10.0]])
+      labels = tf.constant([[0, 0, 1],
+                            [1, 0, 0],
+                            [0, 1, 0]])
+      loss = losses.cross_entropy_loss(logits, labels, weight=0.5)
+      self.assertEquals(loss.op.name, 'CrossEntropyLoss/value')
+      self.assertAlmostEqual(loss.eval(), 5.0, 3)
+
+if __name__ == '__main__':
+  tf.test.main()

+ 418 - 0
inception/slim/ops.py

@@ -0,0 +1,418 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Contains convenience wrappers for typical Neural Network TensorFlow layers.
+
+   Additionally it maintains a collection with update_ops that need to be
+   run after the ops have been computed, for example to update the moving means
+   and moving variances of batch_norm.
+
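+   For example (an illustrative sketch), after building a model that uses
+   batch_norm, the pending moving-average updates can be fetched and grouped:
+     update_ops = tf.get_collection(UPDATE_OPS_COLLECTION)
+     batchnorm_updates = tf.group(*update_ops)
+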
+   Ops that behave differently during training and eval have an is_training
+   parameter. Additionally, ops that create variables.variable have a trainable
+   parameter, which controls whether the op's variables are trainable or not.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+
+import tensorflow as tf
+
+from tensorflow.python.training import moving_averages
+
+from inception.slim import losses
+from inception.slim import scopes
+from inception.slim import variables
+
+# Used to keep the update ops done by batch_norm.
+UPDATE_OPS_COLLECTION = '_update_ops_'
+
+
+@scopes.add_arg_scope
+def batch_norm(inputs,
+               decay=0.999,
+               scale=False,
+               epsilon=0.001,
+               moving_vars='moving_vars',
+               activation=None,
+               is_training=True,
+               trainable=True,
+               restore=True,
+               scope=None):
+  """Adds a Batch Normalization layer.
+
+  Args:
+    inputs: a tensor of size [batch_size, height, width, channels]
+            or [batch_size, channels].
+    decay: decay for the moving average.
+    scale: If True, multiply by gamma. If False, gamma is
+      not used. When the next layer is linear (also e.g. ReLU), this can be
+      disabled since the scaling can be done by the next layer.
+    epsilon: small float added to variance to avoid dividing by zero.
+    moving_vars: collection to store the moving_mean and moving_variance.
+    activation: activation function.
+    is_training: whether or not the model is in training mode.
+    trainable: whether or not the variables should be trainable.
+    restore: whether or not the variables should be marked for restore.
+    scope: Optional scope for variable_op_scope.
+
+  Returns:
+    a tensor representing the output of the operation.
+
+  """
+  inputs_shape = inputs.get_shape()
+  with tf.variable_op_scope([inputs], scope, 'BatchNorm'):
+    axis = range(len(inputs_shape) - 1)
+    params_shape = inputs_shape[-1:]
+    with scopes.arg_scope([variables.variable], restore=restore):
+      # Allocate parameters for the beta and gamma of the normalization.
+      beta = variables.variable('beta',
+                                params_shape,
+                                initializer=tf.zeros_initializer,
+                                trainable=trainable)
+      if scale:
+        gamma = variables.variable('gamma',
+                                   params_shape,
+                                   initializer=tf.ones,
+                                   trainable=trainable)
+      else:
+        gamma = None
+      # Create moving_mean and moving_variance add them to moving_vars and
+      # GraphKeys.MOVING_AVERAGE_VARIABLES collections.
+      with scopes.arg_scope([variables.variable], trainable=False,
+                            collections=[
+                                moving_vars,
+                                tf.GraphKeys.MOVING_AVERAGE_VARIABLES]):
+        moving_mean = variables.variable('moving_mean',
+                                         params_shape,
+                                         initializer=tf.zeros_initializer)
+        moving_variance = variables.variable('moving_variance',
+                                             params_shape,
+                                             initializer=tf.ones)
+    if is_training:
+      # Calculate the moments based on the individual batch.
+      mean, variance = tf.nn.moments(inputs, axis)
+
+      update_moving_mean = moving_averages.assign_moving_average(
+          moving_mean, mean, decay)
+      tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
+      update_moving_variance = moving_averages.assign_moving_average(
+          moving_variance, variance, decay)
+      tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
+    else:
+      # Just use the moving_mean and moving_variance.
+      mean = moving_mean
+      variance = moving_variance
+    # Normalize the activations.
+    outputs = tf.nn.batch_normalization(
+        inputs, mean, variance, beta, gamma, epsilon)
+    outputs.set_shape(inputs.get_shape())
+    if activation:
+      outputs = activation(outputs)
+    return outputs
+
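+# Usage sketch: during training batch_norm only records its moving-average
+# updates in UPDATE_OPS_COLLECTION, so callers are expected to run them with
+# the train step, e.g.
+#
+#   net = batch_norm(images, decay=0.9)
+#   with tf.control_dependencies(tf.get_collection(UPDATE_OPS_COLLECTION)):
+#     train_op = optimizer.minimize(loss)
+#
+# where `images`, `loss` and `optimizer` are assumed to be defined elsewhere.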
+
+@scopes.add_arg_scope
+def conv2d(inputs,
+           num_filters_out,
+           kernel_size,
+           stride=1,
+           padding='SAME',
+           activation=tf.nn.relu,
+           stddev=0.01,
+           bias=0.0,
+           weight_decay=0,
+           batch_norm_params=None,
+           is_training=True,
+           trainable=True,
+           restore=True,
+           scope=None):
+  """Adds a 2D convolution followed by an optional batch_norm layer.
+
+  conv2d creates a variable called 'weights', representing the convolutional
+  kernel, that is convolved with the input. If `batch_norm_params` is None, a
+  second variable called 'biases' is added to the result of the convolution
+  operation.
+
+  Args:
+    inputs: a tensor of size [batch_size, height, width, channels].
+    num_filters_out: the number of output filters.
+    kernel_size: a 2-D list comprising the height and width of the filters.
+    stride: the stride in height and width of the convolution.
+    padding: one of 'VALID' or 'SAME'.
+    activation: activation function.
+    stddev: standard deviation of the truncated Gaussian weight distribution.
+    bias: the initial value of the biases.
+    weight_decay: the weight decay.
+    batch_norm_params: parameters for batch_norm. If None, it is not used.
+    is_training: whether or not the model is in training mode.
+    trainable: whether or not the variables should be trainable.
+    restore: whether or not the variables should be marked for restore.
+    scope: Optional scope for variable_op_scope.
+
+  Returns:
+    a tensor representing the output of the operation.
+
+  Raises:
+    ValueError: if 'kernel_size' is not a 2-D list.
+  """
+  if len(kernel_size) != 2:
+    raise ValueError('kernel_size must be a 2-D list.')
+  with tf.variable_op_scope([inputs], scope, 'Conv'):
+    num_filters_in = inputs.get_shape()[-1]
+    weights_shape = [kernel_size[0], kernel_size[1],
+                     num_filters_in, num_filters_out]
+    weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
+    l2_regularizer = lambda t: losses.l2_loss(t, weight_decay)
+    weights = variables.variable('weights',
+                                 shape=weights_shape,
+                                 initializer=weights_initializer,
+                                 regularizer=l2_regularizer,
+                                 trainable=trainable,
+                                 restore=restore)
+    conv = tf.nn.conv2d(inputs, weights, [1, stride, stride, 1],
+                        padding=padding)
+    if batch_norm_params is not None:
+      with scopes.arg_scope([batch_norm], is_training=is_training,
+                            trainable=trainable, restore=restore):
+        outputs = batch_norm(conv, **batch_norm_params)
+    else:
+      bias_shape = [num_filters_out,]
+      bias_initializer = tf.constant_initializer(bias)
+      biases = variables.variable('biases',
+                                  shape=bias_shape,
+                                  initializer=bias_initializer,
+                                  trainable=trainable,
+                                  restore=restore)
+      outputs = tf.nn.bias_add(conv, biases)
+    if activation:
+      outputs = activation(outputs)
+    return outputs
+
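+# Usage sketch: a convolutional stack with weight decay and batch_norm set
+# once via arg_scope; `images` is assumed to be a [batch, height, width,
+# channels] float tensor defined elsewhere.
+#
+#   with scopes.arg_scope([conv2d], weight_decay=0.0005,
+#                         batch_norm_params={'decay': 0.99}):
+#     net = conv2d(images, 64, [3, 3], scope='conv1')
+#     net = conv2d(net, 64, [3, 3], stride=2, scope='conv2')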
+
+@scopes.add_arg_scope
+def fc(inputs,
+       num_units_out,
+       activation=tf.nn.relu,
+       stddev=0.01,
+       bias=0.0,
+       weight_decay=0,
+       batch_norm_params=None,
+       is_training=True,
+       trainable=True,
+       restore=True,
+       scope=None):
+  """Adds a fully connected layer followed by an optional batch_norm layer.
+
+  FC creates a variable called 'weights', representing the fully connected
+  weight matrix, that is multiplied by the input. If `batch_norm_params` is
+  None, a second variable called 'biases' is added to the result of the
+  initial vector-matrix multiplication.
+
+  Args:
+    inputs: a [B x N] tensor where B is the batch size and N is the number of
+            input units in the layer.
+    num_units_out: the number of output units in the layer.
+    activation: activation function.
+    stddev: the standard deviation for the weights.
+    bias: the initial value of the biases.
+    weight_decay: the weight decay.
+    batch_norm_params: parameters for batch_norm. If None, it is not used.
+    is_training: whether or not the model is in training mode.
+    trainable: whether or not the variables should be trainable.
+    restore: whether or not the variables should be marked for restore.
+    scope: Optional scope for variable_op_scope.
+
+  Returns:
+     the tensor variable representing the result of the series of operations.
+  """
+  with tf.variable_op_scope([inputs], scope, 'FC'):
+    num_units_in = inputs.get_shape()[1]
+    weights_shape = [num_units_in, num_units_out]
+    weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
+    l2_regularizer = lambda t: losses.l2_loss(t, weight_decay)
+    weights = variables.variable('weights',
+                                 shape=weights_shape,
+                                 initializer=weights_initializer,
+                                 regularizer=l2_regularizer,
+                                 trainable=trainable,
+                                 restore=restore)
+    if batch_norm_params is not None:
+      outputs = tf.matmul(inputs, weights)
+      with scopes.arg_scope([batch_norm], is_training=is_training,
+                            trainable=trainable, restore=restore):
+        outputs = batch_norm(outputs, **batch_norm_params)
+    else:
+      bias_shape = [num_units_out,]
+      bias_initializer = tf.constant_initializer(bias)
+      biases = variables.variable('biases',
+                                  shape=bias_shape,
+                                  initializer=bias_initializer,
+                                  trainable=trainable,
+                                  restore=restore)
+      outputs = tf.nn.xw_plus_b(inputs, weights, biases)
+    if activation:
+      outputs = activation(outputs)
+    return outputs
+
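+# Usage sketch: a small classifier head on top of flattened features;
+# `features` is assumed to be a [batch_size, k] tensor defined elsewhere and
+# `num_classes` a Python int.
+#
+#   net = fc(features, 1024, scope='fc1')
+#   net = dropout(net, 0.5, scope='dropout1')
+#   logits = fc(net, num_classes, activation=None, scope='logits')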
+
+def one_hot_encoding(labels, num_classes, scope=None):
+  """Transform numeric labels into onehot_labels.
+
+  Args:
+    labels: [batch_size] target labels.
+    num_classes: total number of classes.
+    scope: Optional scope for op_scope.
+  Returns:
+    one hot encoding of the labels.
+  """
+  with tf.op_scope([labels], scope, 'OneHotEncoding'):
+    batch_size = labels.get_shape()[0]
+    indices = tf.expand_dims(tf.range(0, batch_size), 1)
+    labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
+    concated = tf.concat(1, [indices, labels])
+    onehot_labels = tf.sparse_to_dense(
+        concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
+    onehot_labels.set_shape([batch_size, num_classes])
+    return onehot_labels
+
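+# Worked example: with labels = tf.constant([0, 2]) and num_classes=3,
+# one_hot_encoding returns [[1., 0., 0.], [0., 0., 1.]] as a float tensor.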
+
+@scopes.add_arg_scope
+def max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
+  """Adds a Max Pooling layer.
+
+  It is assumed by the wrapper that the pooling is only done per image and not
+  in depth or batch.
+
+  Args:
+    inputs: a tensor of size [batch_size, height, width, depth].
+    kernel_size: the size of the pooling kernel over which the op is computed.
+    stride: the stride in height and width of the pooling.
+    padding: the padding method, either 'VALID' or 'SAME'.
+    scope: Optional scope for op_scope.
+
+  Returns:
+    a tensor representing the results of the pooling operation.
+  Raises:
+    ValueError: if 'kernel_size' is not a 2-D list
+  """
+  if len(kernel_size) != 2:
+    raise ValueError('kernel_size must be a 2-D list.')
+  with tf.op_scope([inputs], scope, 'MaxPool'):
+    return tf.nn.max_pool(inputs,
+                          ksize=[1, kernel_size[0], kernel_size[1], 1],
+                          strides=[1, stride, stride, 1],
+                          padding=padding)
+
+
+@scopes.add_arg_scope
+def avg_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
+  """Adds a Avg Pooling layer.
+
+  It is assumed by the wrapper that the pooling is only done per image and not
+  in depth or batch.
+
+  Args:
+    inputs: a tensor of size [batch_size, height, width, depth].
+    kernel_size: the size of the pooling kernel over which the op is computed.
+    stride: the stride in height and width of the pooling.
+    padding: the padding method, either 'VALID' or 'SAME'.
+    scope: Optional scope for op_scope.
+
+  Returns:
+    a tensor representing the results of the pooling operation.
+  Raises:
+    ValueError: if 'kernel_size' is not a 2-D list
+  """
+  if len(kernel_size) != 2:
+    raise ValueError('kernel_size must be a 2-D list.')
+  with tf.op_scope([inputs], scope, 'AvgPool'):
+    return tf.nn.avg_pool(inputs,
+                          ksize=[1, kernel_size[0], kernel_size[1], 1],
+                          strides=[1, stride, stride, 1],
+                          padding=padding)
+
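+# Shape sketch: max_pool and avg_pool follow the same rules. For a
+# [5, 28, 28, 64] input, pooling with kernel [2, 2], the default stride=2 and
+# padding='VALID' gives [5, 14, 14, 64], while kernel [3, 3] with stride=1 and
+# padding='SAME' keeps the input shape [5, 28, 28, 64].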
+
+@scopes.add_arg_scope
+def dropout(inputs, keep_prob=0.5, is_training=True, scope=None):
+  """Returns a dropout layer applied to the input.
+
+  Args:
+    inputs: the tensor to pass to the Dropout layer.
+    keep_prob: the probability that each input unit is kept.
+    is_training: whether or not the model is in training mode. If so, dropout
+      is applied and values scaled. Otherwise, the inputs are returned
+      unchanged.
+    scope: Optional scope for op_scope.
+
+  Returns:
+    a tensor representing the output of the operation.
+  """
+  if is_training and keep_prob > 0:
+    with tf.op_scope([inputs], scope, 'Dropout'):
+      return tf.nn.dropout(inputs, keep_prob)
+  else:
+    return inputs
+
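+# Behaviour sketch: dropout is a no-op outside training, so a shared model
+# function can simply thread is_training through, e.g.
+#
+#   net = dropout(net, 0.8, is_training=is_training, scope='dropout')
+#
+# returns `net` unchanged whenever is_training is False.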
+
+def flatten(inputs, scope=None):
+  """Flattens the input while maintaining the batch_size.
+
+    Assumes that the first dimension represents the batch.
+
+  Args:
+    inputs: a tensor of size [batch_size, ...].
+    scope: Optional scope for op_scope.
+
+  Returns:
+    a flattened tensor with shape [batch_size, k].
+  Raises:
+    ValueError: if the inputs have fewer than 2 dimensions.
+  """
+  if len(inputs.get_shape()) < 2:
+    raise ValueError('Inputs must have at least 2 dimensions')
+  dims = inputs.get_shape()[1:]
+  k = dims.num_elements()
+  with tf.op_scope([inputs], scope, 'Flatten'):
+    return tf.reshape(inputs, [-1, k])
+
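+# Shape sketch: flatten keeps the batch dimension and collapses the rest, so
+# a [32, 7, 7, 64] tensor becomes [32, 3136] (7 * 7 * 64 = 3136), and a
+# partially known [None, 7, 7, 64] placeholder becomes [None, 3136].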
+
+def repeat_op(repetitions, inputs, op, *args, **kwargs):
+  """Build a sequential Tower starting from inputs by using an op repeatedly.
+
+  It creates new scopes for each operation by increasing the counter.
+  Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1')
+    it will repeat the given op under the following variable_scopes:
+      conv1/Conv
+      conv1/Conv_1
+      conv1/Conv_2
+
+  Args:
+    repetitions: number of repetitions.
+    inputs: a tensor of size [batch_size, height, width, channels].
+    op: an operation.
+    *args: args for the op.
+    **kwargs: kwargs for the op.
+
+  Returns:
+    a tensor resulting from applying the operation op, repetitions times.
+  Raises:
+    ValueError: if the op is unknown or wrong.
+  """
+  scope = kwargs.pop('scope', None)
+  with tf.variable_op_scope([inputs], scope, 'RepeatOp'):
+    tower = inputs
+    for _ in range(repetitions):
+      tower = op(tower, *args, **kwargs)
+    return tower
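+
+# Usage sketch: a VGG-style block built with repeat_op; `net` is assumed to be
+# a 4-D feature tensor defined elsewhere.
+#
+#   net = repeat_op(3, net, conv2d, 256, [3, 3], scope='conv3')
+#   net = max_pool(net, [2, 2], scope='pool3')
+#
+# The three convolutions land in scopes conv3/Conv, conv3/Conv_1 and
+# conv3/Conv_2, mirroring the docstring example above.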

+ 510 - 0
inception/slim/ops_test.py

@@ -0,0 +1,510 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for slim.ops."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.ops import control_flow_ops
+
+from inception.slim import losses
+from inception.slim import ops
+from inception.slim import scopes
+from inception.slim import variables
+
+
+class ConvTest(tf.test.TestCase):
+
+  def testCreateConv(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      output = ops.conv2d(images, 32, [3, 3])
+      self.assertEquals(output.op.name, 'Conv/Relu')
+      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
+
+  def testCreateConvCreatesWeightsAndBiasesVars(self):
+    height, width = 3, 3
+    images = tf.random_uniform((5, height, width, 3), seed=1)
+    with self.test_session():
+      self.assertFalse(variables.get_variables('conv1/weights'))
+      self.assertFalse(variables.get_variables('conv1/biases'))
+      ops.conv2d(images, 32, [3, 3], scope='conv1')
+      self.assertTrue(variables.get_variables('conv1/weights'))
+      self.assertTrue(variables.get_variables('conv1/biases'))
+
+  def testCreateConvWithScope(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      output = ops.conv2d(images, 32, [3, 3], scope='conv1')
+      self.assertEquals(output.op.name, 'conv1/Relu')
+
+  def testCreateConvWithoutActivation(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      output = ops.conv2d(images, 32, [3, 3], activation=None)
+      self.assertEquals(output.op.name, 'Conv/BiasAdd')
+
+  def testCreateConvValid(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      output = ops.conv2d(images, 32, [3, 3], padding='VALID')
+      self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 32])
+
+  def testCreateConvWithWD(self):
+    height, width = 3, 3
+    with self.test_session() as sess:
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      ops.conv2d(images, 32, [3, 3], weight_decay=0.01)
+      wd = tf.get_collection(losses.LOSSES_COLLECTION)[0]
+      self.assertEquals(wd.op.name, 'Conv/weights/Regularizer/L2Loss/value')
+      sess.run(tf.initialize_all_variables())
+      self.assertTrue(sess.run(wd) <= 0.01)
+
+  def testReuseConvWithWD(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      ops.conv2d(images, 32, [3, 3], weight_decay=0.01, scope='conv1')
+      self.assertEquals(len(tf.get_collection(losses.LOSSES_COLLECTION)), 1)
+      tf.get_variable_scope().reuse_variables()
+      ops.conv2d(images, 32, [3, 3], weight_decay=0.01, scope='conv1')
+      self.assertEquals(len(tf.get_collection(losses.LOSSES_COLLECTION)), 1)
+
+  def testConvWithBatchNorm(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      with scopes.arg_scope([ops.conv2d], batch_norm_params={}):
+        net = ops.conv2d(images, 32, [3, 3], scope='conv1')
+        net = ops.conv2d(net, 32, [3, 3], scope='conv2')
+      self.assertEquals(len(tf.get_collection('moving_vars')), 4)
+      self.assertEquals(len(variables.get_variables('conv1/BatchNorm')), 3)
+      self.assertEquals(len(variables.get_variables('conv2/BatchNorm')), 3)
+
+
+class FCTest(tf.test.TestCase):
+
+  def testCreateFC(self):
+    height, width = 3, 3
+    with self.test_session():
+      inputs = tf.random_uniform((5, height * width * 3), seed=1)
+      output = ops.fc(inputs, 32)
+      self.assertEquals(output.op.name, 'FC/Relu')
+      self.assertListEqual(output.get_shape().as_list(), [5, 32])
+
+  def testCreateFCWithScope(self):
+    height, width = 3, 3
+    with self.test_session():
+      inputs = tf.random_uniform((5, height * width * 3), seed=1)
+      output = ops.fc(inputs, 32, scope='fc1')
+      self.assertEquals(output.op.name, 'fc1/Relu')
+
+  def testCreateFcCreatesWeightsAndBiasesVars(self):
+    height, width = 3, 3
+    inputs = tf.random_uniform((5, height * width * 3), seed=1)
+    with self.test_session():
+      self.assertFalse(variables.get_variables('fc1/weights'))
+      self.assertFalse(variables.get_variables('fc1/biases'))
+      ops.fc(inputs, 32, scope='fc1')
+      self.assertTrue(variables.get_variables('fc1/weights'))
+      self.assertTrue(variables.get_variables('fc1/biases'))
+
+  def testReuseVars(self):
+    height, width = 3, 3
+    inputs = tf.random_uniform((5, height * width * 3), seed=1)
+    with self.test_session():
+      ops.fc(inputs, 32, scope='fc1')
+      self.assertEquals(len(variables.get_variables('fc1')), 2)
+      tf.get_variable_scope().reuse_variables()
+      ops.fc(inputs, 32, scope='fc1')
+      self.assertEquals(len(variables.get_variables('fc1')), 2)
+
+  def testNonReuseVars(self):
+    height, width = 3, 3
+    inputs = tf.random_uniform((5, height * width * 3), seed=1)
+    with self.test_session():
+      ops.fc(inputs, 32)
+      self.assertEquals(len(variables.get_variables('FC')), 2)
+      ops.fc(inputs, 32)
+      self.assertEquals(len(variables.get_variables('FC')), 4)
+
+  def testCreateFCWithoutActivation(self):
+    height, width = 3, 3
+    with self.test_session():
+      inputs = tf.random_uniform((5, height * width * 3), seed=1)
+      output = ops.fc(inputs, 32, activation=None)
+      self.assertEquals(output.op.name, 'FC/xw_plus_b')
+
+  def testCreateFCWithWD(self):
+    height, width = 3, 3
+    with self.test_session() as sess:
+      inputs = tf.random_uniform((5, height * width * 3), seed=1)
+      ops.fc(inputs, 32, weight_decay=0.01)
+      wd = tf.get_collection(losses.LOSSES_COLLECTION)[0]
+      self.assertEquals(wd.op.name, 'FC/weights/Regularizer/L2Loss/value')
+      sess.run(tf.initialize_all_variables())
+      self.assertTrue(sess.run(wd) <= 0.01)
+
+  def testReuseFCWithWD(self):
+    height, width = 3, 3
+    with self.test_session():
+      inputs = tf.random_uniform((5, height * width * 3), seed=1)
+      ops.fc(inputs, 32, weight_decay=0.01, scope='fc')
+      self.assertEquals(len(tf.get_collection(losses.LOSSES_COLLECTION)), 1)
+      tf.get_variable_scope().reuse_variables()
+      ops.fc(inputs, 32, weight_decay=0.01, scope='fc')
+      self.assertEquals(len(tf.get_collection(losses.LOSSES_COLLECTION)), 1)
+
+  def testFCWithBatchNorm(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height * width * 3), seed=1)
+      with scopes.arg_scope([ops.fc], batch_norm_params={}):
+        net = ops.fc(images, 32, scope='fc1')
+        net = ops.fc(net, 32, scope='fc2')
+      self.assertEquals(len(tf.get_collection('moving_vars')), 4)
+      self.assertEquals(len(variables.get_variables('fc1/BatchNorm')), 3)
+      self.assertEquals(len(variables.get_variables('fc2/BatchNorm')), 3)
+
+
+class MaxPoolTest(tf.test.TestCase):
+
+  def testCreateMaxPool(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      output = ops.max_pool(images, [3, 3])
+      self.assertEquals(output.op.name, 'MaxPool/MaxPool')
+      self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
+
+  def testCreateMaxPoolWithScope(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      output = ops.max_pool(images, [3, 3], scope='pool1')
+      self.assertEquals(output.op.name, 'pool1/MaxPool')
+
+  def testCreateMaxPoolSAME(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      output = ops.max_pool(images, [3, 3], padding='SAME')
+      self.assertListEqual(output.get_shape().as_list(), [5, 2, 2, 3])
+
+  def testCreateMaxPoolStrideSAME(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      output = ops.max_pool(images, [3, 3], stride=1, padding='SAME')
+      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
+
+
+class AvgPoolTest(tf.test.TestCase):
+
+  def testCreateAvgPool(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      output = ops.avg_pool(images, [3, 3])
+      self.assertEquals(output.op.name, 'AvgPool/AvgPool')
+      self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
+
+  def testCreateAvgPoolWithScope(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      output = ops.avg_pool(images, [3, 3], scope='pool1')
+      self.assertEquals(output.op.name, 'pool1/AvgPool')
+
+  def testCreateAvgPoolSAME(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      output = ops.avg_pool(images, [3, 3], padding='SAME')
+      self.assertListEqual(output.get_shape().as_list(), [5, 2, 2, 3])
+
+  def testCreateAvgPoolStrideSAME(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      output = ops.avg_pool(images, [3, 3], stride=1, padding='SAME')
+      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
+
+
+class OneHotEncodingTest(tf.test.TestCase):
+
+  def testOneHotEncodingCreate(self):
+    with self.test_session():
+      labels = tf.constant([0, 1, 2])
+      output = ops.one_hot_encoding(labels, num_classes=3)
+      self.assertEquals(output.op.name, 'OneHotEncoding/SparseToDense')
+      self.assertListEqual(output.get_shape().as_list(), [3, 3])
+
+  def testOneHotEncoding(self):
+    with self.test_session():
+      labels = tf.constant([0, 1, 2])
+      one_hot_labels = tf.constant([[1, 0, 0],
+                                    [0, 1, 0],
+                                    [0, 0, 1]])
+      output = ops.one_hot_encoding(labels, num_classes=3)
+      self.assertAllClose(output.eval(), one_hot_labels.eval())
+
+
+class DropoutTest(tf.test.TestCase):
+
+  def testCreateDropout(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      output = ops.dropout(images)
+      self.assertEquals(output.op.name, 'Dropout/dropout/mul_1')
+      output.get_shape().assert_is_compatible_with(images.get_shape())
+
+  def testCreateDropoutNoTraining(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
+      output = ops.dropout(images, is_training=False)
+      self.assertEquals(output, images)
+
+
+class FlattenTest(tf.test.TestCase):
+
+  def testFlatten4D(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
+      output = ops.flatten(images)
+      self.assertEquals(output.get_shape().num_elements(),
+                        images.get_shape().num_elements())
+      self.assertEqual(output.get_shape()[0], images.get_shape()[0])
+
+  def testFlatten3D(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width), seed=1, name='images')
+      output = ops.flatten(images)
+      self.assertEquals(output.get_shape().num_elements(),
+                        images.get_shape().num_elements())
+      self.assertEqual(output.get_shape()[0], images.get_shape()[0])
+
+  def testFlattenBatchSize(self):
+    height, width = 3, 3
+    with self.test_session() as sess:
+      images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
+      inputs = tf.placeholder(tf.int32, (None, height, width, 3))
+      output = ops.flatten(inputs)
+      self.assertEquals(output.get_shape().as_list(),
+                        [None, height * width * 3])
+      output = sess.run(output, {inputs: images.eval()})
+      self.assertEquals(output.size,
+                        images.get_shape().num_elements())
+      self.assertEqual(output.shape[0], images.get_shape()[0])
+
+
+class BatchNormTest(tf.test.TestCase):
+
+  def testCreateOp(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      output = ops.batch_norm(images)
+      self.assertTrue(output.op.name.startswith('BatchNorm/batchnorm'))
+      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
+
+  def testCreateVariables(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      ops.batch_norm(images, scale=True)
+      beta = variables.get_variables_by_name('beta')[0]
+      gamma = variables.get_variables_by_name('gamma')[0]
+      self.assertEquals(beta.op.name, 'BatchNorm/beta')
+      self.assertEquals(gamma.op.name, 'BatchNorm/gamma')
+      moving_mean = tf.get_collection('moving_vars')[0]
+      moving_variance = tf.get_collection('moving_vars')[1]
+      self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
+      self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
+
+  def testMovingAverageVariables(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      ops.batch_norm(images, scale=True)
+      moving_mean = tf.moving_average_variables()[0]
+      moving_variance = tf.moving_average_variables()[1]
+      self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
+      self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
+
+  def testUpdateOps(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      ops.batch_norm(images)
+      update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
+      update_moving_mean = update_ops[0]
+      update_moving_variance = update_ops[1]
+      self.assertEquals(update_moving_mean.op.name,
+                        'BatchNorm/AssignMovingAvg')
+      self.assertEquals(update_moving_variance.op.name,
+                        'BatchNorm/AssignMovingAvg_1')
+
+  def testReuseVariables(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      ops.batch_norm(images, scale=True, scope='bn')
+      tf.get_variable_scope().reuse_variables()
+      ops.batch_norm(images, scale=True, scope='bn')
+      beta = variables.get_variables_by_name('beta')
+      gamma = variables.get_variables_by_name('gamma')
+      self.assertEquals(len(beta), 1)
+      self.assertEquals(len(gamma), 1)
+      moving_vars = tf.get_collection('moving_vars')
+      self.assertEquals(len(moving_vars), 2)
+
+  def testReuseUpdateOps(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      ops.batch_norm(images, scope='bn')
+      self.assertEquals(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 2)
+      tf.get_variable_scope().reuse_variables()
+      ops.batch_norm(images, scope='bn')
+      self.assertEquals(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 4)
+
+  def testCreateMovingVars(self):
+    height, width = 3, 3
+    with self.test_session():
+      images = tf.random_uniform((5, height, width, 3), seed=1)
+      _ = ops.batch_norm(images, moving_vars='moving_vars')
+      moving_mean = tf.get_collection('moving_vars',
+                                      'BatchNorm/moving_mean')
+      self.assertEquals(len(moving_mean), 1)
+      self.assertEquals(moving_mean[0].op.name, 'BatchNorm/moving_mean')
+      moving_variance = tf.get_collection('moving_vars',
+                                          'BatchNorm/moving_variance')
+      self.assertEquals(len(moving_variance), 1)
+      self.assertEquals(moving_variance[0].op.name, 'BatchNorm/moving_variance')
+
+  def testComputeMovingVars(self):
+    height, width = 3, 3
+    with self.test_session() as sess:
+      image_shape = (10, height, width, 3)
+      image_values = np.random.rand(*image_shape)
+      expected_mean = np.mean(image_values, axis=(0, 1, 2))
+      expected_var = np.var(image_values, axis=(0, 1, 2))
+      images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
+      output = ops.batch_norm(images, decay=0.1)
+      update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
+      with tf.control_dependencies(update_ops):
+        barrier = tf.no_op(name='gradient_barrier')
+        output = control_flow_ops.with_dependencies([barrier], output)
+      # Initialize all variables
+      sess.run(tf.initialize_all_variables())
+      moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
+      moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
+      mean, variance = sess.run([moving_mean, moving_variance])
+      # After initialization moving_mean == 0 and moving_variance == 1.
+      self.assertAllClose(mean, [0] * 3)
+      self.assertAllClose(variance, [1] * 3)
+      for _ in range(10):
+        sess.run([output])
+      mean = moving_mean.eval()
+      variance = moving_variance.eval()
+      # After 10 updates with decay 0.1 moving_mean == expected_mean and
+      # moving_variance == expected_var.
+      self.assertAllClose(mean, expected_mean)
+      self.assertAllClose(variance, expected_var)
+
+  def testEvalMovingVars(self):
+    height, width = 3, 3
+    with self.test_session() as sess:
+      image_shape = (10, height, width, 3)
+      image_values = np.random.rand(*image_shape)
+      expected_mean = np.mean(image_values, axis=(0, 1, 2))
+      expected_var = np.var(image_values, axis=(0, 1, 2))
+      images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
+      output = ops.batch_norm(images, decay=0.1, is_training=False)
+      update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
+      with tf.control_dependencies(update_ops):
+        barrier = tf.no_op(name='gradient_barrier')
+        output = control_flow_ops.with_dependencies([barrier], output)
+      # Initialize all variables
+      sess.run(tf.initialize_all_variables())
+      moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
+      moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
+      mean, variance = sess.run([moving_mean, moving_variance])
+      # After initialization moving_mean == 0 and moving_variance == 1.
+      self.assertAllClose(mean, [0] * 3)
+      self.assertAllClose(variance, [1] * 3)
+      # Simulate assigment from saver restore.
+      init_assigns = [tf.assign(moving_mean, expected_mean),
+                      tf.assign(moving_variance, expected_var)]
+      sess.run(init_assigns)
+      for _ in range(10):
+        sess.run([output], {images: np.random.rand(*image_shape)})
+      mean = moving_mean.eval()
+      variance = moving_variance.eval()
+      # Although we feed different images, the moving_mean and moving_variance
+      # shouldn't change.
+      self.assertAllClose(mean, expected_mean)
+      self.assertAllClose(variance, expected_var)
+
+  def testReuseVars(self):
+    height, width = 3, 3
+    with self.test_session() as sess:
+      image_shape = (10, height, width, 3)
+      image_values = np.random.rand(*image_shape)
+      expected_mean = np.mean(image_values, axis=(0, 1, 2))
+      expected_var = np.var(image_values, axis=(0, 1, 2))
+      images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
+      output = ops.batch_norm(images, decay=0.1, is_training=False)
+      update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
+      with tf.control_dependencies(update_ops):
+        barrier = tf.no_op(name='gradient_barrier')
+        output = control_flow_ops.with_dependencies([barrier], output)
+      # Initialize all variables
+      sess.run(tf.initialize_all_variables())
+      moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
+      moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
+      mean, variance = sess.run([moving_mean, moving_variance])
+      # After initialization moving_mean == 0 and moving_variance == 1.
+      self.assertAllClose(mean, [0] * 3)
+      self.assertAllClose(variance, [1] * 3)
+      # Simulate assigment from saver restore.
+      init_assigns = [tf.assign(moving_mean, expected_mean),
+                      tf.assign(moving_variance, expected_var)]
+      sess.run(init_assigns)
+      for _ in range(10):
+        sess.run([output], {images: np.random.rand(*image_shape)})
+      mean = moving_mean.eval()
+      variance = moving_variance.eval()
+      # Although we feed different images, the moving_mean and moving_variance
+      # shouldn't change.
+      self.assertAllClose(mean, expected_mean)
+      self.assertAllClose(variance, expected_var)
+
+if __name__ == '__main__':
+  tf.test.main()

+ 144 - 0
inception/slim/scopes.py

@@ -0,0 +1,144 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Contains the new arg_scope used for TF-Slim ops.
+
+  Allows one to define models much more compactly by eliminating boilerplate
+  code. This is accomplished through the use of argument scoping (arg_scope).
+
+  Example of how to use scopes.arg_scope:
+
+  with slim.arg_scope([ops.conv2d], padding='SAME',
+                      stddev=0.01, weight_decay=0.0005):
+    net = ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
+    net = ops.conv2d(net, 256, [5, 5], scope='conv2')
+
+  The first call to conv2d overrides the scoped padding:
+    ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID',
+               stddev=0.01, weight_decay=0.0005, scope='conv1')
+
+  The second call to conv2d uses the scope's predefined args:
+    ops.conv2d(net, 256, [5, 5], padding='SAME',
+               stddev=0.01, weight_decay=0.0005, scope='conv2')
+
+  Example of how to use scopes.add_arg_scope:
+
+  @scopes.add_arg_scope
+  def conv2d(*args, **kwargs):
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import contextlib
+import functools
+
+
+from tensorflow.python.framework import ops
+
+_ARGSTACK_KEY = ("__arg_stack",)
+
+_DECORATED_OPS = set()
+
+
+def _get_arg_stack():
+  stack = ops.get_collection(_ARGSTACK_KEY)
+  if stack:
+    return stack[0]
+  else:
+    stack = [{}]
+    ops.add_to_collection(_ARGSTACK_KEY, stack)
+    return stack
+
+
+def _current_arg_scope():
+  stack = _get_arg_stack()
+  return stack[-1]
+
+
+def _add_op(op):
+  key_op = (op.__module__, op.__name__)
+  if key_op not in _DECORATED_OPS:
+    _DECORATED_OPS.add(key_op)
+
+
+@contextlib.contextmanager
+def arg_scope(list_ops, **kwargs):
+  """Stores the default arguments for the given set of list_ops.
+
+  Args:
+    list_ops: List or tuple of operations to set argument scope for. Every op in
+              list_ops needs to be decorated with @add_arg_scope to work.
+    **kwargs: keyword=value that will define the defaults for each op in
+              list_ops. All the ops need to accept the given set of arguments.
+
+  Yields:
+    the current_scope, which is a dictionary of {op: {arg: value}}
+  Raises:
+    TypeError: if list_ops is not a list or a tuple.
+    ValueError: if any op in list_ops has not been decorated with
+      @add_arg_scope.
+  """
+  if not isinstance(list_ops, (list, tuple)):
+    raise TypeError("list_ops is not a list or a tuple")
+  try:
+    current_scope = _current_arg_scope().copy()
+    for op in list_ops:
+      key_op = (op.__module__, op.__name__)
+      if not has_arg_scope(op):
+        raise ValueError("%s is not decorated with @add_arg_scope", key_op)
+      if key_op in current_scope:
+        current_kwargs = current_scope[key_op].copy()
+        current_kwargs.update(kwargs)
+        current_scope[key_op] = current_kwargs
+      else:
+        current_scope[key_op] = kwargs.copy()
+    _get_arg_stack().append(current_scope)
+    yield current_scope
+  finally:
+    _get_arg_stack().pop()
+
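+# Nesting sketch: an inner arg_scope starts from a copy of the current scope,
+# so it can override or extend the outer defaults, e.g.
+#
+#   with arg_scope([some_op], a=1, b=2):
+#     with arg_scope([some_op], b=3):
+#       some_op(x)   # called with a=1, b=3
+#
+# where `some_op` stands for any function decorated with @add_arg_scope and
+# `x` is an arbitrary argument; both are placeholders for illustration.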
+
+def add_arg_scope(func):
+  """Decorates a function with args so it can be used within an arg_scope.
+
+  Args:
+    func: function to decorate.
+
+  Returns:
+    The decorated function func_with_args().
+  """
+  @functools.wraps(func)
+  def func_with_args(*args, **kwargs):
+    current_scope = _current_arg_scope()
+    current_args = kwargs
+    key_func = (func.__module__, func.__name__)
+    if key_func in current_scope:
+      current_args = current_scope[key_func].copy()
+      current_args.update(kwargs)
+    return func(*args, **current_args)
+  _add_op(func)
+  return func_with_args
+
+
+def has_arg_scope(func):
+  """Checks whether a func has been decorated with @add_arg_scope or not.
+
+  Args:
+    func: function to check.
+
+  Returns:
+    a boolean.
+  """
+  key_op = (func.__module__, func.__name__)
+  return key_op in _DECORATED_OPS

+ 118 - 0
inception/slim/scopes_test.py

@@ -0,0 +1,118 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests slim.scopes."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+
+import tensorflow as tf
+from inception.slim import scopes
+
+
+@scopes.add_arg_scope
+def func1(*args, **kwargs):
+  return (args, kwargs)
+
+
+@scopes.add_arg_scope
+def func2(*args, **kwargs):
+  return (args, kwargs)
+
+
+class ArgScopeTest(tf.test.TestCase):
+
+  def testEmptyArgScope(self):
+    with self.test_session():
+      self.assertEqual(scopes._current_arg_scope(), {})
+
+  def testSimpleArgScope(self):
+    func1_args = (0,)
+    func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
+    with self.test_session():
+      with scopes.arg_scope([func1], a=1, b=None, c=[1]):
+        args, kwargs = func1(0)
+        self.assertTupleEqual(args, func1_args)
+        self.assertDictEqual(kwargs, func1_kwargs)
+
+  def testSimpleArgScopeWithTuple(self):
+    func1_args = (0,)
+    func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
+    with self.test_session():
+      with scopes.arg_scope((func1,), a=1, b=None, c=[1]):
+        args, kwargs = func1(0)
+        self.assertTupleEqual(args, func1_args)
+        self.assertDictEqual(kwargs, func1_kwargs)
+
+  def testOverwriteArgScope(self):
+    func1_args = (0,)
+    func1_kwargs = {'a': 1, 'b': 2, 'c': [1]}
+    with scopes.arg_scope([func1], a=1, b=None, c=[1]):
+      args, kwargs = func1(0, b=2)
+      self.assertTupleEqual(args, func1_args)
+      self.assertDictEqual(kwargs, func1_kwargs)
+
+  def testNestedArgScope(self):
+    func1_args = (0,)
+    func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
+    with scopes.arg_scope([func1], a=1, b=None, c=[1]):
+      args, kwargs = func1(0)
+      self.assertTupleEqual(args, func1_args)
+      self.assertDictEqual(kwargs, func1_kwargs)
+      func1_kwargs['b'] = 2
+      with scopes.arg_scope([func1], b=2):
+        args, kwargs = func1(0)
+        self.assertTupleEqual(args, func1_args)
+        self.assertDictEqual(kwargs, func1_kwargs)
+
+  def testSharedArgScope(self):
+    func1_args = (0,)
+    func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
+    with scopes.arg_scope([func1, func2], a=1, b=None, c=[1]):
+      args, kwargs = func1(0)
+      self.assertTupleEqual(args, func1_args)
+      self.assertDictEqual(kwargs, func1_kwargs)
+      args, kwargs = func2(0)
+      self.assertTupleEqual(args, func1_args)
+      self.assertDictEqual(kwargs, func1_kwargs)
+
+  def testSharedArgScopeTuple(self):
+    func1_args = (0,)
+    func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
+    with scopes.arg_scope((func1, func2), a=1, b=None, c=[1]):
+      args, kwargs = func1(0)
+      self.assertTupleEqual(args, func1_args)
+      self.assertDictEqual(kwargs, func1_kwargs)
+      args, kwargs = func2(0)
+      self.assertTupleEqual(args, func1_args)
+      self.assertDictEqual(kwargs, func1_kwargs)
+
+  def testPartiallySharedArgScope(self):
+    func1_args = (0,)
+    func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
+    func2_args = (1,)
+    func2_kwargs = {'a': 1, 'b': None, 'd': [2]}
+    with scopes.arg_scope([func1, func2], a=1, b=None):
+      with scopes.arg_scope([func1], c=[1]), scopes.arg_scope([func2], d=[2]):
+        args, kwargs = func1(0)
+        self.assertTupleEqual(args, func1_args)
+        self.assertDictEqual(kwargs, func1_kwargs)
+        args, kwargs = func2(1)
+        self.assertTupleEqual(args, func2_args)
+        self.assertDictEqual(kwargs, func2_kwargs)
+
+if __name__ == '__main__':
+  tf.test.main()

+ 24 - 0
inception/slim/slim.py

@@ -0,0 +1,24 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""TF-Slim grouped API. Please see README.md for details and usage."""
+# pylint: disable=unused-import
+
+# Collapse tf-slim into a single namespace.
+from inception.slim import inception_model as inception
+from inception.slim import losses
+from inception.slim import ops
+from inception.slim import scopes
+from inception.slim import variables
+from inception.slim.scopes import arg_scope

+ 224 - 0
inception/slim/variables.py

@@ -0,0 +1,224 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Contains convenience wrappers for creating Variables in TensorFlow.
+
+Usage:
+  weights_initializer = tf.truncated_normal_initializer(stddev=0.01)
+  l2_regularizer = lambda t: losses.l2_loss(t, weight=0.0005)
+  weights = variables.variable('weights',
+                               shape=[100, 100],
+                               initializer=weights_initializer,
+                               regularizer=l2_regularizer,
+                               device='/cpu:0')
+
+  biases = variables.variable('biases',
+                               shape=[100],
+                               initializer=tf.zeros_initializer,
+                               device='/cpu:0')
+
+  # More complex example.
+
+  net = slim.ops.conv2d(input, 32, [3, 3], scope='conv1')
+  net = slim.ops.conv2d(net, 64, [3, 3], scope='conv2')
+  with slim.arg_scope([slim.variables.variable], restore=False):
+    net = slim.ops.conv2d(net, 64, [3, 3], scope='conv3')
+
+  # Get all model variables from all the layers.
+  model_variables = slim.variables.get_variables()
+
+  # Get all model variables from a specific layer, i.e. 'conv1'.
+  conv1_variables = slim.variables.get_variables('conv1')
+
+  # Get all weights from all the layers.
+  weights = slim.variables.get_variables_by_name('weights')
+
+  # Get all biases from all the layers.
+  biases = slim.variables.get_variables_by_name('biases')
+
+  # Get all variables in the VARIABLES_TO_RESTORE collection
+  # (i.e. only those created by 'conv1' and 'conv2')
+  variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE)
+
+************************************************
+* Initializing model variables from a checkpoint
+************************************************
+
+# Create some variables.
+v1 = slim.variables.variable(name="v1", ..., restore=False)
+v2 = slim.variables.variable(name="v2", ...) # By default restore=True
+...
+# The list of variables to restore should only contain 'v2'.
+variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE)
+restorer = tf.train.Saver(variables_to_restore)
+with tf.Session() as sess:
+  # Restore variables from disk.
+  restorer.restore(sess, "/tmp/model.ckpt")
+  print("Model restored.")
+  # Do some work with the model
+  ...
+
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+import tensorflow as tf
+
+from inception.slim import scopes
+
+# Collection containing all the variables created using slim.variables
+VARIABLES_COLLECTION = '_variables_'
+
+# Collection containing all the slim.variables that are marked to_restore
+VARIABLES_TO_RESTORE = '_variables_to_restore_'
+
+
+def get_variable_given_name(var):
+  """Gets the variable given name without the scope.
+
+  Args:
+    var: a variable.
+
+  Returns:
+    the given name of the variable without the scope.
+  """
+  name = var.op.name
+  if '/' in name:
+    name = name.split('/')[-1]
+  return name
+
+
+def default_collections(given_name, restore):
+  """Define the set of default collections that variables should be added.
+
+  Args:
+    given_name: the given name of the variable.
+    restore: whether the variable should be added to the VARIABLES_TO_RESTORE
+      collection.
+
+  Returns:
+    a list of default collections.
+  """
+  defaults = [tf.GraphKeys.VARIABLES, VARIABLES_COLLECTION]
+  defaults += [VARIABLES_COLLECTION + given_name]
+  if restore:
+    defaults += [VARIABLES_TO_RESTORE]
+  return defaults
+
+
+def add_variable(var, restore=True):
+  """Adds a variable to the default set of collections.
+
+  Args:
+    var: a variable.
+    restore: whether the variable should be added to the
+      VARIABLES_TO_RESTORE collection.
+  """
+  given_name = get_variable_given_name(var)
+  for collection in default_collections(given_name, restore):
+    if var not in tf.get_collection(collection):
+      tf.add_to_collection(collection, var)
+
+
+def get_variables(prefix=None, suffix=None):
+  """Gets the list of variables, filtered by prefix and/or suffix.
+
+  Args:
+    prefix: an optional prefix for filtering the variables to return.
+    suffix: an optional suffix for filtering the variables to return.
+
+  Returns:
+    a list of variables with prefix and suffix.
+  """
+  candidates = tf.get_collection(VARIABLES_COLLECTION, prefix)
+  if suffix is not None:
+    candidates = [var for var in candidates if var.op.name.endswith(suffix)]
+  return candidates
+
+
+def get_variables_by_name(given_name, prefix=None):
+  """Gets the list of variables were given that name.
+
+  Args:
+    given_name: name given to the variable without scope.
+    prefix: an optional prefix for filtering the variables to return.
+
+  Returns:
+    a list of variables matching the given name, filtered by prefix.
+  """
+  return tf.get_collection(VARIABLES_COLLECTION + given_name, prefix)
+
+
+def get_unique_variable(name):
+  """Gets the variable uniquely identified by that name.
+
+  Args:
+    name: a name that uniquely identifies the variable.
+
+  Returns:
+    a tensorflow variable.
+
+  Raises:
+    ValueError: if no variable uniquely identified by the name exists.
+  """
+  candidates = tf.get_collection(tf.GraphKeys.VARIABLES, name)
+  if not candidates:
+    raise ValueError("Couldn't find variable %s" % name)
+
+  for candidate in candidates:
+    if candidate.op.name == name:
+      return candidate
+  raise ValueError('Variable %s does not uniquely identify a variable' % name)
+
+
+@scopes.add_arg_scope
+def variable(name, shape=None, dtype=tf.float32, initializer=None,
+             regularizer=None, trainable=True, collections=None, device='',
+             restore=True):
+  """Gets an existing variable with these parameters or creates a new one.
+
+    It also adds itself to a group with its name.
+
+  Args:
+    name: the name of the new or existing variable.
+    shape: shape of the new or existing variable.
+    dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
+    initializer: initializer for the variable if one is created.
+    regularizer: a (Tensor -> Tensor or None) function; the result of
+        applying it on a newly created variable will be added to the collection
+        GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
+    trainable: If `True` also add the variable to the graph collection
+      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
+    collections: A list of collection names to which the Variable will be added.
+      Note that the variable is always also added to the tf.GraphKeys.VARIABLES
+      collection.
+    device: Optional device to place the variable. It can be a string or a
+      function that is called to get the device for the variable.
+    restore: whether the variable should be added to the
+      VARIABLES_TO_RESTORE collection.
+
+  Returns:
+    The created or existing variable.
+  """
+  # Instantiate the device for this variable if it is passed as a function.
+  if device and callable(device):
+    device = device()
+  collections = set(list(collections or []) + default_collections(name,
+                                                                  restore))
+  with tf.device(device):
+    return tf.get_variable(name, shape=shape, dtype=dtype,
+                           initializer=initializer, regularizer=regularizer,
+                           trainable=trainable, collections=collections)
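+
+# Restore sketch: variables created with restore=True (the default) end up in
+# the VARIABLES_TO_RESTORE collection, which can be handed straight to a
+# Saver, e.g.
+#
+#   weights = variable('weights', [10, 10], restore=True)
+#   buffers = variable('buffers', [10], restore=False)
+#   saver = tf.train.Saver(tf.get_collection(VARIABLES_TO_RESTORE))
+#
+# Here only 'weights' would be restored from a checkpoint; the names and
+# shapes are made up for illustration.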

+ 200 - 0
inception/slim/variables_test.py

@@ -0,0 +1,200 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for slim.variables."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+import tensorflow as tf
+
+from inception.slim import scopes
+from inception.slim import variables
+
+
+class VariablesTest(tf.test.TestCase):
+
+  def testCreateVariable(self):
+    with self.test_session():
+      with tf.variable_scope('A'):
+        a = variables.variable('a', [5])
+        self.assertEquals(a.op.name, 'A/a')
+        self.assertListEqual(a.get_shape().as_list(), [5])
+
+  def testGetVariableGivenName(self):
+    with self.test_session():
+      with tf.variable_scope('A'):
+        a = variables.variable('a', [5])
+      with tf.variable_scope('B'):
+        b = variables.variable('a', [5])
+      self.assertEquals('a', variables.get_variable_given_name(a))
+      self.assertEquals('a', variables.get_variable_given_name(b))
+
+  def testGetVariableGivenNameScoped(self):
+    with self.test_session():
+      with tf.variable_scope('A'):
+        a = variables.variable('a', [5])
+        b = variables.variable('b', [5])
+        self.assertEquals([a], variables.get_variables_by_name('a'))
+        self.assertEquals([b], variables.get_variables_by_name('b'))
+
+  def testGetVariables(self):
+    with self.test_session():
+      with tf.variable_scope('A'):
+        a = variables.variable('a', [5])
+      with tf.variable_scope('B'):
+        b = variables.variable('a', [5])
+      self.assertEquals([a], variables.get_variables('A'))
+      self.assertEquals([b], variables.get_variables('B'))
+
+  def testGetVariablesSuffix(self):
+    with self.test_session():
+      with tf.variable_scope('A'):
+        a = variables.variable('a', [5])
+      with tf.variable_scope('A'):
+        b = variables.variable('b', [5])
+      self.assertEquals([a], variables.get_variables(suffix='a'))
+      self.assertEquals([b], variables.get_variables(suffix='b'))
+
+  def testGetVariableWithSingleVar(self):
+    with self.test_session():
+      with tf.variable_scope('parent'):
+        a = variables.variable('child', [5])
+      self.assertEquals(a, variables.get_unique_variable('parent/child'))
+
+  def testGetVariableWithDistractors(self):
+    with self.test_session():
+      with tf.variable_scope('parent'):
+        a = variables.variable('child', [5])
+        with tf.variable_scope('child'):
+          variables.variable('grandchild1', [7])
+          variables.variable('grandchild2', [9])
+      self.assertEquals(a, variables.get_unique_variable('parent/child'))
+
+  def testGetVariableThrowsExceptionWithNoMatch(self):
+    var_name = 'cant_find_me'
+    with self.test_session():
+      with self.assertRaises(ValueError):
+        variables.get_unique_variable(var_name)
+
+  def testGetThrowsExceptionWithChildrenButNoMatch(self):
+    var_name = 'parent/child'
+    with self.test_session():
+      with tf.variable_scope(var_name):
+        variables.variable('grandchild1', [7])
+        variables.variable('grandchild2', [9])
+      with self.assertRaises(ValueError):
+        variables.get_unique_variable(var_name)
+
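+  # Variables created through variables.variable should be tracked in the
+  # VARIABLES_TO_RESTORE collection by default.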
+  def testGetVariablesToRestore(self):
+    with self.test_session():
+      with tf.variable_scope('A'):
+        a = variables.variable('a', [5])
+      with tf.variable_scope('B'):
+        b = variables.variable('b', [5])
+      self.assertListEqual([a, b],
+                           tf.get_collection(variables.VARIABLES_TO_RESTORE))
+
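+  # restore=False keeps a variable out of VARIABLES_TO_RESTORE while it is
+  # still returned by get_variables().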
+  def testGetVariablesToRestorePartial(self):
+    with self.test_session():
+      with tf.variable_scope('A'):
+        a = variables.variable('a', [5])
+      with tf.variable_scope('B'):
+        b = variables.variable('b', [5], restore=False)
+      self.assertListEqual([a, b], variables.get_variables())
+      self.assertListEqual([a],
+                           tf.get_collection(variables.VARIABLES_TO_RESTORE))
+
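+  # Re-entering the scope with reuse=True should yield the same variable object.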
+  def testReuseVariable(self):
+    with self.test_session():
+      with tf.variable_scope('A'):
+        a = variables.variable('a', [])
+      with tf.variable_scope('A', reuse=True):
+        b = variables.variable('a', [])
+      self.assertEquals(a, b)
+      self.assertListEqual([a], variables.get_variables())
+
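+  # An explicit device= argument pins each variable to the requested device.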
+  def testVariableWithDevice(self):
+    with self.test_session():
+      with tf.variable_scope('A'):
+        a = variables.variable('a', [], device='cpu:0')
+        b = variables.variable('b', [], device='cpu:1')
+      self.assertDeviceEqual(a.device, 'cpu:0')
+      self.assertDeviceEqual(b.device, 'cpu:1')
+
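+  # Without an explicit device, the enclosing tf.device scope applies; an
+  # explicit device= argument still takes precedence.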
+  def testVariableWithDeviceFromScope(self):
+    with self.test_session():
+      with tf.device('/cpu:0'):
+        a = variables.variable('a', [])
+        b = variables.variable('b', [], device='cpu:1')
+      self.assertDeviceEqual(a.device, 'cpu:0')
+      self.assertDeviceEqual(b.device, 'cpu:1')
+
+  def testVariableCollection(self):
+    with self.test_session():
+      a = variables.variable('a', [], collections='A')
+      b = variables.variable('b', [], collections='B')
+      self.assertEquals(a, tf.get_collection('A')[0])
+      self.assertEquals(b, tf.get_collection('B')[0])
+
+  def testVariableCollections(self):
+    with self.test_session():
+      a = variables.variable('a', [], collections=['A', 'C'])
+      b = variables.variable('b', [], collections=['B', 'C'])
+      self.assertEquals(a, tf.get_collection('A')[0])
+      self.assertEquals(b, tf.get_collection('B')[0])
+
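+  # arg_scope supplies default keyword arguments (here, collections) to every
+  # variables.variable call made inside it.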
+  def testVariableCollectionsWithArgScope(self):
+    with self.test_session():
+      with scopes.arg_scope([variables.variable], collections='A'):
+        a = variables.variable('a', [])
+        b = variables.variable('b', [])
+      self.assertListEqual([a, b], tf.get_collection('A'))
+
+  def testVariableCollectionsWithArgScopeNested(self):
+    with self.test_session():
+      with scopes.arg_scope([variables.variable], collections='A'):
+        a = variables.variable('a', [])
+        with scopes.arg_scope([variables.variable], collections='B'):
+          b = variables.variable('b', [])
+      self.assertEquals(a, tf.get_collection('A')[0])
+      self.assertEquals(b, tf.get_collection('B')[0])
+
+  def testVariableCollectionsWithArgScopeNonNested(self):
+    with self.test_session():
+      with scopes.arg_scope([variables.variable], collections='A'):
+        a = variables.variable('a', [])
+      with scopes.arg_scope([variables.variable], collections='B'):
+        b = variables.variable('b', [])
+      variables.variable('c', [])
+      self.assertListEqual([a], tf.get_collection('A'))
+      self.assertListEqual([b], tf.get_collection('B'))
+
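+  # Nested arg_scopes compose: restore=True from the outer scope still applies
+  # inside the inner one, while trainable=False and the extra collections only
+  # affect 'b'.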
+  def testVariableRestoreWithArgScopeNested(self):
+    with self.test_session():
+      with scopes.arg_scope([variables.variable], restore=True):
+        a = variables.variable('a', [])
+        with scopes.arg_scope([variables.variable], trainable=False,
+                              collections=['A', 'B']):
+          b = variables.variable('b', [])
+        c = variables.variable('c', [])
+      self.assertListEqual([a, b, c],
+                           tf.get_collection(variables.VARIABLES_TO_RESTORE))
+      self.assertListEqual([a, c], tf.trainable_variables())
+      self.assertListEqual([b], tf.get_collection('A'))
+      self.assertListEqual([b], tf.get_collection('B'))
+
+if __name__ == '__main__':
+  tf.test.main()

+ 1 - 0
third_party

@@ -0,0 +1 @@
+tensorflow/third_party

+ 1 - 0
tools

@@ -0,0 +1 @@
+tensorflow/tools