Forráskód Böngészése

update tf dataset

aymericdamien 6 éve
szülő
commit
7960768a38

+ 3 - 7
examples/2_BasicModels/logistic_regression_eager_api.py

@@ -26,7 +26,8 @@ num_steps = 1000
 display_step = 100
 
 dataset = tf.data.Dataset.from_tensor_slices(
-    (mnist.train.images, mnist.train.labels)).batch(batch_size)
+    (mnist.train.images, mnist.train.labels))
+dataset = dataset.repeat().batch(batch_size).prefetch(batch_size)
 dataset_iter = tfe.Iterator(dataset)
 
 # Variables
@@ -64,12 +65,7 @@ average_acc = 0.
 for step in range(num_steps):
 
     # Iterate through the dataset
-    try:
-        d = dataset_iter.next()
-    except StopIteration:
-        # Refill queue
-        dataset_iter = tfe.Iterator(dataset)
-        d = dataset_iter.next()
+    d = dataset_iter.next()
 
     # Images
     x_batch = d[0]

+ 3 - 7
examples/3_NeuralNetworks/neural_network_eager_api.py

@@ -39,7 +39,8 @@ num_classes = 10 # MNIST total classes (0-9 digits)
 
 # Using TF Dataset to split data into batches
 dataset = tf.data.Dataset.from_tensor_slices(
-    (mnist.train.images, mnist.train.labels)).batch(batch_size)
+    (mnist.train.images, mnist.train.labels))
+dataset = dataset.repeat().batch(batch_size).prefetch(batch_size)
 dataset_iter = tfe.Iterator(dataset)
 
 
@@ -92,12 +93,7 @@ average_acc = 0.
 for step in range(num_steps):
 
     # Iterate through the dataset
-    try:
-        d = dataset_iter.next()
-    except StopIteration:
-        # Refill queue
-        dataset_iter = tfe.Iterator(dataset)
-        d = dataset_iter.next()
+    d = dataset_iter.next()
 
     # Images
     x_batch = d[0]

+ 11 - 18
examples/5_DataManagement/tensorflow_dataset_api.py

@@ -29,21 +29,21 @@ dropout = 0.75 # Dropout, probability to keep units
 sess = tf.Session()
 
 # Create a dataset tensor from the images and the labels
-dataset = tf.contrib.data.Dataset.from_tensor_slices(
+dataset = tf.data.Dataset.from_tensor_slices(
     (mnist.train.images, mnist.train.labels))
+# Automatically refill the data queue when empty
+dataset = dataset.repeat()
 # Create batches of data
 dataset = dataset.batch(batch_size)
-# Create an iterator, to go over the dataset
+# Prefetch data for faster performance
+dataset = dataset.prefetch(batch_size)
+
+# Create an iterator over the dataset
 iterator = dataset.make_initializable_iterator()
-# It is better to use 2 placeholders, to avoid to load all data into memory,
-# and avoid the 2Gb restriction length of a tensor.
-_data = tf.placeholder(tf.float32, [None, n_input])
-_labels = tf.placeholder(tf.float32, [None, n_classes])
 # Initialize the iterator
-sess.run(iterator.initializer, feed_dict={_data: mnist.train.images,
-                                          _labels: mnist.train.labels})
+sess.run(iterator.initializer)
 
-# Neural Net Input
+# Neural Net Input (images, labels)
 X, Y = iterator.get_next()
 
 
@@ -116,15 +116,8 @@ sess.run(init)
 # Training cycle
 for step in range(1, num_steps + 1):
 
-    try:
-        # Run optimization
-        sess.run(train_op)
-    except tf.errors.OutOfRangeError:
-        # Reload the iterator when it reaches the end of the dataset
-        sess.run(iterator.initializer,
-                 feed_dict={_data: mnist.train.images,
-                            _labels: mnist.train.labels})
-        sess.run(train_op)
+    # Run optimization
+    sess.run(train_op)
 
     if step % display_step == 0 or step == 1:
         # Calculate batch loss and accuracy

+ 8 - 18
notebooks/2_BasicModels/logistic_regression_eager_api.ipynb

@@ -54,9 +54,7 @@
   {
    "cell_type": "code",
    "execution_count": 3,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -100,7 +98,8 @@
    "source": [
     "# Iterator for the dataset\n",
     "dataset = tf.data.Dataset.from_tensor_slices(\n",
-    "    (mnist.train.images, mnist.train.labels)).batch(batch_size)\n",
+    "    (mnist.train.images, mnist.train.labels))\n",
+    "dataset = dataset.repeat().batch(batch_size).prefetch(batch_size)\n",
     "dataset_iter = tfe.Iterator(dataset)"
    ]
   },
@@ -151,9 +150,7 @@
   {
    "cell_type": "code",
    "execution_count": 8,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -181,12 +178,7 @@
     "for step in range(num_steps):\n",
     "\n",
     "    # Iterate through the dataset\n",
-    "    try:\n",
-    "        d = dataset_iter.next()\n",
-    "    except StopIteration:\n",
-    "        # Refill queue\n",
-    "        dataset_iter = tfe.Iterator(dataset)\n",
-    "        d = dataset_iter.next()\n",
+    "    d = dataset_iter.next()\n",
     "\n",
     "    # Images\n",
     "    x_batch = d[0]\n",
@@ -222,9 +214,7 @@
   {
    "cell_type": "code",
    "execution_count": 9,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -246,7 +236,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python [default]",
+   "display_name": "Python 2",
    "language": "python",
    "name": "python2"
   },
@@ -260,7 +250,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython2",
-   "version": "2.7.12"
+   "version": "2.7.14"
   }
  },
  "nbformat": 4,

+ 9 - 19
notebooks/3_NeuralNetworks/neural_network_eager_api.ipynb

@@ -60,9 +60,7 @@
   {
    "cell_type": "code",
    "execution_count": 3,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -112,7 +110,8 @@
    "source": [
     "# Using TF Dataset to split data into batches\n",
     "dataset = tf.data.Dataset.from_tensor_slices(\n",
-    "    (mnist.train.images, mnist.train.labels)).batch(batch_size)\n",
+    "    (mnist.train.images, mnist.train.labels))\n",
+    "dataset = dataset.repeat().batch(batch_size).prefetch(batch_size)\n",
     "dataset_iter = tfe.Iterator(dataset)"
    ]
   },
@@ -180,9 +179,7 @@
   {
    "cell_type": "code",
    "execution_count": 8,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -210,13 +207,8 @@
     "for step in range(num_steps):\n",
     "\n",
     "    # Iterate through the dataset\n",
-    "    try:\n",
-    "        d = dataset_iter.next()\n",
-    "    except StopIteration:\n",
-    "        # Refill queue\n",
-    "        dataset_iter = tfe.Iterator(dataset)\n",
-    "        d = dataset_iter.next()\n",
-    "\n",
+    "    d = dataset_iter.next()\n",
+    "    \n",
     "    # Images\n",
     "    x_batch = d[0]\n",
     "    # Labels\n",
@@ -251,9 +243,7 @@
   {
    "cell_type": "code",
    "execution_count": 9,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -275,7 +265,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python [default]",
+   "display_name": "Python 2",
    "language": "python",
    "name": "python2"
   },
@@ -289,7 +279,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython2",
-   "version": "2.7.12"
+   "version": "2.7.14"
   }
  },
  "nbformat": 4,

+ 15 - 27
notebooks/5_DataManagement/tensorflow_dataset_api.ipynb

@@ -2,9 +2,7 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "source": [
     "# TensorFlow Dataset API\n",
     "\n",
@@ -19,9 +17,7 @@
   {
    "cell_type": "code",
    "execution_count": 1,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -64,21 +60,21 @@
     "sess = tf.Session()\n",
     "\n",
     "# Create a dataset tensor from the images and the labels\n",
-    "dataset = tf.contrib.data.Dataset.from_tensor_slices(\n",
+    "dataset = tf.data.Dataset.from_tensor_slices(\n",
     "    (mnist.train.images, mnist.train.labels))\n",
+    "# Automatically refill the data queue when empty\n",
+    "dataset = dataset.repeat()\n",
     "# Create batches of data\n",
     "dataset = dataset.batch(batch_size)\n",
-    "# Create an iterator, to go over the dataset\n",
+    "# Prefetch data for faster performance\n",
+    "dataset = dataset.prefetch(batch_size)\n",
+    "\n",
+    "# Create an iterator over the dataset\n",
     "iterator = dataset.make_initializable_iterator()\n",
-    "# It is better to use 2 placeholders, to avoid to load all data into memory,\n",
-    "# and avoid the 2Gb restriction length of a tensor.\n",
-    "_data = tf.placeholder(tf.float32, [None, n_input])\n",
-    "_labels = tf.placeholder(tf.float32, [None, n_classes])\n",
     "# Initialize the iterator\n",
-    "sess.run(iterator.initializer, feed_dict={_data: mnist.train.images,\n",
-    "                                          _labels: mnist.train.labels})\n",
+    "sess.run(iterator.initializer)\n",
     "\n",
-    "# Neural Net Input\n",
+    "# Neural Net Input (images, labels)\n",
     "X, Y = iterator.get_next()"
    ]
   },
@@ -155,7 +151,6 @@
    "cell_type": "code",
    "execution_count": 4,
    "metadata": {
-    "collapsed": false,
     "scrolled": false
    },
    "outputs": [
@@ -188,15 +183,8 @@
     "# Training cycle\n",
     "for step in range(1, num_steps + 1):\n",
     "    \n",
-    "    try:\n",
-    "        # Run optimization\n",
-    "        sess.run(train_op)\n",
-    "    except tf.errors.OutOfRangeError:\n",
-    "        # Reload the iterator when it reaches the end of the dataset\n",
-    "        sess.run(iterator.initializer, \n",
-    "                 feed_dict={_data: mnist.train.images,\n",
-    "                            _labels: mnist.train.labels})\n",
-    "        sess.run(train_op)\n",
+    "    # Run optimization\n",
+    "    sess.run(train_op)\n",
     "        \n",
     "    if step % display_step == 0 or step == 1:\n",
     "        # Calculate batch loss and accuracy\n",
@@ -212,7 +200,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python [default]",
+   "display_name": "Python 2",
    "language": "python",
    "name": "python2"
   },
@@ -226,7 +214,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython2",
-   "version": "2.7.12"
+   "version": "2.7.14"
   }
  },
  "nbformat": 4,