
add eager API examples

aymericdamien committed 7 years ago
Commit 4c8c201429

+ 68 - 0
examples/1_Introduction/basic_eager_api.py

@@ -0,0 +1,68 @@
+'''
+Basic introduction to TensorFlow's Eager API.
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+
+What is Eager API?
+" Eager execution is an imperative, define-by-run interface where operations are
+executed immediately as they are called from Python. This makes it easier to
+get started with TensorFlow, and can make research and development more
+intuitive. A vast majority of the TensorFlow API remains the same whether eager
+execution is enabled or not. As a result, the exact same code that constructs
+TensorFlow graphs (e.g. using the layers API) can be executed imperatively
+by using eager execution. Conversely, most models written with Eager enabled
+can be converted to a graph that can be further optimized and/or extracted
+for deployment in production without changing code. " - Rajat Monga
+
+'''
+from __future__ import absolute_import, division, print_function
+
+import numpy as np
+import tensorflow as tf
+import tensorflow.contrib.eager as tfe
+
+# Set Eager API
+print("Setting Eager mode...")
+tfe.enable_eager_execution()
+
+# Define constant tensors
+print("Define constant tensors")
+a = tf.constant(2)
+print("a = %i" % a)
+b = tf.constant(3)
+print("b = %i" % b)
+
+# Run the operation without the need for tf.Session
+print("Running operations, without tf.Session")
+c = a + b
+print("a + b = %i" % c)
+d = a * b
+print("a * b = %i" % d)
+
+
+# Full compatibility with NumPy
+print("Mixing operations with Tensors and NumPy Arrays")
+
+# Define constant tensors
+a = tf.constant([[2., 1.],
+                 [1., 0.]], dtype=tf.float32)
+print("Tensor:\n a = %s" % a)
+b = np.array([[3., 0.],
+              [5., 1.]], dtype=np.float32)
+print("NumpyArray:\n b = %s" % b)
+
+# Run the operation without the need for tf.Session
+print("Running operations, without tf.Session")
+
+c = a + b
+print("a + b = %s" % c)
+
+d = tf.matmul(a, b)
+print("a * b = %s" % d)
+
+print("Iterate through Tensor 'a':")
+for i in range(a.shape[0]):
+    for j in range(a.shape[1]):
+        print(a[i][j])
+

+ 70 - 0
examples/2_BasicModels/linear_regression_eager_api.py

@@ -0,0 +1,70 @@
+'''
+A linear regression learning algorithm example using TensorFlow's Eager API.
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+from __future__ import absolute_import, division, print_function
+
+import matplotlib.pyplot as plt
+import numpy as np
+import tensorflow as tf
+import tensorflow.contrib.eager as tfe
+
+# Set Eager API
+tfe.enable_eager_execution()
+
+# Training Data
+train_X = [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
+           7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1]
+train_Y = [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
+           2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3]
+n_samples = len(train_X)
+
+# Parameters
+learning_rate = 0.01
+display_step = 100
+num_steps = 1000
+
+# Weight and Bias
+W = tfe.Variable(np.random.randn())
+b = tfe.Variable(np.random.randn())
+
+
+# Linear regression (Wx + b)
+def linear_regression(inputs):
+    return inputs * W + b
+
+
+# Mean square error
+def mean_square_fn(model_fn, inputs, labels):
+    return tf.reduce_sum(tf.pow(model_fn(inputs) - labels, 2)) / (2 * n_samples)
+
+
+# SGD Optimizer
+optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
+# Compute gradients
+grad = tfe.implicit_gradients(mean_square_fn)
+
+# Initial cost, before optimizing
+print("Initial cost= {:.9f}".format(
+    mean_square_fn(linear_regression, train_X, train_Y)),
+    "W=", W.numpy(), "b=", b.numpy())
+
+# Training
+for step in range(num_steps):
+
+    optimizer.apply_gradients(grad(linear_regression, train_X, train_Y))
+
+    if (step + 1) % display_step == 0 or step == 0:
+        print("Epoch:", '%04d' % (step + 1), "cost=",
+              "{:.9f}".format(mean_square_fn(linear_regression, train_X, train_Y)),
+              "W=", W.numpy(), "b=", b.numpy())
+
+# Graphic display
+plt.plot(train_X, train_Y, 'ro', label='Original data')
+plt.plot(train_X, np.array(W * train_X + b), label='Fitted line')
+plt.legend()
+plt.show()
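For context, tfe.implicit_gradients differentiates the loss with respect to every trainable variable used inside the function and returns (gradient, variable) pairs ready for apply_gradients. The related tfe.gradients_function instead differentiates with respect to a function's explicit arguments; a minimal sketch, assuming the same TF 1.x contrib.eager API and script state:

    # Illustrative only: gradients of the loss w.r.t. explicit scalar arguments.
    def loss_at(w, b0):
        return tf.reduce_sum(tf.pow(w * train_X + b0 - train_Y, 2)) / (2 * n_samples)

    grad_fn = tfe.gradients_function(loss_at)
    dw, db0 = grad_fn(3.0, 2.0)  # d(loss)/dw and d(loss)/db0 at w=3.0, b0=2.0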

+ 108 - 0
examples/2_BasicModels/logistic_regression_eager_api.py

@@ -0,0 +1,108 @@
+'''
+A logistic regression learning algorithm example using TensorFlow's Eager API.
+This example uses the MNIST database of handwritten digits
+(http://yann.lecun.com/exdb/mnist/).
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+'''
+from __future__ import absolute_import, division, print_function
+
+import tensorflow as tf
+import tensorflow.contrib.eager as tfe
+
+# Set Eager API
+tfe.enable_eager_execution()
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
+
+# Parameters
+learning_rate = 0.1
+batch_size = 128
+num_steps = 1000
+display_step = 100
+
+dataset = tf.data.Dataset.from_tensor_slices(
+    (mnist.train.images, mnist.train.labels)).batch(batch_size)
+dataset_iter = tfe.Iterator(dataset)
+
+# Variables
+W = tfe.Variable(tf.zeros([784, 10]), name='weights')
+b = tfe.Variable(tf.zeros([10]), name='bias')
+
+
+# Logistic regression (Wx + b)
+def logistic_regression(inputs):
+    return tf.matmul(inputs, W) + b
+
+
+# Cross-Entropy loss function
+def loss_fn(inference_fn, inputs, labels):
+    # Using sparse_softmax cross entropy
+    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
+        logits=inference_fn(inputs), labels=labels))
+
+
+# Calculate accuracy
+def accuracy_fn(inference_fn, inputs, labels):
+    prediction = tf.nn.softmax(inference_fn(inputs))
+    correct_pred = tf.equal(tf.argmax(prediction, 1), labels)
+    return tf.reduce_mean(tf.cast(correct_pred, tf.float32))
+
+
+# SGD Optimizer
+optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
+# Compute gradients
+grad = tfe.implicit_gradients(loss_fn)
+
+# Training
+average_loss = 0.
+average_acc = 0.
+for step in range(num_steps):
+
+    # Iterate through the dataset
+    try:
+        d = dataset_iter.next()
+    except StopIteration:
+        # Refill queue
+        dataset_iter = tfe.Iterator(dataset)
+        d = dataset_iter.next()
+
+    # Images
+    x_batch = d[0]
+    # Labels
+    y_batch = tf.cast(d[1], dtype=tf.int64)
+
+    # Compute the batch loss
+    batch_loss = loss_fn(logistic_regression, x_batch, y_batch)
+    average_loss += batch_loss
+    # Compute the batch accuracy
+    batch_accuracy = accuracy_fn(logistic_regression, x_batch, y_batch)
+    average_acc += batch_accuracy
+
+    if step == 0:
+        # Display the initial cost, before optimizing
+        print("Initial loss= {:.9f}".format(average_loss))
+
+    # Update the variables following gradients info
+    optimizer.apply_gradients(grad(logistic_regression, x_batch, y_batch))
+
+    # Display info
+    if (step + 1) % display_step == 0 or step == 0:
+        if step > 0:
+            average_loss /= display_step
+            average_acc /= display_step
+        print("Step:", '%04d' % (step + 1), " loss=",
+              "{:.9f}".format(average_loss), " accuracy=",
+              "{:.4f}".format(average_acc))
+        average_loss = 0.
+        average_acc = 0.
+
+# Evaluate model on the test image set
+testX = mnist.test.images
+testY = mnist.test.labels
+
+test_acc = accuracy_fn(logistic_regression, testX, testY)
+print("Testset Accuracy: {:.4f}".format(test_acc))

+ 137 - 0
examples/3_NeuralNetworks/neural_network_eager_api.py

@@ -0,0 +1,137 @@
+""" Neural Network.
+
+A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron)
+implementation with TensorFlow's Eager API. This example uses the MNIST
+database of handwritten digits (http://yann.lecun.com/exdb/mnist/).
+
+This example uses TensorFlow layers; see the 'neural_network_raw' example for
+a raw implementation with variables.
+
+Links:
+    [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+from __future__ import print_function
+
+import tensorflow as tf
+import tensorflow.contrib.eager as tfe
+
+# Set Eager API
+tfe.enable_eager_execution()
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
+
+# Parameters
+learning_rate = 0.001
+num_steps = 1000
+batch_size = 128
+display_step = 100
+
+# Network Parameters
+n_hidden_1 = 256 # 1st layer number of neurons
+n_hidden_2 = 256 # 2nd layer number of neurons
+num_input = 784 # MNIST data input (img shape: 28*28)
+num_classes = 10 # MNIST total classes (0-9 digits)
+
+# Using TF Dataset to split data into batches
+dataset = tf.data.Dataset.from_tensor_slices(
+    (mnist.train.images, mnist.train.labels)).batch(batch_size)
+dataset_iter = tfe.Iterator(dataset)
+
+
+# Define the neural network. To use the eager API and tf.layers API together,
+# we must instantiate a tfe.Network class as follows:
+class NeuralNet(tfe.Network):
+    def __init__(self):
+        # Define each layer
+        super(NeuralNet, self).__init__()
+        # Hidden fully connected layer with 256 neurons
+        self.layer1 = self.track_layer(
+            tf.layers.Dense(n_hidden_1, activation=tf.nn.relu))
+        # Hidden fully connected layer with 256 neurons
+        self.layer2 = self.track_layer(
+            tf.layers.Dense(n_hidden_2, activation=tf.nn.relu))
+        # Output fully connected layer with a neuron for each class
+        self.out_layer = self.track_layer(tf.layers.Dense(num_classes))
+
+    def call(self, x):
+        x = self.layer1(x)
+        x = self.layer2(x)
+        return self.out_layer(x)
+
+
+neural_net = NeuralNet()
+
+
+# Cross-Entropy loss function
+def loss_fn(inference_fn, inputs, labels):
+    # Using sparse_softmax cross entropy
+    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
+        logits=inference_fn(inputs), labels=labels))
+
+
+# Calculate accuracy
+def accuracy_fn(inference_fn, inputs, labels):
+    prediction = tf.nn.softmax(inference_fn(inputs))
+    correct_pred = tf.equal(tf.argmax(prediction, 1), labels)
+    return tf.reduce_mean(tf.cast(correct_pred, tf.float32))
+
+
+# Adam Optimizer
+optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
+# Compute gradients
+grad = tfe.implicit_gradients(loss_fn)
+
+# Training
+average_loss = 0.
+average_acc = 0.
+for step in range(num_steps):
+
+    # Iterate through the dataset
+    try:
+        d = dataset_iter.next()
+    except StopIteration:
+        # Refill queue
+        dataset_iter = tfe.Iterator(dataset)
+        d = dataset_iter.next()
+
+    # Images
+    x_batch = d[0]
+    # Labels
+    y_batch = tf.cast(d[1], dtype=tf.int64)
+
+    # Compute the batch loss
+    batch_loss = loss_fn(neural_net, x_batch, y_batch)
+    average_loss += batch_loss
+    # Compute the batch accuracy
+    batch_accuracy = accuracy_fn(neural_net, x_batch, y_batch)
+    average_acc += batch_accuracy
+
+    if step == 0:
+        # Display the initial cost, before optimizing
+        print("Initial loss= {:.9f}".format(average_loss))
+
+    # Update the variables following gradients info
+    optimizer.apply_gradients(grad(neural_net, x_batch, y_batch))
+
+    # Display info
+    if (step + 1) % display_step == 0 or step == 0:
+        if step > 0:
+            average_loss /= display_step
+            average_acc /= display_step
+        print("Step:", '%04d' % (step + 1), " loss=",
+              "{:.9f}".format(average_loss), " accuracy=",
+              "{:.4f}".format(average_acc))
+        average_loss = 0.
+        average_acc = 0.
+
+# Evaluate model on the test image set
+testX = mnist.test.images
+testY = mnist.test.labels
+
+test_acc = accuracy_fn(neural_net, testX, testY)
+print("Testset Accuracy: {:.4f}".format(test_acc))