@@ -0,0 +1,288 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "'''\n",
+    "Graph and loss visualization using TensorBoard.\n",
+    "This example uses the MNIST database of handwritten digits\n",
+    "(http://yann.lecun.com/exdb/mnist/)\n",
+    "\n",
+    "Author: Aymeric Damien\n",
+    "Project: https://github.com/aymericdamien/TensorFlow-Examples/\n",
+    "'''"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
+      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
+     ]
+    }
+   ],
+   "source": [
+    "from __future__ import print_function\n",
+    "\n",
+    "import tensorflow as tf\n",
+    "\n",
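+    "# Only log warnings and errors; silence TF's INFO-level messages\n",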
+ "tf.logging.set_verbosity(tf.logging.WARN)\n",
|
|
|
+ "\n",
|
|
|
+ "# Import MNIST data\n",
|
|
|
+ "from tensorflow.examples.tutorials.mnist import input_data\n",
|
|
|
+ "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)"
|
|
|
+ ]
|
|
|
+ },
|
|
|
+ {
|
|
|
+ "cell_type": "code",
|
|
|
+ "execution_count": 2,
|
|
|
+ "metadata": {
|
|
|
+ "collapsed": true
|
|
|
+ },
|
|
|
+ "outputs": [],
|
|
|
+ "source": [
|
|
|
+ "# Parameters\n",
|
|
|
+ "learning_rate = 0.01\n",
|
|
|
+ "training_epochs = 25\n",
|
|
|
+ "batch_size = 100\n",
|
|
|
+ "display_step = 1\n",
|
|
|
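+    "# Directory where the summary FileWriter writes TensorBoard event files\n",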
+ "logs_path = '/tmp/tensorflow_logs/example'"
|
|
|
+ ]
|
|
|
+ },
|
|
|
+ {
|
|
|
+ "cell_type": "code",
|
|
|
+ "execution_count": 3,
|
|
|
+ "metadata": {
|
|
|
+ "collapsed": true
|
|
|
+ },
|
|
|
+ "outputs": [],
|
|
|
+ "source": [
|
|
|
+ "# Network Parameters\n",
|
|
|
+ "n_hidden_1 = 256 # 1st layer number of features\n",
|
|
|
+ "n_hidden_2 = 256 # 2nd layer number of features\n",
|
|
|
+ "n_input = 784 # MNIST data input (img shape: 28*28)\n",
|
|
|
+ "n_classes = 10 # MNIST total classes (0-9 digits)\n",
|
|
|
+ "\n",
|
|
|
+ "# tf Graph Input\n",
|
|
|
+ "# mnist data image of shape 28*28=784\n",
|
|
|
+ "x = tf.placeholder(tf.float32, [None, 784], name='InputData')\n",
|
|
|
+ "# 0-9 digits recognition => 10 classes\n",
|
|
|
+ "y = tf.placeholder(tf.float32, [None, 10], name='LabelData')"
|
|
|
+ ]
|
|
|
+ },
|
|
|
+ {
|
|
|
+ "cell_type": "code",
|
|
|
+ "execution_count": 4,
|
|
|
+ "metadata": {
|
|
|
+ "collapsed": true
|
|
|
+ },
|
|
|
+ "outputs": [],
|
|
|
+ "source": [
|
|
|
+ "# Create model\n",
|
|
|
+ "def multilayer_perceptron(x, weights, biases):\n",
|
|
|
+ " # Hidden layer with RELU activation\n",
|
|
|
+ " layer_1 = tf.add(tf.matmul(x, weights['w1']), biases['b1'])\n",
|
|
|
+ " layer_1 = tf.nn.relu(layer_1)\n",
|
|
|
+ " # Create a summary to visualize the first layer ReLU activation\n",
|
|
|
+ " tf.summary.histogram(\"relu1\", layer_1)\n",
|
|
|
+ " # Hidden layer with RELU activation\n",
|
|
|
+ " layer_2 = tf.add(tf.matmul(layer_1, weights['w2']), biases['b2'])\n",
|
|
|
+ " layer_2 = tf.nn.relu(layer_2)\n",
|
|
|
+ " # Create another summary to visualize the second layer ReLU activation\n",
|
|
|
+ " tf.summary.histogram(\"relu2\", layer_2)\n",
|
|
|
+ " # Output layer\n",
|
|
|
+ " out_layer = tf.add(tf.matmul(layer_2, weights['w3']), biases['b3'])\n",
|
|
|
+ " return out_layer\n",
|
|
|
+ "\n",
|
|
|
+ "# Store layers weight & bias\n",
|
|
|
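+    "# (named W1/b1 etc. so they are easy to identify in TensorBoard)\n",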
+ "weights = {\n",
|
|
|
+ " 'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1]), name='W1'),\n",
|
|
|
+ " 'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]), name='W2'),\n",
|
|
|
+ " 'w3': tf.Variable(tf.random_normal([n_hidden_2, n_classes]), name='W3')\n",
|
|
|
+ "}\n",
|
|
|
+ "biases = {\n",
|
|
|
+ " 'b1': tf.Variable(tf.random_normal([n_hidden_1]), name='b1'),\n",
|
|
|
+ " 'b2': tf.Variable(tf.random_normal([n_hidden_2]), name='b2'),\n",
|
|
|
+ " 'b3': tf.Variable(tf.random_normal([n_classes]), name='b3')\n",
|
|
|
+ "}"
|
|
|
+ ]
|
|
|
+ },
|
|
|
+ {
|
|
|
+ "cell_type": "code",
|
|
|
+ "execution_count": 5,
|
|
|
+ "metadata": {},
|
|
|
+ "outputs": [],
|
|
|
+ "source": [
|
|
|
+ "# Encapsulating all ops into scopes, making Tensorboard's Graph\n",
|
|
|
+ "# Visualization more convenient\n",
|
|
|
+ "with tf.name_scope('Model'):\n",
|
|
|
+ " # Build model\n",
|
|
|
+ " pred = multilayer_perceptron(x, weights, biases)\n",
|
|
|
+ "\n",
|
|
|
+ "with tf.name_scope('Loss'):\n",
|
|
|
+ " # Softmax Cross entropy (cost function)\n",
|
|
|
+ " loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n",
|
|
|
+ "\n",
|
|
|
+ "with tf.name_scope('SGD'):\n",
|
|
|
+ " # Gradient Descent\n",
|
|
|
+ " optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n",
|
|
|
+ " # Op to calculate every variable gradient\n",
|
|
|
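+    "    # (tf.gradients + apply_gradients is used instead of optimizer.minimize\n",
+    "    #  so that each gradient can get its own histogram summary below)\n",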
+ " grads = tf.gradients(loss, tf.trainable_variables())\n",
|
|
|
+ " grads = list(zip(grads, tf.trainable_variables()))\n",
|
|
|
+ " # Op to update all variables according to their gradient\n",
|
|
|
+ " apply_grads = optimizer.apply_gradients(grads_and_vars=grads)\n",
|
|
|
+ "\n",
|
|
|
+ "with tf.name_scope('Accuracy'):\n",
|
|
|
+ " # Accuracy\n",
|
|
|
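+    "    # (fraction of samples whose argmax prediction matches the label)\n",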
+ " acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n",
|
|
|
+ " acc = tf.reduce_mean(tf.cast(acc, tf.float32))\n",
|
|
|
+ "\n",
|
|
|
+ "# Initializing the variables\n",
|
|
|
+ "init = tf.global_variables_initializer()\n",
|
|
|
+ "\n",
|
|
|
+ "# Create a summary to monitor cost tensor\n",
|
|
|
+ "tf.summary.scalar(\"loss\", loss)\n",
|
|
|
+ "# Create a summary to monitor accuracy tensor\n",
|
|
|
+ "tf.summary.scalar(\"accuracy\", acc)\n",
|
|
|
+ "# Create summaries to visualize weights\n",
|
|
|
+ "for var in tf.trainable_variables():\n",
|
|
|
+ " tf.summary.histogram(var.name, var)\n",
|
|
|
+ "# Summarize all gradients\n",
|
|
|
+ "for grad, var in grads:\n",
|
|
|
+ " tf.summary.histogram(var.name + '/gradient', grad)\n",
|
|
|
+ "# Merge all summaries into a single op\n",
|
|
|
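+    "# (one sess.run of this op evaluates every summary defined above)\n",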
+ "merged_summary_op = tf.summary.merge_all()"
|
|
|
+ ]
|
|
|
+ },
|
|
|
+ {
|
|
|
+ "cell_type": "code",
|
|
|
+ "execution_count": 6,
|
|
|
+ "metadata": {},
|
|
|
+ "outputs": [
|
|
|
+ {
|
|
|
+ "name": "stdout",
|
|
|
+ "output_type": "stream",
|
|
|
+ "text": [
|
|
|
+ "Epoch: 0001 cost= 67.380016100\n",
|
|
|
+ "Epoch: 0002 cost= 15.307113347\n",
|
|
|
+ "Epoch: 0003 cost= 9.986865815\n",
|
|
|
+ "Epoch: 0004 cost= 7.381951704\n",
|
|
|
+ "Epoch: 0005 cost= 5.849047792\n",
|
|
|
+ "Epoch: 0006 cost= 4.881959525\n",
|
|
|
+ "Epoch: 0007 cost= 4.045799575\n",
|
|
|
+ "Epoch: 0008 cost= 3.430059265\n",
|
|
|
+ "Epoch: 0009 cost= 3.076626336\n",
|
|
|
+ "Epoch: 0010 cost= 2.863002729\n",
|
|
|
+ "Epoch: 0011 cost= 2.510218838\n",
|
|
|
+ "Epoch: 0012 cost= 2.276251159\n",
|
|
|
+ "Epoch: 0013 cost= 1.978880318\n",
|
|
|
+ "Epoch: 0014 cost= 1.733890927\n",
|
|
|
+ "Epoch: 0015 cost= 1.540066199\n",
|
|
|
+ "Epoch: 0016 cost= 1.439536399\n",
|
|
|
+ "Epoch: 0017 cost= 1.279739846\n",
|
|
|
+ "Epoch: 0018 cost= 1.224386179\n",
|
|
|
+ "Epoch: 0019 cost= 1.095804572\n",
|
|
|
+ "Epoch: 0020 cost= 1.100819187\n",
|
|
|
+ "Epoch: 0021 cost= 0.885994007\n",
|
|
|
+ "Epoch: 0022 cost= 1.079832625\n",
|
|
|
+ "Epoch: 0023 cost= 0.948164673\n",
|
|
|
+ "Epoch: 0024 cost= 0.613826872\n",
|
|
|
+ "Epoch: 0025 cost= 0.644082715\n",
|
|
|
+ "Optimization Finished!\n",
|
|
|
+ "Accuracy: 0.9513\n",
|
|
|
+ "Run the command line:\n",
|
|
|
+ "--> tensorboard --logdir=/tmp/tensorflow_logs \n",
|
|
|
+ "Then open http://0.0.0.0:6006/ into your web browser\n"
|
|
|
+ ]
|
|
|
+ }
|
|
|
+ ],
|
|
|
+ "source": [
|
|
|
+ "# Launch the graph\n",
|
|
|
+ "with tf.Session() as sess:\n",
|
|
|
+ " sess.run(init)\n",
|
|
|
+ "\n",
|
|
|
+ " # op to write logs to Tensorboard\n",
|
|
|
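+    "    # (passing the graph here populates TensorBoard's Graphs tab)\n",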
+ " summary_writer = tf.summary.FileWriter(logs_path,\n",
|
|
|
+ " graph=tf.get_default_graph())\n",
|
|
|
+ "\n",
|
|
|
+ " # Training cycle\n",
|
|
|
+ " for epoch in range(training_epochs):\n",
|
|
|
+ " avg_cost = 0.\n",
|
|
|
+ " total_batch = int(mnist.train.num_examples/batch_size)\n",
|
|
|
+ " # Loop over all batches\n",
|
|
|
+ " for i in range(total_batch):\n",
|
|
|
+ " batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n",
|
|
|
+ " # Run optimization op (backprop), cost op (to get loss value)\n",
|
|
|
+ " # and summary nodes\n",
|
|
|
+ " _, c, summary = sess.run([apply_grads, loss, merged_summary_op],\n",
|
|
|
+ " feed_dict={x: batch_xs, y: batch_ys})\n",
|
|
|
+ " # Write logs at every iteration\n",
|
|
|
+ " summary_writer.add_summary(summary, epoch * total_batch + i)\n",
|
|
|
+ " # Compute average loss\n",
|
|
|
+ " avg_cost += c / total_batch\n",
|
|
|
+ " # Display logs per epoch step\n",
|
|
|
+ " if (epoch+1) % display_step == 0:\n",
|
|
|
+ " print(\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(avg_cost))\n",
|
|
|
+ "\n",
|
|
|
+ " print(\"Optimization Finished!\")\n",
|
|
|
+ "\n",
|
|
|
+ " # Test model\n",
|
|
|
+ " # Calculate accuracy\n",
|
|
|
+ " print(\"Accuracy:\", acc.eval({x: mnist.test.images, y: mnist.test.labels}))\n",
|
|
|
+ "\n",
|
|
|
+ " print(\"Run the command line:\\n\" \\\n",
|
|
|
+ " \"--> tensorboard --logdir=/tmp/tensorflow_logs \" \\\n",
|
|
|
+ " \"\\nThen open http://0.0.0.0:6006/ into your web browser\")"
|
|
|
+ ]
|
|
|
+ },
|
|
|
+ {
|
|
|
+ "cell_type": "code",
|
|
|
+ "execution_count": null,
|
|
|
+ "metadata": {
|
|
|
+ "collapsed": true
|
|
|
+ },
|
|
|
+ "outputs": [],
|
|
|
+ "source": []
|
|
|
+ }
|
|
|
+ ],
|
|
|
+ "metadata": {
|
|
|
+ "kernelspec": {
|
|
|
+ "display_name": "Python 3",
|
|
|
+ "language": "python",
|
|
|
+ "name": "python3"
|
|
|
+ },
|
|
|
+ "language_info": {
|
|
|
+ "codemirror_mode": {
|
|
|
+ "name": "ipython",
|
|
|
+ "version": 3
|
|
|
+ },
|
|
|
+ "file_extension": ".py",
|
|
|
+ "mimetype": "text/x-python",
|
|
|
+ "name": "python",
|
|
|
+ "nbconvert_exporter": "python",
|
|
|
+ "pygments_lexer": "ipython3",
|
|
|
+ "version": "3.5.3"
|
|
|
+ }
|
|
|
+ },
|
|
|
+ "nbformat": 4,
|
|
|
+ "nbformat_minor": 2
|
|
|
+}
|