logistic_regression_eager_api.py

''' Logistic Regression with Eager API.

A logistic regression learning algorithm example using TensorFlow's Eager API.
This example uses the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/).

Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
from __future__ import absolute_import, division, print_function

import tensorflow as tf

# Set Eager API
tf.enable_eager_execution()
tfe = tf.contrib.eager

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
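# Note: with one_hot=False the loader returns integer class ids (0-9) rather
# than one-hot vectors, which is the label format expected by the sparse
# cross-entropy loss used below. Images come pre-flattened to 784-float vectors.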
# Parameters
learning_rate = 0.1
batch_size = 128
num_steps = 1000
display_step = 100

dataset = tf.data.Dataset.from_tensor_slices(
    (mnist.train.images, mnist.train.labels)).batch(batch_size)
dataset_iter = tfe.Iterator(dataset)
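# The tf.data pipeline slices the training arrays into (image, label) pairs
# and groups them into batches of `batch_size`; tfe.Iterator then yields each
# batch as eager tensors that can be consumed directly in the Python loop below.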
# Variables
W = tfe.Variable(tf.zeros([784, 10]), name='weights')
b = tfe.Variable(tf.zeros([10]), name='bias')
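# Zero initialization is safe here: softmax regression has a convex loss and a
# single linear layer, so there is no symmetry-breaking concern as in deeper nets.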
# Logistic regression (Wx + b)
def logistic_regression(inputs):
    return tf.matmul(inputs, W) + b
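# The model returns raw logits of shape [batch, 10]; the softmax is applied
# inside the loss and accuracy functions rather than here.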
# Cross-Entropy loss function
def loss_fn(inference_fn, inputs, labels):
    # Using sparse_softmax cross entropy
    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=inference_fn(inputs), labels=labels))
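# sparse_softmax_cross_entropy_with_logits fuses the softmax and the
# cross-entropy into one numerically stable op and takes integer labels,
# so no one-hot encoding is needed.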
# Calculate accuracy
def accuracy_fn(inference_fn, inputs, labels):
    prediction = tf.nn.softmax(inference_fn(inputs))
    correct_pred = tf.equal(tf.argmax(prediction, 1), labels)
    return tf.reduce_mean(tf.cast(correct_pred, tf.float32))
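# Since softmax is monotonic, taking argmax over the raw logits would give the
# same predictions; the softmax here only normalizes them into probabilities.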
# SGD Optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)

# Compute gradients
grad = tfe.implicit_gradients(loss_fn)
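# tfe.implicit_gradients wraps loss_fn into a function with the same signature
# that returns (gradient, variable) pairs for every trainable variable the loss
# touches (here W and b), in the format apply_gradients expects.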
# Training
average_loss = 0.
average_acc = 0.
for step in range(num_steps):

    # Iterate through the dataset
    try:
        d = dataset_iter.next()
    except StopIteration:
        # Refill queue
        dataset_iter = tfe.Iterator(dataset)
        d = dataset_iter.next()

    # Images
    x_batch = d[0]
    # Labels
    y_batch = tf.cast(d[1], dtype=tf.int64)

    # Compute the batch loss
    batch_loss = loss_fn(logistic_regression, x_batch, y_batch)
    average_loss += batch_loss
    # Compute the batch accuracy
    batch_accuracy = accuracy_fn(logistic_regression, x_batch, y_batch)
    average_acc += batch_accuracy

    if step == 0:
        # Display the initial cost, before optimizing
        print("Initial loss= {:.9f}".format(average_loss))

    # Update the variables following gradients info
    optimizer.apply_gradients(grad(logistic_regression, x_batch, y_batch))

    # Display info
    if (step + 1) % display_step == 0 or step == 0:
        if step > 0:
            average_loss /= display_step
            average_acc /= display_step
        print("Step:", '%04d' % (step + 1), " loss=",
              "{:.9f}".format(average_loss), " accuracy=",
              "{:.4f}".format(average_acc))
        average_loss = 0.
        average_acc = 0.
# Evaluate model on the test image set
testX = mnist.test.images
testY = mnist.test.labels

test_acc = accuracy_fn(logistic_regression, testX, testY)
print("Testset Accuracy: {:.4f}".format(test_acc))