multigpu_basics.py

from __future__ import print_function
'''
Basic multi-GPU computation example using the TensorFlow library.
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
'''
This tutorial requires your machine to have 2 GPUs:
"/cpu:0": the CPU of your machine.
"/gpu:0": the first GPU of your machine.
"/gpu:1": the second GPU of your machine.
'''
import numpy as np
import tensorflow as tf
import datetime

# Log the device each operation is placed on
log_device_placement = True
# Number of matrix multiplications to perform
n = 10
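
# Not part of the original example: list the devices TensorFlow can see, to
# confirm that "/gpu:0" and "/gpu:1" are actually available before running the
# benchmark. device_lib.list_local_devices() is TensorFlow 1.x's device
# inspection helper (this script targets the 1.x API).
from tensorflow.python.client import device_lib
print([d.name for d in device_lib.list_local_devices()])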
'''
Example: compute A^n + B^n on 2 GPUs.
Results on 8 cores with 2 GTX-980:
 * Single GPU computation time: 0:00:11.277449
 * Multi GPU computation time: 0:00:07.131701
'''
# Create random large matrices
A = np.random.rand(10000, 10000).astype('float32')
B = np.random.rand(10000, 10000).astype('float32')

# Lists that collect the result ops of each graph
c1 = []
c2 = []
def matpow(M, n):
    # Build a chain of matmul ops computing M^n
    if n <= 1:  # Base case: M^1 is M (also guards against n < 1)
        return M
    else:
        return tf.matmul(M, matpow(M, n-1))
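
# For reference (not in the original example): on a small matrix, the result of
# matpow(M, k) can be spot-checked against the NumPy equivalent
# np.linalg.matrix_power(M, k) after evaluating the tensor in a session.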
'''
Single GPU computing
'''
with tf.device('/gpu:0'):
    a = tf.placeholder(tf.float32, [10000, 10000])
    b = tf.placeholder(tf.float32, [10000, 10000])
    # Compute A^n and B^n and store both results in c1
    c1.append(matpow(a, n))
    c1.append(matpow(b, n))

with tf.device('/cpu:0'):
    sum = tf.add_n(c1)  # Addition of all elements in c1, i.e. A^n + B^n

t1_1 = datetime.datetime.now()
with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
    # Run the op.
    sess.run(sum, {a: A, b: B})
t2_1 = datetime.datetime.now()
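
# Not in the original example: if a device requested above is missing, the
# session will raise an error because placement is pinned explicitly. Passing
# allow_soft_placement=True to tf.ConfigProto lets TensorFlow fall back to an
# available device instead, at the cost of obscuring where each op actually ran.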
'''
Multi GPU computing
'''
# GPU:0 computes A^n
with tf.device('/gpu:0'):
    # Compute A^n and store the result in c2
    a = tf.placeholder(tf.float32, [10000, 10000])
    c2.append(matpow(a, n))

# GPU:1 computes B^n
with tf.device('/gpu:1'):
    # Compute B^n and store the result in c2
    b = tf.placeholder(tf.float32, [10000, 10000])
    c2.append(matpow(b, n))

with tf.device('/cpu:0'):
    sum = tf.add_n(c2)  # Addition of all elements in c2, i.e. A^n + B^n

t1_2 = datetime.datetime.now()
with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
    # Run the op.
    sess.run(sum, {a: A, b: B})
t2_2 = datetime.datetime.now()
  73. print("Single GPU computation time: " + str(t2_1-t1_1))
  74. print("Multi GPU computation time: " + str(t2_2-t1_2))