FullyConvolutionalResnet50.py

import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras import Input
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet import preprocess_input
from tensorflow.keras.layers import (
    Activation,
    AveragePooling2D,
    BatchNormalization,
    Conv2D,
    MaxPooling2D,
    ZeroPadding2D,
)
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.utils import data_utils

from utils import (
    BASE_WEIGHTS_PATH,
    WEIGHTS_HASHES,
    stack1,
)
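
# BASE_WEIGHTS_PATH, WEIGHTS_HASHES and stack1 are expected to mirror the helpers of
# the same names in the tf.keras.applications.resnet source: stack1 builds one stage
# of bottleneck residual blocks, while the other two locate the official weight files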


# setting FC weights to the final convolutional layer
def set_conv_weights(model, feature_extractor):
    # get pre-trained ResNet50 FC weights
    dense_layer_weights = feature_extractor.layers[-1].get_weights()
    weights_list = [
        tf.reshape(
            dense_layer_weights[0], (1, 1, *dense_layer_weights[0].shape),
        ).numpy(),
        dense_layer_weights[1],
    ]
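    # dense_layer_weights[0] is the dense kernel of shape (channels, num_classes),
    # (2048, 1000) for stock ResNet50; prepending two singleton dimensions turns it
    # into a 1x1 convolution kernel, and the bias in dense_layer_weights[1] is
    # reused unchanged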
    model.get_layer(name="last_conv").set_weights(weights_list)


def fully_convolutional_resnet50(
    input_shape, num_classes=1000, pretrained_resnet=True, use_bias=True,
):
    # init input layer
    img_input = Input(shape=input_shape)

    # define basic model pipeline
    x = ZeroPadding2D(padding=((3, 3), (3, 3)), name="conv1_pad")(img_input)
    x = Conv2D(64, 7, strides=2, use_bias=use_bias, name="conv1_conv")(x)
    x = BatchNormalization(axis=3, epsilon=1.001e-5, name="conv1_bn")(x)
    x = Activation("relu", name="conv1_relu")(x)
    x = ZeroPadding2D(padding=((1, 1), (1, 1)), name="pool1_pad")(x)
    x = MaxPooling2D(3, strides=2, name="pool1_pool")(x)
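    # the stride-2 convolution and stride-2 max pooling above downsample the input
    # by a factor of 4 before the residual stages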

    # the sequence of stacked residual blocks
    x = stack1(x, 64, 3, stride1=1, name="conv2")
    x = stack1(x, 128, 4, name="conv3")
    x = stack1(x, 256, 6, name="conv4")
    x = stack1(x, 512, 3, name="conv5")
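    # this is the standard 3-4-6-3 bottleneck layout of ResNet50; conv3-conv5 each
    # halve the spatial resolution again, for a total downsampling factor of 32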

    # add avg pooling layer after feature extraction layers
    x = AveragePooling2D(pool_size=7)(x)
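    # a fixed 7x7 average pool is used instead of global average pooling, so the
    # output keeps a spatial extent whenever the input is larger than 224x224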

    # add final convolutional layer
    conv_layer_final = Conv2D(
        filters=num_classes, kernel_size=1, use_bias=use_bias, name="last_conv",
    )(x)
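    # this 1x1 convolution plays the role of the 1000-way dense classifier, producing
    # one score map per class instead of a single score vector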

    # configure fully convolutional ResNet50 model
    model = training.Model(img_input, x)

    # load model weights
    if pretrained_resnet:
        model_name = "resnet50"
        # configure full file name
        file_name = model_name + "_weights_tf_dim_ordering_tf_kernels_notop.h5"
        # get the file hash from TF WEIGHTS_HASHES
        file_hash = WEIGHTS_HASHES[model_name][1]
        weights_path = data_utils.get_file(
            file_name,
            BASE_WEIGHTS_PATH + file_name,
            cache_subdir="models",
            file_hash=file_hash,
        )
        model.load_weights(weights_path)
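        # the "notop" file contains only the convolutional backbone weights, which is
        # why they are loaded before the last_conv head is attached below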

    # form final model
    model = training.Model(inputs=model.input, outputs=[conv_layer_final])

    if pretrained_resnet:
        # get model with the dense layer for further FC weights extraction
        resnet50_extractor = ResNet50(
            include_top=True, weights="imagenet", classes=num_classes,
        )
        # set ResNet50 FC-layer weights to final convolutional layer
        set_conv_weights(model=model, feature_extractor=resnet50_extractor)
    return model


if __name__ == "__main__":
    # read ImageNet class ids to a list of labels
    with open("imagenet_classes.txt") as f:
        labels = [line.strip() for line in f.readlines()]
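    # imagenet_classes.txt is expected to hold the 1000 ImageNet class names,
    # one per line, in class-id order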

    # read image
    original_image = cv2.imread("camel.jpg")
    # convert image to the RGB format
    image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)

    # pre-process image
    image = preprocess_input(image)
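    # preprocess_input for ResNet applies the "caffe"-style preprocessing:
    # RGB -> BGR conversion and zero-centering with the ImageNet channel means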
    # add a batch dimension: the model expects an NHWC tf.tensor
    image = tf.expand_dims(image, 0)

    # load modified resnet50 model with pre-trained ImageNet weights
    model = fully_convolutional_resnet50(input_shape=(image.shape[-3:]))
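    # with no dense layer, the network is not tied to 224x224 inputs; any image large
    # enough to survive the 7x7 average pooling can be classified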

    # Perform inference.
    # Instead of a 1 × 1000 vector, the model returns a 1 × n × m × 1000 response map
    # (transposed below to 1 × 1000 × n × m), i.e. a probability map of size n × m
    # for each of the 1000 classes, where n and m depend on the size of the image.
    preds = model.predict(image)
    preds = tf.transpose(preds, perm=[0, 3, 1, 2])
    preds = tf.nn.softmax(preds, axis=1)
    print("Response map shape : ", preds.shape)

    # find the class with the maximum score in the n × m output map
    pred = tf.math.reduce_max(preds, axis=1)
    class_idx = tf.math.argmax(preds, axis=1)
    print(class_idx)
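    # pred holds the best score per spatial position and class_idx the winning class;
    # reduce over rows, then columns, to locate the global maximum and gather the
    # class index at that position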
    row_max = tf.math.reduce_max(pred, axis=1)
    row_idx = tf.math.argmax(pred, axis=1)
    col_idx = tf.math.argmax(row_max, axis=1)
    predicted_class = tf.gather_nd(
        class_idx, (0, tf.gather_nd(row_idx, (0, col_idx[0])), col_idx[0]),
    )

    # print top predicted class
    print("Predicted Class : ", labels[predicted_class], predicted_class)

    # find the n × m score map for the predicted class
    score_map = tf.expand_dims(preds[0, predicted_class, :, :], 0).numpy()
    score_map = score_map[0]

    # resize score map to the original image size
    score_map = cv2.resize(
        score_map, (original_image.shape[1], original_image.shape[0]),
    )

    # binarize score map
    _, score_map_for_contours = cv2.threshold(
        score_map, 0.65, 1, type=cv2.THRESH_BINARY,
    )
    score_map_for_contours = score_map_for_contours.astype(np.uint8).copy()

    # find the contour of the binary blob
    contours, _ = cv2.findContours(
        score_map_for_contours, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE,
    )
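    # note: the two-value unpacking above assumes OpenCV 4.x; OpenCV 3.x returns
    # (image, contours, hierarchy) from findContours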

    # find bounding box around the object.
    rect = cv2.boundingRect(contours[0])

    # apply score map as a mask to original image
    score_map = score_map - np.min(score_map[:])
    score_map = score_map / np.max(score_map[:])

    score_map = cv2.cvtColor(score_map, cv2.COLOR_GRAY2BGR)
    masked_image = (original_image * score_map).astype(np.uint8)

    # display bounding box
    cv2.rectangle(
        masked_image, rect[:2], (rect[0] + rect[2], rect[1] + rect[3]), (0, 0, 255), 2,
    )

    # display images
    cv2.imshow("Original Image", original_image)
    cv2.imshow("scaled_score_map", score_map)
    cv2.imshow("activations_and_bbox", masked_image)
    cv2.waitKey(0)