mask_rcnn.py

# Copyright (C) 2018-2019, BigVision LLC (LearnOpenCV.com), All Rights Reserved.
# Author : Sunita Nayak
# Article : https://www.learnopencv.com/deep-learning-based-object-detection-and-instance-segmentation-using-mask-r-cnn-in-opencv-python-c/
# License: BSD-3-Clause-Attribution (Please read the license file.)
# This work is based on OpenCV samples code (https://opencv.org/license.html)
import cv2 as cv
import argparse
import numpy as np
import os.path
import sys
import random
# Initialize the parameters
confThreshold = 0.5  # Confidence threshold
maskThreshold = 0.3  # Mask threshold

parser = argparse.ArgumentParser(description='Use this script to run Mask-RCNN object detection and segmentation')
parser.add_argument('--image', help='Path to image file')
parser.add_argument('--video', help='Path to video file.')
parser.add_argument("--device", default="cpu", help="Device to inference on")
args = parser.parse_args()
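
# Example invocations (the input file names below are placeholders, not files
# shipped with this script):
#   python3 mask_rcnn.py --image=example.jpg
#   python3 mask_rcnn.py --video=example.mp4 --device=gpu
#   python3 mask_rcnn.py                      # no input given: read from the webcam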

# Draw the predicted bounding box, colorize and show the mask on the image
def drawBox(frame, classId, conf, left, top, right, bottom, classMask):
    # Draw a bounding box.
    cv.rectangle(frame, (left, top), (right, bottom), (255, 178, 50), 3)

    # Build the label of the class and its confidence.
    label = '%.2f' % conf
    if classes:
        assert(classId < len(classes))
        label = '%s:%s' % (classes[classId], label)

    # Display the label at the top of the bounding box
    labelSize, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)
    top = max(top, labelSize[1])
    cv.rectangle(frame, (left, top - round(1.5*labelSize[1])), (left + round(1.5*labelSize[0]), top + baseLine), (255, 255, 255), cv.FILLED)
    cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 1)

    # Resize the mask to the box size, threshold it, then color and apply it on the image
    classMask = cv.resize(classMask, (right - left + 1, bottom - top + 1))
    mask = (classMask > maskThreshold)
    roi = frame[top:bottom+1, left:right+1][mask]

    # color = colors[classId % len(colors)]
    # Uncomment the line above (and comment the two lines below) to give every
    # instance of a class the same color; by default each instance gets a random color.
    colorIndex = random.randint(0, len(colors) - 1)
    color = colors[colorIndex]

    # Blend the chosen color with the underlying pixels inside the mask
    frame[top:bottom+1, left:right+1][mask] = ([0.3*color[0], 0.3*color[1], 0.3*color[2]] + 0.7 * roi).astype(np.uint8)

    # Draw the contours of the mask on the image
    mask = mask.astype(np.uint8)
    contours, hierarchy = cv.findContours(mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    cv.drawContours(frame[top:bottom+1, left:right+1], contours, -1, color, 3, cv.LINE_8, hierarchy, 100)
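
# Note: the two-value unpacking of cv.findContours above assumes OpenCV 4.x,
# where the function returns (contours, hierarchy); OpenCV 3.x returns
# (image, contours, hierarchy) and would need a third variable on the left.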

# For each frame, extract the bounding box and mask for each detected object
def postprocess(boxes, masks):
    # Output size of masks is NxCxHxW where
    # N - number of detected boxes
    # C - number of classes (excluding background)
    # HxW - segmentation shape
    numClasses = masks.shape[1]
    numDetections = boxes.shape[2]

    frameH = frame.shape[0]
    frameW = frame.shape[1]

    for i in range(numDetections):
        box = boxes[0, 0, i]
        mask = masks[i]
        score = box[2]
        if score > confThreshold:
            classId = int(box[1])

            # Extract the bounding box
            left = int(frameW * box[3])
            top = int(frameH * box[4])
            right = int(frameW * box[5])
            bottom = int(frameH * box[6])

            left = max(0, min(left, frameW - 1))
            top = max(0, min(top, frameH - 1))
            right = max(0, min(right, frameW - 1))
            bottom = max(0, min(bottom, frameH - 1))

            # Extract the mask for the object
            classMask = mask[classId]

            # Draw bounding box, colorize and show the mask on the image
            drawBox(frame, classId, score, left, top, right, bottom, classMask)
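
# As a quick sanity check of the raw outputs (shapes are from this particular
# model export and may differ for other Mask R-CNN graphs): 'detection_out_final'
# has shape (1, 1, N, 7), each row laid out as
# [imageId, classId, score, left, top, right, bottom] in normalized coordinates,
# and 'detection_masks' has shape (N, C, 15, 15), i.e. one small per-class mask
# for each of the N detections.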

# Load names of classes
classesFile = "mscoco_labels.names"
classes = None
with open(classesFile, 'rt') as f:
    classes = f.read().rstrip('\n').split('\n')

# Give the textGraph and weight files for the model
textGraph = "./mask_rcnn_inception_v2_coco_2018_01_28.pbtxt"
modelWeights = "./mask_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb"
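# The weights come from the 'mask_rcnn_inception_v2_coco_2018_01_28' archive in
# the TensorFlow object detection model zoo, and the .pbtxt is the matching text
# graph prepared for OpenCV's DNN module; see the article linked in the header
# for the download and graph-generation steps.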

# Load the network
net = cv.dnn.readNetFromTensorflow(modelWeights, textGraph)

if args.device == "cpu":
    net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
    net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
    print("Using CPU device")
elif args.device == "gpu":
    net.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
    net.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA)
    print("Using GPU device")

# Load the colors (each line of colors.txt is an "R G B" triple)
colorsFile = "colors.txt"
with open(colorsFile, 'rt') as f:
    colorsStr = f.read().rstrip('\n').split('\n')

colors = []
for i in range(len(colorsStr)):
    rgb = colorsStr[i].split(' ')
    color = np.array([float(rgb[0]), float(rgb[1]), float(rgb[2])])
    colors.append(color)

winName = 'Mask-RCNN Object detection and Segmentation in OpenCV'
cv.namedWindow(winName, cv.WINDOW_NORMAL)

outputFile = "mask_rcnn_out_py.avi"
if (args.image):
    # Open the image file
    if not os.path.isfile(args.image):
        print("Input image file ", args.image, " doesn't exist")
        sys.exit(1)
    cap = cv.VideoCapture(args.image)
    outputFile = args.image[:-4] + '_mask_rcnn_out_py.jpg'
elif (args.video):
    # Open the video file
    if not os.path.isfile(args.video):
        print("Input video file ", args.video, " doesn't exist")
        sys.exit(1)
    cap = cv.VideoCapture(args.video)
    outputFile = args.video[:-4] + '_mask_rcnn_out_py.avi'
else:
    # Webcam input
    cap = cv.VideoCapture(0)

# Get the video writer initialized to save the output video
if (not args.image):
    vid_writer = cv.VideoWriter(outputFile, cv.VideoWriter_fourcc('M', 'J', 'P', 'G'), 28, (round(cap.get(cv.CAP_PROP_FRAME_WIDTH)), round(cap.get(cv.CAP_PROP_FRAME_HEIGHT))))

while cv.waitKey(1) < 0:
    # Get frame from the video
    hasFrame, frame = cap.read()

    # Stop the program if reached end of video
    if not hasFrame:
        print("Done processing !!!")
        print("Output file is stored as ", outputFile)
        cv.waitKey(3000)
        break

    # Create a 4D blob from a frame.
    blob = cv.dnn.blobFromImage(frame, swapRB=True, crop=False)

    # Set the input to the network
    net.setInput(blob)

    # Run the forward pass to get output from the output layers
    boxes, masks = net.forward(['detection_out_final', 'detection_masks'])

    # Extract the bounding box and mask for each of the detected objects
    postprocess(boxes, masks)

    # Put efficiency information.
    t, _ = net.getPerfProfile()
    label = 'Mask-RCNN : Inference time for a frame : %0.0f ms' % abs(t * 1000.0 / cv.getTickFrequency())
    cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))

    # Write the frame with the detection boxes
    if (args.image):
        cv.imwrite(outputFile, frame.astype(np.uint8))
    else:
        vid_writer.write(frame.astype(np.uint8))

    cv.imshow(winName, frame)
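
# Not part of the original sample: release the capture and the writer and close
# the display window explicitly once the loop exits, rather than relying on
# process teardown.
cap.release()
if not args.image:
    vid_writer.release()
cv.destroyAllWindows()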