Trex.py
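
"""
Trex.py automates Chrome's offline T-Rex (Dino) game. It captures the screen with mss,
locates the T-Rex by matching ORB features against a reference image (falling back to a
manual click-and-drag selection if the match is poor), then watches a small patch just
ahead of the T-Rex. Whenever an obstacle shows up in that patch (detected via thresholding
and contour counting, with day/night mode handled by a brightness check), it presses the
space bar through PyAutoGUI to make the T-Rex jump.
"""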

import cv2
import numpy as np
# MSS library for screen capture.
from mss import mss
from tkinter import *
# PyAutoGUI for controlling keyboard inputs.
import pyautogui as gui
# Message box to show error message prompt.
import tkinter.messagebox


def getMatches(ref_trex, captured_screen):
    # Initialize the list of matched keypoint coordinates.
    list_kpts = []
    # Initialize ORB.
    orb = cv2.ORB_create(nfeatures=500)
    # Detect and compute.
    kp1, des1 = orb.detectAndCompute(ref_trex, None)
    kp2, des2 = orb.detectAndCompute(captured_screen, None)
    # Match features.
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(des1, des2, None)
    # Convert to a list.
    matches = list(matches)
    # Sort matches by score.
    matches.sort(key=lambda x: x.distance, reverse=False)
    # Retain only the top 25% of matches.
    numGoodMatches = int(len(matches) * 0.25)
    matches = matches[:numGoodMatches]
    # Visualize matches.
    match_img = cv2.drawMatches(ref_trex, kp1, captured_screen, kp2, matches[:50], None)
    # For each match...
    for mat in matches:
        # Get the matching keypoint in the captured screen image.
        img2_idx = mat.trainIdx
        # Get the coordinates.
        (x2, y2) = kp2[img2_idx].pt
        # Append to the list.
        list_kpts.append((int(x2), int(y2)))
    # Resize the image for display convenience.
    cv2.imshow('Matches', cv2.resize(match_img, None, fx=0.5, fy=0.5))
    # cv2.imwrite('Matches.jpg', match_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return list_kpts
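
# Note (added): ORB produces binary descriptors, so the brute-force Hamming matcher used
# above is the appropriate choice. The 25% cutoff on distance-sorted matches is an empirical
# filter that keeps only the strongest correspondences; trainIdx indexes kp2, i.e. the
# keypoints found in the captured screen.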


def drawBboxManual(action, x, y, flags, *userdata):
    global bbox_top_left, bbox_bottom_right
    # Text origin coordinates estimated on the right half using the following logic:
    '''
    Divide the screen into 12 columns and 5 rows. The origin of the text is defined at
    the 3rd row, 6th column.
    '''
    org_x = int(6 * img.shape[1] / 12)
    org_y = int(3 * img.shape[0] / 5)
    # Display the error text.
    cv2.putText(img, 'Error detecting Trex', (org_x + 20, org_y - 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 1, cv2.LINE_AA)
    cv2.putText(img, 'Please click and drag', (org_x + 20, org_y),
                cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 1, cv2.LINE_AA)
    cv2.putText(img, 'To define the target area', (org_x + 20, org_y + 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 1, cv2.LINE_AA)
    # Mouse interactions.
    if action == cv2.EVENT_LBUTTONDOWN:
        # Acquire the coordinates (stored as a list).
        bbox_top_left = [(x, y)]
        # center_1 : centre of the point circle to be drawn.
        center_1 = (bbox_top_left[0][0], bbox_top_left[0][1])
        # Draw a small filled circle.
        cv2.circle(img, center_1, 3, (0, 0, 255), -1)
        cv2.imshow("DetectionArea", img)
    if action == cv2.EVENT_LBUTTONUP:
        # Acquire the coordinates (stored as a list).
        bbox_bottom_right = [(x, y)]
        # center_2 : centre of the point circle to be drawn.
        center_2 = (bbox_bottom_right[0][0], bbox_bottom_right[0][1])
        # Draw a small filled circle.
        cv2.circle(img, center_2, 3, (0, 0, 255), -1)
        # Define the top-left and bottom-right corner coordinates of the bounding box as tuples.
        point_1 = (bbox_top_left[0][0], bbox_top_left[0][1])
        point_2 = (bbox_bottom_right[0][0], bbox_bottom_right[0][1])
        # Draw the bounding box.
        cv2.rectangle(img, point_1, point_2, (0, 255, 0), 2)
        cv2.imshow("DetectionArea", img)
    cv2.imshow("DetectionArea", img)
    # cv2.imwrite('MouseDefinedBox.jpg', cv2.resize(img, None, fx=2, fy=2, interpolation=cv2.INTER_AREA))


def checkDayOrNight(img):
    # List to hold pixel intensities of a patch.
    pixels_intensities = []
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    h = int(img.shape[0] / 4)
    w = int(img.shape[1] / 4)
    for i in range(h):
        for j in range(w):
            pixels_intensities.append(img[i, j])
    # Find the average pixel intensity.
    val = int(sum(pixels_intensities) / len(pixels_intensities))
    # If greater than 195, consider it day mode.
    if val > 195:
        return True
    else:
        return False
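
# Note (added): the nested loop above simply averages the top-left quarter of the patch;
# an equivalent vectorized form would be val = int(np.mean(img[:h, :w])). The loop is kept
# as-is to match the original logic.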

# Set keypress delay to 0.
gui.PAUSE = 0
# Initialize lists to hold bounding box coordinates.
bbox_top_left = []
bbox_bottom_right = []


# Main script.
if __name__ == "__main__":
    # Load the reference image.
    ref_img = cv2.imread('trex.png')
    # Uncomment the following line if you are on Dark Mode.
    # ref_img = cv2.imread('tRexDark.jpg')
    screen = mss()
    # Identify the display to capture.
    monitor = screen.monitors[1]
    # Check resolution info returned by mss.
    # print('MSS resolution info : ', monitor)
    # Grab the screen.
    screenshot = screen.grab(monitor)
    # Convert to a numpy array.
    screen_img = np.array(screenshot)
    # Tested height and width factors of the T-Rex, relative to the screen resolution.
    box_h_factor = 0.062962
    box_w_factor = 0.046875
    hTrex = int(box_h_factor * screen_img.shape[0])
    wTrex = int(box_w_factor * screen_img.shape[1])
    tested_area = hTrex * wTrex
    # print('Tested Dimensions : ', hTrex, '::', wTrex)
    # Obtain keypoints.
    trex_keypoints = getMatches(ref_img, screen_img)
    # Convert to a numpy array.
    kp_array = np.array(trex_keypoints)
    # Get dimensions of the bounding rectangle around the matched keypoints.
    x, y, w, h = cv2.boundingRect(np.int32(kp_array))
    obtained_area = w * h
    # print('Obtained Area : ', obtained_area)
    # tested_area = wTrex * hTrex
    # print('Tested Area : ', tested_area)
    """
    Check whether the matches are good by comparing the area of the boundingRect to
    the tested area: the obtained bounding box should be neither too small nor too large.
    """
    if 0.1 * tested_area < obtained_area < 3 * tested_area:
        print('Matches are good.')
        # Set the target area bbox coordinates.
        xRoi1 = x + wTrex
        yRoi1 = y
        xRoi2 = x + 2 * wTrex
        """
        Set the height of the bbox to 50% of the original to make sure that it does not
        capture the line below the T-Rex. You can play with this value to come up
        with better positioning.
        """
        yRoi2 = y + int(0.5 * hTrex)
        # Draw the rectangle.
        cv2.rectangle(screen_img, (xRoi1, yRoi1), (xRoi2, yRoi2), (0, 255, 0), 2)
        cv2.imshow('DetectionArea', cv2.resize(screen_img, None, fx=0.5, fy=0.5))
        # cv2.imwrite('ScreenBox.jpg', screen_img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    else:
        print('Matches are not good, please set the target area manually.')
        # Resize the image for display convenience.
        img = cv2.resize(screen_img, None, fx=0.5, fy=0.5)
        cv2.namedWindow('DetectionArea')
        cv2.setMouseCallback('DetectionArea', drawBboxManual)
        cv2.imshow('DetectionArea', img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        # Scale back to full resolution (the image was displayed at half size)
        # and set the target area bbox coordinates accordingly.
        xRoi1 = 2 * bbox_top_left[0][0]
        yRoi1 = 2 * bbox_top_left[0][1]
        xRoi2 = 2 * bbox_bottom_right[0][0]
        yRoi2 = 2 * bbox_bottom_right[0][1]
        # If the click-drag was performed incorrectly, restart.
        if xRoi1 == xRoi2 and yRoi1 == yRoi2:
            print('Please draw the bounding box again using click-drag-release, not click-drag-click.')
            window = Tk()
            window.wm_withdraw()
            # Show the error message at the center of the screen.
            win_width = str(window.winfo_screenwidth() // 2)
            win_height = str(window.winfo_screenheight() // 2)
            window.geometry("1x1+" + win_width + "+" + win_height)
            tkinter.messagebox.showinfo(title="Error", message="Please use click-drag-release")
            exit()
  184. """
  185. If the screen resolution returned by mss is different to that of actual system resolution.
  186. That could mean multiple connected high resolution displays or displays with auto scaling
  187. feature, such as Macbooks with retina display. We found no issue upto 1920 x 1080 windows
  188. and Linux systems. Macbook air without retina display (1366 x 768) works without any issue
  189. as well. However, Macbooks with retina display (2560 x 1600) has some issue with mss screen
  190. capture. The tested scaling factor is half for 2560 x 1600 Macs. Uncomment the following
  191. line in that case. You may need to check the scaling factor for other cases.
  192. """
  193. # xRoi1, yRoi1, xRoi2, yRoi2 = (xRoi1 // 2, yRoi1 // 2, xRoi2 // 2, yRoi2 // 2)
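
    # A possible way to estimate the scaling factor automatically (added, untested sketch),
    # assuming tkinter reports the logical resolution while the mss screenshot is in
    # physical pixels:
    # scale = screen_img.shape[1] / Tk().winfo_screenwidth()
    # xRoi1, yRoi1, xRoi2, yRoi2 = (int(v / scale) for v in (xRoi1, yRoi1, xRoi2, yRoi2))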
    # Create a dictionary for MSS, defining the screen region to be captured.
    obstacle_check_bbox = {'top': yRoi1, 'left': xRoi1, 'width': xRoi2 - xRoi1, 'height': yRoi2 - yRoi1}
    # Day or night mode checking patch, estimated just above the obstacle-detection patch.
    day_check_bbox = {'top': yRoi1 - 2 * hTrex, 'left': xRoi1, 'width': xRoi2, 'height': yRoi2 - 2 * hTrex}
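    # Note (added): checkDayOrNight() only samples the top-left quarter of whatever patch it
    # is given, so this region merely needs to cover some empty background above the
    # obstacle-detection area for the brightness check to work.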
    # Main loop.
    while True:
        # Capture the obstacle-detection patch.
        obstacle_check_patch = screen.grab(obstacle_check_bbox)
        obstacle_check_patch = np.array(obstacle_check_patch)
        # Capture the day or night mode checking patch.
        day_check_patch = screen.grab(day_check_bbox)
        day_check_patch = np.array(day_check_patch)
        # Convert the obstacle-detection area to grayscale.
        obstacle_check_gray = cv2.cvtColor(obstacle_check_patch, cv2.COLOR_BGR2GRAY)
        # Check the game mode.
        day = checkDayOrNight(day_check_patch)
        # Perform contour analysis according to the game mode.
        if day:
            # Add 10 px padding for effective contour analysis.
            obstacle_check_gray = cv2.copyMakeBorder(obstacle_check_gray, 10, 10, 10, 10,
                                                     cv2.BORDER_CONSTANT, None, value=255)
            # Perform thresholding.
            ret, thresh = cv2.threshold(obstacle_check_gray, 127, 255,
                                        cv2.THRESH_BINARY)
        else:
            # Add 10 px padding for effective contour analysis.
            obstacle_check_gray = cv2.copyMakeBorder(obstacle_check_gray, 10, 10, 10, 10,
                                                     cv2.BORDER_CONSTANT, None, value=0)
            # Perform thresholding.
            ret, thresh = cv2.threshold(obstacle_check_gray, 127, 255,
                                        cv2.THRESH_BINARY_INV)
        # Find contours.
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST,
                                               cv2.CHAIN_APPROX_NONE)
        # Print the number of contours.
        # print('Contours Detected : ', len(contours))
        # Make the T-Rex jump if an obstacle contour is present.
        if len(contours) > 1:
            gui.press('space', interval=0.1)
        cv2.imshow('Window', obstacle_check_gray)
        key = cv2.waitKey(1)
        if key == ord('q'):
            cv2.destroyAllWindows()
            break
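

# Usage (added note): open the Chrome Dino game (e.g. chrome://dino in the address bar),
# make sure the game area is visible on the captured monitor, and keep the reference image
# trex.png (or tRexDark.jpg for Dark Mode) next to this script before running it.
# Press 'q' in the OpenCV preview window to stop the bot.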