```python
import cv2
import depthai as dai
import numpy as np


def getFrame(queue):
    # Get a frame from the queue
    frame = queue.get()
    # Convert the frame to OpenCV format and return it
    return frame.getCvFrame()


def getMonoCamera(pipeline, isLeft):
    # Configure a mono camera node
    mono = pipeline.createMonoCamera()
    # Set the camera resolution
    mono.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
    if isLeft:
        # Use the left camera
        mono.setBoardSocket(dai.CameraBoardSocket.LEFT)
    else:
        # Use the right camera
        mono.setBoardSocket(dai.CameraBoardSocket.RIGHT)
    return mono


def getStereoPair(pipeline, monoLeft, monoRight):
    # Configure the stereo pair for depth estimation
    stereo = pipeline.createStereoDepth()
    # Check for occluded pixels and mark them as invalid
    stereo.setLeftRightCheck(True)

    # Link the left and right cameras so they work as a stereo pair
    monoLeft.out.link(stereo.left)
    monoRight.out.link(stereo.right)
    return stereo


def mouseCallback(event, x, y, flags, param):
    global mouseX, mouseY
    if event == cv2.EVENT_LBUTTONDOWN:
        mouseX = x
        mouseY = y


if __name__ == '__main__':
    # Initial marker position (kept inside the 640x400 mono frames)
    mouseX = 0
    mouseY = 200

    # Start defining a pipeline
    pipeline = dai.Pipeline()

    # Set up the left and right cameras
    monoLeft = getMonoCamera(pipeline, isLeft=True)
    monoRight = getMonoCamera(pipeline, isLeft=False)

    # Combine the left and right cameras to form a stereo pair
    stereo = getStereoPair(pipeline, monoLeft, monoRight)

    # Set up XLinkOut nodes for disparity, rectifiedLeft, and rectifiedRight
    xoutDisp = pipeline.createXLinkOut()
    xoutDisp.setStreamName("disparity")

    xoutRectifiedLeft = pipeline.createXLinkOut()
    xoutRectifiedLeft.setStreamName("rectifiedLeft")

    xoutRectifiedRight = pipeline.createXLinkOut()
    xoutRectifiedRight.setStreamName("rectifiedRight")

    stereo.disparity.link(xoutDisp.input)
    stereo.rectifiedLeft.link(xoutRectifiedLeft.input)
    stereo.rectifiedRight.link(xoutRectifiedRight.input)

    # The pipeline is defined, now connect to the device
    with dai.Device(pipeline) as device:

        # Output queues used to receive the streams defined above
        disparityQueue = device.getOutputQueue(name="disparity", maxSize=1, blocking=False)
        rectifiedLeftQueue = device.getOutputQueue(name="rectifiedLeft", maxSize=1, blocking=False)
        rectifiedRightQueue = device.getOutputQueue(name="rectifiedRight", maxSize=1, blocking=False)

        # Calculate a multiplier for color-mapping the disparity map
        disparityMultiplier = 255 / stereo.getMaxDisparity()

        cv2.namedWindow("Stereo Pair")
        cv2.setMouseCallback("Stereo Pair", mouseCallback)

        # Toggle between side-by-side view and blended view
        sideBySide = False

        while True:
            # Get the disparity map
            disparity = getFrame(disparityQueue)

            # Colormap the disparity for display
            disparity = (disparity * disparityMultiplier).astype(np.uint8)
            disparity = cv2.applyColorMap(disparity, cv2.COLORMAP_JET)

            # Get the left and right rectified frames
            leftFrame = getFrame(rectifiedLeftQueue)
            rightFrame = getFrame(rectifiedRightQueue)

            if sideBySide:
                # Show the frames side by side
                imOut = np.hstack((leftFrame, rightFrame))
            else:
                # Blend the two frames into one
                imOut = np.uint8(leftFrame / 2 + rightFrame / 2)

            imOut = cv2.cvtColor(imOut, cv2.COLOR_GRAY2BGR)

            # Draw a horizontal line and a marker at the last clicked point
            imOut = cv2.line(imOut, (mouseX, mouseY), (imOut.shape[1], mouseY), (0, 0, 255), 2)
            imOut = cv2.circle(imOut, (mouseX, mouseY), 2, (255, 255, 128), 2)

            cv2.imshow("Stereo Pair", imOut)
            cv2.imshow("Disparity", disparity)

            # Check for keyboard input
            key = cv2.waitKey(1)
            if key == ord('q'):
                # Quit when q is pressed
                break
            elif key == ord('t'):
                # Toggle the display when t is pressed
                sideBySide = not sideBySide
```
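To try the script, you will need the `depthai`, `opencv-python`, and `numpy` packages installed and an OAK stereo camera (for example an OAK-D) attached; the exact device model and connection setup are assumptions on my part, not something the script verifies. Once it is running, click anywhere in the "Stereo Pair" window to move the horizontal line and marker, press `t` to toggle between the side-by-side and blended views, and press `q` to quit.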