Getting 3D points from a point cloud to show object positions

Hardware: Kinect V1. Software: Ubuntu 16.04, OpenCV (cv2), Python

Hello, I am currently using OpenCV for color-based object detection in my project. My next step is to find the (x, y, z) coordinates of these objects in millimetres so I can pass them to another program that drives a robotic arm. There will also be an interface for requesting the coordinates of objects: if the user wants to pick up all green objects, for example, the program should return the coordinates of only the green objects, and the robotic arm will use those coordinates to pick them up. The problem I am running into is how to get these coordinates with the Kinect V1. Could I somehow use get_depth() to obtain the XYZ coordinates? I have seen that using the point cloud is an option, but I do not know how to get coordinates from it and could not find any Python examples. Could I instead take a snapshot of the images and get the coordinates of the objects from that? Any advice would be appreciated. Attached are a picture of my results and my code for reference. Thank you.

Edit: I found this repository: https://github.com/amiller/libfreenect-goodies. In calibkinect.py there is code that helps with getting the XYZ coordinates from the point cloud. After playing with this code for a while, I noticed that the image has to be refreshed, otherwise the XYZ matrix stays the same. I am just having trouble understanding how I can integrate this with my code.
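
To illustrate the idea, here is a minimal sketch of the kind of conversion I understand calibkinect.py to be doing: back-projecting a depth pixel (u, v) and its raw 11-bit reading into camera-frame metres with a pinhole model. The raw-to-metres formula and the intrinsics FX, FY, CX, CY are approximate values commonly quoted for the Kinect V1, not calibrated values for my device, so they are only placeholders:

import numpy as np

#approximate Kinect V1 depth-camera intrinsics (placeholders, not a per-device calibration)
FX, FY = 594.21, 591.04
CX, CY = 339.5, 242.7

def raw_depth_to_meters(raw):
    #commonly quoted libfreenect approximation for the 11-bit raw values; 2047 means "no reading"
    raw = raw.astype(np.float32)
    depth_m = 1.0 / (raw * -0.0030711016 + 3.3309495161)
    depth_m[raw >= 1084] = 0.0   #approximation breaks down beyond this; treat as invalid
    return depth_m

def pixel_to_xyz(u, v, z_m):
    #pinhole back-projection: pixel (u, v) at depth z_m metres -> camera-frame (x, y, z) in metres
    x = (u - CX) * z_m / FX
    y = (v - CY) * z_m / FY
    return x, y, z_m

Multiplying by 1000 would give millimetres; for coordinates accurate enough for the arm, a per-device calibration and the depth-to-RGB registration that calibkinect.py also handles would still be needed.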

[image: screenshot of my detection results]

import cv2
import numpy as np
import freenect

#function to get RGB image from kinect
def get_video():
    array,_ = freenect.sync_get_video()
    array = cv2.cvtColor(array,cv2.COLOR_RGB2BGR)
    return array

#function to get depth image from kinect
def get_depth():
    array,_ = freenect.sync_get_depth()
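    #note: sync_get_depth() returns 11-bit raw values (0-2047); casting to uint8 below wraps them modulo 256, so this image is only good for display, not for measuring depth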
    array = array.astype(np.uint8)
    return array

if __name__ == "__main__":
    while True:
        #get a frame from RGB kinect camera
        frame = get_video()     
        #_, frame = cap.read()

        #get a frame from depth sensor
        #depth = get_depth()

        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)


        #Create kernel to use in filter
        kernel = np.ones((5, 5), np.uint8)

        #list yellow range
        lower_yellow = np.array([15, 60, 30]) 
        upper_yellow = np.array([30, 255, 255 ])     

        #Create filter for yellow
        yellow_mask = cv2.inRange(hsv, lower_yellow, upper_yellow) # Threshold the HSV image to get only yellow colors
        opening_y = cv2.morphologyEx(yellow_mask, cv2.MORPH_OPEN, kernel, iterations = 1) #Use morphology open to rid of false pos and false neg (noise)
        result_y = cv2.bitwise_and(frame, frame, mask = opening_y) #bitwise and the opening filter with the original frame 

        #list green range
        lower_green = np.array([45, 45, 10]) 
        upper_green = np.array([100, 255, 255])

        #Create filter for green
        green_mask = cv2.inRange(hsv, lower_green, upper_green)
        opening_g = cv2.morphologyEx(green_mask, cv2.MORPH_OPEN, kernel, iterations = 1)
        result_g = cv2.bitwise_and(frame, frame, mask = opening_g)

        #list blue range
        lower_blue = np.array([110, 70, 30])
        upper_blue = np.array([130, 255, 255])

        #Create filter for blue
        blue_mask = cv2.inRange(hsv, lower_blue, upper_blue)
        opening_b = cv2.morphologyEx(blue_mask, cv2.MORPH_OPEN, kernel, iterations = 1)
        result_b = cv2.bitwise_and(frame, frame, mask = opening_b)

        #Tracking the color yellow (OpenCV 3.x findContours returns image, contours, hierarchy)
        _, contours, _ = cv2.findContours(opening_y, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        for contour in contours:
            area = cv2.contourArea(contour)
            if area > 300:
                x, y, w, h = cv2.boundingRect(contour)
                frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
                cv2.putText(frame, "Yellow", (x, y), cv2.FONT_HERSHEY_TRIPLEX, 0.7, (0, 255, 255))

        #Tracking the color green
        _, contours, _ = cv2.findContours(opening_g, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        for contour in contours:
            area = cv2.contourArea(contour)
            if area > 300:
                x, y, w, h = cv2.boundingRect(contour)
                frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (87, 139, 46), 2)
                cv2.putText(frame, "Green", (x, y), cv2.FONT_HERSHEY_TRIPLEX, 0.7, (87, 139, 46))

        #Tracking the color blue
        _, contours, _ = cv2.findContours(opening_b, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        for contour in contours:
            area = cv2.contourArea(contour)
            if area > 300:
                x, y, w, h = cv2.boundingRect(contour)
                frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
                cv2.putText(frame, "Blue", (x, y), cv2.FONT_HERSHEY_TRIPLEX, 0.7, (255, 0, 0))



        cv2.imshow("Frame", frame)

        key = cv2.waitKey(5) & 0xFF
        if key == 27:
            break

cv2.destroyAllWindows()
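
For what it is worth, this is a rough, untested sketch of how I imagine tying the detection results to the depth data: grab the raw depth frame alongside the RGB frame, take the centre pixel of each detected bounding box, and convert it to (x, y, z) in millimetres. The helpers get_raw_depth and box_center_xyz_mm are names I made up for this sketch, raw_depth_to_meters and pixel_to_xyz are the placeholder approximations from the edit above (not functions from calibkinect.py), and I have not accounted for the offset between the RGB and depth cameras:

#rough integration sketch (untested), reusing the placeholder helpers sketched above
def get_raw_depth():
    raw, _ = freenect.sync_get_depth()   #11-bit raw depth, 640x480, not aligned to the RGB camera
    return raw

def box_center_xyz_mm(raw_depth, x, y, w, h):
    #convert the centre pixel of a bounding box to approximate (x, y, z) in millimetres
    u = x + w // 2
    v = y + h // 2
    z_m = raw_depth_to_meters(raw_depth)[v, u]
    if z_m == 0.0:
        return None                      #no valid depth reading at this pixel
    x_m, y_m, z_m = pixel_to_xyz(u, v, z_m)
    return x_m * 1000.0, y_m * 1000.0, z_m * 1000.0

#inside the main loop, once per frame:
#    raw_depth = get_raw_depth()
#and inside each contour loop, after cv2.boundingRect():
#    coords_mm = box_center_xyz_mm(raw_depth, x, y, w, h)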