Ask Your Question

Revision history [back]

click to hide/show revision 1
initial version

To get Delta E for only the region of interest.

# -- coding: utf-8 -- """ Created on Sun Feb 9 00:09:31 2020

@author: Shrouti """

import cv2 import numpy as np from PIL import Image, ImageCms from skimage import color

f = 500 rotXval = 90 rotYval = 90 rotZval = 90 distXval = 500 distYval = 500 distZval = 500

def onFchange(val): global f f = val

def onRotXChange(val): global rotXval rotXval = val

def onRotYChange(val): global rotYval rotYval = val

def onRotZChange(val): global rotZval rotZval = val

def onDistXChange(val): global distXval distXval = val

def onDistYChange(val): global distYval distYval = val

def onDistZChange(val): global distZval distZval = val

if __name__ == '__main__':

# Read input image, and create output image
src = cv2.imread("D:\SHROUTI\Testpictures\hand_gesture3.jpg")
src = cv2.resize(src, (640, 480))
dst = np.zeros_like(src)
h, w = src.shape[:2]

# Create user interface with trackbars that will allow to modify the parameters of the transformation
wndname1 = "Source:"
wndname2 = "WarpPerspective: "

# Show original image
cv2.imshow(wndname1, src)

k = -1
while k != 27:

    if f <= 0: f = 1
    rotX = (rotXval - 90) * np.pi / 180
    rotY = (rotYval - 90) * np.pi / 180
    rotZ = (rotZval - 90) * np.pi / 180
    distX = distXval - 500
    distY = distYval - 500
    distZ = distZval - 500

    # Camera intrinsic matrix
    K = np.array([[f, 0, w / 2, 0],
                  [0, f, h / 2, 0],
                  [0, 0, 1, 0]])

    # K inverse
    Kinv = np.zeros((4, 3))
    Kinv[:3, :3] = np.linalg.inv(K[:3, :3]) * f
    Kinv[-1, :] = [0, 0, 1]

    # Rotation matrices around the X,Y,Z axis
    RX = np.array([[1, 0, 0, 0],
                   [0, np.cos(rotX), -np.sin(rotX), 0],
                   [0, np.sin(rotX), np.cos(rotX), 0],
                   [0, 0, 0, 1]])

    RY = np.array([[np.cos(rotY), 0, np.sin(rotY), 0],
                   [0, 1, 0, 0],
                   [-np.sin(rotY), 0, np.cos(rotY), 0],
                   [0, 0, 0, 1]])

    RZ = np.array([[np.cos(rotZ), -np.sin(rotZ), 0, 0],
                   [np.sin(rotZ), np.cos(rotZ), 0, 0],
                   [0, 0, 1, 0],
                   [0, 0, 0, 1]])

    # Composed rotation matrix with (RX,RY,RZ)
    R = np.linalg.multi_dot([RX, RY, RZ])

    # Translation matrix
    T = np.array([[1, 0, 0, distX],
                  [0, 1, 0, distY],
                  [0, 0, 1, distZ],
                  [0, 0, 0, 1]])

    # Overall homography matrix
    H = np.linalg.multi_dot([K, R, T, Kinv])

    # Apply matrix transformation
    cv2.warpPerspective(src, H, (w, h), dst, cv2.INTER_NEAREST, cv2.BORDER_CONSTANT, 0)

    # Show the image
    cv2.imshow(wndname2, dst)

    k = cv2.waitKey(1)

    # convert image to gray scale
    gray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)

    # blur the image
    blur = cv2.blur(gray, (3, 3))

    # binary thresholding of the image
    ret, thresh = cv2.threshold(blur, 200, 255, cv2.THRESH_BINARY)
    # ret, thresh = cv2.threshold(gray, 127, 255,0)

    # find contours
    # contours, hierarchy = cv2.findContours(thresh,2,1)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    # cc
    cnt = sorted(contours, key=cv2.contourArea, reverse=True)

    mask = contours[0]
    # Convert to Lab colourspace

    Lab = color.rgb2lab(dst)
    L, A, B = cv2.split(Lab)
   # cv2.imshow("L_Channel", L)  # For L Channel
   # cv2.imshow("A_Channel", A)  # For A Channel (Here's what You need)
   # cv2.imshow("B_Channel", B)  # For B Channel
    LMean = L.mean()
    AMean = A.mean()
    BMean = B.mean()
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    #print(Lab)
    #print(LMean)
    #print(AMean)
    #print(BMean)
    [row,column,no_of_ColorBands]= src.shape
    #make uniform images with lab colors
    LStandard = LMean*np.ones([row, column], dtype = int)
    AStandard = AMean*np.ones([row, column], dtype = int)
    BStandard = BMean*np.ones([row, column], dtype = int)
    #determine delta values
    DeltaL = L-LStandard
    DeltaA = A- AStandard
    DeltaB = B - BStandard

    DeltaE = np.sqrt(pow(DeltaA,2)+pow(DeltaB,2)+pow(DeltaL,2))
    print(DeltaE)
    print(mask)
    #euclidian distance(delta E) for only the masked region only
    maskedDeltaE = DeltaE*mask

It is showing the following error:

File "C:/Users/Shrouti/PycharmProjects/homography/homography.py", line 175, in <module> maskedDeltaE = DeltaE*mask ValueError: operands could not be broadcast together with shapes (480,640) (6,1,2)

I want to get the Delta E for the region in the image which has largest contour and nearest from the camera considering the depth value. Please help.

To get Delta E for only the region of interest.

# -- Code:

 # -*- coding: utf-8 --
-*-
"""
Created on Sun Feb  9 00:09:31 2020

2020 @author: Shrouti """

""" import cv2 import numpy as np from PIL import Image, ImageCms from skimage import color

color f = 500 rotXval = 90 rotYval = 90 rotZval = 90 distXval = 500 distYval = 500 distZval = 500

500 def onFchange(val): global f f = val

val def onRotXChange(val): global rotXval rotXval = val

val def onRotYChange(val): global rotYval rotYval = val

val def onRotZChange(val): global rotZval rotZval = val

val def onDistXChange(val): global distXval distXval = val

val def onDistYChange(val): global distYval distYval = val

val def onDistZChange(val): global distZval distZval = val

val if __name__ == '__main__':

'__main__':

    # Read input image, and create output image
 src = cv2.imread("D:\SHROUTI\Testpictures\hand_gesture3.jpg")
 src = cv2.resize(src, (640, 480))
 dst = np.zeros_like(src)
 h, w = src.shape[:2]

 # Create user interface with trackbars that will allow to modify the parameters of the transformation
 wndname1 = "Source:"
 wndname2 = "WarpPerspective: "

 # Show original image
 cv2.imshow(wndname1, src)

 k = -1
 while k != 27:

     if f <= 0: f = 1
     rotX = (rotXval - 90) * np.pi / 180
     rotY = (rotYval - 90) * np.pi / 180
     rotZ = (rotZval - 90) * np.pi / 180
     distX = distXval - 500
     distY = distYval - 500
     distZ = distZval - 500

     # Camera intrinsic matrix
     K = np.array([[f, 0, w / 2, 0],
                   [0, f, h / 2, 0],
                   [0, 0, 1, 0]])

     # K inverse
     Kinv = np.zeros((4, 3))
     Kinv[:3, :3] = np.linalg.inv(K[:3, :3]) * f
     Kinv[-1, :] = [0, 0, 1]

     # Rotation matrices around the X,Y,Z axis
     RX = np.array([[1, 0, 0, 0],
                    [0, np.cos(rotX), -np.sin(rotX), 0],
                    [0, np.sin(rotX), np.cos(rotX), 0],
                    [0, 0, 0, 1]])

     RY = np.array([[np.cos(rotY), 0, np.sin(rotY), 0],
                    [0, 1, 0, 0],
                    [-np.sin(rotY), 0, np.cos(rotY), 0],
                    [0, 0, 0, 1]])

     RZ = np.array([[np.cos(rotZ), -np.sin(rotZ), 0, 0],
                    [np.sin(rotZ), np.cos(rotZ), 0, 0],
                    [0, 0, 1, 0],
                    [0, 0, 0, 1]])

     # Composed rotation matrix with (RX,RY,RZ)
     R = np.linalg.multi_dot([RX, RY, RZ])

     # Translation matrix
     T = np.array([[1, 0, 0, distX],
                   [0, 1, 0, distY],
                   [0, 0, 1, distZ],
                   [0, 0, 0, 1]])

     # Overall homography matrix
     H = np.linalg.multi_dot([K, R, T, Kinv])

     # Apply matrix transformation
     cv2.warpPerspective(src, H, (w, h), dst, cv2.INTER_NEAREST, cv2.BORDER_CONSTANT, 0)

     # Show the image
     cv2.imshow(wndname2, dst)

     k = cv2.waitKey(1)

     # convert image to gray scale
     gray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)

     # blur the image
     blur = cv2.blur(gray, (3, 3))

     # binary thresholding of the image
     ret, thresh = cv2.threshold(blur, 200, 255, cv2.THRESH_BINARY)
     # ret, thresh = cv2.threshold(gray, 127, 255,0)

     # find contours
     # contours, hierarchy = cv2.findContours(thresh,2,1)
     contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
     # cc
     cnt = sorted(contours, key=cv2.contourArea, reverse=True)

     mask = contours[0]
     # Convert to Lab colourspace

     Lab = color.rgb2lab(dst)
     L, A, B = cv2.split(Lab)
    # cv2.imshow("L_Channel", L)  # For L Channel
    # cv2.imshow("A_Channel", A)  # For A Channel (Here's what You need)
    # cv2.imshow("B_Channel", B)  # For B Channel
     LMean = L.mean()
     AMean = A.mean()
     BMean = B.mean()
     cv2.waitKey(0)
     cv2.destroyAllWindows()
     #print(Lab)
     #print(LMean)
     #print(AMean)
     #print(BMean)
     [row,column,no_of_ColorBands]= src.shape
     #make uniform images with lab colors
     LStandard = LMean*np.ones([row, column], dtype = int)
     AStandard = AMean*np.ones([row, column], dtype = int)
     BStandard = BMean*np.ones([row, column], dtype = int)
     #determine delta values
     DeltaL = L-LStandard
     DeltaA = A- AStandard
     DeltaB = B - BStandard

     DeltaE = np.sqrt(pow(DeltaA,2)+pow(DeltaB,2)+pow(DeltaL,2))
     print(DeltaE)
     print(mask)
     #euclidian distance(delta E) for only the masked region only
     maskedDeltaE = DeltaE*mask

It is showing the following error:

File "C:/Users/Shrouti/PycharmProjects/homography/homography.py", line 175, in <module>
    maskedDeltaE = DeltaE*mask
ValueError: operands could not be broadcast together with shapes (480,640) (6,1,2) 

(6,1,2)

I want to get the Delta E for the region in the image which has largest contour and nearest from the camera considering the depth value. Please help.