Writing robust (size-invariant) circle detection (Watershed)

I want to detect circles of different sizes. The use case is to detect coins in an image and extract them individually. Quick summary so far: I use the watershed algorithm, but I probably have a problem with the threshold: it doesn't detect the brighter circles.

It's probably not ideal, but I have already described the problem in a Stack Overflow thread, and I would be happy to get any further advice: https://stackoverflow.com/questions/52279810/writting-robust-size-invariant-circle-detection-watershed

I used the watershed algorithm from this tutorial: https://docs.opencv.org/3.4/d3/db4/tutorial_py_watershed.html
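
For reference, here is a condensed version of the pipeline I adapted from that tutorial. The file name comes from my setup; the Otsu binarization and the 0.7 distance-transform factor are the tutorial's values, and I suspect the thresholding step is where the brighter coins get lost:

import cv2
import numpy as np

img = cv2.imread('data/P1190263.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Binarize with Otsu; brighter coins may land on the wrong side of this threshold
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

# Noise removal, then estimate sure background and sure foreground
kernel = np.ones((3, 3), np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
sure_bg = cv2.dilate(opening, kernel, iterations=3)
dist = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
ret, sure_fg = cv2.threshold(dist, 0.7 * dist.max(), 255, 0)
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)

# Label sure-foreground blobs as markers and run the watershed
ret, markers = cv2.connectedComponents(sure_fg)
markers = markers + 1            # background label must be 1, not 0
markers[unknown == 255] = 0      # watershed treats 0 as "unknown"
markers = cv2.watershed(img, markers)
img[markers == -1] = [255, 0, 0]  # watershed marks boundaries with -1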

Edit: Example image of my problem (image attached).

Edit 2: I tried the radial symmetry transform suggested by kbarni. I used [this](https://github.com/ceilab/frst_python) implementation, but unfortunately I only get a completely black output. I have already tried modifying every parameter, but it didn't help.

'''
Implementation of fast radial symmetry transform in pure Python using OpenCV and numpy.

Adapted from:
https://github.com/Xonxt/frst

Which is itself adapted from:
Loy, G., & Zelinsky, A. (2002). A fast radial symmetry transform for detecting points of interest. Computer Vision, ECCV 2002.
'''

import cv2
import numpy as np


def gradx(img):
    img = img.astype('int')
    rows, cols = img.shape
    # Use hstack to add back in the columns that were dropped as zeros
    return np.hstack((np.zeros((rows, 1)), (img[:, 2:] - img[:, :-2]) / 2.0, np.zeros((rows, 1))))


def grady(img):
    img = img.astype('int')
    rows, cols = img.shape
    # Use vstack to add back the rows that were dropped as zeros
    return np.vstack((np.zeros((1, cols)), (img[2:, :] - img[:-2, :]) / 2.0, np.zeros((1, cols))))


# Performs fast radial symmetry transform
# img: input image, grayscale
# radii: integer value for radius size in pixels (n in the original paper); also used to size gaussian kernel
# alpha: Strictness of symmetry transform (higher=more strict; 2 is good place to start)
# beta: gradient threshold parameter, float in [0,1]
# stdFactor: Standard deviation factor for gaussian kernel
# mode: BRIGHT, DARK, or BOTH
def frst(img, radii, alpha, beta, stdFactor, mode='BOTH'):
    mode = mode.upper()
    assert mode in ['BRIGHT', 'DARK', 'BOTH']
    dark = (mode == 'DARK' or mode == 'BOTH')
    bright = (mode == 'BRIGHT' or mode == 'BOTH')

    workingDims = tuple((e + 2 * radii) for e in img.shape)

    # Set up output and M and O working matrices
    output = np.zeros(img.shape, np.uint8)
    O_n = np.zeros(workingDims, np.int16)
    M_n = np.zeros(workingDims, np.int16)

    # Calculate gradients
    gx = gradx(img)
    gy = grady(img)

    # Find gradient vector magnitude
    gnorms = np.sqrt(np.add(np.multiply(gx, gx), np.multiply(gy, gy)))

    # Use beta to set threshold - speeds up transform significantly
    gthresh = np.amax(gnorms) * beta

    # Find x/y distance to affected pixels
    gpx = np.multiply(np.divide(gx, gnorms, out=np.zeros(gx.shape), where=gnorms != 0), radii).round().astype(int)
    gpy = np.multiply(np.divide(gy, gnorms, out=np.zeros(gy.shape), where=gnorms != 0), radii).round().astype(int)

    # Iterate over all pixels (w/ gradient above threshold)
    for coords, gnorm in np.ndenumerate(gnorms):
        if gnorm > gthresh:
            i, j = coords
            # Positively affected pixel
            if bright:
                ppve = (i + gpx[i, j], j + gpy[i, j])
                O_n[ppve] += 1
                M_n[ppve] += gnorm
            # Negatively affected pixel
            if dark:
                pnve = (i - gpx[i, j], j - gpy[i, j])
                O_n[pnve] -= 1
                M_n[pnve] -= gnorm

    # Abs and normalize O matrix
    O_n = np.abs(O_n)
    O_n = O_n / float(np.amax(O_n))

    # Normalize M matrix
    M_max = float(np.amax(np.abs(M_n)))
    M_n = M_n / M_max

    # Elementwise multiplication
    F_n = np.multiply(np.power(O_n, alpha), M_n)

    # Gaussian blur
    kSize = int(np.ceil(radii / 2))
    kSize = kSize + 1 if kSize % 2 == 0 else kSize

    S = cv2.GaussianBlur(F_n, (kSize, kSize), int(radii * stdFactor))

    return S


img = cv2.imread('data/P1190263.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

result = frst(gray, 60, 2, 0, 1, mode='BOTH')

cv2.imshow("detected circles", result)
cv2.waitKey(0)
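
One thing I am unsure about: frst returns a floating-point array, and cv2.imshow maps float pixels so that 0 is black and 1 is white, which means values near zero all show up black. Rescaling the result for display is my assumption of a fix, untested:

# Stretch the float result to [0, 255] before displaying it
disp = cv2.normalize(result, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
cv2.imshow("detected circles (rescaled)", disp)
cv2.waitKey(0)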

Edit 3: I tried to implement the algorithm myself, following the description from kbarni and using the code above as a reference. Unfortunately, I got the following error:

Traceback (most recent call last):
  File "C:/Users/Christian/PycharmProjects/symmetry_transform3/frst.py", line 133, in <module>
    cv2.imshow("detection:", result)
cv2.error: OpenCV(3.4.3) C:\projects\opencv-python\opencv\modules\highgui\src\window.cpp:356: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'

It seems like the output is not recognized as a valid image?

Code:

import cv2
import numpy as np


def gradx(img):
    img = img.astype('int')
    rows, cols = img.shape
    # Use hstack to add back in the columns that were dropped as zeros
    return np.hstack((np.zeros((rows, 1)), (img[:, 2:] - img[:, :-2]) / 2.0, np.zeros((rows, 1))))


def grady(img):
    img = img.astype('int')
    rows, cols = img.shape
    # Use vstack to add back the rows that were dropped as zeros
    return np.vstack((np.zeros((1, cols)), (img[2:, :] - img[:-2, :]) / 2.0, np.zeros((1, cols))))

def createLineIterator(P1, P2, img):
    """
    Produces an array that consists of the coordinates and intensities of each pixel in a line between two points

    Parameters:
        -P1: a numpy array that consists of the coordinate of the first point (x,y)
        -P2: a numpy array that consists of the coordinate of the second point (x,y)
        -img: the image being processed

    Returns:
        -it: a numpy array that consists of the coordinates and intensities of each pixel along the line (shape: [numPixels, 3], row = [x, y, intensity])
    """
    #define local variables for readability
    imageH = img.shape[0]
    imageW = img.shape[1]
    P1X = P1[0]
    P1Y = P1[1]
    P2X = P2[0]
    P2Y = P2[1]

    # difference and absolute difference between points
    # used to calculate slope and relative location between points
    dX = P2X - P1X
    dY = P2Y - P1Y
    dXa = np.abs(dX)
    dYa = np.abs(dY)

    # predefine numpy array for output based on distance between points
    itbuffer = np.empty(shape=(np.maximum(dYa,dXa),3),dtype=np.float32)
    itbuffer.fill(np.nan)

    # Obtain coordinates along the line using a form of Bresenham's algorithm
    negY = P1Y > P2Y
    negX = P1X > P2X
    if P1X == P2X: #vertical line segment
        itbuffer[:,0] = P1X
        if negY:
            itbuffer[:,1] = np.arange(P1Y - 1,P1Y - dYa - 1,-1)
        else:
            itbuffer[:,1] = np.arange(P1Y+1,P1Y+dYa+1)
    elif P1Y == P2Y: #horizontal line segment
        itbuffer[:,1] = P1Y
        if negX:
            itbuffer[:,0] = np.arange(P1X-1,P1X-dXa-1,-1)
        else:
            itbuffer[:,0] = np.arange(P1X+1,P1X+dXa+1)
    else: #diagonal line segment
        steepSlope = dYa > dXa
        if steepSlope:
            slope = dX.astype(np.float32)/dY.astype(np.float32)
            if negY:
                itbuffer[:,1] = np.arange(P1Y-1,P1Y-dYa-1,-1)
            else:
                itbuffer[:,1] = np.arange(P1Y+1,P1Y+dYa+1)
            itbuffer[:,0] = (slope*(itbuffer[:,1]-P1Y)).astype(int) + P1X
        else:
            slope = dY.astype(np.float32)/dX.astype(np.float32)
            if negX:
                itbuffer[:,0] = np.arange(P1X-1,P1X-dXa-1,-1)
            else:
                itbuffer[:,0] = np.arange(P1X+1,P1X+dXa+1)
            itbuffer[:,1] = (slope*(itbuffer[:,0]-P1X)).astype(int) + P1Y

    #Remove points outside of image
    colX = itbuffer[:,0]
    colY = itbuffer[:,1]
    itbuffer = itbuffer[(colX >= 0) & (colY >=0) & (colX<imageW) & (colY<imageH)]

    #Get intensities from img ndarray
    itbuffer[:,2] = img[itbuffer[:,1].astype(np.uint),itbuffer[:,0].astype(np.uint)]

    return itbuffer


def detection(img, thresh, radii, d):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Define an accumulator image (same size as the original)
    accumulator = np.zeros(gray.shape)

    # Detect the gradients in the image (magnitude/angle).
    gx = gradx(gray)
    gy = grady(gray)
    # Gradient magnitude
    gnorms = np.sqrt(np.add(np.multiply(gx, gx), np.multiply(gy, gy)))
    # Gradient direction
    gpx = np.multiply(np.divide(gx, gnorms, out=np.zeros(gx.shape), where=gnorms != 0), radii).round().astype(int)
    gpy = np.multiply(np.divide(gy, gnorms, out=np.zeros(gy.shape), where=gnorms != 0), radii).round().astype(int)

    for coords, gnorm in np.ndenumerate(gnorms):
        # Gradient magnitude is over a certain threshold
        if gnorm > thresh:
            i, j = coords
            iNew = i + gpx[i,j]
            #if iNew > d:
            #    iUse = d
            #else:
            #    iUse = iNew
            jNew = j + gpy[i,j]
            #if jNew > d:
            #    jUse = d
            #else:
            #    jUse = jNew
            #newPoint = (iUse, jUse)
            newPoint = (iNew, jNew)
            # Draw a line in gradient direction
            accumulator = createLineIterator(coords, newPoint, accumulator)

    # Local maxima (and minima)
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(accumulator)

    return accumulator


img = cv2.imread('data/P1190263.jpg')
result = detection(img, 0, 2, 60)
cv2.imshow("detection:", result)
cv2.waitKey(0)
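
Looking at it again, I suspect createLineIterator only returns the coordinates along the line and never writes into the image, so the loop replaces accumulator with an N x 3 point buffer (possibly empty), which would explain why cv2.imshow rejects it. A sketch of what I think the loop body should do instead, untested (note that createLineIterator expects (x, y) points while np.ndenumerate yields (row, col)):

# inside the loop in detection(), instead of reassigning the accumulator:
points = createLineIterator(np.array([j, i]), np.array([jNew, iNew]), accumulator)
for px, py, _ in points:
    accumulator[int(py), int(px)] += 1  # cast one vote per pixel on the line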

Edit 4: I translated the code from kbarni to Python, but I got just a white image.

import cv2
import numpy as np
import math


# x gradient
def gradx(img):
    img = img.astype('int')
    rows, cols = img.shape
    # Use hstack to add back in the columns that were dropped as zeros
    return np.hstack((np.zeros((rows, 1)), (img[:, 2:] - img[:, :-2]) / 2.0, np.zeros((rows, 1))))


# y gradient
def grady(img):
    img = img.astype('int')
    rows, cols = img.shape
    # Use vstack to add back the rows that were dropped as zeros
    return np.vstack((np.zeros((1, cols)), (img[2:, :] - img[:-2, :]) / 2.0, np.zeros((1, cols))))


# img -> grayscale image
# ray -> detection radius
# minVal -> low threshold for the gradient
# maxVal -> high threshold for the gradient
def radSymTransform(img, ray, minVal, maxVal):
    #gxMat = gradx(img)
    #gyMat = grady(img)

    gxMat = cv2.Sobel(img,cv2.CV_64F, 1, 0, ksize=5)
    gyMat = cv2.Sobel(img,cv2.CV_64F, 0, 1, ksize=5)

    result = np.zeros(img.shape)

    # image height = number of rows, image width = number of columns
    height, width = img.shape

    y = 0  # counter 1: y-coordinate
    x = 0  # counter 2: x-coordinate

    while y < height:
        while x < width:
            gx = gxMat[y, x]
            gy = gyMat[y, x]
            ampl = math.sqrt(gx * gx + gy * gy)

            if ampl > minVal and ampl < maxVal:
                maxXY = max(abs(gx), abs(gy))
                gx = gx / maxXY
                gy = gy / maxXY
                tx = x - ray * gx
                ty = y - ray * gy
                if tx < 0 or tx > width or ty < 0 or ty > height:
                    tx = x
                    ty = y

                i = 0  # counter 3
                while i < ray:
                    tx = int(tx - gx)
                    ty = int(ty - gy)
                    # Increment result at position (ty, tx), guarding the image bounds
                    if 0 <= tx < width and 0 <= ty < height:
                        result[ty, tx] = result[ty, tx] + 1
                    i = i + 1
            x = x + 1
        x = 0
        y = y + 1

    return result


img = cv2.imread('data/P1190263.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

result = radSymTransform(gray, 60, 0, 255)
print(result)
cv2.imshow("Output:", result)
cv2.waitKey(0)
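
My guess for the all-white window: the accumulator holds raw vote counts far above 1.0, and cv2.imshow renders float pixels above 1.0 as white. The same rescaling as in Edit 2 might help here, again untested:

disp = cv2.normalize(result, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
cv2.imshow("Output (rescaled)", disp)
cv2.waitKey(0)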

(output image attached)