2018-07-24 02:31:57 -0600
| received badge | ● Popular Question
(source)
|
2017-09-21 16:36:03 -0600
| received badge | ● Notable Question
(source)
|
2016-06-08 08:27:44 -0600
| received badge | ● Famous Question
(source)
|
2016-04-25 22:44:12 -0600
| received badge | ● Notable Question
(source)
|
2015-12-30 03:58:46 -0600
| received badge | ● Popular Question
(source)
|
2014-12-09 13:51:47 -0600
| marked best answer | object recognition in real time on Android with FAST detector The aim of this work is to extract real-time key points from an existing image in a video scene processFrame has the execution that does not work, due to match, is that I can just display the key points of correspondence in the form of circles on the image in real time class Sample1View extends SampleViewBase {
// NOTE(review): forum-pasted OpenCV4Android sample. As pasted it does NOT compile:
// processFrame() below contains a stray `break;` and an unmatched `}` where the
// original `switch (viewMode)` statement was evidently lost during the paste, and
// it references mIntermediateMat2 / resultSize which are not declared in this class.
// Display-mode selector constants; only stored via setViewMode(), the switch that
// consumed them is missing from this paste.
public static final int VIEW_MODE_RGBA = 0;
public static final int VIEW_MODE_BLUE = 1;
public static final int VIEW_MODE_YELLOW = 2;
public static final int VIEW_MODE_DE = 3;
// Working Mats: allocated in onPreviewStarted(), released in onPreviewStopped().
private Mat mYuv;            // raw camera frame, height*3/2 x width, single channel (YUV420-style layout)
private Mat mRgba;           // final RGBA frame handed to the output Bitmap
private Mat mGraySubmat;     // submatrix of mYuv covering the top `height` rows (the luma plane)
private Mat mResult;
private Mat mIntermediateMat; // receives ORB descriptors of the live frame in processFrame()
private Bitmap mBitmap;      // reusable output bitmap, same size as the preview
private int mViewMode;
private Mat mColor;
private Mat mHsv;
TimingLogger timings;
// Reference-image data, computed once in the constructor and matched against every frame.
private Mat img1;            // reference image loaded from R.drawable.wings
private Mat descriptors;     // ORB descriptors of img1
private MatOfKeyPoint keypoints; // FAST keypoints of img1
private FeatureDetector detector;
private DescriptorExtractor descriptor;
private DescriptorMatcher matcher;
private static final String TAG ="Sample::View";
// Loads the reference image and precomputes its keypoints/descriptors so that
// processFrame() only has to process the live frame.
public Sample1View(Context context) {
super(context);
mViewMode = VIEW_MODE_RGBA;
try {
img1=Utils.loadResource(getContext(), R.drawable.wings);
} catch (IOException e) {
// If the resource fails to load, img1 stays null and detector.detect() below
// will fail -- the error is only logged, not recovered from.
Log.w("Activity::LoadResource","Unable to load resource R.drawable.wings");
e.printStackTrace();
}
descriptors = new Mat();
keypoints = new MatOfKeyPoint();
detector = FeatureDetector.create(FeatureDetector.FAST);
detector.detect(img1, keypoints);
descriptor = DescriptorExtractor.create(DescriptorExtractor.ORB);
descriptor.compute(img1, keypoints, descriptors);
// BRUTEFORCE_HAMMING matches the binary ORB descriptors computed above.
matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
}
@Override
protected void onPreviewStarted(int previewWidth, int previewHeight) {
Log.i(TAG, "preview Started");
synchronized (this) {
// height + height/2 rows: luma plane followed by interleaved chroma.
mYuv = new Mat(getFrameHeight() + getFrameHeight() / 2, getFrameWidth(), CvType.CV_8UC1);
// View onto the luma (grayscale) plane only -- shares memory with mYuv.
mGraySubmat = mYuv.submat(0, getFrameHeight(), 0, getFrameWidth());
mRgba = new Mat();
mIntermediateMat = new Mat();
mBitmap = Bitmap.createBitmap(previewWidth, previewHeight, Bitmap.Config.ARGB_8888);
mHsv = new Mat();
mColor = new Mat();
mResult = new Mat();
}
}
@Override
protected void onPreviewStopped() {
Log.i(TAG, "preview Stopped");
if(mBitmap != null) {
mBitmap.recycle();
}
synchronized (this) {
// Explicitly deallocate Mats (native memory is not reclaimed by the GC).
if (mYuv != null)
mYuv.release();
if (mRgba != null)
mRgba.release();
if (mGraySubmat != null)
mGraySubmat.release();
if (mIntermediateMat != null)
mIntermediateMat.release();
mYuv = null;
mRgba = null;
mGraySubmat = null;
mIntermediateMat = null;
if (mResult != null)
mResult.release();
if (mHsv != null)
mHsv.release();
if (mColor != null)
mColor.release();
mColor = null;
mResult = null;
mHsv = null;
}
}
// Per-frame pipeline: detect FAST keypoints in the live frame, compute ORB
// descriptors, match against the reference descriptors, and draw the matches.
@Override
protected Bitmap processFrame(byte[] data) {
// Copy the raw camera bytes into the preallocated YUV Mat.
mYuv.put(0, 0, data);
final int viewMode = mViewMode;
// Project helper; presumably converts the YUV frame for downstream use --
// TODO confirm against ColorDetection's implementation.
ColorDetection.cvt_YUVtoRGBtoHSV(mYuv,mGraySubmat);
MatOfKeyPoint mKeyPoints = new MatOfKeyPoint();
MatOfDMatch matches = new MatOfDMatch();
detector.detect(mGraySubmat, mKeyPoints);
descriptor.compute(mGraySubmat, mKeyPoints, mIntermediateMat);
// Match live-frame descriptors (query) against reference descriptors (train).
matcher.match(mIntermediateMat, descriptors, matches);
// NOTE(review): mIntermediateMat2 and resultSize are not declared anywhere in
// this class -- this line was copied from a larger sample and will not compile.
mIntermediateMat2.create(resultSize, CvType.CV_8UC1);
// NOTE(review): both image arguments are the same live frame; the reference
// image img1/keypoints are not drawn here.
Features2d.drawMatches(mGraySubmat, mKeyPoints, mGraySubmat, mKeyPoints, matches, mIntermediateMat2);
Imgproc.resize(mIntermediateMat2, mIntermediateMat2, mRgba.size());
Imgproc.cvtColor(mIntermediateMat2, mRgba, Imgproc.COLOR_RGBA2BGRA, 4);
// NOTE(review): stray `break;` and the `}` below -- the enclosing
// `switch (viewMode) { case ...: }` was lost when the code was pasted.
break;
}
Bitmap bmp = mBitmap;
try {
Utils.matToBitmap(mRgba, bmp);
} catch(Exception e) {
// On conversion failure, recycle the bitmap and return null to the caller.
Log.e("org.opencv.samples.*", "Utils.matToBitmap() throws an exception: " + e.getMessage());
bmp.recycle();
bmp = null;
}
return bmp;
}
// Selects the display mode; read once per frame in processFrame().
public void setViewMode(int viewMode) {
mViewMode = viewMode;
}
}
|
2014-11-04 01:08:19 -0600
| received badge | ● Notable Question
(source)
|
2014-09-26 15:58:19 -0600
| received badge | ● Popular Question
(source)
|
2014-03-08 20:03:28 -0600
| received badge | ● Popular Question
(source)
|
2013-05-14 11:18:17 -0600
| commented question | How to perform Pose estimation for 3D object rendering in Android did you find any solution? |
2013-05-06 03:13:43 -0600
| commented question | POSE estimation in OpenCv Java using cvFindExtrinsicsCameraParams2 Thank you, now I'm in the stage of registration of 3d graphics objects on the recognized object, that's why I did it like this 1-calibration of the camera 2-2D-3D matches 3-Calculating the pose 4-Registration of virtual object i'm in step 2 can you give me ideas |
2013-05-04 08:36:12 -0600
| commented question | POSE estimation in OpenCv Java using cvFindExtrinsicsCameraParams2 me too am in the process of implementing this algorithm and I'm referring to this book mastering_opencv_with_practical_computer_vision_projects , but until now I can not solve it |
2013-04-30 06:32:58 -0600
| answered a question | superimpose 3D graphics objects on the recognized object I do like this 1-calibration of the camera
2-2D-3D matches
3-Calculating the pose
4-Registration of virtual object
is that possible ? how to do it simply ? otherwise any help. |
2013-04-29 15:35:56 -0600
| commented answer | object recognition in real time on Android with FAST detector |
2013-04-29 08:58:25 -0600
| commented answer | object recognition in real time on Android with FAST detector @StevenPuttemans after the recognition phase I want to insert a 3D object on the detected object, so what method can do this? I don't want to use OpenGL ES |
2013-04-29 05:36:02 -0600
| commented answer | detect all objects in real-time with Android |
2013-04-28 10:56:35 -0600
| asked a question | superimpose 3D graphics objects on the recognized object after the phase of object recognition I took the best point from matching
and would like a registration of 3D objects Please guide me on the big lines
PS: I have read articles on the pose of the camera and camera calibration |
2013-04-22 04:03:26 -0600
| asked a question | minVal & maxVal = 0.0 with TemplateMatching Android Hi i use this code link text but it gives me minVal=maxVal=0.0 ! public void TemplateMatching(Mat mFind,Bitmap dst)
{
// Runs normalized-cross-correlation template matching of `dst` inside `mFind`,
// draws a green rectangle at the best match, and shows the result in `imgb`
// (an ImageView field defined elsewhere in the enclosing class).
// NOTE(review): OpenCV's Mat constructor takes (rows, cols); passing
// (width(), height()) swaps the dimensions for non-square images -- verify.
Mat Input = new Mat(mFind.width(), mFind.height(), CvType.CV_8UC4);
Utils.bitmapToMat(dst, Input);
// mResult8u is allocated but never used below.
Mat mResult8u = new Mat(mFind.width(), mFind.height(), CvType.CV_8UC4);
Mat mResult9u = new Mat(mFind.width(), mFind.height(), CvType.CV_8UC4);
Mat mResult = new Mat(mFind.width(), mFind.height(), CvType.CV_8UC4);
// matchTemplate reallocates mResult to (W-w+1) x (H-h+1), CV_32FC1. If the
// template is the same size as the search image the result is a single cell,
// which after NORM_MINMAX normalization makes minVal == maxVal -- presumably
// the cause of the reported "minVal=maxVal=0.0"; confirm the input sizes.
Imgproc.matchTemplate(mFind, Input, mResult, Imgproc.TM_CCOEFF_NORMED);
Core.normalize(mResult, mResult9u, 0, 255, Core.NORM_MINMAX, CvType.CV_8U);
MinMaxLocResult locRes = Core.minMaxLoc(mResult9u);
double minVal = locRes.minVal;
Point minLoc = locRes.minLoc;
double maxVal = locRes.maxVal;
Point maxLoc = locRes.maxLoc;
// Magic +25 pixel offset applied to the match location -- purpose unclear
// from this fragment.
maxLoc.x+=25;
maxLoc.y+=25;
Point point = new Point();
point.x=maxLoc.x+Input.cols();
point.y=maxLoc.y+Input.rows();
// NOTE(review): maxVal here is the 0..255 normalized score, so a threshold of
// 0.08 accepts virtually any match -- confirm the intended scale.
double thresholds=0.08;
Log.w("max val", maxVal+"");
Log.w("min val", minVal+"");
if(maxVal>=thresholds)
Core.rectangle(mFind, maxLoc,point, new Scalar(0, 255, 0, 255), 3);
Bitmap bmp3= Bitmap.createBitmap(mFind.cols(), mFind.rows(),Bitmap.Config.ARGB_8888);
Utils.matToBitmap(mFind, bmp3);
imgb.setImageBitmap(bmp3);
}
|
2013-04-22 03:33:19 -0600
| commented question | Template Matching with Multiple Occurance Hi i use the same code but it gives me maxVal =minVal =0.0 !! |
2013-04-20 15:37:43 -0600
| received badge | ● Self-Learner
(source)
|
2013-04-20 14:02:35 -0600
| answered a question | detect object from images @Guanta, ok I'll try it, what you suppose like histSize ?
about recognizing the object, do you have any idea how to find the DIST_LIMIT factor? I want to extract exactly the same picture, but I cannot — or do you have another solution (an effective condition) to find the base image? My solution is to compare the descriptors of each image in the database and return the image with the greatest number of good matches, but it is not effective !!
here the code : matcher.match(descriptor2,descriptor1, matches);
int DIST_LIMIT = 80;
List<DMatch> matchesList = matches.toList();
List<DMatch> matches_final= new ArrayList<DMatch>();
for(int i=0; i<matchesList.size(); i++)
if(matchesList .get(i).distance <= DIST_LIMIT){
matches_final.add(matches.toList().get(i));
}
Log.w("good matches", matches_final.size()+"");
return matches_final.size();
}
|
2013-04-16 04:50:23 -0600
| answered a question | detect all objects in real-time with Android @Mathieu Barnachon, Hi, after detecting an object from video using a specific color, I want to recognize this object from images, so I use histogram comparison but it gives me double d = 1.0 for all images! Then I tried feature detection (ORB, FAST, SIFT), counted the good matches and retrieved the image with the largest number of good matches, but it returns the wrong image. I don't know exactly how to retrieve the closest image,
2013-04-16 04:46:09 -0600
| commented answer | image comparison Hi after detecting an object from video using a spefic color, i want to recognize this object from images, so i use compare histogram but it gives me double d = 1.0 for all images ? then i try to use detect feature (ORB,FAST,SIFT) then i count the good matching and retrieve the image who had the biggest length of good_matching but it return me the false image. |
2013-04-16 04:41:18 -0600
| commented answer | Which matcher is best for SURF? @Szippy , do you solve your problem ? |
2013-04-16 04:27:33 -0600
| asked a question | detect object from images Hi, after detecting an object from video using a specific color, I want to recognize this object from images, so I use histogram comparison but it gives me double d = 1.0 for all images? Then I tried feature detection (ORB, FAST, SIFT), counted the good matches and retrieved the image with the largest number of good matches, but it returns the wrong image. For comparing histograms I do it like this: I iterate over all image elements: images List<Mat> imagesList=new ArrayList<Mat>();
imagesList.add(object);
List<Mat> imagesList1=new ArrayList<Mat>();
imagesList1.add(image);
MatOfInt channels=new MatOfInt(0);
Mat hist=new Mat();
Mat hist1=new Mat();
MatOfInt histSize=new MatOfInt(50);
float hrangesArray[]={0.0f,255.0f};
MatOfFloat ranges=new MatOfFloat(hrangesArray);
Mat mask=new Mat();
Imgproc.calcHist(imagesList, channels,mask, hist, histSize, ranges);
Imgproc.calcHist(imagesList1, channels,mask, hist1, histSize, ranges);
double i= Imgproc.compareHist(hist1, hist, Imgproc.CV_COMP_BHATTACHARYYA);
and for the detecting feature i proced like in the tutorial. but i don't know where is the problem ? thanks for help me |
2013-04-09 12:17:50 -0600
| commented answer | Convert Mat to MatOfByte in Android mat.total=0 mat.channel !=0
mat.toString gives me detail of the mat |
2013-04-09 10:55:26 -0600
| commented answer | Convert Mat to MatOfByte in Android it gives me an empty Byte data |
2013-04-08 04:07:10 -0600
| commented answer | Convert Mat to MatOfByte in Android yes i try it but not working, i found another solution is to convert Mat to Bitmap then to Byte[] Utils.matTobitmap(mRgba,bitmap);
ByteArrayOutputStream os = new ByteArrayOutputStream();
bitmap.compress(CompressFormat.PNG, 100, os);
bytes[] data= os.toByteArray();
//the opposite case :
BitmapFactory.decodeByteArray(bytes, 0, bytes.length); What do you think about it? It's effective!
|
2013-04-07 12:36:02 -0600
| commented answer | convert Mat CV_8UC4 to byte[] Android |
2013-04-07 10:47:57 -0600
| asked a question | convert Mat CV_8UC4 to byte[] Android Hi i try to convert a Mat to Byte then the inverse so here is the code frame = new Mat(height, width, CvType.CV_8UC4);
// traitement ...
byte[] data = = new byte[(int) (frame.total() * frame.channels())];
frame.get(0, 0, data);
// but data.length=0
|
2013-04-07 06:45:43 -0600
| commented answer | Convert Mat to MatOfByte in Android @Andrey Pavlenko
i want to convert a Mat CV_8UC4 to byte[] but don't work any of those solution! |
2013-03-31 05:05:22 -0600
| commented answer | Is there any programs to recognize objects? @StevenPuttemans
thank you for the explanation, I have a small question to ask:
from a video capture and a set of images (sqlite) I want to set the image that is closest to it I detect matches the two images and then using a technique such as Histogram to define closest distance |
2013-03-31 03:47:00 -0600
| commented question | *java* api Histogram calculation Hi, have you found a solution? |
2013-03-25 11:25:24 -0600
| asked a question | select square from position using pointPolygonTest in Android The purpose of this method is when I click a square with OnTouch (x,y) in a video he displayed it in another view
Here are the steps: initially an image and a set of square (time varying) if the user clicks in a position then the algorithm check if he clicked on a square or not if yes it displays the square in another view using a mask (ROI) else display the initial image
the problem that working in console but in Android with jni don't response, my question is how can i optimize this algorithm or found another way it can be a performance problem => response time void displaySquares( Mat& image, vector<vector<Point> >& squares,int xa,int ya )
{
// Hit-tests the touch point (xa, ya) against the detected squares; if it lies
// inside one, draws that square's bounding box on `image` and then replaces
// `image` with the ROI view of the box (note: the ROI shares data with the
// original image, so the drawn rectangle edge may appear inside it).
bool trouv=false;
// NOTE(review): squares.size()-squares.size()+1 is always 1 -- only
// boundRect[0] is ever used, so this is an obfuscated single-element vector.
vector<Rect> boundRect(squares.size()-squares.size()+1);
// NOTE(review): only the first third of the squares is tested -- presumably
// to skip duplicate detections; confirm against the producer of `squares`.
for(size_t i = 0; i < squares.size()/3; i++ )
{
// Copy the square's 4 corners into float points for pointPolygonTest.
vector<Point2f> points;
for(size_t j = 0; j < 4; j++ )
{
points.push_back(Point2f((int)squares[i][j].x,(int)squares[i][j].y));
}
// measureDist=false: returns +1/0/-1, so >=0 means on or inside the polygon.
if(pointPolygonTest((Mat)points, Point2f(xa,ya), false)>=0)
{
boundRect[0] = boundingRect( Mat(squares[i]) );
LOGI("good\n");
trouv=true;
break;
}
}
if(trouv)
{
// ROI view of the clicked square's bounding box (no data copy).
Mat roi(image,boundRect[0]);
Rect boundrect=boundRect[0];
// Draw the box on the full image, then hand back only the ROI.
rectangle(image, cv::Point(boundrect.x, boundrect.y),
cv::Point(boundrect.x + boundrect.width, boundrect.y + boundrect.height), Scalar(0,255,0));
image=roi;
}
}
|
2013-03-23 09:23:09 -0600
| received badge | ● Teacher
(source)
|
2013-03-23 03:17:13 -0600
| answered a question | native opencv for android project no , is your Android.mk reference to OpenCV.mk ?
LOCAL_PATH := $(call my-dir) include $(CLEAR_VARS) include ../../sdk/native/jni/OpenCV.mk LOCAL_MODULE := Xxx
LOCAL_SRC_FILES := Xxxx.cpp
LOCAL_LDLIBS += -llog -ldl include $(BUILD_SHARED_LIBRARY) and do you add ${NDKROOT}/platforms/android-9/arch-arm/usr/include
${NDKROOT}/sources/cxx-stl/gnu-libstdc++/4.6/include
${NDKROOT}/sources/cxx-stl/gnu-libstdc++/4.6/libs/armeabi-v7a/include
${ProjDirPath}/../../sdk/native/jni/include ( Open Project Properties -> C/C++ General -> Paths and Symbols and add the following Include paths for C++: ) |
2013-03-22 06:10:32 -0600
| asked a question | recognizing objects c++ Android Hi , i just copied the code Features2D + Homography to find a known object from http://docs.opencv.org/doc/tutorials/features2d/feature_homography/feature_homography.html#feature-homography
on my Android project
2 inupts : a known object(Bitmap from drawable and i convert it to Mat) and an input frame
but in Logcat it show me "Preview Frame received. Need to create MAT and deliver it to clients" BLACK FRAME #include <jni.h>
#include <opencv2/core/core.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>
#include<stdio.h>
#include <android/log.h>
#define LOG_TAG "native"
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)
using namespace cv;
cv::Mat tmp, nomask, mask;
extern "C" {
// One-time JNI initialization: allocates the grayscale working buffer and builds
// a binary mask whose central region (half the frame in each dimension, centered)
// is 255 and the rest 0 -- presumably used to restrict feature detection to the
// middle of the frame; confirm against the Java caller.
JNIEXPORT void JNICALL Java_com_example_features_MainActivity_setup(JNIEnv*, jobject,
jint rows, jint cols) {
tmp.create(rows, cols, CV_8UC1);
nomask = cv::Mat();
// cv::Size takes (width, height), hence (cols, rows) here.
mask = cv::Mat::zeros(cv::Size(cols, rows), CV_8UC1); // swap rows <-> cols
// Central rectangle: top-left at (cols/4, rows/4), size cols/2 x rows/2.
cv::Mat roi(mask, cv::Rect(cols/2 - cols/4, rows/2 - rows/4, cols/2, rows/2));
LOGI("rect: (%d %d)", roi.rows, roi.cols);
// Assigning a Scalar to the ROI header fills that region of `mask` with 255.
roi = cv::Scalar(255);
}
JNIEXPORT void JNICALL Java_com_example_features_MainActivity_FindFeatures(JNIEnv*, jobject, jlong addrGray, jlong addrRgba)
{
Mat& img_object = *(Mat*)addrGray;
Mat& img_scene = *(Mat*)addrRgba;
int minHessian = 400;
OrbFeatureDetector detector( minHessian );
vector<KeyPoint> keypoints_object, keypoints_scene;
detector.detect( img_object, keypoints_object );
detector.detect( img_scene, keypoints_scene );
//-- Step 2: Calculate descriptors (feature vectors)
OrbDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute( img_object, keypoints_object, descriptors_object );
extractor.compute( img_scene, keypoints_scene, descriptors_scene );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
vector< DMatch > matches;
matcher.match( descriptors_object, descriptors_scene, matches );
// drawKeypoints(img_scene, keypoints_scene,img_scene);
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_object.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
vector< DMatch > good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )
{ if( matches[i].distance < 3*min_dist )
{ good_matches.push_back( matches[i]); }
}
Mat img_matches;
drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Localize the object
vector<Point2f> obj;
vector<Point2f> scene;
for( int i = 0; i < good_matches.size(); i++ )
{
//-- Get the keypoints from the good matches
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
// Mat H = findHomography( obj, scene, CV_RANSAC );
//-- Get the corners from the image_1 ( the object to be "detected" )
vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
vector<Point2f> scene_corners(4);
// perspectiveTransform( obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0 ... (more) |
2013-03-21 09:03:52 -0600
| answered a question | locate the object recognition Android OpenCV I solved the problem, it was simple :p
but the same thing still gives me matches that do not exist in the first image List<DMatch> goodMatchesList = new ArrayList<DMatch>();
DMatch dm12[]=matches12.toArray();
DMatch dm21[]=matches21.toArray();
for(int i=0;i<dm12.length;i++)
{
DMatch forward = dm12[i];
DMatch backward = dm21[forward.trainIdx];
if( backward.trainIdx == forward.queryIdx )
goodMatchesList.add(forward) ;
}
matches.fromList(goodMatchesList);
matcher.match(descriptors,mIntermediateMat, matches);
mIntermediateMat2.create(resultSize, CvType.CV_8UC1);
Features2d.drawMatches( mGraySubmat, mKeyPoints, img1, keypoints,matches, mIntermediateMat2,GREEN, RED, MATCH_MASK, Features2d.NOT_DRAW_SINGLE_POINTS);
|
2013-03-21 08:34:40 -0600
| commented question | locate the object recognition Android OpenCV @StevenPuttemans my problem is i drawFeatures for a good matching so i need to create a new MatOfDmatch to push good matching but i didn't find a good code for that because
Feature2d.drawMatches() need to MatOfDmatch and not List<Dmatch>
did you have a solution that create a MatOfDmatch from others MatOfDmatch |
2013-03-21 06:38:32 -0600
| answered a question | locate the object recognition Android OpenCV @Mathieu Barnachon i procced like this MatOfDMatch matches, matches12, matches21;
matcher.match( descriptors1, descriptors2, matches12 );
matcher.match( descriptors2, descriptors1, matches21 );
// iterate matches12
DMatch forward = matches12[i]; // error convert from c++ to java
DMatch backward = matches21[forward.trainIdx];
if( backward.trainIdx == forward.queryIdx )
matches.push_back( forward ); // error convert from c++ to java
get the good matches then draw it
Features2d.drawMatches( mGraySubmat, mKeyPoints,img1, keypoints, matches, mIntermediateMat2,GREEN, RED, MATCH_MASK, Features2d.NOT_DRAW_SINGLE_POINTS);
|