How to call LSD (LineSegmentDetector) from a C language program?

I use LSD to detect straight lines in an image. The code I downloaded contains a minimal example of calling LSD, but it is static: it only processes the synthetic image that is built inside the main function and prints the result. I want to apply the same code to a video instead of that static example.

    #include <stdio.h>
    #include <highgui.h>   /* needed for cvLoadImage / IplImage */
    #include "lsd.h"

    int main(void)
    {
      image_double image;
      ntuple_list out;
      unsigned int x, y, i, j;
      unsigned int X = 512;  /* x image size */
      unsigned int Y = 512;  /* y image size */

      /* create a simple image: left half black, right half gray */
      image = new_image_double(X, Y);
      for (x = 0; x < X; x++)
        for (y = 0; y < Y; y++)
          image->data[ x + y * image->xsize ] = x < X/2 ? 0.0 : 64.0;  /* image(x,y) */

      /* note: backslashes must be escaped in a C string literal */
      IplImage* imgInTmp = cvLoadImage("C:\\Documents and Settings\\Eslam farag\\My Documents\\Visual Studio 2008\\Projects\\line\\hand.JPEG", 0);

      /* call LSD */
      out = lsd(image);

      /* print output */
      printf("%u line segments found:\n", out->size);
      for (i = 0; i < out->size; i++)
      {
        for (j = 0; j < out->dim; j++)
          printf("%f ", out->values[ i * out->dim + j ]);
        printf("\n");
      }

      /* free memory */
      free_image_double(image);
      free_ntuple_list(out);
      return 0;
    }

If anyone can help me apply this code to a video, I would be glad. Best wishes,

2 answers

Since I could not find a complete example, I am sharing code I wrote that uses OpenCV to load a video file from disk and perform some image processing on it.

The application takes the file name as a command-line argument and converts each video frame to grayscale using the built-in OpenCV function cvCvtColor().

I added some comments to the code to help you understand the main tasks.

read_video.cpp

    #include <stdio.h>
    #include <highgui.h>
    #include <cv.h>

    int main(int argc, char* argv[])
    {
        cvNamedWindow("video", CV_WINDOW_AUTOSIZE);

        CvCapture *capture = cvCaptureFromAVI(argv[1]);
        if (!capture)
        {
            printf("!!! cvCaptureFromAVI failed (file not found?)\n");
            return -1;
        }

        IplImage* frame;
        char key = 0;

        while (key != 'q') // Loop for querying video frames. Pressing Q will quit
        {
            frame = cvQueryFrame(capture);
            if (!frame)
            {
                printf("!!! cvQueryFrame failed\n");
                break;
            }

            /* Let's do a grayscale conversion just for fun */

            // A grayscale image has only one channel, and most probably the original
            // video works with 3 channels (RGB). So, for the conversion to work, we
            // need to allocate an image with only 1 channel to store the result of
            // this operation.
            IplImage* gray_frame = 0;
            gray_frame = cvCreateImage(cvSize(frame->width, frame->height), frame->depth, 1);
            if (!gray_frame)
            {
                printf("!!! cvCreateImage failed!\n");
                return -1;
            }

            cvCvtColor(frame, gray_frame, CV_RGB2GRAY); // The conversion itself

            // Display processed frame on window
            cvShowImage("video", gray_frame);

            // Release allocated resources
            cvReleaseImage(&gray_frame);

            key = cvWaitKey(33);
        }

        cvReleaseCapture(&capture);
        cvDestroyWindow("video");
    }

Compiled with

 g++ read_video.cpp -o read `pkg-config --cflags --libs opencv` 

If you want to know how to iterate over the pixels of a frame to do your own custom processing, check out the following answer, which shows how to perform a manual grayscale conversion: OpenCV cvSet2d ..... what it does
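In case that link is unavailable, here is a minimal sketch of that kind of per-pixel loop: a manual grayscale conversion using cvGet2D()/cvSet2D(). The function name to_grayscale_manual is just for illustration, and it assumes a 3-channel 8-bit BGR frame.

    #include <cv.h>

    // Minimal sketch of a manual per-pixel grayscale conversion with the old
    // OpenCV C API. The caller is responsible for releasing the returned image.
    IplImage* to_grayscale_manual(IplImage* frame)   /* assumes 3-channel, 8-bit BGR */
    {
        IplImage* gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
        int x, y;

        for (y = 0; y < frame->height; y++)
        {
            for (x = 0; x < frame->width; x++)
            {
                CvScalar px = cvGet2D(frame, y, x);   /* note: row (y) comes first */
                /* OpenCV stores channels in B, G, R order */
                double luma = 0.114 * px.val[0] + 0.587 * px.val[1] + 0.299 * px.val[2];
                cvSet2D(gray, y, x, cvScalar(luma, 0, 0, 0));
            }
        }
        return gray;
    }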


Here is sample code using LSD with OpenCV:

 #include "lsd.h" void Test_LSD(IplImage* img) { IplImage* grey = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1); cvCvtColor(img, grey, CV_BGR2GRAY); image_double image; ntuple_list out; unsigned int x,y,i,j; image = new_image_double(img->width,img->height); for(x=0;x<grey->width;x++) for(y=0;y<grey->height;y++) { CvScalar s= cvGet2D(grey,y,x); double pix= s.val[0]; image->data[ x + y * image->xsize ]= pix; /* image(x,y) */ } /* call LSD */ out = lsd(image); //out= lsd_scale(image,1); /* print output */ printf("%u line segments found:\n",out->size); vector<Line> vec; for(i=0;i<out->size;i++) { //for(j=0;j<out->dim;j++) { //printf("%f ",out->values[ i * out->dim + j ]); Line line; line.x1= out->values[ i * out->dim + 0]; line.y1= out->values[ i * out->dim + 1]; line.x2= out->values[ i * out->dim + 2]; line.y2= out->values[ i * out->dim + 3]; vec.push_back(line); } //printf("\n"); } IplImage* black= cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3); cvZero(black); draw_lines(vec,black); /*cvNamedWindow("img", 0); cvShowImage("img", img);*/ cvSaveImage("lines_detect.png",black/*img*/); /* free memory */ free_image_double(image); free_ntuple_list(out); } 

Or, alternatively, in this way, returning the image with the detected lines instead of saving it:

    // Same assumptions as above: the Line struct and the LSD/OpenCV headers.
    IplImage* get_lines(IplImage* img, vector<Line>& vec_lines)
    {
        // to grey (commented out: the input is used as a single-channel image here)
        //IplImage* grey = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
        //cvCvtColor(img, grey, CV_BGR2GRAY);

        image_double image;
        ntuple_list out;
        unsigned int x, y, i;

        image = new_image_double(img->width, img->height);
        for (x = 0; x < img->width; x++)
            for (y = 0; y < img->height; y++)
            {
                CvScalar s = cvGet2D(img, y, x);
                double pix = s.val[0];
                image->data[ x + y * image->xsize ] = pix;
            }

        /* call LSD */
        out = lsd(image);
        //out = lsd_scale(image, 1);

        //printf("%u line segments found:\n", out->size);
        for (i = 0; i < out->size; i++)
        {
            Line line;
            line.x1 = out->values[ i * out->dim + 0 ];
            line.y1 = out->values[ i * out->dim + 1 ];
            line.x2 = out->values[ i * out->dim + 2 ];
            line.y2 = out->values[ i * out->dim + 3 ];
            vec_lines.push_back(line);
        }

        IplImage* black = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
        cvZero(black);
        for (size_t n = 0; n < vec_lines.size(); ++n)
        {
            cvLine(black,
                   cvPoint(vec_lines[n].x1, vec_lines[n].y1),
                   cvPoint(vec_lines[n].x2, vec_lines[n].y2),
                   CV_RGB(255, 255, 255), 1, CV_AA);
        }

        /*cvNamedWindow("img", 0);
        cvShowImage("img", img);*/
        //cvSaveImage("lines_detect.png", black);

        /* free memory */
        free_image_double(image);
        free_ntuple_list(out);
        return black;
    }
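To apply this to a video, as the question asks, one way is to combine get_lines() with the capture loop from the first answer. A rough sketch, assuming the Line struct and get_lines() above are available and that each frame is converted to grayscale before detection (this glue code is not part of the original answers):

    #include <vector>
    #include <cv.h>
    #include <highgui.h>

    // Rough sketch: run get_lines() on every frame of a video and show the result.
    int main(int argc, char* argv[])
    {
        CvCapture* capture = cvCaptureFromAVI(argv[1]);
        if (!capture) return -1;

        cvNamedWindow("lines", CV_WINDOW_AUTOSIZE);

        IplImage* frame;
        char key = 0;
        while (key != 'q' && (frame = cvQueryFrame(capture)) != 0)
        {
            // LSD works on a single-channel image, so convert the frame first.
            IplImage* grey = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
            cvCvtColor(frame, grey, CV_BGR2GRAY);

            std::vector<Line> lines;
            IplImage* result = get_lines(grey, lines);  // segments drawn in white

            cvShowImage("lines", result);

            cvReleaseImage(&grey);
            cvReleaseImage(&result);
            key = cvWaitKey(33);
        }

        cvReleaseCapture(&capture);
        cvDestroyWindow("lines");
        return 0;
    }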
