My naive implementation uses the method described in the OpenCV bounding-box tutorial for tracking the red spots:
// Returns the centroid of every blob found in a binary (single-channel) mask.
//
// Each contour is simplified with approxPolyDP and the center of its minimum
// enclosing circle is taken as the blob position.
//
// NOTE: cv::findContours may modify the input image (OpenCV < 3.2), which is
// why the parameter is a non-const reference; pass a clone if the caller still
// needs the mask afterwards.
//
// On a multi-channel input, logs an error to stderr and returns an empty vector.
std::vector<cv::Point2f> get_positions(cv::Mat& image)
{
    if (image.channels() > 1)
    {
        std::cerr << "get_positions: !!! Input image must have a single channel" << std::endl;
        return std::vector<cv::Point2f>();
    }

    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(image, contours, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);

    // Tolerance (in pixels) for the polygonal approximation of each contour.
    const double approx_epsilon = 5.0;

    std::vector<cv::Point2f> center(contours.size());
    std::vector<float> radius(contours.size());
    std::vector<cv::Point> poly; // reused across iterations to avoid reallocations
    for (size_t i = 0; i < contours.size(); i++)
    {
        cv::approxPolyDP(contours[i], poly, approx_epsilon, true);
        // minEnclosingCircle accepts the point vector directly — no cast needed.
        cv::minEnclosingCircle(poly, center[i], radius[i]);
    }
    return center;
}
The main() function below puts it all together: it segments the red pixels in HSV space, as suggested by @Dennis (inRange, erode, dilate), and then calls get_positions() on the resulting mask.
int main()
{
cv::VideoCapture cap(0);
if (!cap.isOpened())
{
std::cout << "!!! Failed to open webcam" << std::endl;
return -1;
}
std::string wnd1 = "Input", wnd2 = "Red Objs", wnd3 = "Output";
int low_h = 160, low_s = 140, low_v = 50;
int high_h = 179, high_s = 255, high_v = 255;
cv::Mat frame, hsv_frame, red_objs;
while (true)
{
if (!cap.read(frame))
break;
cv::Mat orig_frame = frame.clone();
cv::imshow(wnd1, orig_frame);
orig_frame:

cv::cvtColor(frame, hsv_frame, CV_BGR2HSV);
cv::inRange(hsv_frame,
cv::Scalar(low_h, low_s, low_v),
cv::Scalar(high_h, high_s, high_v),
red_objs);
cv::erode(red_objs, red_objs, cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3)));
cv::dilate(red_objs, red_objs, cv::getStructuringElement(cv::MORPH_RECT, cv::Size(7, 7)));
cv::Mat objs = red_objs.clone();
cv::imshow(wnd2, objs);
objs:

std::vector<cv::Point2f> points = get_positions(objs);
for (unsigned int i = 0; i < points.size(); i++)
cv::circle(frame, points[i], 3, cv::Scalar(0, 255, 0), -1, 8, 0);
cv::imshow(wnd3, frame);

char key = cv::waitKey(33);
if (key == 27) {
break;
}
}
cap.release();
return 0;
}