#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
using namespace cv;
/// Global variables
int threshold_value = 0;
int threshold_type = 3;
int const max_value = 255;
int const max_type = 4;
int const max_BINARY_value = 255;
Mat frame, src_gray, dst;
char* window_name = "Threshold Demo";
char* trackbar_type = "Type: \n 0: Binary \n 1: Binary Inverted \n 2: Truncate \n 3: To Zero \n 4: To Zero Inverted";
char* trackbar_value = "Value";
/// Function headers
void Threshold_Demo(int, void*);
int main(int argc, char** argv)
{
VideoCapture cap(0);
while (1)
{
cap >> frame;
/// Convert the image to Gray
cvtColor(frame, src_gray, CV_BGR2GRAY);
/// Create a window to display results
namedWindow(window_name, CV_WINDOW_AUTOSIZE);
/// Create Trackbar to choose type of Threshold
createTrackbar(trackbar_type,
window_name, &threshold_type,
max_type, Threshold_Demo);
createTrackbar(trackbar_value,
window_name, &threshold_value,
max_value, Threshold_Demo);
/// Call the function to initialize
Threshold_Demo(0, 0);
if (waitKey(30) == 27)
break;
}
return 0;
}
void Threshold_Demo(int, void*)
{
/* 0: Binary
1: Binary Inverted
2: Threshold Truncated
3: Threshold to Zero
4: Threshold to Zero Inverted
*/
threshold(src_gray, dst, threshold_value, max_BINARY_value, threshold_type);
imshow(window_name, dst);
}
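A quick aside, not part of the original post: the trackbar value 0-4 passed to threshold() is simply the numeric value of the corresponding THRESH_* constant. A minimal standalone sketch against the same OpenCV 2.x API used above, with a made-up 1x5 gray ramp, prints what each type does:
#include "opencv2/imgproc/imgproc.hpp"
#include <stdio.h>
using namespace cv;
int main()
{
// 1x5 gray ramp: 0, 64, 128, 192, 255
unsigned char ramp[] = { 0, 64, 128, 192, 255 };
Mat src(1, 5, CV_8UC1, ramp), dst;
// Trackbar value -> threshold type used by threshold():
// 0: THRESH_BINARY, 1: THRESH_BINARY_INV, 2: THRESH_TRUNC,
// 3: THRESH_TOZERO, 4: THRESH_TOZERO_INV
for (int type = 0; type <= 4; type++)
{
threshold(src, dst, 128, 255, type);
printf("type %d:", type);
for (int i = 0; i < dst.cols; i++)
printf(" %3d", dst.at<unsigned char>(0, i));
printf("\n");
}
return 0;
}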
Thursday, March 23, 2017
C++ opencv Threshold
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat src_gray;
int thresh = 100;
int max_thresh = 255;
/// Function header
void thresh_callback(int, void*);
/** @function main */
int main(int argc, char** argv)
{
VideoCapture cap(0);
while (1)
{
cap >> src;
/// Convert image to gray and blur it
cvtColor(src, src_gray, CV_BGR2GRAY);
blur(src_gray, src_gray, Size(3, 3));
/// Create Window
char* source_window = "Source";
namedWindow(source_window, CV_WINDOW_AUTOSIZE);
imshow(source_window, src);
createTrackbar(" Canny thresh:", "Source", &thresh, max_thresh, thresh_callback);
thresh_callback(0, 0);
if (waitKey(30) == 27)
break;
}
return(0);
}
/** @function thresh_callback */
void thresh_callback(int, void*)
{
Mat canny_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using canny
Canny(src_gray, canny_output, thresh, thresh * 2, 3);
/// Find contours
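/// CV_RETR_TREE retrieves every contour and records the full nesting hierarchy in hierarchy;
/// CV_CHAIN_APPROX_SIMPLE compresses straight segments so each is stored by its end points only.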
findContours(canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
/// Draw contours
Mat drawing = Mat::zeros(canny_output.size(), CV_8UC3);
for (int i = 0; i< contours.size(); i++)
{
drawContours(drawing, contours, i, Scalar(255,255,255), 1, 8, hierarchy, 0, Point());
}
/// Show in a window
namedWindow("Contours", CV_WINDOW_AUTOSIZE);
imshow("Contours", drawing);
}
C++ opencv Template Matching
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
/// Global Variables
Mat img; Mat templ; Mat result;
char* image_window = "Source Image";
char* result_window = "Result window";
int match_method;
int max_Trackbar = 5;
/// Function Headers
void MatchingMethod(int, void*);
/** @function main */
int main(int argc, char** argv)
{
VideoCapture cap(0);
while (1)
{
cap >> img;
templ = imread("D:\\121.jpg", 1);
namedWindow("template", CV_WINDOW_AUTOSIZE);
imshow("template", templ);
/// Create windows
namedWindow(image_window, CV_WINDOW_AUTOSIZE);
namedWindow(result_window, CV_WINDOW_AUTOSIZE);
/// Create Trackbar
char* trackbar_label = "Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED";
createTrackbar(trackbar_label, image_window, &match_method, max_Trackbar, MatchingMethod);
MatchingMethod(0, 0);
if (waitKey(30) == 27)
break;
}
return 0;
}
/**
* @function MatchingMethod
* @brief Trackbar callback
*/
void MatchingMethod(int, void*)
{
/// Source image to display
Mat img_display;
img.copyTo(img_display);
/// Create the result matrix
int result_cols = img.cols - templ.cols + 1;
int result_rows = img.rows - templ.rows + 1;
result.create(result_rows, result_cols, CV_32FC1);
/// Do the Matching and Normalize
matchTemplate(img, templ, result, match_method);
normalize(result, result, 0, 1, NORM_MINMAX, -1, Mat());
/// Localizing the best match with minMaxLoc
double minVal; double maxVal; Point minLoc; Point maxLoc;
Point matchLoc;
minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc, Mat());
/// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. For all the other methods, the higher the better
if (match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED)
{
matchLoc = minLoc;
}
else
{
matchLoc = maxLoc;
}
/// Show me what you got
rectangle(img_display, matchLoc, Point(matchLoc.x + templ.cols, matchLoc.y + templ.rows), Scalar::all(0), 2, 8, 0);
rectangle(result, matchLoc, Point(matchLoc.x + templ.cols, matchLoc.y + templ.rows), Scalar::all(0), 2, 8, 0);
imshow(image_window, img_display);
imshow(result_window, result);
return;
}
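Not from the original post, but useful context: matchTemplate() slides the template over every possible top-left position, so the result matrix is (W - w + 1) x (H - h + 1) and minMaxLoc() returns the best top-left corner. A minimal sketch with made-up synthetic data, against the same OpenCV 2.x API, checks both facts:
#include "opencv2/imgproc/imgproc.hpp"
#include <stdio.h>
using namespace cv;
int main()
{
// 100x80 black image with a white 20x10 block whose top-left corner is (30, 40)
Mat img = Mat::zeros(80, 100, CV_8UC1);
img(Rect(30, 40, 20, 10)).setTo(255);
Mat templ = img(Rect(30, 40, 20, 10)).clone();
Mat result;
matchTemplate(img, templ, result, CV_TM_SQDIFF);
// One score per possible top-left corner: (W - w + 1) x (H - h + 1)
printf("result: %d x %d (expected %d x %d)\n",
result.cols, result.rows, img.cols - templ.cols + 1, img.rows - templ.rows + 1);
double minVal, maxVal; Point minLoc, maxLoc;
minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);
// For SQDIFF the best match is the minimum; it lands on the block's top-left corner
printf("best match at (%d, %d)\n", minLoc.x, minLoc.y);
return 0;
}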
C++ opencv Sobel
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
using namespace cv;
/** @function main */
int main(int argc, char** argv)
{
Mat frame, src_gray;
Mat grad;
char* window_name = "Sobel Demo - Simple Edge Detector";
int scale = 1;
int delta = 0;
int ddepth = CV_16S;
int c;
VideoCapture cap(0);
while (1)
{
cap >> frame;
GaussianBlur(frame, frame, Size(3, 3), 0, 0, BORDER_DEFAULT);
/// Convert it to gray
cvtColor(frame, src_gray, CV_BGR2GRAY);
/// Create window
namedWindow(window_name, CV_WINDOW_AUTOSIZE);
/// Generate grad_x and grad_y
Mat grad_x, grad_y;
Mat abs_grad_x, abs_grad_y;
/// Gradient X
//Scharr( src_gray, grad_x, ddepth, 1, 0, scale, delta, BORDER_DEFAULT );
Sobel(src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT);
convertScaleAbs(grad_x, abs_grad_x);
/// Gradient Y
//Scharr( src_gray, grad_y, ddepth, 0, 1, scale, delta, BORDER_DEFAULT );
Sobel(src_gray, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT);
convertScaleAbs(grad_y, abs_grad_y);
/// Total Gradient (approximate)
addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 50, grad);
imshow(window_name, grad);
if (waitKey(30) == 27)
break;
}
return 0;
}
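An added note, not from the original post: the addWeighted() line above approximates the gradient magnitude sqrt(Gx^2 + Gy^2) by a weighted sum of |Gx| and |Gy|. A minimal sketch on a made-up 3x3 patch compares the two values at the center pixel; a float output depth is used for the same reason the demo uses CV_16S, so negative derivatives are not clipped:
#include "opencv2/imgproc/imgproc.hpp"
#include <stdio.h>
#include <math.h>
using namespace cv;
int main()
{
// Tiny made-up patch so the numbers are easy to follow
Mat src = (Mat_<unsigned char>(3, 3) << 0, 0, 0,
0, 100, 200,
0, 200, 255);
Mat gx, gy;
// A floating-point output depth keeps the sign of the derivative (CV_8U would clip negatives to 0)
Sobel(src, gx, CV_32F, 1, 0, 3);
Sobel(src, gy, CV_32F, 0, 1, 3);
double ax = fabs((double)gx.at<float>(1, 1));
double ay = fabs((double)gy.at<float>(1, 1));
// The demo above blends |Gx| and |Gy| as a cheap stand-in for the true magnitude
printf("center pixel: approx %.1f, exact %.1f\n",
0.5 * ax + 0.5 * ay, sqrt(ax * ax + ay * ay));
return 0;
}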
C++ opencv Smooth Image
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <conio.h>
using namespace cv;
using namespace std;
/// Global Variables
int DELAY_CAPTION = 1500;
int DELAY_BLUR = 100;
int MAX_KERNEL_LENGTH = 31;
Mat frame;
Mat dst;
int main(int argc, char** argv)
{
VideoCapture cap(0);
while (1)
{
cap >> frame;
dst = frame.clone();
namedWindow("Original image", CV_WINDOW_AUTOSIZE);
imshow("Original image", frame);
blur(frame, dst, Size(25, 25), Point(-1, -1));
namedWindow("Homogeneous Blur", CV_WINDOW_AUTOSIZE);
imshow("Homogeneous Blur", dst);
/// Applying Gaussian Blur
GaussianBlur(frame, dst, Size(25, 25), 0, 0);
namedWindow("Gaussian Blur", CV_WINDOW_AUTOSIZE);
imshow("Gaussian Blur", dst);
/// Applying Median blur
medianBlur(frame, dst, 25);
namedWindow("Median Blur", CV_WINDOW_AUTOSIZE);
imshow("Median Blur", dst);
/// Applying Bilateral Filter
bilateralFilter(frame, dst, 25, 25 * 2, 25 / 2);
namedWindow("Bilateral Blur", CV_WINDOW_AUTOSIZE);
imshow("Bilateral Blur", dst);
if (waitKey(30) == 27)
break;
}
return 0;
}
C++ opencv Remapping
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace cv;
/// Global variables
Mat frame, dst;
Mat map_x, map_y;
char* remap_window = "Remap demo";
int ind = 0;
/// Function Headers
void update_map(void);
/**
* @function main
*/
int main(int argc, char** argv)
{
VideoCapture cap(0);
while (1)
{
cap >> frame;
namedWindow("Original", CV_WINDOW_AUTOSIZE);
imshow("Original", frame);
/// Create dst, map_x and map_y with the same size as src:
dst.create(frame.size(), frame.type());
map_x.create(frame.size(), CV_32FC1);
map_y.create(frame.size(), CV_32FC1);
// Loop for reducing to half the size and centering
for (int j = 0; j < frame.rows; j++)
{
for (int i = 0; i < frame.cols; i++)
{
if (i > frame.cols*0.25 && i < frame.cols*0.75 && j > frame.rows*0.25 && j < frame.rows*0.75)
{
map_x.at<float>(j, i) = 2 * (i - frame.cols*0.25) + 0.5;
map_y.at<float>(j, i) = 2 * (j - frame.rows*0.25) + 0.5;
}
else
{
map_x.at<float>(j, i) = 0;
map_y.at<float>(j, i) = 0;
}
}
}
remap(frame, dst, map_x, map_y, CV_INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 0, 0));
namedWindow("reducing half the size and centering", CV_WINDOW_AUTOSIZE);
imshow("reducing half the size and centering", dst);
//Loop for upside down
for (int j = 0; j < frame.rows; j++)
{
for (int i = 0; i < frame.cols; i++)
{
map_x.at<float>(j, i) = i;
map_y.at<float>(j, i) = frame.rows - j;
}
}
remap(frame, dst, map_x, map_y, CV_INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 0, 0));
namedWindow("upside down", CV_WINDOW_AUTOSIZE);
imshow("upside down", dst);
//Loop for Reflecting x direction
for (int j = 0; j < frame.rows; j++)
{
for (int i = 0; i < frame.cols; i++)
{
map_x.at<float>(j, i) = frame.cols - i;
map_y.at<float>(j, i) = j;
}
}
remap(frame, dst, map_x, map_y, CV_INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 0, 0));
namedWindow("Reflecting x direction", CV_WINDOW_AUTOSIZE);
imshow("Reflecting x direction", dst);
//Loop for Reflecting both directions
for (int j = 0; j < frame.rows; j++)
{
for (int i = 0; i < frame.cols; i++)
{
map_x.at<float>(j, i) = frame.cols - i;
map_y.at<float>(j, i) = frame.rows - j;
}
}
remap(frame, dst, map_x, map_y, CV_INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 0, 0));
namedWindow("Reflecting both directions:", CV_WINDOW_AUTOSIZE);
imshow("Reflecting both directions:", dst);
if (waitKey(30) == 27)
break;
}
return 0;
}
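Worth spelling out (added, not in the original post): remap() works backwards, i.e. dst(y, x) = src(map_y(y, x), map_x(y, x)), which is why the maps above are filled per destination pixel. A minimal sketch on a made-up 1x4 row shows a horizontal flip with the same OpenCV 2.x API:
#include "opencv2/imgproc/imgproc.hpp"
#include <stdio.h>
using namespace cv;
int main()
{
// remap reads backwards: dst(y, x) = src(map_y(y, x), map_x(y, x))
Mat src = (Mat_<unsigned char>(1, 4) << 10, 20, 30, 40);
Mat map_x(src.size(), CV_32FC1), map_y(src.size(), CV_32FC1);
for (int i = 0; i < src.cols; i++)
{
map_x.at<float>(0, i) = (float)(src.cols - 1 - i); // read pixels right-to-left
map_y.at<float>(0, i) = 0;
}
Mat dst;
remap(src, dst, map_x, map_y, CV_INTER_LINEAR);
for (int i = 0; i < dst.cols; i++)
printf("%d ", dst.at<unsigned char>(0, i)); // prints 40 30 20 10
printf("\n");
return 0;
}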
C++ opencv Pyramid
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <math.h>
#include <stdlib.h>
#include <iostream>
using namespace cv;
using namespace std;
/// Global variables
Mat frame, dst;
int main(int argc, char** argv)
{
VideoCapture cap(0);
while (1)
{
cap >> frame;
dst = frame.clone();
namedWindow("Original image", CV_WINDOW_AUTOSIZE);
imshow("Original image", frame);
/// Zoom Up
pyrUp(frame, dst, Size(frame.cols * 2, frame.rows * 2));
cout << "** Zoom In: Image x 2"<<endl;
namedWindow("Pyramids Up", CV_WINDOW_AUTOSIZE);
imshow("Pyramids Up", dst);
/// Zoom Down
pyrDown(frame, dst, Size(frame.cols / 2, frame.rows / 2));
cout << "** Zoom Out: Image / 2" << endl;
namedWindow("Pyramids Down", CV_WINDOW_AUTOSIZE);
imshow("Pyramids Down", dst);
if (waitKey(30) == 27)
break;
}
return 0;
}
C++ opencv Morphological
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <conio.h>
using namespace cv;
using namespace std;
/// Global variables
Mat frame, dst;
int morph_elem = 0;
int morph_size = 0;
int morph_operator = 0;
int const max_operator = 4;
int const max_elem = 2;
int const max_kernel_size = 21;
/** Function Headers */
void Morphology_Operations(int, void*);
/** @function main */
int main(int argc, char** argv)
{
VideoCapture cap(0);
while (1)
{
cap >> frame;
/// Create window
namedWindow("Morphological", CV_WINDOW_AUTOSIZE);
/// Create Trackbar to select Morphology operation
createTrackbar("Operator:\n 0: Opening - 1: Closing \n 2: Gradient - 3: Top Hat \n 4: Black Hat", "Morphological", &morph_operator, max_operator, Morphology_Operations);
/// Create Trackbar to select kernel type
createTrackbar("Element:\n 0: Rect - 1: Cross - 2: Ellipse", "Morphological",
&morph_elem, max_elem,
Morphology_Operations);
/// Create Trackbar to choose kernel size
createTrackbar("Kernel size:\n 2n +1", "Morphological",
&morph_size, max_kernel_size,
Morphology_Operations);
/// Default start
Morphology_Operations(0, 0);
if (waitKey(30) == 27)
break;
}
return 0;
}
//function Morphology_Operations
void Morphology_Operations(int, void*)
{
// Since MORPH_X : 2,3,4,5 and 6
int operation = morph_operator + 2;
Mat element = getStructuringElement(morph_elem, Size(2 * morph_size + 1, 2 * morph_size + 1), Point(morph_size, morph_size));
/// Apply the specified morphology operation
morphologyEx(frame, dst, operation, element);
imshow("Morphological", dst);
}
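For reference (added, not from the original post): the "+ 2" in Morphology_Operations() shifts the trackbar value onto OpenCV's operation codes, since MORPH_OPEN..MORPH_BLACKHAT are the constants 2..6 (0 and 1 are plain erode and dilate). A minimal sketch shows opening, i.e. erosion followed by dilation, removing an isolated white pixel:
#include "opencv2/imgproc/imgproc.hpp"
#include <stdio.h>
using namespace cv;
int main()
{
// Trackbar value + 2 -> MORPH_OPEN(2), MORPH_CLOSE(3), MORPH_GRADIENT(4),
// MORPH_TOPHAT(5), MORPH_BLACKHAT(6)
Mat img = Mat::zeros(7, 7, CV_8UC1);
img.at<unsigned char>(3, 3) = 255; // one isolated white pixel ("salt" noise)
Mat element = getStructuringElement(MORPH_RECT, Size(3, 3));
Mat opened;
morphologyEx(img, opened, MORPH_OPEN, element); // erosion followed by dilation
printf("white pixels before: %d, after opening: %d\n",
countNonZero(img), countNonZero(opened)); // 1 -> 0
return 0;
}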
C++ opencv Moment
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat src_gray;
int thresh = 100;
int max_thresh = 255;
RNG rng(12345);
/// Function header
void thresh_callback(int, void*);
/** @function main */
int main(int argc, char** argv)
{
VideoCapture cap(0);
while (1)
{
cap >> src;
/// Convert image to gray and blur it
cvtColor(src, src_gray, CV_BGR2GRAY);
blur(src_gray, src_gray, Size(3, 3));
/// Create Window
char* source_window = "Source";
namedWindow(source_window, CV_WINDOW_AUTOSIZE);
imshow(source_window, src);
createTrackbar(" Canny thresh:", "Source", &thresh, max_thresh, thresh_callback);
thresh_callback(0, 0);
if (waitKey(30) == 27)
break;
}
return(0);
}
/** @function thresh_callback */
void thresh_callback(int, void*)
{
Mat canny_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using canny
Canny(src_gray, canny_output, thresh, thresh * 2, 3);
/// Find contours
findContours(canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
/// Get the moments
vector<Moments> mu(contours.size());
for (int i = 0; i < contours.size(); i++)
{
mu[i] = moments(contours[i], false);
}
/// Get the mass centers:
vector<Point2f> mc(contours.size());
for (int i = 0; i < contours.size(); i++)
{
mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
}
/// Draw contours
Mat drawing = Mat::zeros(canny_output.size(), CV_8UC3);
for (int i = 0; i< contours.size(); i++)
{
drawContours(drawing, contours, i, Scalar(0,0,255), 1, 8, hierarchy, 0, Point());
circle(drawing, mc[i], 4, Scalar(0,255,125), -1, 8, 0);
}
/// Show in a window
namedWindow("Contours", CV_WINDOW_AUTOSIZE);
imshow("Contours", drawing);
/// Calculate the area with the moments 00 and compare with the result of the OpenCV function
printf("\t Info: Area and Contour Length \n");
for (int i = 0; i< contours.size(); i++)
{
printf(" * Contour[%d] - Area (M_00) = %.2f - Area OpenCV: %.2f - Length: %.2f \n", i, mu[i].m00, contourArea(contours[i]), arcLength(contours[i], true));
drawContours(drawing, contours, i, Scalar(0,255,0), 1, 8, hierarchy, 0, Point());
circle(drawing, mc[i], 4, Scalar(255,0,0), -1, 8, 0);
}
}
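A small check, not part of the original post: the mass center printed above is the standard moment ratio (m10/m00, m01/m00), and m00 equals the contour area. A minimal sketch on a made-up 10x10 square contour verifies both:
#include "opencv2/imgproc/imgproc.hpp"
#include <stdio.h>
#include <vector>
using namespace cv;
using namespace std;
int main()
{
// Axis-aligned 10x10 square with corners at (10,10) and (20,20)
vector<Point> square;
square.push_back(Point(10, 10));
square.push_back(Point(20, 10));
square.push_back(Point(20, 20));
square.push_back(Point(10, 20));
Moments mu = moments(square, false);
// Mass center = (m10/m00, m01/m00); for this square it is (15, 15)
printf("area m00 = %.1f, centroid = (%.1f, %.1f)\n",
mu.m00, mu.m10 / mu.m00, mu.m01 / mu.m00);
return 0;
}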
C++ opencv Linear Filter
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
using namespace std;
using namespace cv;
int main(int argc, char** argv)
{
/// Declare variables
Mat frame, dst, kernel;
int kernel_size;
VideoCapture cap(0);
while (1)
{
cap >> frame;
dst = frame.clone();
namedWindow("Original number", CV_WINDOW_AUTOSIZE);
imshow("Original number", frame);
/// Update kernel size for a normalized box filter
kernel_size = 15;
kernel = Mat::ones(kernel_size, kernel_size, CV_32F) / (float)(kernel_size*kernel_size);
/// Apply filter
filter2D(frame, dst, -1, kernel, Point(-1, -1), 0, BORDER_DEFAULT);
namedWindow("filter2D Demo", CV_WINDOW_AUTOSIZE);
imshow("filter2D Demo", dst);
if (waitKey(30) == 27)
break;
}
return 0;
}
C++ opencv Laplace
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
using namespace cv;
/** @function main */
int main(int argc, char** argv)
{
Mat frame, src_gray, dst;
int kernel_size = 5;
int scale = 1;
int delta = 0;
int ddepth = CV_16S;
char* window_name = "Laplace Demo";
int c;
VideoCapture cap(0);
while (1)
{
cap >> frame;
/// Remove noise by blurring with a Gaussian filter
GaussianBlur(frame, frame, Size(3, 3), 0, 0, BORDER_DEFAULT);
/// Convert the image to grayscale
cvtColor(frame, src_gray, CV_BGR2GRAY);
/// Create window
namedWindow(window_name, CV_WINDOW_AUTOSIZE);
/// Apply Laplace function
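/// The Laplacian goes negative on one side of an edge, so a signed depth (CV_16S) is requested
/// and convertScaleAbs() below takes absolute values and converts back to 8-bit for display.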
Mat abs_dst;
Laplacian(src_gray, dst, ddepth, kernel_size, scale, delta, BORDER_DEFAULT);
convertScaleAbs(dst, abs_dst);
/// Show what you got
imshow(window_name, abs_dst);
if (waitKey(30) == 27)
break;
}
return 0;
}
C++ opencv Hough Line
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
Mat frame;
Mat dst, cdst;
VideoCapture cap(0);
while (1)
{
cap >> frame;
//Detecting the edges
Canny(frame, dst, 50, 200, 3);
//Convert the single-channel edge map back to BGR so colored lines can be drawn on it
cvtColor(dst, cdst, CV_GRAY2BGR);
#if 0
vector<Vec2f> lines;
HoughLines(dst, lines, 1, CV_PI / 180, 100, 0, 0);
for (size_t i = 0; i < lines.size(); i++)
{
float rho = lines[i][0], theta = lines[i][1];
Point pt1, pt2;
double a = cos(theta), b = sin(theta);
double x0 = a*rho, y0 = b*rho;
pt1.x = cvRound(x0 + 1000 * (-b));
pt1.y = cvRound(y0 + 1000 * (a));
pt2.x = cvRound(x0 - 1000 * (-b));
pt2.y = cvRound(y0 - 1000 * (a));
line(cdst, pt1, pt2, Scalar(0, 0, 255), 3, CV_AA);
}
#else
vector<Vec4i> lines;
HoughLinesP(dst, lines, 1, CV_PI / 180, 50, 50, 10);
for (size_t i = 0; i < lines.size(); i++)
{
Vec4i l = lines[i];
line(cdst, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(34, 139, 34), 3, CV_AA);
}
#endif
imshow("source", frame);
imshow("detected lines", cdst);
if (waitKey(30) == 27)
break;
}
return 0;
}
C++ opencv Hough Circle
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
/** @function main */
int main(int argc, char** argv)
{
Mat frame, src_gray;
VideoCapture cap(0);
while (1)
{
cap >> frame;
/// Convert it to gray
cvtColor(frame, src_gray, CV_BGR2GRAY);
/// Reduce the noise so we avoid false circle detection
GaussianBlur(src_gray, src_gray, Size(9, 9), 2, 2);
vector<Vec3f> circles;
/// Apply the Hough Transform to find the circles
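/// Arguments: accumulator resolution dp = 1, minimum distance between centers = rows/8,
/// param1 = upper Canny threshold (200), param2 = accumulator threshold (100, lower it to
/// detect more but weaker circles), min and max radius = 0 (no explicit limit).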
HoughCircles(src_gray, circles, CV_HOUGH_GRADIENT, 1, src_gray.rows / 8, 200, 100, 0, 0);
/// Draw the circles detected
for (size_t i = 0; i < circles.size(); i++)
{
Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
int radius = cvRound(circles[i][2]);
// circle center
circle(frame, center, 3, Scalar(0, 255, 0), 1, 8, 0);
// circle outline
circle(frame, center, radius, Scalar(0, 0, 255), 1, 8, 0);
}
/// Show your results
namedWindow("Hough Circle Transform Demo", CV_WINDOW_AUTOSIZE);
imshow("Hough Circle Transform Demo", frame);
if (waitKey(30) == 27)
break;
}
return 0;
}
C++ opencv Histogram
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
int main(int argc, char** argv)
{
VideoCapture cap(0);
Mat frame, dst;
while (1)
{
cap >> frame;
/// Split the image into its 3 color planes ( B, G and R )
vector<Mat> bgr_planes;
split(frame, bgr_planes);
/// Establish the number of bins
int histSize = 256;
/// Set the ranges ( for B,G,R )
float range[] = { 0, 256 };
const float* histRange = { range };
bool uniform = true; bool accumulate = false;
Mat b_hist, g_hist, r_hist;
/// Compute the histograms:
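/// calcHist arguments: source array, number of source arrays (1), channel index (0),
/// optional mask (none), output histogram, histogram dimensionality (1), number of bins,
/// value range per dimension, uniform bins, and whether to accumulate into an existing histogram.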
calcHist(&bgr_planes[0], 1, 0, Mat(), b_hist, 1, &histSize, &histRange, uniform, accumulate);
calcHist(&bgr_planes[1], 1, 0, Mat(), g_hist, 1, &histSize, &histRange, uniform, accumulate);
calcHist(&bgr_planes[2], 1, 0, Mat(), r_hist, 1, &histSize, &histRange, uniform, accumulate);
// Draw the histograms for B, G and R
int hist_w = 512; int hist_h = 400;
int bin_w = cvRound((double)hist_w / histSize);
Mat histImage(hist_h, hist_w, CV_8UC3, Scalar(0, 0, 0));
/// Normalize the result to [ 0, histImage.rows ]
normalize(b_hist, b_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat());
normalize(g_hist, g_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat());
normalize(r_hist, r_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat());
/// Draw for each channel
for (int i = 1; i < histSize; i++)
{
line(histImage, Point(bin_w*(i - 1), hist_h - cvRound(b_hist.at<float>(i - 1))),
Point(bin_w*(i), hist_h - cvRound(b_hist.at<float>(i))),
Scalar(255, 0, 0), 1, 8, 0);
line(histImage, Point(bin_w*(i - 1), hist_h - cvRound(g_hist.at<float>(i - 1))),
Point(bin_w*(i), hist_h - cvRound(g_hist.at<float>(i))),
Scalar(0, 255, 0), 1, 8, 0);
line(histImage, Point(bin_w*(i - 1), hist_h - cvRound(r_hist.at<float>(i - 1))),
Point(bin_w*(i), hist_h - cvRound(r_hist.at<float>(i))),
Scalar(0, 0, 255), 1, 8, 0);
}
if (waitKey(30) == 27)
break;
/// Display
namedWindow("calcHist Demo", CV_WINDOW_AUTOSIZE);
namedWindow("Video", CV_WINDOW_AUTOSIZE);
imshow("calcHist Demo", histImage);
imshow("Video", frame);
}
return 0;
}
C++ opencv Histogram Equalization
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
Mat src, dst;
VideoCapture cap(0);
while (1)
{
cap >> src;
/// Convert to grayscale
cvtColor(src, dst, CV_BGR2GRAY);
/// Apply Histogram Equalization
equalizeHist(dst, dst);
/// Display results
namedWindow("Original image", CV_WINDOW_AUTOSIZE);
namedWindow("equalized_window", CV_WINDOW_AUTOSIZE);
imshow("Original image", src);
imshow("equalized_window", dst);
if (waitKey(30) == 27)
break;
}
return 0;
}
C++ opencv Dilation Erosion
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <conio.h>
using namespace cv;
using namespace std;
Mat frame, erosion_dst, dilation_dst;
int erosion_elem = 0;
int erosion_size = 0;
int dilation_elem = 0;
int dilation_size = 0;
int const max_elem = 2;
int const max_kernel_size = 21;
/* Function prototype */
void Erosion(int, void*);
void Dilation(int, void*);
int main(int argc, char** argv)
{
VideoCapture cap(0);
while (1)
{
cap >> frame;
/// Create windows
namedWindow("Erosion Demo", CV_WINDOW_AUTOSIZE);
namedWindow("Dilation Demo", CV_WINDOW_AUTOSIZE);
moveWindow("Dilation Demo", frame.cols, 0);
/// Create Erosion Trackbar
createTrackbar("Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Erosion Demo", &erosion_elem, max_elem, Erosion);
createTrackbar("Kernel size:\n 2n +1", "Erosion Demo", &erosion_size, max_kernel_size, Erosion);
/// Create Dilation Trackbar
createTrackbar("Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Dilation Demo", &dilation_elem, max_elem, Dilation);
createTrackbar("Kernel size:\n 2n +1", "Dilation Demo", &dilation_size, max_kernel_size, Dilation);
// Default start
Erosion(0, 0);
Dilation(0, 0);
if (waitKey(30) == 27)
break;
}
return 0;
}
/** @function Erosion */
void Erosion(int, void*)
{
int erosion_type;
if (erosion_elem == 0)
erosion_type = MORPH_RECT;
else if (erosion_elem == 1)
erosion_type = MORPH_CROSS;
else if (erosion_elem == 2)
erosion_type = MORPH_ELLIPSE;
Mat element = getStructuringElement(erosion_type, Size(2 * erosion_size + 1, 2 * erosion_size + 1), Point(erosion_size, erosion_size));
// Apply the erosion operation
erode(frame, erosion_dst, element);
imshow("Erosion Demo", erosion_dst);
}
/* @function Dilation */
void Dilation(int, void*)
{
int dilation_type;
if (dilation_elem == 0)
dilation_type = MORPH_RECT;
else if (dilation_elem == 1)
dilation_type = MORPH_CROSS;
else if (dilation_elem == 2)
dilation_type = MORPH_ELLIPSE;
Mat element = getStructuringElement(dilation_type, Size(2 * dilation_size + 1, 2 * dilation_size + 1), Point(dilation_size, dilation_size));
// Apply the dilation operation
dilate(frame, dilation_dst, element);
imshow("Dilation Demo", dilation_dst);
}
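One more added note, not from the original post: the "Kernel size: 2n+1" trackbars always produce an odd-sized structuring element whose anchor Point(n, n) is its exact center. A minimal sketch prints the 5x5 cross element for n = 2:
#include "opencv2/imgproc/imgproc.hpp"
#include <stdio.h>
using namespace cv;
int main()
{
// n = 2 on the trackbar -> a (2n+1) x (2n+1) = 5x5 element anchored at its center
int n = 2;
Mat element = getStructuringElement(MORPH_CROSS,
Size(2 * n + 1, 2 * n + 1),
Point(n, n));
for (int r = 0; r < element.rows; r++)
{
for (int c = 0; c < element.cols; c++)
printf("%d ", (int)element.at<unsigned char>(r, c));
printf("\n");
}
return 0;
}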
C++ opencv Convex Hull
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat src_gray;
int thresh = 100;
int max_thresh = 255;
/// Function header
void thresh_callback(int, void*);
/** @function main */
int main(int argc, char** argv)
{
VideoCapture cap(0);
while (1)
{
cap >> src;
/// Convert image to gray and blur it
cvtColor(src, src_gray, CV_BGR2GRAY);
blur(src_gray, src_gray, Size(3, 3));
/// Create Window
char* source_window = "Source";
namedWindow(source_window, CV_WINDOW_AUTOSIZE);
imshow(source_window, src);
createTrackbar(" Threshold:", "Source", &thresh, max_thresh, thresh_callback);
thresh_callback(0, 0);
if (waitKey(30) == 27)
break;
}
return(0);
}
/** @function thresh_callback */
void thresh_callback(int, void*)
{
Mat src_copy = src.clone();
Mat threshold_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using Threshold
threshold(src_gray, threshold_output, thresh, 255, THRESH_BINARY);
/// Find contours
findContours(threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
/// Find the convex hull object for each contour
vector<vector<Point> >hull(contours.size());
for (int i = 0; i < contours.size(); i++)
{
convexHull(Mat(contours[i]), hull[i], false);
}
/// Draw contours + hull results
Mat drawing = Mat::zeros(threshold_output.size(), CV_8UC3);
for (int i = 0; i< contours.size(); i++)
{
drawContours(drawing, hull, i, Scalar(0,255,0), 1, 8, vector<Vec4i>(), 0, Point());
}
/// Show in a window
namedWindow("Hull demo", CV_WINDOW_AUTOSIZE);
imshow("Hull demo", drawing);
}
C++ opencv Canny Edge
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
using namespace cv;
/// Global variables
Mat frame, src_gray;
Mat dst, detected_edges;
int ratio = 3;
int kernel_size = 3;
char* window_name = "Edge Map";
void CannyThreshold(int, void*)
{
imshow(window_name, dst);
}
/** @function main */
int main(int argc, char** argv)
{
VideoCapture cap(0);
while (1)
{
cap >> frame;
namedWindow("Original", CV_WINDOW_AUTOSIZE);
imshow("Original", frame);
dst = frame.clone();
/// Convert the image to grayscale
cvtColor(frame, src_gray, CV_BGR2GRAY);
/// Reduce noise with a kernel 3x3
blur(src_gray, detected_edges, Size(3, 3));
/// Canny detector
Canny(detected_edges, detected_edges, 15, 15 * ratio, kernel_size);
/// Using Canny's output as a mask, we display our result
dst = Scalar::all(0);
frame.copyTo(dst, detected_edges);
namedWindow("Canny Edge Map", CV_WINDOW_AUTOSIZE);
imshow("Canny Edge Map", dst);
if (waitKey(30) == 27)
break;
}
return 0;
}
C++ opencv Bounding Box
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat src_gray;
int thresh = 100;
int max_thresh = 255;
/// Function header
void thresh_callback(int, void*);
/** @function main */
int main(int argc, char** argv)
{
VideoCapture cap(0);
while (1)
{
cap >> src;
/// Convert image to gray and blur it
cvtColor(src, src_gray, CV_BGR2GRAY);
blur(src_gray, src_gray, Size(3, 3));
/// Create Window
char* source_window = "Source";
namedWindow(source_window, CV_WINDOW_AUTOSIZE);
imshow(source_window, src);
createTrackbar(" Threshold:", "Source", &thresh, max_thresh, thresh_callback);
thresh_callback(0, 0);
if (waitKey(30) == 27)
break;
}
return(0);
}
/** @function thresh_callback */
void thresh_callback(int, void*)
{
Mat threshold_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using Threshold
threshold(src_gray, threshold_output, thresh, 255, THRESH_BINARY);
/// Find contours
findContours(threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
/// Approximate contours to polygons + get bounding rects and circles
vector<vector<Point> > contours_poly(contours.size());
vector<Rect> boundRect(contours.size());
vector<Point2f>center(contours.size());
vector<float>radius(contours.size());
for (int i = 0; i < contours.size(); i++)
{
approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true);
boundRect[i] = boundingRect(Mat(contours_poly[i]));
minEnclosingCircle((Mat)contours_poly[i], center[i], radius[i]);
}
/// Draw polygonal contours + bounding rects + circles
Mat drawing = Mat::zeros(threshold_output.size(), CV_8UC3);
for (int i = 0; i < contours.size(); i++)
{
drawContours(drawing, contours_poly, i, Scalar(0,255,0), 1, 8, vector<Vec4i>(), 0, Point());
rectangle(drawing, boundRect[i].tl(), boundRect[i].br(), Scalar(0,255,255), 1, 8, 0);
circle(drawing, center[i], (int)radius[i], Scalar(255, 255, 0) , 1, 8, 0);
}
/// Show in a window
namedWindow("Contours", CV_WINDOW_AUTOSIZE);
imshow("Contours", drawing);
}
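Webcam frames are noisy, so the listing above typically draws dozens of tiny boxes. One common refinement is to keep only contours above a minimum area and draw the boxes on the live frame instead of a black canvas. A small helper along those lines (the function name, min_area and the colour are illustrative choices, not part of the original code):
#include "opencv2/imgproc/imgproc.hpp"
#include <vector>
using namespace cv;
using namespace std;
// Draw bounding boxes only for contours larger than min_area, directly on the colour frame.
void draw_large_boxes(Mat& frame, const vector<vector<Point> >& contours, double min_area)
{
for (size_t i = 0; i < contours.size(); i++)
{
if (contourArea(contours[i]) < min_area)
continue;                                   // skip small blobs / noise
Rect box = boundingRect(contours[i]);
rectangle(frame, box.tl(), box.br(), Scalar(0, 255, 255), 2, 8, 0);
}
}
Calling draw_large_boxes(src, contours, 500.0) right after findContours in thresh_callback and showing src gives a much cleaner picture than drawing every contour.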
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat src_gray;
int thresh = 100;
int max_thresh = 255;
/// Function header
void thresh_callback(int, void*);
/** @function main */
int main(int argc, char** argv)
{
VideoCapture cap(0);
while (1)
{
cap >> src;
/// Convert image to gray and blur it
cvtColor(src, src_gray, CV_BGR2GRAY);
blur(src_gray, src_gray, Size(3, 3));
/// Create Window
char* source_window = "Source";
namedWindow(source_window, CV_WINDOW_AUTOSIZE);
imshow(source_window, src);
createTrackbar(" Threshold:", "Source", &thresh, max_thresh, thresh_callback);
thresh_callback(0, 0);
if (waitKey(30) == 27)
break;
}
return(0);
}
/** @function thresh_callback */
void thresh_callback(int, void*)
{
Mat threshold_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using Threshold
threshold(src_gray, threshold_output, thresh, 255, THRESH_BINARY);
/// Find contours
findContours(threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
/// Approximate contours to polygons + get bounding rects and circles
vector<vector<Point> > contours_poly(contours.size());
vector<Rect> boundRect(contours.size());
vector<Point2f>center(contours.size());
vector<float>radius(contours.size());
for (int i = 0; i < contours.size(); i++)
{
approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true);
boundRect[i] = boundingRect(Mat(contours_poly[i]));
minEnclosingCircle((Mat)contours_poly[i], center[i], radius[i]);
}
/// Draw polygonal contour + bonding rects + circles
Mat drawing = Mat::zeros(threshold_output.size(), CV_8UC3);
for (int i = 0; i< contours.size(); i++)
{
drawContours(drawing, contours_poly, i, Scalar(0,255,0), 1, 8, vector<Vec4i>(), 0, Point());
rectangle(drawing, boundRect[i].tl(), boundRect[i].br(), Scalar(0,255,255), 1, 8, 0);
circle(drawing, center[i], (int)radius[i], Scalar(255, 255, 0) , 1, 8, 0);
}
/// Show in a window
namedWindow("Contours", CV_WINDOW_AUTOSIZE);
imshow("Contours", drawing);
}
C++ opencv Border
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
using namespace cv;
/// Global Variables
Mat frame, dst;
int top, bottom, left, right;
Scalar value;
RNG rng(0xfffffff);
/** @function main */
int main(int argc, char** argv)
{
VideoCapture cap(0);
while (1)
{
cap >> frame;
/// Create window
namedWindow("original picture", CV_WINDOW_AUTOSIZE);
imshow("original picture", frame);
/// Initialize arguments for the filter
top = (int)(0.1 * frame.rows);
bottom = (int)(0.1 * frame.rows);
left = (int)(0.1 * frame.cols);
right = (int)(0.1 * frame.cols);
dst = frame;
//random color
value = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
copyMakeBorder(frame, dst, top, bottom, left, right, BORDER_CONSTANT, value);
namedWindow("copyMakeBorder Demo", CV_WINDOW_AUTOSIZE);
imshow("copyMakeBorder Demo", dst);
if (waitKey(30) == 27)
break;
}
return 0;
}
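BORDER_CONSTANT is only one of the border types copyMakeBorder supports. A sketch comparing it with BORDER_REPLICATE on the same webcam stream (the window names and the 10% padding are arbitrary choices):
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;
int main(int, char**)
{
Mat frame, constant_dst, replicate_dst;
VideoCapture cap(0);
while (1)
{
cap >> frame;
int pad_y = (int)(0.1 * frame.rows);
int pad_x = (int)(0.1 * frame.cols);
// Constant border: fill the padding with a fixed colour
copyMakeBorder(frame, constant_dst, pad_y, pad_y, pad_x, pad_x, BORDER_CONSTANT, Scalar(0, 0, 255));
// Replicate border: extend the outermost row/column into the padding
copyMakeBorder(frame, replicate_dst, pad_y, pad_y, pad_x, pad_x, BORDER_REPLICATE);
imshow("BORDER_CONSTANT", constant_dst);
imshow("BORDER_REPLICATE", replicate_dst);
if (waitKey(30) == 27)
break;
}
return 0;
}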
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
using namespace cv;
/// Global Variables
Mat frame, dst;
int top, bottom, left, right;
Scalar value;
RNG rng(0xfffffff);
/** @function main */
int main(int argc, char** argv)
{
int c;
VideoCapture cap(0);
while (1)
{
cap >> frame;
/// Create window
namedWindow("original picture", CV_WINDOW_AUTOSIZE);
imshow("original picture", frame);
/// Initialize arguments for the filter
top = (int)( 0.1 * frame.cols); //(int)(0.1*image.rows);
bottom = (int)(0.1*frame.cols);
left = (int)(0.1*frame.cols);
right = (int)(0.1*frame.cols);
dst = frame;
//random color
value = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
copyMakeBorder(frame, dst, top, bottom, left, right, BORDER_CONSTANT, value);
namedWindow("copyMakeBorder Demo", CV_WINDOW_AUTOSIZE);
imshow("copyMakeBorder Demo", dst);
if (waitKey(30) == 27)
break;
}
return 0;
}
C++ opencv Back Projection
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
/// Global Variables
Mat src; Mat hsv; Mat hue;
int bins = 25;
/// Function Headers
void Hist_and_Backproj(int, void*);
/** @function main */
int main(int argc, char** argv)
{
VideoCapture cap(0);
while (1)
{
cap >> src;
/// Transform it to HSV
cvtColor(src, hsv, CV_BGR2HSV);
/// Use only the Hue value
hue.create(hsv.size(), hsv.depth());
int ch[] = { 0, 0 };
mixChannels(&hsv, 1, &hue, 1, ch, 1);
/// Create Trackbar to enter the number of bins
const char* window_image = "Source image";
namedWindow(window_image, CV_WINDOW_AUTOSIZE);
createTrackbar("* Hue bins: ", window_image, &bins, 180, Hist_and_Backproj);
Hist_and_Backproj(0, 0);
/// Show the image
imshow(window_image, src);
if (waitKey(30) == 27)
break;
}
return 0;
}
void Hist_and_Backproj(int, void*)
{
MatND hist;
int histSize = MAX(bins, 2);
float hue_range[] = { 0, 180 };
const float* ranges = { hue_range };
/// Get the Histogram and normalize it
calcHist(&hue, 1, 0, Mat(), hist, 1, &histSize, &ranges, true, false);
normalize(hist, hist, 0, 255, NORM_MINMAX, -1, Mat());
/// Get Backprojection
MatND backproj;
calcBackProject(&hue, 1, 0, hist, backproj, &ranges, 1, true);
/// Draw the backproj
imshow("BackProj", backproj);
/// Draw the histogram
int w = 400; int h = 400;
int bin_w = cvRound((double)w / histSize);
Mat histImg = Mat::zeros(h, w, CV_8UC3); // Mat::zeros takes (rows, cols)
for (int i = 0; i < bins; i++)
{
rectangle(histImg, Point(i*bin_w, h), Point((i + 1)*bin_w, h - cvRound(hist.at<float>(i)*h / 255.0)), Scalar(0, 255,0), -1);
}
imshow("Histogram", histImg);
}
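The raw back projection is a grey probability map. To segment the chosen hue, it can be thresholded into a binary mask and cleaned with a morphological opening before being applied to the frame. A sketch of that post-processing step (the function name, the threshold value and the 5x5 kernel are illustrative, not from the original post):
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
// Turn a back projection into a binary mask and return the masked colour frame.
Mat mask_from_backproj(const Mat& src, const Mat& backproj, double backproj_thresh)
{
Mat mask, cleaned;
threshold(backproj, mask, backproj_thresh, 255, THRESH_BINARY);
// Opening (erode then dilate) removes isolated high-probability pixels
Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(5, 5));
morphologyEx(mask, cleaned, MORPH_OPEN, kernel);
Mat result = Mat::zeros(src.size(), src.type());
src.copyTo(result, cleaned);        // keep only the pixels the histogram considers likely
return result;
}
Inside Hist_and_Backproj, something like imshow("Segmented", mask_from_backproj(src, backproj, 50)); would show the segmented frame next to the back projection.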
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
/// Global Variables
Mat src; Mat hsv; Mat hue;
int bins = 25;
/// Function Headers
void Hist_and_Backproj(int, void*);
/** @function main */
int main(int argc, char** argv)
{
VideoCapture cap(0);
while (1)
{
cap >> src;
/// Transform it to HSV
cvtColor(src, hsv, CV_BGR2HSV);
/// Use only the Hue value
hue.create(hsv.size(), hsv.depth());
int ch[] = { 0, 0 };
mixChannels(&hsv, 1, &hue, 1, ch, 1);
/// Create Trackbar to enter the number of bins
char* window_image = "Source image";
namedWindow(window_image, CV_WINDOW_AUTOSIZE);
createTrackbar("* Hue bins: ", window_image, &bins, 180, Hist_and_Backproj);
Hist_and_Backproj(0, 0);
/// Show the image
imshow(window_image, src);
if (waitKey(30) == 27)
break;
}
return 0;
}
void Hist_and_Backproj(int, void*)
{
MatND hist;
int histSize = MAX(bins, 2);
float hue_range[] = { 0, 180 };
const float* ranges = { hue_range };
/// Get the Histogram and normalize it
calcHist(&hue, 1, 0, Mat(), hist, 1, &histSize, &ranges, true, false);
normalize(hist, hist, 0, 255, NORM_MINMAX, -1, Mat());
/// Get Backprojection
MatND backproj;
calcBackProject(&hue, 1, 0, hist, backproj, &ranges, 1, true);
/// Draw the backproj
imshow("BackProj", backproj);
/// Draw the histogram
int w = 400; int h = 400;
int bin_w = cvRound((double)w / histSize);
Mat histImg = Mat::zeros(w, h, CV_8UC3);
for (int i = 0; i < bins; i++)
{
rectangle(histImg, Point(i*bin_w, h), Point((i + 1)*bin_w, h - cvRound(hist.at<float>(i)*h / 255.0)), Scalar(0, 255,0), -1);
}
imshow("Histogram", histImg);
}
C++ opencv Affine Transform
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
Point2f srcTri[3];
Point2f dstTri[3];
Mat rot_mat(2, 3, CV_32FC1);
Mat warp_mat(2, 3, CV_32FC1);
Mat src, warp_dst, warp_rotate_dst;
VideoCapture cap(0);
while (1)
{
cap >> src;
/// Set the dst image the same type and size as src
warp_dst = src.clone();
/// Set your 3 points to calculate the Affine Transform
srcTri[0] = Point2f(0, 0);
srcTri[1] = Point2f(src.cols - 1, 0);
srcTri[2] = Point2f(0, src.rows - 1);
dstTri[0] = Point2f(src.cols*0.0, src.rows*0.33);
dstTri[1] = Point2f(src.cols*0.85, src.rows*0.25);
dstTri[2] = Point2f(src.cols*0.15, src.rows*0.7);
/// Get the Affine Transform
warp_mat = getAffineTransform(srcTri, dstTri);
/// Apply the Affine Transform just found to the src image
warpAffine(src, warp_dst, warp_mat, warp_dst.size());
/** Rotating the image after Warp */
/// Compute a rotation matrix with respect to the center of the image
Point center = Point(warp_dst.cols / 2, warp_dst.rows / 2);
double angle = -50.0;
double scale = 0.6;
/// Get the rotation matrix with the specifications above
rot_mat = getRotationMatrix2D(center, angle, scale);
/// Rotate the warped image
warpAffine(warp_dst, warp_rotate_dst, rot_mat, warp_dst.size());
/// Show what you got
namedWindow("Source image", CV_WINDOW_AUTOSIZE);
imshow("Source image", src);
namedWindow("Warp", CV_WINDOW_AUTOSIZE);
imshow("Warp", warp_dst);
namedWindow("Warp + Rotate", CV_WINDOW_AUTOSIZE);
imshow("Warp + Rotate", warp_rotate_dst);
if (waitKey(30) == 27)
break;
}
return 0;
}
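An affine warp can also be undone: invertAffineTransform produces the 2x3 inverse matrix, so warping back with it roughly recovers the original frame (up to resampling loss and the corners that were pushed out of view). A minimal round-trip sketch using the same three-point correspondence as above (the window names are arbitrary):
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;
int main(int, char**)
{
Mat src, warped, restored;
VideoCapture cap(0);
while (1)
{
cap >> src;
// Same three-point correspondence as in the listing above
Point2f srcTri[3] = { Point2f(0, 0), Point2f(src.cols - 1.0f, 0), Point2f(0, src.rows - 1.0f) };
Point2f dstTri[3] = { Point2f(src.cols*0.0f, src.rows*0.33f), Point2f(src.cols*0.85f, src.rows*0.25f), Point2f(src.cols*0.15f, src.rows*0.7f) };
Mat warp_mat = getAffineTransform(srcTri, dstTri);
Mat inv_mat;
invertAffineTransform(warp_mat, inv_mat);          // 2x3 inverse of the affine map
warpAffine(src, warped, warp_mat, src.size());
warpAffine(warped, restored, inv_mat, src.size()); // map back; blank corners remain where data was lost
imshow("Warped", warped);
imshow("Restored", restored);
if (waitKey(30) == 27)
break;
}
return 0;
}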
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
Point2f srcTri[3];
Point2f dstTri[3];
Mat rot_mat(2, 3, CV_32FC1);
Mat warp_mat(2, 3, CV_32FC1);
Mat src, warp_dst, warp_rotate_dst;
VideoCapture cap(0);
while (1)
{
cap >> src;
/// Set the dst image the same type and size as src
warp_dst = src.clone();
/// Set your 3 points to calculate the Affine Transform
srcTri[0] = Point2f(0, 0);
srcTri[1] = Point2f(src.cols - 1, 0);
srcTri[2] = Point2f(0, src.rows - 1);
dstTri[0] = Point2f(src.cols*0.0, src.rows*0.33);
dstTri[1] = Point2f(src.cols*0.85, src.rows*0.25);
dstTri[2] = Point2f(src.cols*0.15, src.rows*0.7);
/// Get the Affine Transform
warp_mat = getAffineTransform(srcTri, dstTri);
/// Apply the Affine Transform just found to the src image
warpAffine(src, warp_dst, warp_mat, warp_dst.size());
/** Rotating the image after Warp */
/// Compute a rotation matrix with respect to the center of the image
Point center = Point(warp_dst.cols / 2, warp_dst.rows / 2);
double angle = -50.0;
double scale = 0.6;
/// Get the rotation matrix with the specifications above
rot_mat = getRotationMatrix2D(center, angle, scale);
/// Rotate the warped image
warpAffine(warp_dst, warp_rotate_dst, rot_mat, warp_dst.size());
/// Show what you got
namedWindow("Source image", CV_WINDOW_AUTOSIZE);
imshow("Source image", src);
namedWindow("Warp", CV_WINDOW_AUTOSIZE);
imshow("Warp", warp_dst);
namedWindow("Warp + Rotate", CV_WINDOW_AUTOSIZE);
imshow("Warp + Rotate", warp_rotate_dst);
if (waitKey(30) == 27)
break;
}
return 0;
}