feat: 切换后端至PaddleOCR-NCNN,切换工程为CMake

1.项目后端整体迁移至PaddleOCR-NCNN算法,已通过基本的兼容性测试
2.工程改为使用CMake组织,后续为了更好地兼容第三方库,不再提供QMake工程
3.重整权利声明文件,重整代码工程,确保最小化侵权风险

Log: 切换后端至PaddleOCR-NCNN,切换工程为CMake
Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
This commit is contained in:
wangzhengyang
2022-05-10 09:54:44 +08:00
parent ecdd171c6f
commit 718c41634f
10018 changed files with 3593797 additions and 186748 deletions

View File

@ -0,0 +1,73 @@
/**
* @file AddingImagesTrackbar.cpp
* @brief Simple linear blender ( dst = alpha*src1 + beta*src2 )
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <cstdio>
#include <iostream>
using namespace cv;
using std::cout;
/** Global Variables */
const int alpha_slider_max = 100;
int alpha_slider;
double alpha;
double beta;
/** Matrices to store images */
Mat src1;
Mat src2;
Mat dst;
//![on_trackbar]
/**
* @function on_trackbar
* @brief Callback for trackbar
*/
static void on_trackbar( int, void* )
{
    // Recompute the blend weights from the slider position and redraw.
    const double a = static_cast<double>(alpha_slider) / alpha_slider_max;
    alpha = a;
    beta = 1.0 - a;
    addWeighted( src1, alpha, src2, beta, 0.0, dst );
    imshow( "Linear Blend", dst );
}
//![on_trackbar]
/**
* @function main
* @brief Main function
*/
int main( void )
{
   //![load]
   /// Read images ( both have to be of the same size and type )
   src1 = imread( samples::findFile("LinuxLogo.jpg") );
   src2 = imread( samples::findFile("WindowsLogo.jpg") );
   //![load]
   if( src1.empty() ) { cout << "Error loading src1 \n"; return -1; }
   if( src2.empty() ) { cout << "Error loading src2 \n"; return -1; }

   /// Initialize values
   alpha_slider = 0;

   //![window]
   namedWindow("Linear Blend", WINDOW_AUTOSIZE); // Create Window
   //![window]

   //![create_trackbar]
   char TrackbarName[50];
   // snprintf bounds the write to the buffer size; sprintf could overflow
   // TrackbarName if the format ever grows.
   snprintf( TrackbarName, sizeof(TrackbarName), "Alpha x %d", alpha_slider_max );
   createTrackbar( TrackbarName, "Linear Blend", &alpha_slider, alpha_slider_max, on_trackbar );
   //![create_trackbar]

   /// Show the initial (alpha = 0) blend before any user interaction
   on_trackbar( alpha_slider, 0 );

   /// Wait until user press some key
   waitKey(0);
   return 0;
}

View File

@ -0,0 +1,73 @@
/**
* @file BasicLinearTransformsTrackbar.cpp
* @brief Simple program to change contrast and brightness
* @date Mon, June 6, 2011
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
// we're NOT "using namespace std;" here, to avoid collisions between the beta variable and std::beta in c++17
using namespace cv;
/** Global Variables */
const int alpha_max = 5;
const int beta_max = 125;
int alpha; /**< Simple contrast control */
int beta; /**< Simple brightness control*/
/** Matrices to store images */
Mat image;
/**
* @function on_trackbar
* @brief Called whenever any of alpha or beta changes
*/
static void on_trackbar( int, void* )
{
    // Rebuild the output as new = saturate(alpha * src + beta), per channel.
    Mat new_image = Mat::zeros( image.size(), image.type() );
    for( int row = 0; row < image.rows; row++ )
    {
        for( int col = 0; col < image.cols; col++ )
        {
            for( int ch = 0; ch < 3; ch++ )
            {
                const int transformed = alpha * image.at<Vec3b>(row, col)[ch] + beta;
                new_image.at<Vec3b>(row, col)[ch] = saturate_cast<uchar>( transformed );
            }
        }
    }
    imshow("New Image", new_image);
}
/**
* @function main
* @brief Main function
*/
int main( int argc, char** argv )
{
   /// Read image given by user (defaults to lena.jpg)
   String imageName("lena.jpg"); // by default
   if (argc > 1)
   {
      imageName = argv[1];
   }
   image = imread( samples::findFile( imageName ) );
   if( image.empty() )
   {
      // Bail out early: the trackbar callback dereferences `image`
      // unconditionally, so continuing with an empty Mat would crash.
      std::cout << "Could not open or find the image: " << imageName << std::endl;
      return -1;
   }
   /// Initialize contrast (alpha) and brightness (beta) controls
   alpha = 1;
   beta = 0;
   /// Create Windows
   namedWindow("Original Image", 1);
   namedWindow("New Image", 1);
   /// Create Trackbars (both feed the same callback)
   createTrackbar( "Contrast", "New Image", &alpha, alpha_max, on_trackbar );
   createTrackbar( "Brightness", "New Image", &beta, beta_max, on_trackbar );
   /// Show the original in both windows until the first trackbar event
   imshow("Original Image", image);
   imshow("New Image", image);
   /// Wait until user press some key
   waitKey();
   return 0;
}

View File

@ -0,0 +1,51 @@
/**
* @function EqualizeHist_Demo.cpp
* @brief Demo code for equalizeHist function
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
/**
* @function main
*/
int main( int argc, char** argv )
{
//! [Load image]
CommandLineParser parser( argc, argv, "{@input | lena.jpg | input image}" );
Mat src = imread( samples::findFile( parser.get<String>( "@input" ) ), IMREAD_COLOR );
if( src.empty() )
{
cout << "Could not open or find the image!\n" << endl;
cout << "Usage: " << argv[0] << " <Input image>" << endl;
return -1;
}
//! [Load image]
//! [Convert to grayscale]
cvtColor( src, src, COLOR_BGR2GRAY );
//! [Convert to grayscale]
//! [Apply Histogram Equalization]
Mat dst;
equalizeHist( src, dst );
//! [Apply Histogram Equalization]
//! [Display results]
imshow( "Source image", src );
imshow( "Equalized Image", dst );
//! [Display results]
//! [Wait until user exits the program]
waitKey();
//! [Wait until user exits the program]
return 0;
}

View File

@ -0,0 +1,137 @@
/**
* @file MatchTemplate_Demo.cpp
* @brief Sample code to use the function MatchTemplate
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace std;
using namespace cv;
//! [declare]
/// Global Variables
bool use_mask;
Mat img; Mat templ; Mat mask; Mat result;
const char* image_window = "Source Image";
const char* result_window = "Result window";
int match_method;
int max_Trackbar = 5;
//! [declare]
/// Function Headers
void MatchingMethod( int, void* );
/**
* @function main
*/
// Parse <image> <template> [mask], create the GUI and run an initial match.
int main( int argc, char** argv )
{
// The sample needs at least a source image and a template to search for.
if (argc < 3)
{
cout << "Not enough parameters" << endl;
cout << "Usage:\n" << argv[0] << " <image_name> <template_name> [<mask_name>]" << endl;
return -1;
}
//! [load_image]
/// Load image and template
img = imread( argv[1], IMREAD_COLOR );
templ = imread( argv[2], IMREAD_COLOR );
// Optional third argument enables masked matching (only honoured for
// TM_SQDIFF and TM_CCORR_NORMED -- see MatchingMethod below).
if(argc > 3) {
use_mask = true;
mask = imread( argv[3], IMREAD_COLOR );
}
if(img.empty() || templ.empty() || (use_mask && mask.empty()))
{
cout << "Can't read one of the images" << endl;
return EXIT_FAILURE;
}
//! [load_image]
//! [create_windows]
/// Create windows
namedWindow( image_window, WINDOW_AUTOSIZE );
namedWindow( result_window, WINDOW_AUTOSIZE );
//! [create_windows]
//! [create_trackbar]
/// Create Trackbar selecting among the six matching methods
const char* trackbar_label = "Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED";
createTrackbar( trackbar_label, image_window, &match_method, max_Trackbar, MatchingMethod );
//! [create_trackbar]
// Run once so a result is visible before the user touches the trackbar.
MatchingMethod( 0, 0 );
//! [wait_key]
waitKey(0);
return EXIT_SUCCESS;
//! [wait_key]
}
/**
* @function MatchingMethod
* @brief Trackbar callback
*/
void MatchingMethod( int, void* )
{
//! [copy_source]
/// Draw on a copy so repeated callbacks do not accumulate rectangles on `img`
Mat img_display;
img.copyTo( img_display );
//! [copy_source]
//! [create_result_matrix]
/// One score per valid top-left template position inside the image
int result_cols = img.cols - templ.cols + 1;
int result_rows = img.rows - templ.rows + 1;
result.create( result_rows, result_cols, CV_32FC1 );
//! [create_result_matrix]
//! [match_template]
/// Do the Matching; the mask is only passed for the two methods that accept it
bool method_accepts_mask = (TM_SQDIFF == match_method || match_method == TM_CCORR_NORMED);
if (use_mask && method_accepts_mask)
{ matchTemplate( img, templ, result, match_method, mask); }
else
{ matchTemplate( img, templ, result, match_method); }
//! [match_template]
//! [normalize]
/// Stretch scores to [0,1] so the result matrix is displayable as an image
normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );
//! [normalize]
//! [best_match]
/// Localizing the best match with minMaxLoc
double minVal; double maxVal; Point minLoc; Point maxLoc;
Point matchLoc;
minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );
//! [best_match]
//! [match_loc]
/// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. For all the other methods, the higher the better
if( match_method == TM_SQDIFF || match_method == TM_SQDIFF_NORMED )
{ matchLoc = minLoc; }
else
{ matchLoc = maxLoc; }
//! [match_loc]
//! [imshow]
/// Outline the matched, template-sized region in both views
rectangle( img_display, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
rectangle( result, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
imshow( image_window, img_display );
imshow( result_window, result );
//! [imshow]
return;
}

View File

@ -0,0 +1,106 @@
/**
* @file BackProject_Demo1.cpp
* @brief Sample code for backproject function usage
* @author OpenCV team
*/
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
/// Global Variables
Mat hue;
int bins = 25;
/// Function Headers
void Hist_and_Backproj(int, void* );
/**
* @function main
*/
int main( int argc, char* argv[] )
{
    //! [Read the image]
    CommandLineParser parser( argc, argv, "{@input | | input image}" );
    Mat src = imread( parser.get<String>( "@input" ) );
    if( src.empty() )
    {
        cout << "Could not open or find the image!\n" << endl;
        cout << "Usage: " << argv[0] << " <Input image>" << endl;
        return -1;
    }
    //! [Read the image]

    //! [Transform it to HSV]
    Mat hsv;
    cvtColor( src, hsv, COLOR_BGR2HSV );
    //! [Transform it to HSV]

    //! [Use only the Hue value]
    // Copy channel 0 (hue) of `hsv` into the single-channel global `hue`.
    hue.create(hsv.size(), hsv.depth());
    int from_to[] = { 0, 0 };
    mixChannels( &hsv, 1, &hue, 1, from_to, 1 );
    //! [Use only the Hue value]

    //! [Create Trackbar to enter the number of bins]
    const char* window_image = "Source image";
    namedWindow( window_image );
    createTrackbar("* Hue bins: ", window_image, &bins, 180, Hist_and_Backproj );
    Hist_and_Backproj(0, 0);
    //! [Create Trackbar to enter the number of bins]

    //! [Show the image]
    imshow( window_image, src );
    // Wait until user exits the program
    waitKey();
    //! [Show the image]
    return 0;
}
/**
* @function Hist_and_Backproj
* @brief Callback to Trackbar
*/
void Hist_and_Backproj(int, void* )
{
    //! [initialize]
    // At least two bins are required for a meaningful histogram.
    int histSize = MAX( bins, 2 );
    float hue_range[] = { 0, 180 };
    const float* ranges[] = { hue_range };
    //! [initialize]

    //! [Get the Histogram and normalize it]
    Mat hist;
    calcHist( &hue, 1, 0, Mat(), hist, 1, &histSize, ranges, true, false );
    normalize( hist, hist, 0, 255, NORM_MINMAX, -1, Mat() );
    //! [Get the Histogram and normalize it]

    //! [Get Backprojection]
    Mat backproj;
    calcBackProject( &hue, 1, 0, hist, backproj, ranges, 1, true );
    //! [Get Backprojection]

    //! [Draw the backproj]
    imshow( "BackProj", backproj );
    //! [Draw the backproj]

    //! [Draw the histogram]
    const int w = 400, h = 400;
    const int bin_w = cvRound( (double) w / histSize );
    Mat histImg = Mat::zeros( h, w, CV_8UC3 );
    for (int bin = 0; bin < bins; bin++)
    {
        // Bar height is the normalized count scaled to the canvas height.
        const int bar_h = cvRound( hist.at<float>(bin) * h / 255.0 );
        rectangle( histImg, Point( bin * bin_w, h ), Point( (bin + 1) * bin_w, h - bar_h ),
                   Scalar( 0, 0, 255 ), FILLED );
    }
    imshow( "Histogram", histImg );
    //! [Draw the histogram]
}

View File

@ -0,0 +1,105 @@
/**
* @file BackProject_Demo2.cpp
* @brief Sample code for backproject function usage ( a bit more elaborated )
* @author OpenCV team
*/
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
/// Global Variables
Mat src, hsv, mask;
int low = 20, up = 20;
const char* window_image = "Source image";
/// Function Headers
void Hist_and_Backproj( );
void pickPoint (int event, int x, int y, int, void* );
/**
* @function main
*/
int main( int, char** argv )
{
/// Read the image
src = imread( argv[1] );
/// Transform it to HSV
cvtColor( src, hsv, COLOR_BGR2HSV );
/// Show the image
namedWindow( window_image );
imshow( window_image, src );
/// Set Trackbars for floodfill thresholds
createTrackbar( "Low thresh", window_image, &low, 255, 0 );
createTrackbar( "High thresh", window_image, &up, 255, 0 );
/// Set a Mouse Callback
setMouseCallback( window_image, pickPoint, 0 );
waitKey();
return 0;
}
/**
* @function pickPoint
*/
void pickPoint (int event, int x, int y, int, void* )
{
if( event != EVENT_LBUTTONDOWN )
{
return;
}
// Fill and get the mask
Point seed = Point( x, y );
int newMaskVal = 255;
Scalar newVal = Scalar( 120, 120, 120 );
int connectivity = 8;
int flags = connectivity + (newMaskVal << 8 ) + FLOODFILL_FIXED_RANGE + FLOODFILL_MASK_ONLY;
Mat mask2 = Mat::zeros( src.rows + 2, src.cols + 2, CV_8U );
floodFill( src, mask2, seed, newVal, 0, Scalar( low, low, low ), Scalar( up, up, up), flags );
mask = mask2( Range( 1, mask2.rows - 1 ), Range( 1, mask2.cols - 1 ) );
imshow( "Mask", mask );
Hist_and_Backproj( );
}
/**
* @function Hist_and_Backproj
*/
void Hist_and_Backproj( )
{
    // 2-D hue/saturation histogram parameters.
    const int h_bins = 30, s_bins = 32;
    int histSize[] = { h_bins, s_bins };
    float h_range[] = { 0, 180 };
    float s_range[] = { 0, 256 };
    const float* ranges[] = { h_range, s_range };
    int channels[] = { 0, 1 };

    /// Histogram of the flood-filled region (mask), normalized to [0,255]
    Mat hist;
    calcHist( &hsv, 1, channels, mask, hist, 2, histSize, ranges, true, false );
    normalize( hist, hist, 0, 255, NORM_MINMAX, -1, Mat() );

    /// Back-project the model histogram onto the whole HSV image
    Mat backproj;
    calcBackProject( &hsv, 1, channels, hist, backproj, ranges, 1, true );

    imshow( "BackProj", backproj );
}

View File

@ -0,0 +1,89 @@
/**
* @function calcHist_Demo.cpp
* @brief Demo code to use the function calcHist
* @author
*/
#include "opencv2/highgui.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace std;
using namespace cv;
/**
* @function main
*/
int main(int argc, char** argv)
{
//! [Load image]
CommandLineParser parser( argc, argv, "{@input | lena.jpg | input image}" );
Mat src = imread( samples::findFile( parser.get<String>( "@input" ) ), IMREAD_COLOR );
if( src.empty() )
{
return EXIT_FAILURE;
}
//! [Load image]
//! [Separate the image in 3 places ( B, G and R )]
vector<Mat> bgr_planes;
split( src, bgr_planes );
//! [Separate the image in 3 places ( B, G and R )]
//! [Establish the number of bins]
int histSize = 256;
//! [Establish the number of bins]
//! [Set the ranges ( for B,G,R) )]
float range[] = { 0, 256 }; //the upper boundary is exclusive
const float* histRange[] = { range };
//! [Set the ranges ( for B,G,R) )]
//! [Set histogram param]
bool uniform = true, accumulate = false;
//! [Set histogram param]
//! [Compute the histograms]
Mat b_hist, g_hist, r_hist;
calcHist( &bgr_planes[0], 1, 0, Mat(), b_hist, 1, &histSize, histRange, uniform, accumulate );
calcHist( &bgr_planes[1], 1, 0, Mat(), g_hist, 1, &histSize, histRange, uniform, accumulate );
calcHist( &bgr_planes[2], 1, 0, Mat(), r_hist, 1, &histSize, histRange, uniform, accumulate );
//! [Compute the histograms]
//! [Draw the histograms for B, G and R]
int hist_w = 512, hist_h = 400;
int bin_w = cvRound( (double) hist_w/histSize );
Mat histImage( hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) );
//! [Draw the histograms for B, G and R]
//! [Normalize the result to ( 0, histImage.rows )]
normalize(b_hist, b_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
normalize(g_hist, g_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
normalize(r_hist, r_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
//! [Normalize the result to ( 0, histImage.rows )]
//! [Draw for each channel]
for( int i = 1; i < histSize; i++ )
{
line( histImage, Point( bin_w*(i-1), hist_h - cvRound(b_hist.at<float>(i-1)) ),
Point( bin_w*(i), hist_h - cvRound(b_hist.at<float>(i)) ),
Scalar( 255, 0, 0), 2, 8, 0 );
line( histImage, Point( bin_w*(i-1), hist_h - cvRound(g_hist.at<float>(i-1)) ),
Point( bin_w*(i), hist_h - cvRound(g_hist.at<float>(i)) ),
Scalar( 0, 255, 0), 2, 8, 0 );
line( histImage, Point( bin_w*(i-1), hist_h - cvRound(r_hist.at<float>(i-1)) ),
Point( bin_w*(i), hist_h - cvRound(r_hist.at<float>(i)) ),
Scalar( 0, 0, 255), 2, 8, 0 );
}
//! [Draw for each channel]
//! [Display]
imshow("Source image", src );
imshow("calcHist Demo", histImage );
waitKey();
//! [Display]
return EXIT_SUCCESS;
}

View File

@ -0,0 +1,95 @@
/**
* @file compareHist_Demo.cpp
* @brief Sample code to use the function compareHist
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace std;
using namespace cv;
const char* keys =
"{ help h| | Print help message. }"
"{ @input1 | | Path to input image 1. }"
"{ @input2 | | Path to input image 2. }"
"{ @input3 | | Path to input image 3. }";
/**
* @function main
*/
// Compare the H-S histogram of a base image against itself, its bottom half,
// and two test images, using four comparison metrics.
int main( int argc, char** argv )
{
//! [Load three images with different environment settings]
CommandLineParser parser( argc, argv, keys );
// NOTE(review): the keys table declares these as positionals "@input1" etc.;
// confirm get<String>("input1") resolves them, otherwise use "@input1".
Mat src_base = imread( parser.get<String>("input1") );
Mat src_test1 = imread( parser.get<String>("input2") );
Mat src_test2 = imread( parser.get<String>("input3") );
if( src_base.empty() || src_test1.empty() || src_test2.empty() )
{
cout << "Could not open or find the images!\n" << endl;
parser.printMessage();
return -1;
}
//! [Load three images with different environment settings]
//! [Convert to HSV]
Mat hsv_base, hsv_test1, hsv_test2;
cvtColor( src_base, hsv_base, COLOR_BGR2HSV );
cvtColor( src_test1, hsv_test1, COLOR_BGR2HSV );
cvtColor( src_test2, hsv_test2, COLOR_BGR2HSV );
//! [Convert to HSV]
//! [Convert to HSV half]
// Bottom half of the base image: expected to still match the base well.
Mat hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows ), Range( 0, hsv_base.cols ) );
//! [Convert to HSV half]
//! [Using 50 bins for hue and 60 for saturation]
int h_bins = 50, s_bins = 60;
int histSize[] = { h_bins, s_bins };
// hue varies from 0 to 179, saturation from 0 to 255
float h_ranges[] = { 0, 180 };
float s_ranges[] = { 0, 256 };
const float* ranges[] = { h_ranges, s_ranges };
// Use the 0-th and 1-st channels
int channels[] = { 0, 1 };
//! [Using 50 bins for hue and 60 for saturation]
//! [Calculate the histograms for the HSV images]
// All histograms are normalized to [0,1] so the metrics are comparable.
Mat hist_base, hist_half_down, hist_test1, hist_test2;
calcHist( &hsv_base, 1, channels, Mat(), hist_base, 2, histSize, ranges, true, false );
normalize( hist_base, hist_base, 0, 1, NORM_MINMAX, -1, Mat() );
calcHist( &hsv_half_down, 1, channels, Mat(), hist_half_down, 2, histSize, ranges, true, false );
normalize( hist_half_down, hist_half_down, 0, 1, NORM_MINMAX, -1, Mat() );
calcHist( &hsv_test1, 1, channels, Mat(), hist_test1, 2, histSize, ranges, true, false );
normalize( hist_test1, hist_test1, 0, 1, NORM_MINMAX, -1, Mat() );
calcHist( &hsv_test2, 1, channels, Mat(), hist_test2, 2, histSize, ranges, true, false );
normalize( hist_test2, hist_test2, 0, 1, NORM_MINMAX, -1, Mat() );
//! [Calculate the histograms for the HSV images]
//! [Apply the histogram comparison methods]
// Methods 0..3 are Correlation, Chi-Square, Intersection, Bhattacharyya
// (the HISTCMP_* enum values).
for( int compare_method = 0; compare_method < 4; compare_method++ )
{
double base_base = compareHist( hist_base, hist_base, compare_method );
double base_half = compareHist( hist_base, hist_half_down, compare_method );
double base_test1 = compareHist( hist_base, hist_test1, compare_method );
double base_test2 = compareHist( hist_base, hist_test2, compare_method );
cout << "Method " << compare_method << " Perfect, Base-Half, Base-Test(1), Base-Test(2) : "
<< base_base << " / " << base_half << " / " << base_test1 << " / " << base_test2 << endl;
}
//! [Apply the histogram comparison methods]
cout << "Done \n";
return 0;
}

View File

@ -0,0 +1,74 @@
/**
* @file BasicLinearTransforms.cpp
* @brief Simple program to change contrast and brightness
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
// we're NOT "using namespace std;" here, to avoid collisions between the beta variable and std::beta in c++17
using std::cin;
using std::cout;
using std::endl;
using namespace cv;
/**
* @function main
* @brief Main function
*/
int main( int argc, char** argv )
{
    /// Read image given by user
    //! [basic-linear-transform-load]
    CommandLineParser parser( argc, argv, "{@input | lena.jpg | input image}" );
    Mat image = imread( samples::findFile( parser.get<String>( "@input" ) ) );
    if( image.empty() )
    {
        cout << "Could not open or find the image!\n" << endl;
        cout << "Usage: " << argv[0] << " <Input image>" << endl;
        return -1;
    }
    //! [basic-linear-transform-load]

    //! [basic-linear-transform-output]
    Mat new_image = Mat::zeros( image.size(), image.type() );
    //! [basic-linear-transform-output]

    //! [basic-linear-transform-parameters]
    double alpha = 1.0; /*< Simple contrast control */
    int beta = 0;       /*< Simple brightness control */

    /// Ask the user for the transform parameters
    cout << " Basic Linear Transforms " << endl;
    cout << "-------------------------" << endl;
    cout << "* Enter the alpha value [1.0-3.0]: "; cin >> alpha;
    cout << "* Enter the beta value [0-100]: "; cin >> beta;
    //! [basic-linear-transform-parameters]

    /// Do the operation new_image(i,j) = alpha*image(i,j) + beta.
    /// image.convertTo(new_image, -1, alpha, beta) would do the same;
    /// the explicit loops are kept to demonstrate per-pixel access.
    //! [basic-linear-transform-operation]
    for( int row = 0; row < image.rows; row++ ) {
        for( int col = 0; col < image.cols; col++ ) {
            for( int ch = 0; ch < image.channels(); ch++ ) {
                new_image.at<Vec3b>(row,col)[ch] =
                    saturate_cast<uchar>( alpha*image.at<Vec3b>(row,col)[ch] + beta );
            }
        }
    }
    //! [basic-linear-transform-operation]

    //! [basic-linear-transform-display]
    /// Show stuff
    imshow("Original Image", image);
    imshow("New Image", new_image);

    /// Wait until the user press a key
    waitKey();
    //! [basic-linear-transform-display]
    return 0;
}

View File

@ -0,0 +1,44 @@
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
int main(){
    // 8x8 binary test image.
    Mat input_image = (Mat_<uchar>(8, 8) <<
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 255, 255, 255, 0, 0, 0, 255,
        0, 255, 255, 255, 0, 0, 0, 0,
        0, 255, 255, 255, 0, 255, 0, 0,
        0, 0, 255, 0, 0, 0, 0, 0,
        0, 0, 255, 0, 0, 255, 255, 0,
        0, 255, 0, 255, 0, 0, 255, 0,
        0, 255, 255, 255, 0, 0, 0, 0);

    // Hit-or-miss kernel: 1 = pixel must be foreground, -1 = must be
    // background, 0 = don't care.
    Mat kernel = (Mat_<int>(3, 3) <<
        0, 1, 0,
        1, -1, 1,
        0, 1, 0);

    Mat output_image;
    morphologyEx(input_image, output_image, MORPH_HITMISS, kernel);

    // Upscale with nearest-neighbour so individual pixels stay visible.
    const int rate = 50;

    // Map kernel values {-1, 0, 1} to displayable grey levels {0, 127, 254}.
    kernel = (kernel + 1) * 127;
    kernel.convertTo(kernel, CV_8U);
    resize(kernel, kernel, Size(), rate, rate, INTER_NEAREST);
    imshow("kernel", kernel);
    moveWindow("kernel", 0, 0);

    resize(input_image, input_image, Size(), rate, rate, INTER_NEAREST);
    imshow("Original", input_image);
    moveWindow("Original", 0, 200);

    resize(output_image, output_image, Size(), rate, rate, INTER_NEAREST);
    imshow("Hit or Miss", output_image);
    moveWindow("Hit or Miss", 500, 200);

    waitKey(0);
    return 0;
}

View File

@ -0,0 +1,118 @@
/**
* @file Morphology_1.cpp
* @brief Erosion and Dilation sample code
* @author OpenCV team
*/
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
/// Global variables
Mat src, erosion_dst, dilation_dst;
int erosion_elem = 0;
int erosion_size = 0;
int dilation_elem = 0;
int dilation_size = 0;
int const max_elem = 2;
int const max_kernel_size = 21;
/** Function Headers */
void Erosion( int, void* );
void Dilation( int, void* );
//![main]
/**
* @function main
*/
int main( int argc, char** argv )
{
/// Load an image (default: LinuxLogo.jpg from the samples data)
CommandLineParser parser( argc, argv, "{@input | LinuxLogo.jpg | input image}" );
src = imread( samples::findFile( parser.get<String>( "@input" ) ), IMREAD_COLOR );
if( src.empty() )
{
cout << "Could not open or find the image!\n" << endl;
cout << "Usage: " << argv[0] << " <Input image>" << endl;
return -1;
}
/// Create windows, placed side by side
namedWindow( "Erosion Demo", WINDOW_AUTOSIZE );
namedWindow( "Dilation Demo", WINDOW_AUTOSIZE );
moveWindow( "Dilation Demo", src.cols, 0 );
/// Create Erosion Trackbars: element shape and kernel size
/// (size value n produces a (2n+1)x(2n+1) structuring element)
createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Erosion Demo",
&erosion_elem, max_elem,
Erosion );
createTrackbar( "Kernel size:\n 2n +1", "Erosion Demo",
&erosion_size, max_kernel_size,
Erosion );
/// Create Dilation Trackbars (same layout as erosion)
createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Dilation Demo",
&dilation_elem, max_elem,
Dilation );
createTrackbar( "Kernel size:\n 2n +1", "Dilation Demo",
&dilation_size, max_kernel_size,
Dilation );
/// Run both operations once so the windows show a result immediately
Erosion( 0, 0 );
Dilation( 0, 0 );
waitKey(0);
return 0;
}
//![main]
//![erosion]
/**
* @function Erosion
*/
void Erosion( int, void* )
{
    // Map the trackbar selection to a structuring-element shape.
    int erosion_type = 0;
    switch( erosion_elem )
    {
        case 0: erosion_type = MORPH_RECT;    break;
        case 1: erosion_type = MORPH_CROSS;   break;
        case 2: erosion_type = MORPH_ELLIPSE; break;
        default: break;
    }
    //![kernel]
    Mat element = getStructuringElement( erosion_type,
                                         Size( 2*erosion_size + 1, 2*erosion_size+1 ),
                                         Point( erosion_size, erosion_size ) );
    //![kernel]
    /// Apply the erosion operation
    erode( src, erosion_dst, element );
    imshow( "Erosion Demo", erosion_dst );
}
//![erosion]
//![dilation]
/**
* @function Dilation
*/
void Dilation( int, void* )
{
    // Map the trackbar selection to a structuring-element shape.
    int dilation_type = 0;
    switch( dilation_elem )
    {
        case 0: dilation_type = MORPH_RECT;    break;
        case 1: dilation_type = MORPH_CROSS;   break;
        case 2: dilation_type = MORPH_ELLIPSE; break;
        default: break;
    }
    Mat element = getStructuringElement( dilation_type,
                                         Size( 2*dilation_size + 1, 2*dilation_size+1 ),
                                         Point( dilation_size, dilation_size ) );
    /// Apply the dilation operation
    dilate( src, dilation_dst, element );
    imshow( "Dilation Demo", dilation_dst );
}
//![dilation]

View File

@ -0,0 +1,93 @@
/**
* @file Morphology_2.cpp
* @brief Advanced morphology Transformations sample code
* @author OpenCV team
*/
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace cv;
/// Global variables
Mat src, dst;
int morph_elem = 0;
int morph_size = 0;
int morph_operator = 0;
int const max_operator = 4;
int const max_elem = 2;
int const max_kernel_size = 21;
const char* window_name = "Morphology Transformations Demo";
/** Function Headers */
void Morphology_Operations( int, void* );
/**
* @function main
*/
int main( int argc, char** argv )
{
//![load]
/// Load the input image (default: baboon.jpg from the samples data)
CommandLineParser parser( argc, argv, "{@input | baboon.jpg | input image}" );
src = imread( samples::findFile( parser.get<String>( "@input" ) ), IMREAD_COLOR );
if (src.empty())
{
std::cout << "Could not open or find the image!\n" << std::endl;
std::cout << "Usage: " << argv[0] << " <Input image>" << std::endl;
return EXIT_FAILURE;
}
//![load]
//![window]
namedWindow( window_name, WINDOW_AUTOSIZE ); // Create window
//![window]
//![create_trackbar1]
/// Create Trackbar to select Morphology operation (offset by +2 in the
/// callback to reach the MORPH_* opening..blackhat constants)
createTrackbar("Operator:\n 0: Opening - 1: Closing \n 2: Gradient - 3: Top Hat \n 4: Black Hat", window_name, &morph_operator, max_operator, Morphology_Operations );
//![create_trackbar1]
//![create_trackbar2]
/// Create Trackbar to select kernel type
createTrackbar( "Element:\n 0: Rect - 1: Cross - 2: Ellipse", window_name,
&morph_elem, max_elem,
Morphology_Operations );
//![create_trackbar2]
//![create_trackbar3]
/// Create Trackbar to choose kernel size (n gives a (2n+1)x(2n+1) element)
createTrackbar( "Kernel size:\n 2n +1", window_name,
&morph_size, max_kernel_size,
Morphology_Operations );
//![create_trackbar3]
/// Run once so the window shows a result before any trackbar interaction
Morphology_Operations( 0, 0 );
waitKey(0);
return 0;
}
//![morphology_operations]
/**
* @function Morphology_Operations
*/
void Morphology_Operations( int, void* )
{
// Since MORPH_X : 2,3,4,5 and 6
//![operation]
int operation = morph_operator + 2;
//![operation]
Mat element = getStructuringElement( morph_elem, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );
/// Apply the specified morphology operation
morphologyEx( src, dst, operation, element );
imshow( window_name, dst );
}
//![morphology_operations]

View File

@ -0,0 +1,69 @@
/**
* @file Pyramids.cpp
* @brief Sample code of image pyramids (pyrDown and pyrUp)
* @author OpenCV team
*/
#include "iostream"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
using namespace std;
using namespace cv;
const char* window_name = "Pyramids Demo";
/**
* @function main
*/
int main( int argc, char** argv )
{
    /// General instructions
    cout << "\n Zoom In-Out demo \n "
            "------------------ \n"
            " * [i] -> Zoom in \n"
            " * [o] -> Zoom out \n"
            " * [ESC] -> Close program \n" << endl;

    //![load]
    const char* filename = argc >=2 ? argv[1] : "chicky_512.png";
    // Loads an image
    Mat src = imread( samples::findFile( filename ) );
    // Check if image is loaded fine
    if(src.empty()){
        printf(" Error opening image\n");
        printf(" Program Arguments: [image_name -- default chicky_512.png] \n");
        return EXIT_FAILURE;
    }
    //![load]

    //![loop]
    while( true )
    {
        //![show_image]
        imshow( window_name, src );
        //![show_image]
        const char key = (char)waitKey(0);

        if( key == 27 )  // ESC quits the demo
        {
            break;
        }
        //![pyrup]
        else if( key == 'i' )
        {
            pyrUp( src, src, Size( src.cols*2, src.rows*2 ) );
            printf( "** Zoom In: Image x 2 \n" );
        }
        //![pyrup]
        //![pyrdown]
        else if( key == 'o' )
        {
            pyrDown( src, src, Size( src.cols/2, src.rows/2 ) );
            printf( "** Zoom Out: Image / 2 \n" );
        }
        //![pyrdown]
    }
    //![loop]
    return EXIT_SUCCESS;
}

View File

@ -0,0 +1,153 @@
/**
* file Smoothing.cpp
* brief Sample code for simple filters
* author OpenCV team
*/
#include <iostream>
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
using namespace std;
using namespace cv;
/// Global Variables
int DELAY_CAPTION = 1500;
int DELAY_BLUR = 100;
int MAX_KERNEL_LENGTH = 31;
Mat src; Mat dst;
char window_name[] = "Smoothing Demo";
/// Function headers
int display_caption( const char* caption );
int display_dst( int delay );
/**
* function main
*/
int main( int argc, char ** argv )
{
namedWindow( window_name, WINDOW_AUTOSIZE );
/// Load the source image (default: lena.jpg)
const char* filename = argc >=2 ? argv[1] : "lena.jpg";
src = imread( samples::findFile( filename ), IMREAD_COLOR );
if (src.empty())
{
printf(" Error opening image\n");
printf(" Usage:\n %s [image_name-- default lena.jpg] \n", argv[0]);
return EXIT_FAILURE;
}
// display_caption / display_dst return non-zero once the user presses a key;
// every check below uses that to end the demo early.
if( display_caption( "Original Image" ) != 0 )
{
return 0;
}
dst = src.clone();
if( display_dst( DELAY_CAPTION ) != 0 )
{
return 0;
}
/// Applying Homogeneous blur; kernel side grows 1, 3, 5, ... (odd sizes)
if( display_caption( "Homogeneous Blur" ) != 0 )
{
return 0;
}
//![blur]
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
{
blur( src, dst, Size( i, i ), Point(-1,-1) );
if( display_dst( DELAY_BLUR ) != 0 )
{
return 0;
}
}
//![blur]
/// Applying Gaussian blur (sigma of 0, 0 lets OpenCV derive it from the size)
if( display_caption( "Gaussian Blur" ) != 0 )
{
return 0;
}
//![gaussianblur]
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
{
GaussianBlur( src, dst, Size( i, i ), 0, 0 );
if( display_dst( DELAY_BLUR ) != 0 )
{
return 0;
}
}
//![gaussianblur]
/// Applying Median blur (aperture is a single odd number)
if( display_caption( "Median Blur" ) != 0 )
{
return 0;
}
//![medianblur]
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
{
medianBlur ( src, dst, i );
if( display_dst( DELAY_BLUR ) != 0 )
{
return 0;
}
}
//![medianblur]
/// Applying Bilateral Filter (diameter i, sigmaColor i*2, sigmaSpace i/2)
if( display_caption( "Bilateral Blur" ) != 0 )
{
return 0;
}
//![bilateralfilter]
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
{
bilateralFilter ( src, dst, i, i*2, i/2 );
if( display_dst( DELAY_BLUR ) != 0 )
{
return 0;
}
}
//![bilateralfilter]
/// Done
display_caption( "Done!" );
return 0;
}
/**
* @function display_caption
*/
int display_caption( const char* caption )
{
    // Render `caption` in white on a black canvas the size of the source.
    dst = Mat::zeros( src.size(), src.type() );
    putText( dst, caption,
             Point( src.cols/4, src.rows/2),
             FONT_HERSHEY_COMPLEX, 1, Scalar(255, 255, 255) );
    // Reuse display_dst so a key press aborts here too.
    return display_dst(DELAY_CAPTION);
}
/**
* @function display_dst
*/
int display_dst( int delay )
{
    // Show the current result; any key press (waitKey >= 0) signals abort.
    imshow( window_name, dst );
    const int key = waitKey( delay );
    return ( key >= 0 ) ? -1 : 0;
}

View File

@ -0,0 +1,87 @@
/**
* @file Threshold.cpp
* @brief Sample code that shows how to use the diverse threshold options offered by OpenCV
* @author OpenCV team
*/
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace cv;
using std::cout;
/// Global variables
int threshold_value = 0;
int threshold_type = 3;
int const max_value = 255;
int const max_type = 4;
int const max_binary_value = 255;
Mat src, src_gray, dst;
const char* window_name = "Threshold Demo";
const char* trackbar_type = "Type: \n 0: Binary \n 1: Binary Inverted \n 2: Truncate \n 3: To Zero \n 4: To Zero Inverted";
const char* trackbar_value = "Value";
//![Threshold_Demo]
/**
 * @function Threshold_Demo
 * @brief Trackbar callback: re-threshold src_gray with the currently selected
 *        type and value, then refresh the display window.
 *
 * Threshold types: 0 Binary, 1 Binary Inverted, 2 Truncate,
 * 3 To Zero, 4 To Zero Inverted.
 */
static void Threshold_Demo( int, void* )
{
    threshold( src_gray, dst, threshold_value, max_binary_value, threshold_type );
    imshow( window_name, dst );
}
//![Threshold_Demo]
/**
 * @function main
 * @brief Load an image, convert it to gray and drive the thresholding demo.
 */
int main( int argc, char** argv )
{
    //! [load]
    // Default input; the first CLI argument overrides it.
    String imageName( "stuff.jpg" );
    if ( argc > 1 )
        imageName = argv[1];

    src = imread( samples::findFile( imageName ), IMREAD_COLOR ); // Load an image
    if ( src.empty() )
    {
        cout << "Cannot read the image: " << imageName << std::endl;
        return -1;
    }
    cvtColor( src, src_gray, COLOR_BGR2GRAY ); // Convert the image to Gray
    //! [load]

    //! [window]
    namedWindow( window_name, WINDOW_AUTOSIZE ); // Create a window to display results
    //! [window]

    //! [trackbar]
    // One trackbar selects the threshold type, the other its value;
    // both re-run the demo on every change.
    createTrackbar( trackbar_type,
                    window_name, &threshold_type,
                    max_type, Threshold_Demo );
    createTrackbar( trackbar_value,
                    window_name, &threshold_value,
                    max_value, Threshold_Demo );
    //! [trackbar]

    Threshold_Demo( 0, 0 ); // Call the function to initialize

    /// Wait until the user finishes the program
    waitKey();
    return 0;
}

View File

@ -0,0 +1,105 @@
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/videoio.hpp"
#include <iostream>
using namespace cv;
/** Global Variables */
const int max_value_H = 360/2;    // hue is stored halved (0..180) in 8-bit images
const int max_value = 255;        // saturation / value slider range
const String window_capture_name = "Video Capture";
const String window_detection_name = "Object Detection";
int low_H = 0, low_S = 0, low_V = 0;                              // lower HSV bounds
int high_H = max_value_H, high_S = max_value, high_V = max_value; // upper HSV bounds
//! [low]
/// Trackbar callback: keep the lower hue bound strictly below the upper one.
static void on_low_H_thresh_trackbar(int, void *)
{
    if (low_H > high_H - 1)
        low_H = high_H - 1;
    setTrackbarPos("Low H", window_detection_name, low_H);
}
//! [low]
//! [high]
/// Trackbar callback: keep the upper hue bound strictly above the lower one.
static void on_high_H_thresh_trackbar(int, void *)
{
    if (high_H < low_H + 1)
        high_H = low_H + 1;
    setTrackbarPos("High H", window_detection_name, high_H);
}
//! [high]
/// Saturation bounds: same clamping scheme as the hue pair above.
static void on_low_S_thresh_trackbar(int, void *)
{
    if (low_S > high_S - 1)
        low_S = high_S - 1;
    setTrackbarPos("Low S", window_detection_name, low_S);
}
static void on_high_S_thresh_trackbar(int, void *)
{
    if (high_S < low_S + 1)
        high_S = low_S + 1;
    setTrackbarPos("High S", window_detection_name, high_S);
}
/// Value bounds: same clamping scheme as the hue pair above.
static void on_low_V_thresh_trackbar(int, void *)
{
    if (low_V > high_V - 1)
        low_V = high_V - 1;
    setTrackbarPos("Low V", window_detection_name, low_V);
}
static void on_high_V_thresh_trackbar(int, void *)
{
    if (high_V < low_V + 1)
        high_V = low_V + 1;
    setTrackbarPos("High V", window_detection_name, high_V);
}
/**
 * @brief In-range HSV object-detection demo: grabs camera frames, thresholds
 *        them with the trackbar-selected HSV bounds and shows both streams.
 *        Press 'q' or ESC to quit.
 */
int main(int argc, char* argv[])
{
//! [cap]
// Camera index may be given as the first CLI argument (default: device 0).
VideoCapture cap(argc > 1 ? atoi(argv[1]) : 0);
//! [cap]
//! [window]
namedWindow(window_capture_name);
namedWindow(window_detection_name);
//! [window]
//! [trackbar]
// Trackbars to set thresholds for HSV values
createTrackbar("Low H", window_detection_name, &low_H, max_value_H, on_low_H_thresh_trackbar);
createTrackbar("High H", window_detection_name, &high_H, max_value_H, on_high_H_thresh_trackbar);
createTrackbar("Low S", window_detection_name, &low_S, max_value, on_low_S_thresh_trackbar);
createTrackbar("High S", window_detection_name, &high_S, max_value, on_high_S_thresh_trackbar);
createTrackbar("Low V", window_detection_name, &low_V, max_value, on_low_V_thresh_trackbar);
createTrackbar("High V", window_detection_name, &high_V, max_value, on_high_V_thresh_trackbar);
//! [trackbar]
Mat frame, frame_HSV, frame_threshold;
while (true) {
//! [while]
cap >> frame;
if(frame.empty())
{
break;
}
// Convert from BGR to HSV colorspace
cvtColor(frame, frame_HSV, COLOR_BGR2HSV);
// Detect the object based on HSV Range Values
inRange(frame_HSV, Scalar(low_H, low_S, low_V), Scalar(high_H, high_S, high_V), frame_threshold);
//! [while]
//! [show]
// Show the frames
imshow(window_capture_name, frame);
imshow(window_detection_name, frame_threshold);
//! [show]
// 'q' or ESC (27) ends the loop.
char key = (char) waitKey(30);
if (key == 'q' || key == 27)
{
break;
}
}
return 0;
}

View File

@ -0,0 +1,113 @@
/**
* @brief You will learn how to segment an anisotropic image with a single local orientation by a gradient structure tensor (GST)
* @author Karpushin Vladislav, karpushin@ngs.ru, https://github.com/VladKarpushin
*/
#include <iostream>
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
using namespace cv;
using namespace std;
//! [calcGST_proto]
void calcGST(const Mat& inputImg, Mat& imgCoherencyOut, Mat& imgOrientationOut, int w);
//! [calcGST_proto]
/**
 * @brief Segment a single-orientation anisotropic texture: compute the GST
 *        coherency and orientation maps, threshold both, AND the masks and
 *        write the result plus the intermediate maps to disk.
 */
int main()
{
int W = 52; // window size is WxW
double C_Thr = 0.43; // threshold for coherency
int LowThr = 35; // threshold1 for orientation, it ranges from 0 to 180
int HighThr = 57; // threshold2 for orientation, it ranges from 0 to 180
Mat imgIn = imread("input.jpg", IMREAD_GRAYSCALE);
if (imgIn.empty()) //check whether the image is loaded or not
{
cout << "ERROR : Image cannot be loaded..!!" << endl;
return -1;
}
//! [main_extra]
//! [main]
Mat imgCoherency, imgOrientation;
calcGST(imgIn, imgCoherency, imgOrientation, W);
//! [thresholding]
// Keep pixels that are strongly oriented (coherency above C_Thr) ...
Mat imgCoherencyBin;
imgCoherencyBin = imgCoherency > C_Thr;
// ... AND whose orientation falls in the [LowThr, HighThr] degree band.
Mat imgOrientationBin;
inRange(imgOrientation, Scalar(LowThr), Scalar(HighThr), imgOrientationBin);
//! [thresholding]
//! [combining]
Mat imgBin;
imgBin = imgCoherencyBin & imgOrientationBin;
//! [combining]
//! [main]
// Stretch the diagnostic maps to 0..255 for viewing/saving.
normalize(imgCoherency, imgCoherency, 0, 255, NORM_MINMAX);
normalize(imgOrientation, imgOrientation, 0, 255, NORM_MINMAX);
imwrite("result.jpg", 0.5*(imgIn + imgBin));
imwrite("Coherency.jpg", imgCoherency);
imwrite("Orientation.jpg", imgOrientation);
//! [main_extra]
return 0;
}
//! [calcGST]
//! [calcJ_header]
/// Compute the gradient structure tensor (GST) of 'inputImg' averaged over a
/// w x w box window, producing a per-pixel coherency measure and the local
/// orientation angle in degrees.
void calcGST(const Mat& inputImg, Mat& imgCoherencyOut, Mat& imgOrientationOut, int w)
{
Mat img;
inputImg.convertTo(img, CV_32F);
// GST components calculation (start)
// J = (J11 J12; J12 J22) - GST
Mat imgDiffX, imgDiffY, imgDiffXY;
Sobel(img, imgDiffX, CV_32F, 1, 0, 3);
Sobel(img, imgDiffY, CV_32F, 0, 1, 3);
multiply(imgDiffX, imgDiffY, imgDiffXY);
//! [calcJ_header]
Mat imgDiffXX, imgDiffYY;
multiply(imgDiffX, imgDiffX, imgDiffXX);
multiply(imgDiffY, imgDiffY, imgDiffYY);
Mat J11, J22, J12; // J11, J22 and J12 are GST components
// boxFilter implements the w x w local averaging of the tensor entries.
boxFilter(imgDiffXX, J11, CV_32F, Size(w, w));
boxFilter(imgDiffYY, J22, CV_32F, Size(w, w));
boxFilter(imgDiffXY, J12, CV_32F, Size(w, w));
// GST components calculation (stop)
// eigenvalue calculation (start)
// lambda1 = 0.5*(J11 + J22 + sqrt((J11-J22)^2 + 4*J12^2))
// lambda2 = 0.5*(J11 + J22 - sqrt((J11-J22)^2 + 4*J12^2))
Mat tmp1, tmp2, tmp3, tmp4;
tmp1 = J11 + J22;
tmp2 = J11 - J22;
multiply(tmp2, tmp2, tmp2);
multiply(J12, J12, tmp3);
sqrt(tmp2 + 4.0 * tmp3, tmp4);
Mat lambda1, lambda2;
lambda1 = tmp1 + tmp4;
lambda1 = 0.5*lambda1; // biggest eigenvalue
lambda2 = tmp1 - tmp4;
lambda2 = 0.5*lambda2; // smallest eigenvalue
// eigenvalue calculation (stop)
// Coherency calculation (start)
// Coherency = (lambda1 - lambda2)/(lambda1 + lambda2)) - measure of anisotropism
// Coherency is anisotropy degree (consistency of local orientation)
divide(lambda1 - lambda2, lambda1 + lambda2, imgCoherencyOut);
// Coherency calculation (stop)
// orientation angle calculation (start)
// tan(2*Alpha) = 2*J12/(J22 - J11)
// Alpha = 0.5 atan2(2*J12/(J22 - J11))
// 'true' makes phase() return degrees; halving maps 2*Alpha back to Alpha.
phase(J22 - J11, 2.0*J12, imgOrientationOut, true);
imgOrientationOut = 0.5*imgOrientationOut;
// orientation angle calculation (stop)
}
//! [calcGST]

View File

@ -0,0 +1,185 @@
/**
* @file Drawing_1.cpp
* @brief Simple geometric drawing
* @author OpenCV team
*/
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#define w 400
using namespace cv;
/// Function headers
void MyEllipse( Mat img, double angle );
void MyFilledCircle( Mat img, Point center );
void MyPolygon( Mat img );
void MyLine( Mat img, Point start, Point end );
/**
 * @function main
 * @brief Draw an "atom" (four rotated ellipses + a filled circle) and a
 *        "rook" (polygon + rectangle + lines) on two canvases and show both.
 */
int main( void ){
//![create_images]
/// Windows names
char atom_window[] = "Drawing 1: Atom";
char rook_window[] = "Drawing 2: Rook";
/// Create black empty images
Mat atom_image = Mat::zeros( w, w, CV_8UC3 );
Mat rook_image = Mat::zeros( w, w, CV_8UC3 );
//![create_images]
/// 1. Draw a simple atom:
/// -----------------------
//![draw_atom]
/// 1.a. Creating ellipses
MyEllipse( atom_image, 90 );
MyEllipse( atom_image, 0 );
MyEllipse( atom_image, 45 );
MyEllipse( atom_image, -45 );
/// 1.b. Creating circles
MyFilledCircle( atom_image, Point( w/2, w/2) );
//![draw_atom]
/// 2. Draw a rook
/// ------------------
//![draw_rook]
/// 2.a. Create a convex polygon
MyPolygon( rook_image );
//![rectangle]
/// 2.b. Creating rectangles
rectangle( rook_image,
Point( 0, 7*w/8 ),
Point( w, w),
Scalar( 0, 255, 255 ),
FILLED,
LINE_8 );
//![rectangle]
/// 2.c. Create a few lines
MyLine( rook_image, Point( 0, 15*w/16 ), Point( w, 15*w/16 ) );
MyLine( rook_image, Point( w/4, 7*w/8 ), Point( w/4, w ) );
MyLine( rook_image, Point( w/2, 7*w/8 ), Point( w/2, w ) );
MyLine( rook_image, Point( 3*w/4, 7*w/8 ), Point( 3*w/4, w ) );
//![draw_rook]
/// 3. Display your stuff!
imshow( atom_window, atom_image );
moveWindow( atom_window, 0, 200 );
imshow( rook_window, rook_image );
moveWindow( rook_window, w, 200 );
waitKey( 0 );
return(0);
}
/// Function Declaration
/**
 * @function MyEllipse
 * @brief Draw a fixed-size blue ellipse centred in the image, rotated by
 *        'angle' degrees.
 */
//![my_ellipse]
void MyEllipse( Mat img, double angle )
{
    const int thickness = 2;
    const int lineType = 8;

    // Full 0..360 degree sweep; only the rotation angle varies per call.
    ellipse( img,
             Point( w/2, w/2 ),
             Size( w/4, w/16 ),
             angle, 0, 360,
             Scalar( 255, 0, 0 ),
             thickness, lineType );
}
//![my_ellipse]
/**
 * @function MyFilledCircle
 * @brief Draw a fixed-size filled red circle at 'center'.
 */
//![my_filled_circle]
void MyFilledCircle( Mat img, Point center )
{
    // Radius scales with the canvas size (w/32 pixels).
    circle( img, center, w/32, Scalar( 0, 0, 255 ), FILLED, LINE_8 );
}
//![my_filled_circle]
/**
 * @function MyPolygon
 * @brief Draw a simple concave polygon (rook silhouette), filled white.
 */
//![my_polygon]
void MyPolygon( Mat img )
{
    const int lineType = LINE_8;

    // Vertices of the rook outline, in drawing order.
    Point rook_points[1][20] = { {
        Point(     w/4, 7*w/8   ), Point(   3*w/4, 7*w/8   ),
        Point(   3*w/4, 13*w/16 ), Point( 11*w/16, 13*w/16 ),
        Point( 19*w/32, 3*w/8   ), Point(   3*w/4, 3*w/8   ),
        Point(   3*w/4, w/8     ), Point( 26*w/40, w/8     ),
        Point( 26*w/40, w/4     ), Point( 22*w/40, w/4     ),
        Point( 22*w/40, w/8     ), Point( 18*w/40, w/8     ),
        Point( 18*w/40, w/4     ), Point( 14*w/40, w/4     ),
        Point( 14*w/40, w/8     ), Point(     w/4, w/8     ),
        Point(     w/4, 3*w/8   ), Point( 13*w/32, 3*w/8   ),
        Point(  5*w/16, 13*w/16 ), Point(     w/4, 13*w/16 )
    } };

    const Point* ppt[1] = { rook_points[0] };
    int npt[] = { 20 };

    fillPoly( img, ppt, npt, 1, Scalar( 255, 255, 255 ), lineType );
}
//![my_polygon]
/**
 * @function MyLine
 * @brief Draw a simple black line segment from 'start' to 'end'.
 */
//![my_line]
void MyLine( Mat img, Point start, Point end )
{
    const int thickness = 2;
    const int lineType = LINE_8;

    line( img, start, end, Scalar( 0, 0, 0 ), thickness, lineType );
}
//![my_line]

View File

@ -0,0 +1,326 @@
/**
* @file Drawing_2.cpp
* @brief Simple sample code
*/
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
#include <stdio.h>
using namespace cv;
/// Global Variables
const int NUMBER = 100;          // primitives drawn per demo
const int DELAY = 5;             // ms pause between primitives
const int window_width = 900;
const int window_height = 600;
// Sampling bounds extend half a window beyond each edge so shapes may
// straddle the borders.
// NOTE(review): both y bounds are derived from window_width, not
// window_height — presumably a copy-paste slip; confirm before changing.
int x_1 = -window_width/2;
int x_2 = window_width*3/2;
int y_1 = -window_width/2;
int y_2 = window_width*3/2;
/// Function headers
static Scalar randomColor( RNG& rng );
int Drawing_Random_Lines( Mat image, char* window_name, RNG rng );
int Drawing_Random_Rectangles( Mat image, char* window_name, RNG rng );
int Drawing_Random_Ellipses( Mat image, char* window_name, RNG rng );
int Drawing_Random_Polylines( Mat image, char* window_name, RNG rng );
int Drawing_Random_Filled_Polygons( Mat image, char* window_name, RNG rng );
int Drawing_Random_Circles( Mat image, char* window_name, RNG rng );
int Displaying_Random_Text( Mat image, char* window_name, RNG rng );
int Displaying_Big_End( Mat image, char* window_name, RNG rng );
/**
 * @function main
 * @brief Runs each random-drawing demo in sequence; a key press in any demo
 *        aborts the whole program.
 */
int main( void )
{
int c;
/// Start creating a window
char window_name[] = "Drawing_2 Tutorial";
/// Also create a random object (RNG)
// NOTE(review): the demo helpers take RNG by value, so each one starts from
// a copy of this generator's current (never-advanced) state — confirm the
// repeated sequences are intended.
RNG rng( 0xFFFFFFFF );
/// Initialize a matrix filled with zeros
Mat image = Mat::zeros( window_height, window_width, CV_8UC3 );
/// Show it in a window during DELAY ms
imshow( window_name, image );
waitKey( DELAY );
/// Now, let's draw some lines
c = Drawing_Random_Lines(image, window_name, rng);
if( c != 0 ) return 0;
/// Go on drawing, this time nice rectangles
c = Drawing_Random_Rectangles(image, window_name, rng);
if( c != 0 ) return 0;
/// Draw some ellipses
c = Drawing_Random_Ellipses( image, window_name, rng );
if( c != 0 ) return 0;
/// Now some polylines
c = Drawing_Random_Polylines( image, window_name, rng );
if( c != 0 ) return 0;
/// Draw filled polygons
c = Drawing_Random_Filled_Polygons( image, window_name, rng );
if( c != 0 ) return 0;
/// Draw circles
c = Drawing_Random_Circles( image, window_name, rng );
if( c != 0 ) return 0;
/// Display text in random positions
c = Displaying_Random_Text( image, window_name, rng );
if( c != 0 ) return 0;
/// Displaying the big end!
c = Displaying_Big_End( image, window_name, rng );
if( c != 0 ) return 0;
waitKey(0);
return 0;
}
/// Function definitions
/**
 * @function randomColor
 * @brief Produces a random color given a random object
 */
static Scalar randomColor( RNG& rng )
{
    // A single 32-bit draw supplies all three 8-bit channels.
    int icolor = (unsigned) rng;
    return Scalar( icolor & 255, (icolor >> 8) & 255, (icolor >> 16) & 255 );
}
/**
 * @function Drawing_Random_Lines
 * @brief Draws NUMBER random lines, redrawing after each; a key press aborts.
 * @return 0 when finished, -1 if a key was pressed
 */
int Drawing_Random_Lines( Mat image, char* window_name, RNG rng )
{
    Point pt1, pt2;

    for ( int i = 0; i < NUMBER; i++ )
    {
        // Endpoints are sampled from a region larger than the window so
        // lines may cross its borders.
        pt1.x = rng.uniform( x_1, x_2 );
        pt1.y = rng.uniform( y_1, y_2 );
        pt2.x = rng.uniform( x_1, x_2 );
        pt2.y = rng.uniform( y_1, y_2 );

        line( image, pt1, pt2, randomColor(rng), rng.uniform(1, 10), 8 );

        imshow( window_name, image );
        if ( waitKey( DELAY ) >= 0 )
            return -1;
    }
    return 0;
}
/**
 * @function Drawing_Random_Rectangles
 * @brief Draws NUMBER random rectangles. Thickness is drawn once up front;
 *        MAX(thickness, -1) maps negative draws to FILLED rectangles.
 * @return 0 when finished, -1 if a key was pressed
 */
int Drawing_Random_Rectangles( Mat image, char* window_name, RNG rng )
{
    Point pt1, pt2;
    const int lineType = 8;
    const int thickness = rng.uniform( -3, 10 );

    for ( int i = 0; i < NUMBER; i++ )
    {
        pt1.x = rng.uniform( x_1, x_2 );
        pt1.y = rng.uniform( y_1, y_2 );
        pt2.x = rng.uniform( x_1, x_2 );
        pt2.y = rng.uniform( y_1, y_2 );

        rectangle( image, pt1, pt2, randomColor(rng), MAX( thickness, -1 ), lineType );

        imshow( window_name, image );
        if ( waitKey( DELAY ) >= 0 )
            return -1;
    }
    return 0;
}
/**
 * @function Drawing_Random_Ellipses
 * @brief Draws NUMBER random elliptic arcs (each sweeps 300 degrees).
 * @return 0 when finished, -1 if a key was pressed
 */
int Drawing_Random_Ellipses( Mat image, char* window_name, RNG rng )
{
    const int lineType = 8;

    for ( int i = 0; i < NUMBER; i++ )
    {
        Point center;
        center.x = rng.uniform( x_1, x_2 );
        center.y = rng.uniform( y_1, y_2 );

        Size axes;
        axes.width = rng.uniform( 0, 200 );
        axes.height = rng.uniform( 0, 200 );

        double angle = rng.uniform( 0, 180 );

        // Arc from angle-100 to angle+200; a negative thickness fills it.
        ellipse( image, center, axes, angle, angle - 100, angle + 200,
                 randomColor(rng), rng.uniform(-1,9), lineType );

        imshow( window_name, image );
        if ( waitKey( DELAY ) >= 0 )
            return -1;
    }
    return 0;
}
/**
 * @function Drawing_Random_Polylines
 * @brief Draws NUMBER pairs of random closed triangles as stroked polylines.
 * @return 0 when finished, -1 if a key was pressed
 */
int Drawing_Random_Polylines( Mat image, char* window_name, RNG rng )
{
    const int lineType = 8;

    for ( int i = 0; i < NUMBER; i++ )
    {
        // Two triangles; vertices sampled x-then-y, in row order, matching
        // the generator draw order of the original element-wise version.
        Point pt[2][3];
        for ( int j = 0; j < 2; j++ )
            for ( int k = 0; k < 3; k++ )
            {
                pt[j][k].x = rng.uniform( x_1, x_2 );
                pt[j][k].y = rng.uniform( y_1, y_2 );
            }

        const Point* ppt[2] = { pt[0], pt[1] };
        int npt[] = { 3, 3 };

        polylines( image, ppt, npt, 2, true, randomColor(rng), rng.uniform(1,10), lineType );

        imshow( window_name, image );
        if ( waitKey( DELAY ) >= 0 )
            return -1;
    }
    return 0;
}
/**
 * @function Drawing_Random_Filled_Polygons
 * @brief Draws NUMBER pairs of random filled triangles.
 * @return 0 when finished, -1 if a key was pressed
 */
int Drawing_Random_Filled_Polygons( Mat image, char* window_name, RNG rng )
{
    const int lineType = 8;

    for ( int i = 0; i < NUMBER; i++ )
    {
        // Two triangles; vertices sampled x-then-y, in row order, matching
        // the generator draw order of the original element-wise version.
        Point pt[2][3];
        for ( int j = 0; j < 2; j++ )
            for ( int k = 0; k < 3; k++ )
            {
                pt[j][k].x = rng.uniform( x_1, x_2 );
                pt[j][k].y = rng.uniform( y_1, y_2 );
            }

        const Point* ppt[2] = { pt[0], pt[1] };
        int npt[] = { 3, 3 };

        fillPoly( image, ppt, npt, 2, randomColor(rng), lineType );

        imshow( window_name, image );
        if ( waitKey( DELAY ) >= 0 )
            return -1;
    }
    return 0;
}
/**
 * @function Drawing_Random_Circles
 * @brief Draws NUMBER random circles (a negative thickness draw fills them).
 * @return 0 when finished, -1 if a key was pressed
 */
int Drawing_Random_Circles( Mat image, char* window_name, RNG rng )
{
    const int lineType = 8;

    for ( int i = 0; i < NUMBER; i++ )
    {
        Point center;
        center.x = rng.uniform( x_1, x_2 );
        center.y = rng.uniform( y_1, y_2 );

        circle( image, center, rng.uniform(0, 300), randomColor(rng),
                rng.uniform(-1, 9), lineType );

        imshow( window_name, image );
        if ( waitKey( DELAY ) >= 0 )
            return -1;
    }
    return 0;
}
/**
 * @function Displaying_Random_Text
 * @brief Draws NUMBER random text strings with random font, scale, color and
 *        thickness.
 *
 * Fix: the loop previously started at i = 1, rendering only NUMBER-1 strings
 * — inconsistent with every other demo in this file, which iterates NUMBER
 * times from 0.
 * @return 0 when finished, -1 if a key was pressed
 */
int Displaying_Random_Text( Mat image, char* window_name, RNG rng )
{
    int lineType = 8;

    for ( int i = 0; i < NUMBER; i++ )
    {
        Point org;
        org.x = rng.uniform(x_1, x_2);
        org.y = rng.uniform(y_1, y_2);

        // Random font face (0..7), scale in [0.1, 5.05), thickness in [1, 9].
        putText( image, "Testing text rendering", org, rng.uniform(0,8),
                 rng.uniform(0,100)*0.05+0.1, randomColor(rng), rng.uniform(1, 10), lineType);

        imshow( window_name, image );
        if( waitKey(DELAY) >= 0 )
        { return -1; }
    }
    return 0;
}
/**
 * @function Displaying_Big_End
 * @brief Fades the canvas to black while fading the closing banner in.
 * @return 0 when finished, -1 if a key was pressed
 */
int Displaying_Big_End( Mat image, char* window_name, RNG )
{
    const Size textsize = getTextSize("OpenCV forever!", FONT_HERSHEY_COMPLEX, 3, 5, 0);
    const Point org((window_width - textsize.width)/2, (window_height - textsize.height)/2);
    const int lineType = 8;

    Mat image2;
    for ( int i = 0; i < 255; i += 2 )
    {
        // Darken the backdrop while the text color shifts towards white-on-red.
        image2 = image - Scalar::all(i);
        putText( image2, "OpenCV forever!", org, FONT_HERSHEY_COMPLEX, 3,
                 Scalar(i, i, 255), 5, lineType );

        imshow( window_name, image2 );
        if ( waitKey(DELAY) >= 0 )
            return -1;
    }
    return 0;
}

View File

@ -0,0 +1,98 @@
#include <iostream>
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
// we're NOT "using namespace std;" here, to avoid collisions between the beta variable and std::beta in c++17
using std::cout;
using std::endl;
using namespace cv;
namespace
{
/** Global Variables */
int alpha = 100;     // contrast slider: gain = alpha / 100 (1.0 at start)
int beta = 100;      // brightness slider: bias = beta - 100 (0 at start)
int gamma_cor = 100; // gamma slider: gamma = gamma_cor / 100 (1.0 at start)
Mat img_original, img_corrected, img_gamma_corrected;
/// Apply the linear transform out = alpha_ * in + beta_ and display the
/// original next to the adjusted image in the shared preview canvas.
void basicLinearTransform(const Mat &img, const double alpha_, const int beta_)
{
    Mat res;
    img.convertTo(res, -1, alpha_, beta_); // -1: keep the source depth

    hconcat(img, res, img_corrected);
    imshow("Brightness and contrast adjustments", img_corrected);
}
/// Apply gamma correction through a precomputed 256-entry lookup table and
/// display the original next to the corrected image.
/// @param gamma_ exponent applied to normalized intensities; must be >= 0.
void gammaCorrection(const Mat &img, const double gamma_)
{
    CV_Assert(gamma_ >= 0);
    //! [changing-contrast-brightness-gamma-correction]
    // Build the per-intensity mapping i -> 255 * (i/255)^gamma once per call.
    Mat lookUpTable(1, 256, CV_8U);
    uchar* p = lookUpTable.ptr();
    for( int i = 0; i < 256; ++i)
        p[i] = saturate_cast<uchar>(pow(i / 255.0, gamma_) * 255.0);

    // LUT allocates its destination itself; the previous img.clone() here was
    // a needless full deep copy that was immediately overwritten.
    Mat res;
    LUT(img, lookUpTable, res);
    //! [changing-contrast-brightness-gamma-correction]
    hconcat(img, res, img_gamma_corrected);
    imshow("Gamma correction", img_gamma_corrected);
}
/// Trackbar handlers: map the raw slider positions to gain alpha in [0, 5]
/// and bias beta in [-100, 100], then re-render the preview.
void on_linear_transform_alpha_trackbar(int, void *)
{
    basicLinearTransform(img_original, alpha / 100.0, beta - 100);
}
void on_linear_transform_beta_trackbar(int, void *)
{
    basicLinearTransform(img_original, alpha / 100.0, beta - 100);
}
/// Trackbar handler: slider position 100 corresponds to gamma = 1 (identity).
void on_gamma_correction_trackbar(int, void *)
{
    gammaCorrection(img_original, gamma_cor / 100.0);
}
}
/**
 * @brief Entry point: loads the input image, builds the side-by-side preview
 *        canvases, wires up the three trackbars and, on exit, writes the
 *        final previews to disk.
 */
int main( int argc, char** argv )
{
CommandLineParser parser( argc, argv, "{@input | lena.jpg | input image}" );
img_original = imread( samples::findFile( parser.get<String>( "@input" ) ) );
if( img_original.empty() )
{
cout << "Could not open or find the image!\n" << endl;
cout << "Usage: " << argv[0] << " <Input image>" << endl;
return -1;
}
// Each preview canvas holds the original and the processed image side by side.
img_corrected = Mat(img_original.rows, img_original.cols*2, img_original.type());
img_gamma_corrected = Mat(img_original.rows, img_original.cols*2, img_original.type());
hconcat(img_original, img_original, img_corrected);
hconcat(img_original, img_original, img_gamma_corrected);
namedWindow("Brightness and contrast adjustments");
namedWindow("Gamma correction");
// Slider position 100 is the identity setting for all three controls.
createTrackbar("Alpha gain (contrast)", "Brightness and contrast adjustments", &alpha, 500, on_linear_transform_alpha_trackbar);
createTrackbar("Beta bias (brightness)", "Brightness and contrast adjustments", &beta, 200, on_linear_transform_beta_trackbar);
createTrackbar("Gamma correction", "Gamma correction", &gamma_cor, 200, on_gamma_correction_trackbar);
on_linear_transform_alpha_trackbar(0, 0);
on_gamma_correction_trackbar(0, 0);
waitKey();
imwrite("linear_transform_correction.png", img_corrected);
imwrite("gamma_correction.png", img_gamma_corrected);
return 0;
}

View File

@ -0,0 +1,139 @@
/**
* @file Morphology_3(Extract_Lines).cpp
* @brief Use morphology transformations for extracting horizontal and vertical lines sample code
* @author OpenCV team
*/
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
void show_wait_destroy(const char* winname, cv::Mat img);
using namespace std;
using namespace cv;
/**
 * @brief Music-sheet line extraction demo: binarize the input, isolate
 *        horizontal and vertical strokes with directional morphological
 *        structuring elements, then smooth the vertical (notes) result.
 */
int main(int argc, char** argv)
{
//! [load_image]
CommandLineParser parser(argc, argv, "{@input | notes.png | input image}");
Mat src = imread( samples::findFile( parser.get<String>("@input") ), IMREAD_COLOR);
if (src.empty())
{
cout << "Could not open or find the image!\n" << endl;
cout << "Usage: " << argv[0] << " <Input image>" << endl;
return -1;
}
// Show source image
imshow("src", src);
//! [load_image]
//! [gray]
// Transform source image to gray if it is not already
Mat gray;
if (src.channels() == 3)
{
cvtColor(src, gray, COLOR_BGR2GRAY);
}
else
{
gray = src;
}
// Show gray image
show_wait_destroy("gray", gray);
//! [gray]
//! [bin]
// Apply adaptiveThreshold at the bitwise_not of gray, notice the ~ symbol
Mat bw;
adaptiveThreshold(~gray, bw, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 15, -2);
// Show binary image
show_wait_destroy("binary", bw);
//! [bin]
//! [init]
// Create the images that will use to extract the horizontal and vertical lines
Mat horizontal = bw.clone();
Mat vertical = bw.clone();
//! [init]
//! [horiz]
// Specify size on horizontal axis
int horizontal_size = horizontal.cols / 30;
// Create structure element for extracting horizontal lines through morphology operations
Mat horizontalStructure = getStructuringElement(MORPH_RECT, Size(horizontal_size, 1));
// Apply morphology operations (erode then dilate = opening: keeps only
// runs at least horizontal_size pixels wide)
erode(horizontal, horizontal, horizontalStructure, Point(-1, -1));
dilate(horizontal, horizontal, horizontalStructure, Point(-1, -1));
// Show extracted horizontal lines
show_wait_destroy("horizontal", horizontal);
//! [horiz]
//! [vert]
// Specify size on vertical axis
int vertical_size = vertical.rows / 30;
// Create structure element for extracting vertical lines through morphology operations
Mat verticalStructure = getStructuringElement(MORPH_RECT, Size(1, vertical_size));
// Apply morphology operations
erode(vertical, vertical, verticalStructure, Point(-1, -1));
dilate(vertical, vertical, verticalStructure, Point(-1, -1));
// Show extracted vertical lines
show_wait_destroy("vertical", vertical);
//! [vert]
//! [smooth]
// Inverse vertical image
bitwise_not(vertical, vertical);
show_wait_destroy("vertical_bit", vertical);
// Extract edges and smooth image according to the logic
// 1. extract edges
// 2. dilate(edges)
// 3. src.copyTo(smooth)
// 4. blur smooth img
// 5. smooth.copyTo(src, edges)
// Step 1
Mat edges;
adaptiveThreshold(vertical, edges, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 3, -2);
show_wait_destroy("edges", edges);
// Step 2
Mat kernel = Mat::ones(2, 2, CV_8UC1);
dilate(edges, edges, kernel);
show_wait_destroy("dilate", edges);
// Step 3
Mat smooth;
vertical.copyTo(smooth);
// Step 4
blur(smooth, smooth, Size(2, 2));
// Step 5: copy the blurred pixels back only where edges were detected
smooth.copyTo(vertical, edges);
// Show final result
show_wait_destroy("smooth - final", vertical);
//! [smooth]
return 0;
}
/// Show 'img' in a window at a fixed screen position, block until any key
/// is pressed, then close that window.
void show_wait_destroy(const char* winname, cv::Mat img) {
    imshow(winname, img);
    moveWindow(winname, 500, 0);

    waitKey(0);
    destroyWindow(winname);
}

View File

@ -0,0 +1,184 @@
/**
* @brief You will learn how to recover an image with motion blur distortion using a Wiener filter
* @author Karpushin Vladislav, karpushin@ngs.ru, https://github.com/VladKarpushin
*/
#include <iostream>
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
using namespace cv;
using namespace std;
void help();
void calcPSF(Mat& outputImg, Size filterSize, int len, double theta);
void fftshift(const Mat& inputImg, Mat& outputImg);
void filter2DFreq(const Mat& inputImg, Mat& outputImg, const Mat& H);
void calcWnrFilter(const Mat& input_h_PSF, Mat& output_G, double nsr);
void edgetaper(const Mat& inputImg, Mat& outputImg, double gamma = 5.0, double beta = 0.2);
// Command-line options: input image, length/angle of the motion PSF and the
// assumed signal-to-noise ratio for the Wiener filter.
const String keys =
"{help h usage ? | | print this message }"
"{image |input.png | input image name }"
"{LEN |125 | length of a motion }"
"{THETA |0 | angle of a motion in degrees }"
"{SNR |700 | signal to noise ratio }"
;
/**
 * @brief Motion-deblur demo entry point: builds a linear-motion PSF, derives
 *        the Wiener filter Hw from it and restores the (edge-tapered) input.
 */
int main(int argc, char *argv[])
{
    help();
    CommandLineParser parser(argc, argv, keys);
    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }
    int LEN = parser.get<int>("LEN");
    double THETA = parser.get<double>("THETA");
    int snr = parser.get<int>("SNR");
    string strInFileName = parser.get<String>("image");
    if (!parser.check())
    {
        parser.printErrors();
        return 0;
    }
    Mat imgIn;
    imgIn = imread(strInFileName, IMREAD_GRAYSCALE);
    if (imgIn.empty()) //check whether the image is loaded or not
    {
        cout << "ERROR : Image cannot be loaded..!!" << endl;
        return -1;
    }
    Mat imgOut;
    //! [main]
    // it needs to process even image only
    Rect roi = Rect(0, 0, imgIn.cols & -2, imgIn.rows & -2);
    //Hw calculation (start)
    Mat Hw, h;
    calcPSF(h, roi.size(), LEN, THETA);
    calcWnrFilter(h, Hw, 1.0 / double(snr));
    //Hw calculation (stop)
    imgIn.convertTo(imgIn, CV_32F);
    edgetaper(imgIn, imgIn);
    // filtering (start)
    filter2DFreq(imgIn(roi), imgOut, Hw);
    // filtering (stop)
    //! [main]
    // Fix: normalize while still in floating point, THEN quantize to 8 bits.
    // Converting to CV_8U first saturates the filter output to [0, 255]
    // before the min-max stretch, discarding dynamic range.
    normalize(imgOut, imgOut, 0, 255, NORM_MINMAX);
    imgOut.convertTo(imgOut, CV_8U);
    imwrite("result.jpg", imgOut);
    return 0;
}
/// Print a short banner describing this sample.
void help()
{
    cout << "2018-08-14" << endl
         << "Motion_deblur_v2" << endl
         << "You will learn how to recover an image with motion blur distortion using a Wiener filter" << endl;
}
//! [calcPSF]
/// Build a normalized point-spread function for linear motion blur: a line
/// segment of length 'len' at angle 'theta' degrees, rendered as a
/// degenerate filled ellipse and scaled so its elements sum to 1.
void calcPSF(Mat& outputImg, Size filterSize, int len, double theta)
{
    Mat h(filterSize, CV_32F, Scalar(0));
    const Point center(filterSize.width / 2, filterSize.height / 2);
    ellipse(h, center, Size(0, cvRound(float(len) / 2.0)), 90.0 - theta, 0, 360, Scalar(255), FILLED);
    outputImg = h / sum(h)[0];
}
//! [calcPSF]
//! [fftshift]
/// Swap the image quadrants diagonally (top-left <-> bottom-right,
/// top-right <-> bottom-left) to move the zero-frequency term between corner
/// and center. Operates on a clone; with odd dimensions the last row/column
/// is left untouched (callers crop to even size with `cols & -2`).
void fftshift(const Mat& inputImg, Mat& outputImg)
{
outputImg = inputImg.clone();
int cx = outputImg.cols / 2;
int cy = outputImg.rows / 2;
Mat q0(outputImg, Rect(0, 0, cx, cy));
Mat q1(outputImg, Rect(cx, 0, cx, cy));
Mat q2(outputImg, Rect(0, cy, cx, cy));
Mat q3(outputImg, Rect(cx, cy, cx, cy));
Mat tmp;
// Exchange q0 <-> q3 and q1 <-> q2 through a temporary.
q0.copyTo(tmp);
q3.copyTo(q0);
tmp.copyTo(q3);
q1.copyTo(tmp);
q2.copyTo(q1);
tmp.copyTo(q2);
}
//! [fftshift]
//! [filter2DFreq]
/// Frequency-domain filtering: DFT the (real) input, multiply its spectrum
/// element-wise by H, inverse-transform and return the real plane.
void filter2DFreq(const Mat& inputImg, Mat& outputImg, const Mat& H)
{
Mat planes[2] = { Mat_<float>(inputImg.clone()), Mat::zeros(inputImg.size(), CV_32F) };
Mat complexI;
merge(planes, 2, complexI);
dft(complexI, complexI, DFT_SCALE);
Mat planesH[2] = { Mat_<float>(H.clone()), Mat::zeros(H.size(), CV_32F) };
Mat complexH;
merge(planesH, 2, complexH);
Mat complexIH;
// Per-element complex multiplication of the two spectra.
mulSpectrums(complexI, complexH, complexIH, 0);
idft(complexIH, complexIH);
split(complexIH, planes);
// The real part of the inverse transform is the filtered image.
outputImg = planes[0];
}
//! [filter2DFreq]
//! [calcWnrFilter]
/// Derive the Wiener restoration filter G = H / (|H|^2 + nsr) from the PSF.
/// Only the real plane of the (centered) PSF spectrum is used here.
/// @param nsr  noise-to-signal power ratio (1/SNR)
void calcWnrFilter(const Mat& input_h_PSF, Mat& output_G, double nsr)
{
Mat h_PSF_shifted;
fftshift(input_h_PSF, h_PSF_shifted);
Mat planes[2] = { Mat_<float>(h_PSF_shifted.clone()), Mat::zeros(h_PSF_shifted.size(), CV_32F) };
Mat complexI;
merge(planes, 2, complexI);
dft(complexI, complexI);
split(complexI, planes);
Mat denom;
pow(abs(planes[0]), 2, denom);
denom += nsr;
divide(planes[0], denom, output_G);
}
//! [calcWnrFilter]
//! [edgetaper]
/// Soften the image borders with a separable tanh-based window (outer product
/// of a horizontal and a vertical profile) so periodic-boundary artifacts of
/// the subsequent frequency-domain filtering are reduced.
/// @param gamma  width of the flat, pass-through central part of the window
/// @param beta   steepness of the roll-off towards the edges
void edgetaper(const Mat& inputImg, Mat& outputImg, double gamma, double beta)
{
int Nx = inputImg.cols;
int Ny = inputImg.rows;
Mat w1(1, Nx, CV_32F, Scalar(0));
Mat w2(Ny, 1, CV_32F, Scalar(0));
float* p1 = w1.ptr<float>(0);
float* p2 = w2.ptr<float>(0);
// Horizontal profile sampled over [-pi, pi).
float dx = float(2.0 * CV_PI / Nx);
float x = float(-CV_PI);
for (int i = 0; i < Nx; i++)
{
p1[i] = float(0.5 * (tanh((x + gamma / 2) / beta) - tanh((x - gamma / 2) / beta)));
x += dx;
}
// Vertical profile sampled over [-pi, pi).
float dy = float(2.0 * CV_PI / Ny);
float y = float(-CV_PI);
for (int i = 0; i < Ny; i++)
{
p2[i] = float(0.5 * (tanh((y + gamma / 2) / beta) - tanh((y - gamma / 2) / beta)));
y += dy;
}
// Full 2-D window is the outer product of the two 1-D profiles.
Mat w = w2 * w1;
multiply(inputImg, w, outputImg);
}
//! [edgetaper]

View File

@ -0,0 +1,149 @@
/**
* @brief You will learn how to recover an out-of-focus image by Wiener filter
* @author Karpushin Vladislav, karpushin@ngs.ru, https://github.com/VladKarpushin
*/
#include <iostream>
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
using namespace cv;
using namespace std;
void help();
void calcPSF(Mat& outputImg, Size filterSize, int R);
void fftshift(const Mat& inputImg, Mat& outputImg);
void filter2DFreq(const Mat& inputImg, Mat& outputImg, const Mat& H);
void calcWnrFilter(const Mat& input_h_PSF, Mat& output_G, double nsr);
// Command-line options: input image, radius of the defocus PSF and the
// assumed signal-to-noise ratio for the Wiener filter.
const String keys =
"{help h usage ? | | print this message }"
"{image |original.JPG | input image name }"
"{R |53 | radius }"
"{SNR |5200 | signal to noise ratio}"
;
/**
 * @brief Out-of-focus deblur demo entry point: builds a circular (defocus)
 *        PSF of radius R, derives the Wiener filter Hw and restores the input.
 */
int main(int argc, char *argv[])
{
    help();
    CommandLineParser parser(argc, argv, keys);
    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }
    int R = parser.get<int>("R");
    int snr = parser.get<int>("SNR");
    string strInFileName = parser.get<String>("image");
    if (!parser.check())
    {
        parser.printErrors();
        return 0;
    }
    Mat imgIn;
    imgIn = imread(strInFileName, IMREAD_GRAYSCALE);
    if (imgIn.empty()) //check whether the image is loaded or not
    {
        cout << "ERROR : Image cannot be loaded..!!" << endl;
        return -1;
    }
    Mat imgOut;
    //! [main]
    // it needs to process even image only
    Rect roi = Rect(0, 0, imgIn.cols & -2, imgIn.rows & -2);
    //Hw calculation (start)
    Mat Hw, h;
    calcPSF(h, roi.size(), R);
    calcWnrFilter(h, Hw, 1.0 / double(snr));
    //Hw calculation (stop)
    // filtering (start)
    filter2DFreq(imgIn(roi), imgOut, Hw);
    // filtering (stop)
    //! [main]
    // Fix: normalize while still in floating point, THEN quantize to 8 bits.
    // Converting to CV_8U first saturates the filter output to [0, 255]
    // before the min-max stretch, discarding dynamic range.
    normalize(imgOut, imgOut, 0, 255, NORM_MINMAX);
    imgOut.convertTo(imgOut, CV_8U);
    imwrite("result.jpg", imgOut);
    return 0;
}
/// Print a short banner describing this sample.
void help()
{
    cout << "2018-07-12" << endl
         << "DeBlur_v8" << endl
         << "You will learn how to recover an out-of-focus image by Wiener filter" << endl;
}
//! [calcPSF]
/// Build a normalized circular (defocus) point-spread function of radius R,
/// scaled so its elements sum to 1.
void calcPSF(Mat& outputImg, Size filterSize, int R)
{
    Mat h(filterSize, CV_32F, Scalar(0));
    const Point center(filterSize.width / 2, filterSize.height / 2);
    circle(h, center, R, 255, -1, 8);
    outputImg = h / sum(h)[0];
}
//! [calcPSF]
//! [fftshift]
/// Swap the image quadrants diagonally (top-left <-> bottom-right,
/// top-right <-> bottom-left) to move the zero-frequency term between corner
/// and center. Operates on a clone; with odd dimensions the last row/column
/// is left untouched (callers crop to even size with `cols & -2`).
void fftshift(const Mat& inputImg, Mat& outputImg)
{
outputImg = inputImg.clone();
int cx = outputImg.cols / 2;
int cy = outputImg.rows / 2;
Mat q0(outputImg, Rect(0, 0, cx, cy));
Mat q1(outputImg, Rect(cx, 0, cx, cy));
Mat q2(outputImg, Rect(0, cy, cx, cy));
Mat q3(outputImg, Rect(cx, cy, cx, cy));
Mat tmp;
// Exchange q0 <-> q3 and q1 <-> q2 through a temporary.
q0.copyTo(tmp);
q3.copyTo(q0);
tmp.copyTo(q3);
q1.copyTo(tmp);
q2.copyTo(q1);
tmp.copyTo(q2);
}
//! [fftshift]
//! [filter2DFreq]
/**
 * @brief Applies a frequency-domain filter: outputImg = Re(IDFT(DFT(I) * H)).
 * @param inputImg  single-channel image (converted to float internally)
 * @param outputImg filtered result, CV_32F (real part of the inverse DFT)
 * @param H         filter spectrum; must match inputImg in size
 */
void filter2DFreq(const Mat& inputImg, Mat& outputImg, const Mat& H)
{
    // Pack the real image with a zero imaginary plane and take its DFT.
    Mat planes[2] = { Mat_<float>(inputImg.clone()), Mat::zeros(inputImg.size(), CV_32F) };
    Mat complexI;
    merge(planes, 2, complexI);
    dft(complexI, complexI, DFT_SCALE);
    // Do the same for the filter so both spectra are two-channel (re, im).
    Mat planesH[2] = { Mat_<float>(H.clone()), Mat::zeros(H.size(), CV_32F) };
    Mat complexH;
    merge(planesH, 2, complexH);
    Mat complexIH;
    // Per-element complex multiplication of the two spectra.
    mulSpectrums(complexI, complexH, complexIH, 0);
    idft(complexIH, complexIH);
    split(complexIH, planes);
    // Keep only the real plane of the inverse transform.
    outputImg = planes[0];
}
//! [filter2DFreq]
//! [calcWnrFilter]
/**
 * @brief Builds the Wiener filter spectrum Hw = H / (|H|^2 + 1/SNR) from the PSF.
 * @param input_h_PSF spatial-domain point-spread function (centred, unit sum)
 * @param output_G    resulting real-valued filter spectrum
 * @param nsr         noise-to-signal power ratio (1/SNR)
 * @note Only the real plane of the PSF spectrum is used — presumably because the
 *       centred PSF is symmetric so its DFT is (nearly) real; TODO confirm.
 */
void calcWnrFilter(const Mat& input_h_PSF, Mat& output_G, double nsr)
{
    Mat h_PSF_shifted;
    // Move the PSF's centre to the origin before transforming.
    fftshift(input_h_PSF, h_PSF_shifted);
    Mat planes[2] = { Mat_<float>(h_PSF_shifted.clone()), Mat::zeros(h_PSF_shifted.size(), CV_32F) };
    Mat complexI;
    merge(planes, 2, complexI);
    dft(complexI, complexI);
    split(complexI, planes);
    Mat denom;
    // denom = |Re(H)|^2 + 1/SNR; the additive term regularizes near-zero frequencies.
    pow(abs(planes[0]), 2, denom);
    denom += nsr;
    divide(planes[0], denom, output_G);
}
//! [calcWnrFilter]

View File

@ -0,0 +1,148 @@
/**
* @brief You will learn how to remove periodic noise in the Fourier domain
* @author Karpushin Vladislav, karpushin@ngs.ru, https://github.com/VladKarpushin
*/
#include <iostream>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
using namespace cv;
using namespace std;
void fftshift(const Mat& inputImg, Mat& outputImg);
void filter2DFreq(const Mat& inputImg, Mat& outputImg, const Mat& H);
void synthesizeFilterH(Mat& inputOutput_H, Point center, int radius);
void calcPSD(const Mat& inputImg, Mat& outputImg, int flag = 0);
/**
 * @function main
 * @brief Removes periodic noise from "input.jpg" with frequency-domain notch
 *        reject filters; writes result.jpg, PSD.jpg and filter.jpg.
 */
int main()
{
    Mat imgIn = imread("input.jpg", IMREAD_GRAYSCALE);
    if (imgIn.empty()) //check whether the image is loaded or not
    {
        cout << "ERROR : Image cannot be loaded..!!" << endl;
        return -1;
    }
    imgIn.convertTo(imgIn, CV_32F);
//! [main]
    // it needs to process even image only
    Rect roi = Rect(0, 0, imgIn.cols & -2, imgIn.rows & -2);
    imgIn = imgIn(roi);
    // PSD calculation (start)
    Mat imgPSD;
    calcPSD(imgIn, imgPSD);
    fftshift(imgPSD, imgPSD);
    normalize(imgPSD, imgPSD, 0, 255, NORM_MINMAX);
    // PSD calculation (stop)
    //H calculation (start)
    Mat H = Mat(roi.size(), CV_32F, Scalar(1));
    const int r = 21;
    // Notch centres are tuned to the spectral peaks of the sample input.jpg;
    // for a different image, pick them by inspecting PSD.jpg.
    synthesizeFilterH(H, Point(705, 458), r);
    synthesizeFilterH(H, Point(850, 391), r);
    synthesizeFilterH(H, Point(993, 325), r);
    //H calculation (stop)
    // filtering (start)
    Mat imgOut;
    // H was designed on the centred spectrum; shift it back before filtering.
    fftshift(H, H);
    filter2DFreq(imgIn, imgOut, H);
    // filtering (stop)
//! [main]
    imgOut.convertTo(imgOut, CV_8U);
    normalize(imgOut, imgOut, 0, 255, NORM_MINMAX);
    imwrite("result.jpg", imgOut);
    imwrite("PSD.jpg", imgPSD);
    fftshift(H, H);
    normalize(H, H, 0, 255, NORM_MINMAX);
    imwrite("filter.jpg", H);
    return 0;
}
//! [fftshift]
/**
 * @brief Rearranges the quadrants of an image so the origin (zero frequency)
 *        moves to the centre: diagonally opposite quadrants are exchanged.
 * @note Assumes even dimensions; callers crop to even sizes first.
 */
void fftshift(const Mat& inputImg, Mat& outputImg)
{
    outputImg = inputImg.clone();
    int cx = outputImg.cols / 2;
    int cy = outputImg.rows / 2;
    Mat q0(outputImg, Rect(0, 0, cx, cy));      // top-left
    Mat q1(outputImg, Rect(cx, 0, cx, cy));     // top-right
    Mat q2(outputImg, Rect(0, cy, cx, cy));     // bottom-left
    Mat q3(outputImg, Rect(cx, cy, cx, cy));    // bottom-right
    Mat tmp;
    // swap top-left with bottom-right
    q0.copyTo(tmp);
    q3.copyTo(q0);
    tmp.copyTo(q3);
    // swap top-right with bottom-left
    q1.copyTo(tmp);
    q2.copyTo(q1);
    tmp.copyTo(q2);
}
//! [fftshift]
//! [filter2DFreq]
void filter2DFreq(const Mat& inputImg, Mat& outputImg, const Mat& H)
{
Mat planes[2] = { Mat_<float>(inputImg.clone()), Mat::zeros(inputImg.size(), CV_32F) };
Mat complexI;
merge(planes, 2, complexI);
dft(complexI, complexI, DFT_SCALE);
Mat planesH[2] = { Mat_<float>(H.clone()), Mat::zeros(H.size(), CV_32F) };
Mat complexH;
merge(planesH, 2, complexH);
Mat complexIH;
mulSpectrums(complexI, complexH, complexIH, 0);
idft(complexIH, complexIH);
split(complexIH, planes);
outputImg = planes[0];
}
//! [filter2DFreq]
//! [synthesizeFilterH]
/**
 * @brief Punches a notch-reject hole (value 0) into filter H at `center`
 *        and at its three mirror positions, keeping the filter symmetric
 *        with respect to the DFT layout.
 * @param inputOutput_H filter image, modified in place
 * @param center        notch centre in the centred spectrum
 * @param radius        notch radius in pixels
 */
void synthesizeFilterH(Mat& inputOutput_H, Point center, int radius)
{
    const int w = inputOutput_H.cols;
    const int h = inputOutput_H.rows;
    // The notch plus its vertical, horizontal and diagonal mirrors.
    const Point notches[4] = {
        center,
        Point(center.x, h - center.y),
        Point(w - center.x, center.y),
        Point(w - center.x, h - center.y)
    };
    for (int i = 0; i < 4; i++)
        circle(inputOutput_H, notches[i], radius, 0, -1, 8);
}
//! [synthesizeFilterH]
// Function calculates PSD(Power spectrum density) by fft with two flags
// flag = 0 means to return PSD
// flag = 1 means to return log(PSD)
//! [calcPSD]
/**
 * @brief Computes the power spectrum density of an image via the DFT.
 * @param inputImg  single-channel float image
 * @param outputImg PSD when flag == 0, log(1 + PSD) when flag != 0
 * @param flag      selects linear or logarithmic output
 */
void calcPSD(const Mat& inputImg, Mat& outputImg, int flag)
{
    // Complex spectrum of the (real) input image.
    Mat planes[2] = { Mat_<float>(inputImg.clone()), Mat::zeros(inputImg.size(), CV_32F) };
    Mat complexI;
    merge(planes, 2, complexI);
    dft(complexI, complexI);
    split(complexI, planes); // planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
    // Zero the DC term so the (large) mean intensity does not dominate the PSD.
    planes[0].at<float>(0) = 0;
    planes[1].at<float>(0) = 0;
    // compute the PSD = sqrt(Re(DFT(I))^2 + Im(DFT(I))^2)^2
    Mat imgPSD;
    magnitude(planes[0], planes[1], imgPSD); //imgPSD = sqrt(Power spectrum density)
    pow(imgPSD, 2, imgPSD);                  //it needs ^2 in order to get PSD
    outputImg = imgPSD;
    // logPSD = log(1 + PSD)
    if (flag)
    {
        Mat imglogPSD;
        imglogPSD = imgPSD + Scalar::all(1);
        log(imglogPSD, imglogPSD);
        outputImg = imglogPSD;
    }
}
//! [calcPSD]

View File

@ -0,0 +1,97 @@
/**
* @file CannyDetector_Demo.cpp
* @brief Sample code showing how to detect edges using the Canny Detector
* @author OpenCV team
*/
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace cv;
//![variables]
Mat src, src_gray;                // input image and its grayscale version
Mat dst, detected_edges;          // output canvas and the Canny edge mask
int lowThreshold = 0;             // lower hysteresis threshold (set by the trackbar)
const int max_lowThreshold = 100; // trackbar upper bound
const int ratio = 3;              // upper threshold = lowThreshold * ratio (1:3 ratio)
const int kernel_size = 3;        // Sobel aperture size used by Canny
const char* window_name = "Edge Map";
//![variables]
/**
* @function CannyThreshold
* @brief Trackbar callback - Canny thresholds input with a ratio 1:3
*/
static void CannyThreshold(int, void*)
{
    //![reduce_noise]
    /// Reduce noise with a kernel 3x3
    blur( src_gray, detected_edges, Size(3,3) );
    //![reduce_noise]
    //![canny]
    /// Canny detector
    // The upper hysteresis threshold is fixed at `ratio` (3x) the user-chosen lower one.
    Canny( detected_edges, detected_edges, lowThreshold, lowThreshold*ratio, kernel_size );
    //![canny]
    /// Using Canny's output as a mask, we display our result
    //![fill]
    dst = Scalar::all(0);
    //![fill]
    //![copyto]
    // Copy only the source pixels where an edge was detected.
    src.copyTo( dst, detected_edges);
    //![copyto]
    //![display]
    imshow( window_name, dst );
    //![display]
}
/**
* @function main
*/
int main( int argc, char** argv )
{
    //![load]
    CommandLineParser parser( argc, argv, "{@input | fruits.jpg | input image}" );
    src = imread( samples::findFile( parser.get<String>( "@input" ) ), IMREAD_COLOR ); // Load an image
    if( src.empty() )
    {
        std::cout << "Could not open or find the image!\n" << std::endl;
        std::cout << "Usage: " << argv[0] << " <Input image>" << std::endl;
        return -1;
    }
    //![load]
    //![create_mat]
    /// Create a matrix of the same type and size as src (for dst)
    dst.create( src.size(), src.type() );
    //![create_mat]
    //![convert_to_gray]
    cvtColor( src, src_gray, COLOR_BGR2GRAY );
    //![convert_to_gray]
    //![create_window]
    namedWindow( window_name, WINDOW_AUTOSIZE );
    //![create_window]
    //![create_trackbar]
    /// Create a Trackbar for user to enter threshold
    // Moving the trackbar re-runs CannyThreshold with the new lower threshold.
    createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold, CannyThreshold );
    //![create_trackbar]
    /// Show the image
    CannyThreshold(0, 0);
    /// Wait until user exit program by pressing a key
    waitKey(0);
    return 0;
}

View File

@ -0,0 +1,82 @@
/**
* @function Geometric_Transforms_Demo.cpp
* @brief Demo code for Geometric Transforms
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
/**
* @function main
*/
int main( int argc, char** argv )
{
    //! [Load the image]
    CommandLineParser parser( argc, argv, "{@input | lena.jpg | input image}" );
    Mat src = imread( samples::findFile( parser.get<String>( "@input" ) ) );
    if( src.empty() )
    {
        cout << "Could not open or find the image!\n" << endl;
        cout << "Usage: " << argv[0] << " <Input image>" << endl;
        return -1;
    }
    //! [Load the image]
    //! [Set your 3 points to calculate the Affine Transform]
    // An affine transform is fully determined by how it maps three
    // non-collinear points: here, three corners of the source image.
    Point2f srcTri[3];
    srcTri[0] = Point2f( 0.f, 0.f );
    srcTri[1] = Point2f( src.cols - 1.f, 0.f );
    srcTri[2] = Point2f( 0.f, src.rows - 1.f );
    Point2f dstTri[3];
    dstTri[0] = Point2f( 0.f, src.rows*0.33f );
    dstTri[1] = Point2f( src.cols*0.85f, src.rows*0.25f );
    dstTri[2] = Point2f( src.cols*0.15f, src.rows*0.7f );
    //! [Set your 3 points to calculate the Affine Transform]
    //! [Get the Affine Transform]
    Mat warp_mat = getAffineTransform( srcTri, dstTri );
    //! [Get the Affine Transform]
    //! [Apply the Affine Transform just found to the src image]
    /// Set the dst image the same type and size as src
    Mat warp_dst = Mat::zeros( src.rows, src.cols, src.type() );
    warpAffine( src, warp_dst, warp_mat, warp_dst.size() );
    //! [Apply the Affine Transform just found to the src image]
    /** Rotating the image after Warp */
    //! [Compute a rotation matrix with respect to the center of the image]
    Point center = Point( warp_dst.cols/2, warp_dst.rows/2 );
    double angle = -50.0;   // degrees, counter-clockwise is positive
    double scale = 0.6;     // isotropic shrink applied together with the rotation
    //! [Compute a rotation matrix with respect to the center of the image]
    //! [Get the rotation matrix with the specifications above]
    Mat rot_mat = getRotationMatrix2D( center, angle, scale );
    //! [Get the rotation matrix with the specifications above]
    //! [Rotate the warped image]
    Mat warp_rotate_dst;
    warpAffine( warp_dst, warp_rotate_dst, rot_mat, warp_dst.size() );
    //! [Rotate the warped image]
    //! [Show what you got]
    imshow( "Source image", src );
    imshow( "Warp", warp_dst );
    imshow( "Warp + Rotate", warp_rotate_dst );
    //! [Show what you got]
    //! [Wait until user exits the program]
    waitKey();
    //! [Wait until user exits the program]
    return 0;
}

View File

@ -0,0 +1,106 @@
/**
* @file HoughCircle_Demo.cpp
* @brief Demo code for Hough Transform
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace std;
using namespace cv;
namespace
{
// windows and trackbars name
const std::string windowName = "Hough Circle Detection Demo";
const std::string cannyThresholdTrackbarName = "Canny threshold";
const std::string accumulatorThresholdTrackbarName = "Accumulator Threshold";
// initial and max values of the parameters of interests.
const int cannyThresholdInitialValue = 100;
const int accumulatorThresholdInitialValue = 50;
const int maxAccumulatorThreshold = 200;
const int maxCannyThreshold = 255;
void HoughDetection(const Mat& src_gray, const Mat& src_display, int cannyThreshold, int accumulatorThreshold)
{
// will hold the results of the detection
std::vector<Vec3f> circles;
// runs the actual detection
HoughCircles( src_gray, circles, HOUGH_GRADIENT, 1, src_gray.rows/8, cannyThreshold, accumulatorThreshold, 0, 0 );
// clone the colour, input image for displaying purposes
Mat display = src_display.clone();
for( size_t i = 0; i < circles.size(); i++ )
{
Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
int radius = cvRound(circles[i][2]);
// circle center
circle( display, center, 3, Scalar(0,255,0), -1, 8, 0 );
// circle outline
circle( display, center, radius, Scalar(0,0,255), 3, 8, 0 );
}
// shows the results
imshow( windowName, display);
}
}
/**
 * @function main
 * @brief Interactive Hough circle demo: loads an image, lets the user tune the
 *        Canny and accumulator thresholds with trackbars, and redraws the
 *        detected circles until 'q'/'Q' is pressed.
 */
int main(int argc, char** argv)
{
    Mat src, src_gray;
    // Read the image
    String imageName("stuff.jpg"); // by default
    if (argc > 1)
    {
        imageName = argv[1];
    }
    src = imread( samples::findFile( imageName ), IMREAD_COLOR );
    if( src.empty() )
    {
        std::cerr << "Invalid input image\n";
        // fixed: removed a stray empty statement (double semicolon)
        std::cout << "Usage : " << argv[0] << " <path_to_input_image>\n";
        return -1;
    }
    // Convert it to gray
    cvtColor( src, src_gray, COLOR_BGR2GRAY );
    // Reduce the noise so we avoid false circle detection
    GaussianBlur( src_gray, src_gray, Size(9, 9), 2, 2 );
    //declare and initialize both parameters that are subjects to change
    int cannyThreshold = cannyThresholdInitialValue;
    int accumulatorThreshold = accumulatorThresholdInitialValue;
    // create the main window, and attach the trackbars
    namedWindow( windowName, WINDOW_AUTOSIZE );
    createTrackbar(cannyThresholdTrackbarName, windowName, &cannyThreshold,maxCannyThreshold);
    createTrackbar(accumulatorThresholdTrackbarName, windowName, &accumulatorThreshold, maxAccumulatorThreshold);
    // infinite loop to display
    // and refresh the content of the output image
    // until the user presses q or Q
    char key = 0;
    while(key != 'q' && key != 'Q')
    {
        // those parameters cannot be =0
        // so we must check here
        cannyThreshold = std::max(cannyThreshold, 1);
        accumulatorThreshold = std::max(accumulatorThreshold, 1);
        //runs the detection, and update the display
        HoughDetection(src_gray, src, cannyThreshold, accumulatorThreshold);
        // get user key
        key = (char)waitKey(10);
    }
    return 0;
}

View File

@ -0,0 +1,133 @@
/**
* @file HoughLines_Demo.cpp
* @brief Demo code for Hough Transform
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
/// Global variables
/** General variables */
Mat src, edges;                       // input image and its Canny edge map
Mat src_gray;
Mat standard_hough, probabilistic_hough;   // display canvases for the two variants
int min_threshold = 50;               // base accumulator threshold added to the trackbar value
int max_trackbar = 150;               // trackbar range for both windows
const char* standard_name = "Standard Hough Lines Demo";
const char* probabilistic_name = "Probabilistic Hough Lines Demo";
int s_trackbar = max_trackbar;        // extra threshold for the standard transform
int p_trackbar = max_trackbar;        // extra threshold for the probabilistic transform
/// Function Headers
void help();
void Standard_Hough( int, void* );
void Probabilistic_Hough( int, void* );
/**
* @function main
*/
int main( int argc, char** argv )
{
// Read the image
String imageName("building.jpg"); // by default
if (argc > 1)
{
imageName = argv[1];
}
src = imread( samples::findFile( imageName ), IMREAD_COLOR );
if( src.empty() )
{ help();
return -1;
}
/// Pass the image to gray
cvtColor( src, src_gray, COLOR_RGB2GRAY );
/// Apply Canny edge detector
Canny( src_gray, edges, 50, 200, 3 );
/// Create Trackbars for Thresholds
char thresh_label[50];
sprintf( thresh_label, "Thres: %d + input", min_threshold );
namedWindow( standard_name, WINDOW_AUTOSIZE );
createTrackbar( thresh_label, standard_name, &s_trackbar, max_trackbar, Standard_Hough);
namedWindow( probabilistic_name, WINDOW_AUTOSIZE );
createTrackbar( thresh_label, probabilistic_name, &p_trackbar, max_trackbar, Probabilistic_Hough);
/// Initialize
Standard_Hough(0, 0);
Probabilistic_Hough(0, 0);
waitKey(0);
return 0;
}
/**
* @function help
* @brief Indications of how to run this program and why is it for
*/
/**
 * @function help
 * @brief Indications of how to run this program and why is it for
 */
void help()
{
    const char* const usage_text =
        "\t Hough Transform to detect lines \n "
        "\t---------------------------------\n "
        " Usage: ./HoughLines_Demo <image_name> \n";
    fputs(usage_text, stdout);
}
/**
* @function Standard_Hough
*/
void Standard_Hough( int, void* )
{
    vector<Vec2f> s_lines;
    cvtColor( edges, standard_hough, COLOR_GRAY2BGR );
    /// 1. Use Standard Hough Transform
    HoughLines( edges, s_lines, 1, CV_PI/180, min_threshold + s_trackbar, 0, 0 );
    /// Show the result
    for( size_t i = 0; i < s_lines.size(); i++ )
    {
        // Each detected line is (r, t) in polar form: r = x*cos(t) + y*sin(t).
        float r = s_lines[i][0], t = s_lines[i][1];
        double cos_t = cos(t), sin_t = sin(t);
        // (x0, y0) is the point on the line closest to the origin.
        double x0 = r*cos_t, y0 = r*sin_t;
        double alpha = 1000;
        // Extend +-alpha pixels along the line's direction (-sin_t, cos_t) for display.
        Point pt1( cvRound(x0 + alpha*(-sin_t)), cvRound(y0 + alpha*cos_t) );
        Point pt2( cvRound(x0 - alpha*(-sin_t)), cvRound(y0 - alpha*cos_t) );
        line( standard_hough, pt1, pt2, Scalar(255,0,0), 3, LINE_AA);
    }
    imshow( standard_name, standard_hough );
}
/**
* @function Probabilistic_Hough
*/
void Probabilistic_Hough( int, void* )
{
vector<Vec4i> p_lines;
cvtColor( edges, probabilistic_hough, COLOR_GRAY2BGR );
/// 2. Use Probabilistic Hough Transform
HoughLinesP( edges, p_lines, 1, CV_PI/180, min_threshold + p_trackbar, 30, 10 );
/// Show the result
for( size_t i = 0; i < p_lines.size(); i++ )
{
Vec4i l = p_lines[i];
line( probabilistic_hough, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(255,0,0), 3, LINE_AA);
}
imshow( probabilistic_name, probabilistic_hough );
}

View File

@ -0,0 +1,67 @@
/**
* @file Laplace_Demo.cpp
* @brief Sample code showing how to detect edges using the Laplace operator
* @author OpenCV team
*/
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
using namespace cv;
/**
* @function main
*/
int main( int argc, char** argv )
{
    //![variables]
    // Declare the variables we are going to use
    Mat src, src_gray, dst;
    int kernel_size = 3;
    int scale = 1;
    int delta = 0;
    int ddepth = CV_16S;    // 16-bit signed output: the Laplacian can be negative
    const char* window_name = "Laplace Demo";
    //![variables]
    //![load]
    const char* imageName = argc >=2 ? argv[1] : "lena.jpg";
    src = imread( samples::findFile( imageName ), IMREAD_COLOR ); // Load an image
    // Check if image is loaded fine
    if(src.empty()){
        printf(" Error opening image\n");
        printf(" Program Arguments: [image_name -- default lena.jpg] \n");
        return -1;
    }
    //![load]
    //![reduce_noise]
    // Reduce noise by blurring with a Gaussian filter ( kernel size = 3 )
    GaussianBlur( src, src, Size(3, 3), 0, 0, BORDER_DEFAULT );
    //![reduce_noise]
    //![convert_to_gray]
    cvtColor( src, src_gray, COLOR_BGR2GRAY ); // Convert the image to grayscale
    //![convert_to_gray]
    /// Apply Laplace function
    Mat abs_dst;
    //![laplacian]
    Laplacian( src_gray, dst, ddepth, kernel_size, scale, delta, BORDER_DEFAULT );
    //![laplacian]
    //![convert]
    // converting back to CV_8U
    convertScaleAbs( dst, abs_dst );
    //![convert]
    //![display]
    imshow( window_name, abs_dst );
    waitKey(0);
    //![display]
    return 0;
}

View File

@ -0,0 +1,114 @@
/**
* @function Remap_Demo.cpp
* @brief Demo code for Remap
* @author Ana Huaman
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
/// Function Headers
void update_map( int &ind, Mat &map_x, Mat &map_y );
/**
* @function main
*/
int main(int argc, const char** argv)
{
    CommandLineParser parser(argc, argv, "{@image |chicky_512.png|input image name}");
    std::string filename = parser.get<std::string>(0);
    //! [Load]
    /// Load the image
    Mat src = imread( samples::findFile( filename ), IMREAD_COLOR );
    if (src.empty())
    {
        std::cout << "Cannot read image: " << filename << std::endl;
        return -1;
    }
    //! [Load]
    //! [Create]
    /// Create dst, map_x and map_y with the same size as src:
    // map_x/map_y hold, for every destination pixel, the source coordinates to sample.
    Mat dst(src.size(), src.type());
    Mat map_x(src.size(), CV_32FC1);
    Mat map_y(src.size(), CV_32FC1);
    //! [Create]
    //! [Window]
    /// Create window
    const char* remap_window = "Remap demo";
    namedWindow( remap_window, WINDOW_AUTOSIZE );
    //! [Window]
    //! [Loop]
    /// Index to switch between the remap modes
    int ind = 0;
    for(;;)
    {
        /// Update map_x & map_y. Then apply remap
        update_map(ind, map_x, map_y);
        remap( src, dst, map_x, map_y, INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 0, 0) );
        /// Display results
        imshow( remap_window, dst );
        /// Each 1 sec. Press ESC to exit the program
        char c = (char)waitKey( 1000 );
        if( c == 27 )
        {
            break;
        }
    }
    //! [Loop]
    return 0;
}
/**
* @function update_map
* @brief Fill the map_x and map_y matrices with 4 types of mappings
*/
//! [Update]
void update_map( int &ind, Mat &map_x, Mat &map_y )
{
    for( int i = 0; i < map_x.rows; i++ )
    {
        for( int j = 0; j < map_x.cols; j++ )
        {
            switch( ind )
            {
            case 0:
                // Mode 0: zoom into the central half of the image;
                // pixels outside that region map to (0, 0).
                if( j > map_x.cols*0.25 && j < map_x.cols*0.75 && i > map_x.rows*0.25 && i < map_x.rows*0.75 )
                {
                    map_x.at<float>(i, j) = 2*( j - map_x.cols*0.25f ) + 0.5f;
                    map_y.at<float>(i, j) = 2*( i - map_x.rows*0.25f ) + 0.5f;
                }
                else
                {
                    map_x.at<float>(i, j) = 0;
                    map_y.at<float>(i, j) = 0;
                }
                break;
            case 1:
                // Mode 1: flip vertically (upside down).
                map_x.at<float>(i, j) = (float)j;
                map_y.at<float>(i, j) = (float)(map_x.rows - i);
                break;
            case 2:
                // Mode 2: flip horizontally (mirror).
                map_x.at<float>(i, j) = (float)(map_x.cols - j);
                map_y.at<float>(i, j) = (float)i;
                break;
            case 3:
                // Mode 3: flip both axes (180-degree rotation).
                map_x.at<float>(i, j) = (float)(map_x.cols - j);
                map_y.at<float>(i, j) = (float)(map_x.rows - i);
                break;
            default:
                break;
            } // end of switch
        }
    }
    // Advance to the next mode for the following call (cycles 0..3).
    ind = (ind+1) % 4;
}
//! [Update]

View File

@ -0,0 +1,124 @@
/**
* @file Sobel_Demo.cpp
* @brief Sample code uses Sobel or Scharr OpenCV functions for edge detection
* @author OpenCV team
*/
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
/**
* @function main
*/
int main( int argc, char** argv )
{
    cv::CommandLineParser parser(argc, argv,
                            "{@input |lena.jpg|input image}"
                            "{ksize k|1|ksize (hit 'K' to increase its value at run time)}"
                            "{scale s|1|scale (hit 'S' to increase its value at run time)}"
                            "{delta d|0|delta (hit 'D' to increase its value at run time)}"
                            "{help h|false|show help message}");
    cout << "The sample uses Sobel or Scharr OpenCV functions for edge detection\n\n";
    parser.printMessage();
    cout << "\nPress 'ESC' to exit program.\nPress 'R' to reset values ( ksize will be -1 equal to Scharr function )";
    //![variables]
    // First we declare the variables we are going to use
    Mat image,src, src_gray;
    Mat grad;
    const String window_name = "Sobel Demo - Simple Edge Detector";
    int ksize = parser.get<int>("ksize");
    int scale = parser.get<int>("scale");
    int delta = parser.get<int>("delta");
    int ddepth = CV_16S;    // signed 16-bit: gradients can be negative
    //![variables]
    //![load]
    String imageName = parser.get<String>("@input");
    // As usual we load our source image (src)
    image = imread( samples::findFile( imageName ), IMREAD_COLOR ); // Load an image
    // Check if image is loaded fine
    if( image.empty() )
    {
        printf("Error opening image: %s\n", imageName.c_str());
        return EXIT_FAILURE;
    }
    //![load]
    // Re-run the whole pipeline after every keypress so parameter changes take effect.
    for (;;)
    {
        //![reduce_noise]
        // Remove noise by blurring with a Gaussian filter ( kernel size = 3 )
        GaussianBlur(image, src, Size(3, 3), 0, 0, BORDER_DEFAULT);
        //![reduce_noise]
        //![convert_to_gray]
        // Convert the image to grayscale
        cvtColor(src, src_gray, COLOR_BGR2GRAY);
        //![convert_to_gray]
        //![sobel]
        /// Generate grad_x and grad_y
        Mat grad_x, grad_y;
        Mat abs_grad_x, abs_grad_y;
        /// Gradient X
        Sobel(src_gray, grad_x, ddepth, 1, 0, ksize, scale, delta, BORDER_DEFAULT);
        /// Gradient Y
        Sobel(src_gray, grad_y, ddepth, 0, 1, ksize, scale, delta, BORDER_DEFAULT);
        //![sobel]
        //![convert]
        // converting back to CV_8U
        convertScaleAbs(grad_x, abs_grad_x);
        convertScaleAbs(grad_y, abs_grad_y);
        //![convert]
        //![blend]
        /// Total Gradient (approximate)
        addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, grad);
        //![blend]
        //![display]
        imshow(window_name, grad);
        char key = (char)waitKey(0);
        //![display]
        if(key == 27)
        {
            return EXIT_SUCCESS;
        }
        if (key == 'k' || key == 'K')
        {
            // ksize must stay odd; beyond 29 it wraps to -1, which selects Scharr.
            ksize = ksize < 30 ? ksize+2 : -1;
        }
        if (key == 's' || key == 'S')
        {
            scale++;
        }
        if (key == 'd' || key == 'D')
        {
            delta++;
        }
        if (key == 'r' || key == 'R')
        {
            scale = 1;
            ksize = -1;
            delta = 0;
        }
    }
    return EXIT_SUCCESS;
}

View File

@ -0,0 +1,84 @@
/**
* @file copyMakeBorder_demo.cpp
* @brief Sample code that shows the functionality of copyMakeBorder
* @author OpenCV team
*/
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
using namespace cv;
//![variables]
// Declare the variables
Mat src, dst;                          // input image and bordered output
int top, bottom, left, right;          // border widths in pixels (5% of image size)
int borderType = BORDER_CONSTANT;      // toggled at run time between CONSTANT and REPLICATE
const char* window_name = "copyMakeBorder Demo";
RNG rng(12345);                        // random generator for the constant border colour
//![variables]
/**
* @function main
*/
int main( int argc, char** argv )
{
    //![load]
    const char* imageName = argc >=2 ? argv[1] : "lena.jpg";
    // Loads an image
    src = imread( samples::findFile( imageName ), IMREAD_COLOR ); // Load an image
    // Check if image is loaded fine
    if( src.empty()) {
        printf(" Error opening image\n");
        printf(" Program Arguments: [image_name -- default lena.jpg] \n");
        return -1;
    }
    //![load]
    // Brief how-to for this program
    printf( "\n \t copyMakeBorder Demo: \n" );
    printf( "\t -------------------- \n" );
    printf( " ** Press 'c' to set the border to a random constant value \n");
    printf( " ** Press 'r' to set the border to be replicated \n");
    printf( " ** Press 'ESC' to exit the program \n");
    //![create_window]
    namedWindow( window_name, WINDOW_AUTOSIZE );
    //![create_window]
    //![init_arguments]
    // Initialize arguments for the filter
    top = (int) (0.05*src.rows); bottom = top;
    left = (int) (0.05*src.cols); right = left;
    //![init_arguments]
    // Redraw every 0.5 s with a fresh random colour until ESC is pressed.
    for(;;)
    {
        //![update_value]
        Scalar value( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
        //![update_value]
        //![copymakeborder]
        copyMakeBorder( src, dst, top, bottom, left, right, borderType, value );
        //![copymakeborder]
        //![display]
        imshow( window_name, dst );
        //![display]
        //![check_keypress]
        char c = (char)waitKey(500);
        if( c == 27 )
        { break; }
        else if( c == 'c' )
        { borderType = BORDER_CONSTANT; }
        else if( c == 'r' )
        { borderType = BORDER_REPLICATE; }
        //![check_keypress]
    }
    return 0;
}

View File

@ -0,0 +1,74 @@
/**
* @file filter2D_demo.cpp
* @brief Sample code that shows how to implement your own linear filters by using filter2D function
* @author OpenCV team
*/
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
using namespace cv;
/**
* @function main
*/
int main ( int argc, char** argv )
{
    // Declare variables
    Mat src, dst;
    Mat kernel;          // normalized box filter, rebuilt each iteration
    Point anchor;
    double delta;
    int ddepth;
    int kernel_size;
    const char* window_name = "filter2D Demo";
    //![load]
    const char* imageName = argc >=2 ? argv[1] : "lena.jpg";
    // Loads an image
    src = imread( samples::findFile( imageName ), IMREAD_COLOR ); // Load an image
    if( src.empty() )
    {
        printf(" Error opening image\n");
        printf(" Program Arguments: [image_name -- default lena.jpg] \n");
        return EXIT_FAILURE;
    }
    //![load]
    //![init_arguments]
    // Initialize arguments for the filter
    anchor = Point( -1, -1 );   // (-1,-1) places the anchor at the kernel centre
    delta = 0;
    ddepth = -1;                // -1 keeps the destination depth equal to the source's
    //![init_arguments]
    // Loop - Will filter the image with different kernel sizes each 0.5 seconds
    int ind = 0;
    for(;;)
    {
        //![update_kernel]
        // Update kernel size for a normalized box filter
        // sizes cycle through 3, 5, 7, 9, 11
        kernel_size = 3 + 2*( ind%5 );
        kernel = Mat::ones( kernel_size, kernel_size, CV_32F )/ (float)(kernel_size*kernel_size);
        //![update_kernel]
        //![apply_filter]
        // Apply filter
        filter2D(src, dst, ddepth , kernel, anchor, delta, BORDER_DEFAULT );
        //![apply_filter]
        imshow( window_name, dst );
        char c = (char)waitKey(500);
        // Press 'ESC' to exit the program
        if( c == 27 )
        { break; }
        ind++;
    }
    return EXIT_SUCCESS;
}

View File

@ -0,0 +1,65 @@
/**
* @file houghcircles.cpp
* @brief This program demonstrates circle finding with the Hough transform
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
    //![load]
    const char* filename = argc >=2 ? argv[1] : "smarties.png";
    // Loads an image
    Mat src = imread( samples::findFile( filename ), IMREAD_COLOR );
    // Check if image is loaded fine
    if(src.empty()){
        printf(" Error opening image\n");
        printf(" Program Arguments: [image_name -- default %s] \n", filename);
        return EXIT_FAILURE;
    }
    //![load]
    //![convert_to_gray]
    Mat gray;
    cvtColor(src, gray, COLOR_BGR2GRAY);
    //![convert_to_gray]
    //![reduce_noise]
    medianBlur(gray, gray, 5);
    //![reduce_noise]
    //![houghcircles]
    vector<Vec3f> circles;
    HoughCircles(gray, circles, HOUGH_GRADIENT, 1,
                 gray.rows/16, // change this value to detect circles with different distances to each other
                 100, 30, 1, 30 // change the last two parameters
                                // (min_radius & max_radius) to detect larger circles
    );
    //![houghcircles]
    //![draw]
    // Each result is (x, y, radius); coordinates are rounded for drawing.
    for( size_t i = 0; i < circles.size(); i++ )
    {
        Vec3i c = circles[i];
        Point center = Point(c[0], c[1]);
        // circle center
        circle( src, center, 1, Scalar(0,100,100), 3, LINE_AA);
        // circle outline
        int radius = c[2];
        circle( src, center, radius, Scalar(255,0,255), 3, LINE_AA);
    }
    //![draw]
    //![display]
    imshow("detected circles", src);
    waitKey();
    //![display]
    return EXIT_SUCCESS;
}

View File

@ -0,0 +1,89 @@
/**
* @file houghlines.cpp
* @brief This program demonstrates line finding with the Hough transform
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
    // Declare the output variables
    Mat dst, cdst, cdstP;
    //![load]
    const char* default_file = "sudoku.png";
    const char* filename = argc >=2 ? argv[1] : default_file;
    // Loads an image
    Mat src = imread( samples::findFile( filename ), IMREAD_GRAYSCALE );
    // Check if image is loaded fine
    if(src.empty()){
        printf(" Error opening image\n");
        printf(" Program Arguments: [image_name -- default %s] \n", default_file);
        return -1;
    }
    //![load]
    //![edge_detection]
    // Edge detection
    Canny(src, dst, 50, 200, 3);
    //![edge_detection]
    // Copy edges to the images that will display the results in BGR
    cvtColor(dst, cdst, COLOR_GRAY2BGR);
    cdstP = cdst.clone();
    //![hough_lines]
    // Standard Hough Line Transform
    vector<Vec2f> lines; // will hold the results of the detection
    HoughLines(dst, lines, 1, CV_PI/180, 150, 0, 0 ); // runs the actual detection
    //![hough_lines]
    //![draw_lines]
    // Draw the lines
    for( size_t i = 0; i < lines.size(); i++ )
    {
        // (rho, theta) polar form: rho = x*cos(theta) + y*sin(theta)
        float rho = lines[i][0], theta = lines[i][1];
        Point pt1, pt2;
        double a = cos(theta), b = sin(theta);
        // (x0, y0) is the point on the line closest to the origin;
        // extend +-1000 px along the line's direction (-b, a) for display.
        double x0 = a*rho, y0 = b*rho;
        pt1.x = cvRound(x0 + 1000*(-b));
        pt1.y = cvRound(y0 + 1000*(a));
        pt2.x = cvRound(x0 - 1000*(-b));
        pt2.y = cvRound(y0 - 1000*(a));
        line( cdst, pt1, pt2, Scalar(0,0,255), 3, LINE_AA);
    }
    //![draw_lines]
    //![hough_lines_p]
    // Probabilistic Line Transform
    vector<Vec4i> linesP; // will hold the results of the detection
    HoughLinesP(dst, linesP, 1, CV_PI/180, 50, 50, 10 ); // runs the actual detection
    //![hough_lines_p]
    //![draw_lines_p]
    // Draw the lines
    for( size_t i = 0; i < linesP.size(); i++ )
    {
        Vec4i l = linesP[i];
        line( cdstP, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0,0,255), 3, LINE_AA);
    }
    //![draw_lines_p]
    //![imshow]
    // Show results
    imshow("Source", src);
    imshow("Detected Lines (in red) - Standard Hough Line Transform", cdst);
    imshow("Detected Lines (in red) - Probabilistic Line Transform", cdstP);
    //![imshow]
    //![exit]
    // Wait and Exit
    waitKey();
    return 0;
    //![exit]
}

View File

@ -0,0 +1,168 @@
/**
* @brief Sample code showing how to segment overlapping objects using Laplacian filtering, in addition to Watershed and Distance Transformation
* @author OpenCV Team
*/
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
using namespace std;
using namespace cv;
/**
 * @function main
 * @brief Segments touching objects: Laplacian sharpening, Otsu binarization,
 *        distance transform to find per-object seeds, then watershed.
 */
int main(int argc, char *argv[])
{
    //! [load_image]
    // Load the image
    CommandLineParser parser( argc, argv, "{@input | cards.png | input image}" );
    Mat src = imread( samples::findFile( parser.get<String>( "@input" ) ) );
    if( src.empty() )
    {
        cout << "Could not open or find the image!\n" << endl;
        cout << "Usage: " << argv[0] << " <Input image>" << endl;
        return -1;
    }
    // Show the source image
    imshow("Source Image", src);
    //! [load_image]
    //! [black_bg]
    // Change the background from white to black, since that will help later to extract
    // better results during the use of Distance Transform
    Mat mask;
    inRange(src, Scalar(255, 255, 255), Scalar(255, 255, 255), mask);
    src.setTo(Scalar(0, 0, 0), mask);
    // Show output image
    imshow("Black Background Image", src);
    //! [black_bg]
    //! [sharp]
    // Create a kernel that we will use to sharpen our image
    Mat kernel = (Mat_<float>(3,3) <<
                  1,  1, 1,
                  1, -8, 1,
                  1,  1, 1); // an approximation of second derivative, a quite strong kernel
    // do the laplacian filtering as it is
    // well, we need to convert everything in something more deeper then CV_8U
    // because the kernel has some negative values,
    // and we can expect in general to have a Laplacian image with negative values
    // BUT a 8bits unsigned int (the one we are working with) can contain values from 0 to 255
    // so the possible negative number will be truncated
    Mat imgLaplacian;
    filter2D(src, imgLaplacian, CV_32F, kernel);
    Mat sharp;
    src.convertTo(sharp, CV_32F);
    // sharpened = original - Laplacian (unsharp-style enhancement)
    Mat imgResult = sharp - imgLaplacian;
    // convert back to 8bits gray scale
    imgResult.convertTo(imgResult, CV_8UC3);
    imgLaplacian.convertTo(imgLaplacian, CV_8UC3);
    // imshow( "Laplace Filtered Image", imgLaplacian );
    imshow( "New Sharped Image", imgResult );
    //! [sharp]
    //! [bin]
    // Create binary image from source image
    Mat bw;
    cvtColor(imgResult, bw, COLOR_BGR2GRAY);
    threshold(bw, bw, 40, 255, THRESH_BINARY | THRESH_OTSU);
    imshow("Binary Image", bw);
    //! [bin]
    //! [dist]
    // Perform the distance transform algorithm
    Mat dist;
    distanceTransform(bw, dist, DIST_L2, 3);
    // Normalize the distance image for range = {0.0, 1.0}
    // so we can visualize and threshold it
    normalize(dist, dist, 0, 1.0, NORM_MINMAX);
    imshow("Distance Transform Image", dist);
    //! [dist]
    //! [peaks]
    // Threshold to obtain the peaks
    // This will be the markers for the foreground objects
    threshold(dist, dist, 0.4, 1.0, THRESH_BINARY);
    // Dilate a bit the dist image
    Mat kernel1 = Mat::ones(3, 3, CV_8U);
    dilate(dist, dist, kernel1);
    imshow("Peaks", dist);
    //! [peaks]
    //! [seeds]
    // Create the CV_8U version of the distance image
    // It is needed for findContours()
    Mat dist_8u;
    dist.convertTo(dist_8u, CV_8U);
    // Find total markers
    vector<vector<Point> > contours;
    findContours(dist_8u, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
    // Create the marker image for the watershed algorithm
    Mat markers = Mat::zeros(dist.size(), CV_32S);
    // Draw the foreground markers
    // (label 0 is reserved for "unknown", so object labels start at 1)
    for (size_t i = 0; i < contours.size(); i++)
    {
        drawContours(markers, contours, static_cast<int>(i), Scalar(static_cast<int>(i)+1), -1);
    }
    // Draw the background marker
    circle(markers, Point(5,5), 3, Scalar(255), -1);
    Mat markers8u;
    markers.convertTo(markers8u, CV_8U, 10);
    imshow("Markers", markers8u);
    //! [seeds]
    //! [watershed]
    // Perform the watershed algorithm
    watershed(imgResult, markers);
    Mat mark;
    markers.convertTo(mark, CV_8U);
    bitwise_not(mark, mark);
    // imshow("Markers_v2", mark); // uncomment this if you want to see how the mark
    // image looks like at that point
    // Generate random colors
    vector<Vec3b> colors;
    for (size_t i = 0; i < contours.size(); i++)
    {
        int b = theRNG().uniform(0, 256);
        int g = theRNG().uniform(0, 256);
        int r = theRNG().uniform(0, 256);
        colors.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
    }
    // Create the result image
    Mat dst = Mat::zeros(markers.size(), CV_8UC3);
    // Fill labeled objects with random colors
    // (watershed leaves -1 on boundaries and 255 on background; skip both)
    for (int i = 0; i < markers.rows; i++)
    {
        for (int j = 0; j < markers.cols; j++)
        {
            int index = markers.at<int>(i,j);
            if (index > 0 && index <= static_cast<int>(contours.size()))
            {
                dst.at<Vec3b>(i,j) = colors[index-1];
            }
        }
    }
    // Visualize the final image
    imshow("Final Result", dst);
    //! [watershed]
    waitKey();
    return 0;
}

View File

@ -0,0 +1,78 @@
/**
* @function findContours_Demo.cpp
* @brief Demo code to find contours in an image
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
Mat src_gray;
int thresh = 100;
RNG rng(12345);
/// Function header
void thresh_callback(int, void* );
/**
* @function main
*/
int main( int argc, char** argv )
{
/// Load source image
CommandLineParser parser( argc, argv, "{@input | HappyFish.jpg | input image}" );
Mat src = imread( samples::findFile( parser.get<String>( "@input" ) ) );
if( src.empty() )
{
cout << "Could not open or find the image!\n" << endl;
cout << "Usage: " << argv[0] << " <Input image>" << endl;
return -1;
}
/// Convert image to gray and blur it
cvtColor( src, src_gray, COLOR_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
const char* source_window = "Source";
namedWindow( source_window );
imshow( source_window, src );
const int max_thresh = 255;
createTrackbar( "Canny thresh:", source_window, &thresh, max_thresh, thresh_callback );
thresh_callback( 0, 0 );
waitKey();
return 0;
}
/**
* @function thresh_callback
*/
void thresh_callback(int, void* )
{
/// Detect edges using Canny
Mat canny_output;
Canny( src_gray, canny_output, thresh, thresh*2 );
/// Find contours
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours( canny_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE );
/// Draw contours
Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
for( size_t i = 0; i< contours.size(); i++ )
{
Scalar color = Scalar( rng.uniform(0, 256), rng.uniform(0,256), rng.uniform(0,256) );
drawContours( drawing, contours, (int)i, color, 2, LINE_8, hierarchy, 0 );
}
/// Show in a window
imshow( "Contours", drawing );
}

View File

@ -0,0 +1,111 @@
/**
* @function generalContours_demo1.cpp
* @brief Demo code to find contours in an image
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
Mat src_gray;
int thresh = 100;
RNG rng(12345);
/// Function header
void thresh_callback(int, void* );
/**
* @function main
*/
int main( int argc, char** argv )
{
//! [setup]
/// Load source image
CommandLineParser parser( argc, argv, "{@input | stuff.jpg | input image}" );
Mat src = imread( samples::findFile( parser.get<String>( "@input" ) ) );
if( src.empty() )
{
cout << "Could not open or find the image!\n" << endl;
cout << "usage: " << argv[0] << " <Input image>" << endl;
return -1;
}
/// Convert image to gray and blur it
cvtColor( src, src_gray, COLOR_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
//! [setup]
//! [createWindow]
/// Create Window
const char* source_window = "Source";
namedWindow( source_window );
imshow( source_window, src );
//! [createWindow]
//! [trackbar]
const int max_thresh = 255;
createTrackbar( "Canny thresh:", source_window, &thresh, max_thresh, thresh_callback );
thresh_callback( 0, 0 );
//! [trackbar]
waitKey();
return 0;
}
/**
* @function thresh_callback
*/
void thresh_callback(int, void* )
{
//! [Canny]
/// Detect edges using Canny
Mat canny_output;
Canny( src_gray, canny_output, thresh, thresh*2 );
//! [Canny]
//! [findContours]
/// Find contours
vector<vector<Point> > contours;
findContours( canny_output, contours, RETR_TREE, CHAIN_APPROX_SIMPLE );
//! [findContours]
//! [allthework]
/// Approximate contours to polygons + get bounding rects and circles
vector<vector<Point> > contours_poly( contours.size() );
vector<Rect> boundRect( contours.size() );
vector<Point2f>centers( contours.size() );
vector<float>radius( contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{
approxPolyDP( contours[i], contours_poly[i], 3, true );
boundRect[i] = boundingRect( contours_poly[i] );
minEnclosingCircle( contours_poly[i], centers[i], radius[i] );
}
//! [allthework]
//! [zeroMat]
Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
//! [zeroMat]
//! [forContour]
/// Draw polygonal contour + bonding rects + circles
for( size_t i = 0; i< contours.size(); i++ )
{
Scalar color = Scalar( rng.uniform(0, 256), rng.uniform(0,256), rng.uniform(0,256) );
drawContours( drawing, contours_poly, (int)i, color );
rectangle( drawing, boundRect[i].tl(), boundRect[i].br(), color, 2 );
circle( drawing, centers[i], (int)radius[i], color, 2 );
}
//! [forContour]
//! [showDrawings]
/// Show in a window
imshow( "Contours", drawing );
//! [showDrawings]
}

View File

@ -0,0 +1,98 @@
/**
* @function generalContours_demo2.cpp
* @brief Demo code to obtain ellipses and rotated rectangles that contain detected contours
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
Mat src_gray;
int thresh = 100;
RNG rng(12345);
/// Function header
void thresh_callback(int, void* );
/**
* @function main
*/
int main( int argc, char** argv )
{
/// Load source image and convert it to gray
CommandLineParser parser( argc, argv, "{@input | stuff.jpg | input image}" );
Mat src = imread( samples::findFile( parser.get<String>( "@input" ) ) );
if( src.empty() )
{
cout << "Could not open or find the image!\n" << endl;
cout << "Usage: " << argv[0] << " <Input image>" << endl;
return -1;
}
/// Convert image to gray and blur it
cvtColor( src, src_gray, COLOR_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
const char* source_window = "Source";
namedWindow( source_window );
imshow( source_window, src );
const int max_thresh = 255;
createTrackbar( "Canny thresh:", source_window, &thresh, max_thresh, thresh_callback );
thresh_callback( 0, 0 );
waitKey();
return 0;
}
/**
* @function thresh_callback
*/
void thresh_callback(int, void* )
{
/// Detect edges using Canny
Mat canny_output;
Canny( src_gray, canny_output, thresh, thresh*2 );
/// Find contours
vector<vector<Point> > contours;
findContours( canny_output, contours, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Find the rotated rectangles and ellipses for each contour
vector<RotatedRect> minRect( contours.size() );
vector<RotatedRect> minEllipse( contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{
minRect[i] = minAreaRect( contours[i] );
if( contours[i].size() > 5 )
{
minEllipse[i] = fitEllipse( contours[i] );
}
}
/// Draw contours + rotated rects + ellipses
Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
for( size_t i = 0; i< contours.size(); i++ )
{
Scalar color = Scalar( rng.uniform(0, 256), rng.uniform(0,256), rng.uniform(0,256) );
// contour
drawContours( drawing, contours, (int)i, color );
// ellipse
ellipse( drawing, minEllipse[i], color, 2 );
// rotated rectangle
Point2f rect_points[4];
minRect[i].points( rect_points );
for ( int j = 0; j < 4; j++ )
{
line( drawing, rect_points[j], rect_points[(j+1)%4], color );
}
}
/// Show in a window
imshow( "Contours", drawing );
}

View File

@ -0,0 +1,85 @@
/**
* @function hull_demo.cpp
* @brief Demo code to find contours in an image
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
Mat src_gray;
int thresh = 100;
RNG rng(12345);
/// Function header
void thresh_callback(int, void* );
/**
* @function main
*/
int main( int argc, char** argv )
{
/// Load source image and convert it to gray
CommandLineParser parser( argc, argv, "{@input | stuff.jpg | input image}" );
Mat src = imread( samples::findFile( parser.get<String>( "@input" ) ) );
if( src.empty() )
{
cout << "Could not open or find the image!\n" << endl;
cout << "Usage: " << argv[0] << " <Input image>" << endl;
return -1;
}
/// Convert image to gray and blur it
cvtColor( src, src_gray, COLOR_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
const char* source_window = "Source";
namedWindow( source_window );
imshow( source_window, src );
const int max_thresh = 255;
createTrackbar( "Canny thresh:", source_window, &thresh, max_thresh, thresh_callback );
thresh_callback( 0, 0 );
waitKey();
return 0;
}
/**
* @function thresh_callback
*/
void thresh_callback(int, void* )
{
/// Detect edges using Canny
Mat canny_output;
Canny( src_gray, canny_output, thresh, thresh*2 );
/// Find contours
vector<vector<Point> > contours;
findContours( canny_output, contours, RETR_TREE, CHAIN_APPROX_SIMPLE );
/// Find the convex hull object for each contour
vector<vector<Point> >hull( contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{
convexHull( contours[i], hull[i] );
}
/// Draw contours + hull results
Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
for( size_t i = 0; i< contours.size(); i++ )
{
Scalar color = Scalar( rng.uniform(0, 256), rng.uniform(0,256), rng.uniform(0,256) );
drawContours( drawing, contours, (int)i, color );
drawContours( drawing, hull, (int)i, color );
}
/// Show in a window
imshow( "Hull demo", drawing );
}

View File

@ -0,0 +1,104 @@
/**
* @function moments_demo.cpp
* @brief Demo code to calculate moments
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <iomanip>
using namespace cv;
using namespace std;
Mat src_gray;
int thresh = 100;
RNG rng(12345);
/// Function header
void thresh_callback(int, void* );
/**
* @function main
*/
int main( int argc, char** argv )
{
/// Load source image
CommandLineParser parser( argc, argv, "{@input | stuff.jpg | input image}" );
Mat src = imread( samples::findFile( parser.get<String>( "@input" ) ) );
if( src.empty() )
{
cout << "Could not open or find the image!\n" << endl;
cout << "usage: " << argv[0] << " <Input image>" << endl;
return -1;
}
/// Convert image to gray and blur it
cvtColor( src, src_gray, COLOR_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
const char* source_window = "Source";
namedWindow( source_window );
imshow( source_window, src );
const int max_thresh = 255;
createTrackbar( "Canny thresh:", source_window, &thresh, max_thresh, thresh_callback );
thresh_callback( 0, 0 );
waitKey();
return 0;
}
/**
* @function thresh_callback
*/
void thresh_callback(int, void* )
{
/// Detect edges using canny
Mat canny_output;
Canny( src_gray, canny_output, thresh, thresh*2, 3 );
/// Find contours
vector<vector<Point> > contours;
findContours( canny_output, contours, RETR_TREE, CHAIN_APPROX_SIMPLE );
/// Get the moments
vector<Moments> mu(contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{
mu[i] = moments( contours[i] );
}
/// Get the mass centers
vector<Point2f> mc( contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{
//add 1e-5 to avoid division by zero
mc[i] = Point2f( static_cast<float>(mu[i].m10 / (mu[i].m00 + 1e-5)),
static_cast<float>(mu[i].m01 / (mu[i].m00 + 1e-5)) );
cout << "mc[" << i << "]=" << mc[i] << endl;
}
/// Draw contours
Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
for( size_t i = 0; i< contours.size(); i++ )
{
Scalar color = Scalar( rng.uniform(0, 256), rng.uniform(0,256), rng.uniform(0,256) );
drawContours( drawing, contours, (int)i, color, 2 );
circle( drawing, mc[i], 4, color, -1 );
}
/// Show in a window
imshow( "Contours", drawing );
/// Calculate the area with the moments 00 and compare with the result of the OpenCV function
cout << "\t Info: Area and Contour Length \n";
for( size_t i = 0; i < contours.size(); i++ )
{
cout << " * Contour[" << i << "] - Area (M_00) = " << std::fixed << std::setprecision(2) << mu[i].m00
<< " - Area OpenCV: " << contourArea(contours[i]) << " - Length: " << arcLength( contours[i], true ) << endl;
}
}

View File

@ -0,0 +1,88 @@
/**
* @function pointPolygonTest_demo.cpp
* @brief Demo code to use the pointPolygonTest function...fairly easy
* @author OpenCV team
*/
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
/**
* @function main
*/
int main( void )
{
/// Create an image
const int r = 100;
Mat src = Mat::zeros( Size( 4*r, 4*r ), CV_8U );
/// Create a sequence of points to make a contour
vector<Point2f> vert(6);
vert[0] = Point( 3*r/2, static_cast<int>(1.34*r) );
vert[1] = Point( 1*r, 2*r );
vert[2] = Point( 3*r/2, static_cast<int>(2.866*r) );
vert[3] = Point( 5*r/2, static_cast<int>(2.866*r) );
vert[4] = Point( 3*r, 2*r );
vert[5] = Point( 5*r/2, static_cast<int>(1.34*r) );
/// Draw it in src
for( int i = 0; i < 6; i++ )
{
line( src, vert[i], vert[(i+1)%6], Scalar( 255 ), 3 );
}
/// Get the contours
vector<vector<Point> > contours;
findContours( src, contours, RETR_TREE, CHAIN_APPROX_SIMPLE);
/// Calculate the distances to the contour
Mat raw_dist( src.size(), CV_32F );
for( int i = 0; i < src.rows; i++ )
{
for( int j = 0; j < src.cols; j++ )
{
raw_dist.at<float>(i,j) = (float)pointPolygonTest( contours[0], Point2f((float)j, (float)i), true );
}
}
double minVal, maxVal;
Point maxDistPt; // inscribed circle center
minMaxLoc(raw_dist, &minVal, &maxVal, NULL, &maxDistPt);
minVal = abs(minVal);
maxVal = abs(maxVal);
/// Depicting the distances graphically
Mat drawing = Mat::zeros( src.size(), CV_8UC3 );
for( int i = 0; i < src.rows; i++ )
{
for( int j = 0; j < src.cols; j++ )
{
if( raw_dist.at<float>(i,j) < 0 )
{
drawing.at<Vec3b>(i,j)[0] = (uchar)(255 - abs(raw_dist.at<float>(i,j)) * 255 / minVal);
}
else if( raw_dist.at<float>(i,j) > 0 )
{
drawing.at<Vec3b>(i,j)[2] = (uchar)(255 - raw_dist.at<float>(i,j) * 255 / maxVal);
}
else
{
drawing.at<Vec3b>(i,j)[0] = 255;
drawing.at<Vec3b>(i,j)[1] = 255;
drawing.at<Vec3b>(i,j)[2] = 255;
}
}
}
circle(drawing, maxDistPt, (int)maxVal, Scalar(255,255,255));
/// Show your results
imshow( "Source", src );
imshow( "Distance and inscribed circle", drawing );
waitKey();
return 0;
}

View File

@ -0,0 +1,130 @@
/**
* @function cornerDetector_Demo.cpp
* @brief Demo code for detecting corners using OpenCV built-in functions
* @author OpenCV team
*/
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
/// Global variables
Mat src, src_gray;
Mat myHarris_dst, myHarris_copy, Mc;
Mat myShiTomasi_dst, myShiTomasi_copy;
int myShiTomasi_qualityLevel = 50;
int myHarris_qualityLevel = 50;
int max_qualityLevel = 100;
double myHarris_minVal, myHarris_maxVal;
double myShiTomasi_minVal, myShiTomasi_maxVal;
RNG rng(12345);
const char* myHarris_window = "My Harris corner detector";
const char* myShiTomasi_window = "My Shi Tomasi corner detector";
/// Function headers
void myShiTomasi_function( int, void* );
void myHarris_function( int, void* );
/**
* @function main
*/
int main( int argc, char** argv )
{
/// Load source image and convert it to gray
CommandLineParser parser( argc, argv, "{@input | building.jpg | input image}" );
src = imread( samples::findFile( parser.get<String>( "@input" ) ) );
if ( src.empty() )
{
cout << "Could not open or find the image!\n" << endl;
cout << "Usage: " << argv[0] << " <Input image>" << endl;
return -1;
}
cvtColor( src, src_gray, COLOR_BGR2GRAY );
/// Set some parameters
int blockSize = 3, apertureSize = 3;
/// My Harris matrix -- Using cornerEigenValsAndVecs
cornerEigenValsAndVecs( src_gray, myHarris_dst, blockSize, apertureSize );
/* calculate Mc */
Mc = Mat( src_gray.size(), CV_32FC1 );
for( int i = 0; i < src_gray.rows; i++ )
{
for( int j = 0; j < src_gray.cols; j++ )
{
float lambda_1 = myHarris_dst.at<Vec6f>(i, j)[0];
float lambda_2 = myHarris_dst.at<Vec6f>(i, j)[1];
Mc.at<float>(i, j) = lambda_1*lambda_2 - 0.04f*((lambda_1 + lambda_2) * (lambda_1 + lambda_2));
}
}
minMaxLoc( Mc, &myHarris_minVal, &myHarris_maxVal );
/* Create Window and Trackbar */
namedWindow( myHarris_window );
createTrackbar( "Quality Level:", myHarris_window, &myHarris_qualityLevel, max_qualityLevel, myHarris_function );
myHarris_function( 0, 0 );
/// My Shi-Tomasi -- Using cornerMinEigenVal
cornerMinEigenVal( src_gray, myShiTomasi_dst, blockSize, apertureSize );
minMaxLoc( myShiTomasi_dst, &myShiTomasi_minVal, &myShiTomasi_maxVal );
/* Create Window and Trackbar */
namedWindow( myShiTomasi_window );
createTrackbar( "Quality Level:", myShiTomasi_window, &myShiTomasi_qualityLevel, max_qualityLevel, myShiTomasi_function );
myShiTomasi_function( 0, 0 );
waitKey();
return 0;
}
/**
 * @function myShiTomasi_function
 * @brief Marks every pixel whose min-eigenvalue response clears a threshold
 *        interpolated between the response extremes by the quality level.
 */
void myShiTomasi_function( int, void* )
{
    myShiTomasi_copy = src.clone();
    // A quality level of 0 would accept every pixel; clamp to at least 1
    myShiTomasi_qualityLevel = MAX(myShiTomasi_qualityLevel, 1);
    for( int y = 0; y < src_gray.rows; y++ )
    {
        for( int x = 0; x < src_gray.cols; x++ )
        {
            if( myShiTomasi_dst.at<float>(y,x) > myShiTomasi_minVal + ( myShiTomasi_maxVal - myShiTomasi_minVal )*myShiTomasi_qualityLevel/max_qualityLevel )
            {
                circle( myShiTomasi_copy, Point(x,y), 4, Scalar( rng.uniform(0,256), rng.uniform(0,256), rng.uniform(0,256) ), FILLED );
            }
        }
    }
    imshow( myShiTomasi_window, myShiTomasi_copy );
}
/**
 * @function myHarris_function
 * @brief Marks every pixel whose Harris response Mc clears a threshold
 *        interpolated between the response extremes by the quality level.
 */
void myHarris_function( int, void* )
{
    myHarris_copy = src.clone();
    // A quality level of 0 would accept every pixel; clamp to at least 1
    myHarris_qualityLevel = MAX(myHarris_qualityLevel, 1);
    for( int y = 0; y < src_gray.rows; y++ )
    {
        for( int x = 0; x < src_gray.cols; x++ )
        {
            if( Mc.at<float>(y,x) > myHarris_minVal + ( myHarris_maxVal - myHarris_minVal )*myHarris_qualityLevel/max_qualityLevel )
            {
                circle( myHarris_copy, Point(x,y), 4, Scalar( rng.uniform(0,256), rng.uniform(0,256), rng.uniform(0,256) ), FILLED );
            }
        }
    }
    imshow( myHarris_window, myHarris_copy );
}

View File

@ -0,0 +1,87 @@
/**
* @function cornerHarris_Demo.cpp
* @brief Demo code for detecting corners using Harris-Stephens method
* @author OpenCV team
*/
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
/// Global variables
Mat src, src_gray;
int thresh = 200;
int max_thresh = 255;
const char* source_window = "Source image";
const char* corners_window = "Corners detected";
/// Function header
void cornerHarris_demo( int, void* );
/**
 * @function main
 * @brief Loads the image, converts it to gray, and installs the Harris
 *        threshold trackbar.
 */
int main( int argc, char** argv )
{
    /// Load source image (building.jpg by default) and convert it to gray
    CommandLineParser parser( argc, argv, "{@input | building.jpg | input image}" );
    src = imread( samples::findFile( parser.get<String>( "@input" ) ) );
    if ( src.empty() )
    {
        cout << "Could not open or find the image!\n" << endl;
        cout << "Usage: " << argv[0] << " <Input image>" << endl;
        return -1;
    }
    cvtColor( src, src_gray, COLOR_BGR2GRAY );
    /// Threshold trackbar drives cornerHarris_demo; run it once immediately
    namedWindow( source_window );
    createTrackbar( "Threshold: ", source_window, &thresh, max_thresh, cornerHarris_demo );
    imshow( source_window, src );
    cornerHarris_demo( 0, 0 );
    waitKey();
    return 0;
}
/**
 * @function cornerHarris_demo
 * @brief Runs the Harris detector, normalizes the response to [0,255], and
 *        circles every pixel above the trackbar threshold.
 */
void cornerHarris_demo( int, void* )
{
    /// Detector parameters: neighborhood size, Sobel aperture, free parameter k
    int blockSize = 2;
    int apertureSize = 3;
    double k = 0.04;
    /// Raw Harris response
    Mat dst = Mat::zeros( src.size(), CV_32FC1 );
    cornerHarris( src_gray, dst, blockSize, apertureSize, k );
    /// Stretch the response to [0,255] so the trackbar threshold is meaningful
    Mat dst_norm, dst_norm_scaled;
    normalize( dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );
    convertScaleAbs( dst_norm, dst_norm_scaled );
    /// Circle each pixel whose normalized response exceeds the threshold
    for( int y = 0; y < dst_norm.rows ; y++ )
    {
        for( int x = 0; x < dst_norm.cols; x++ )
        {
            if( (int) dst_norm.at<float>(y,x) > thresh )
            {
                circle( dst_norm_scaled, Point(x,y), 5, Scalar(0), 2, 8, 0 );
            }
        }
    }
    /// Show the annotated response image
    namedWindow( corners_window );
    imshow( corners_window, dst_norm_scaled );
}

View File

@ -0,0 +1,112 @@
/**
* @function cornerSubPix_Demo.cpp
* @brief Demo code for refining corner locations
* @author OpenCV team
*/
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
/// Global variables
Mat src, src_gray;
int maxCorners = 10;
int maxTrackbar = 25;
RNG rng(12345);
const char* source_window = "Image";
/// Function header
void goodFeaturesToTrack_Demo( int, void* );
/**
 * @function main
 * @brief Loads the image and wires a trackbar that controls the maximum
 *        number of corners for the sub-pixel refinement demo.
 */
int main( int argc, char** argv )
{
    /// Load source image (pic3.png by default) and convert it to gray
    CommandLineParser parser( argc, argv, "{@input | pic3.png | input image}" );
    src = imread( samples::findFile( parser.get<String>( "@input" ) ) );
    if( src.empty() )
    {
        cout << "Could not open or find the image!\n" << endl;
        cout << "Usage: " << argv[0] << " <Input image>" << endl;
        return -1;
    }
    cvtColor( src, src_gray, COLOR_BGR2GRAY );
    /// Window plus a trackbar setting the corner budget; run once immediately
    namedWindow( source_window );
    createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
    imshow( source_window, src );
    goodFeaturesToTrack_Demo( 0, 0 );
    waitKey();
    return 0;
}
/**
 * @function goodFeaturesToTrack_Demo
 * @brief Detects Shi-Tomasi corners, draws them, then refines their locations
 *        to sub-pixel accuracy with cornerSubPix and prints the refined values.
 */
void goodFeaturesToTrack_Demo( int, void* )
{
    /// Parameters for Shi-Tomasi algorithm
    maxCorners = MAX(maxCorners, 1); // trackbar may deliver 0; require at least one corner
    vector<Point2f> corners;
    double qualityLevel = 0.01;
    double minDistance = 10;
    int blockSize = 3, gradientSize = 3;
    bool useHarrisDetector = false;
    double k = 0.04;
    /// Copy the source image so markers do not accumulate between runs
    Mat copy = src.clone();
    /// Apply corner detection
    goodFeaturesToTrack( src_gray,
                         corners,
                         maxCorners,
                         qualityLevel,
                         minDistance,
                         Mat(),
                         blockSize,
                         gradientSize,
                         useHarrisDetector,
                         k );
    /// Draw corners detected
    cout << "** Number of corners detected: " << corners.size() << endl;
    int radius = 4;
    for( size_t i = 0; i < corners.size(); i++ )
    {
        // Fix: use the same half-open range [0,256) for all three channels
        // (blue previously used uniform(0,255) and could never reach 255)
        circle( copy, corners[i], radius, Scalar(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256)), FILLED );
    }
    /// Show what you got
    namedWindow( source_window );
    imshow( source_window, copy );
    /// Set the needed parameters to find the refined corners
    Size winSize = Size( 5, 5 );
    Size zeroZone = Size( -1, -1 ); // no dead zone inside the search window
    TermCriteria criteria = TermCriteria( TermCriteria::EPS + TermCriteria::COUNT, 40, 0.001 );
    /// Calculate the refined corner locations
    cornerSubPix( src_gray, corners, winSize, zeroZone, criteria );
    /// Write them down
    for( size_t i = 0; i < corners.size(); i++ )
    {
        cout << " -- Refined Corner [" << i << "] (" << corners[i].x << "," << corners[i].y << ")" << endl;
    }
}

View File

@ -0,0 +1,99 @@
/**
* @function goodFeaturesToTrack_Demo.cpp
* @brief Demo code for detecting corners using Shi-Tomasi method
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
/// Global variables
Mat src, src_gray;
int maxCorners = 23;
int maxTrackbar = 100;
RNG rng(12345);
const char* source_window = "Image";
/// Function header
void goodFeaturesToTrack_Demo( int, void* );
/**
 * @function main
 * @brief Loads the image and wires a trackbar that controls the maximum
 *        number of Shi-Tomasi corners to detect.
 */
int main( int argc, char** argv )
{
    /// Load source image (pic3.png by default) and convert it to gray
    CommandLineParser parser( argc, argv, "{@input | pic3.png | input image}" );
    src = imread( samples::findFile( parser.get<String>( "@input" ) ) );
    if( src.empty() )
    {
        cout << "Could not open or find the image!\n" << endl;
        cout << "Usage: " << argv[0] << " <Input image>" << endl;
        return -1;
    }
    cvtColor( src, src_gray, COLOR_BGR2GRAY );
    /// Window plus a trackbar setting the corner budget; run once immediately
    namedWindow( source_window );
    createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
    imshow( source_window, src );
    goodFeaturesToTrack_Demo( 0, 0 );
    waitKey();
    return 0;
}
/**
 * @function goodFeaturesToTrack_Demo
 * @brief Applies the Shi-Tomasi corner detector and draws every detected
 *        corner as a filled circle in a random color.
 */
void goodFeaturesToTrack_Demo( int, void* )
{
    /// Parameters for Shi-Tomasi algorithm
    maxCorners = MAX(maxCorners, 1); // trackbar may deliver 0; require at least one corner
    vector<Point2f> corners;
    double qualityLevel = 0.01;
    double minDistance = 10;
    int blockSize = 3, gradientSize = 3;
    bool useHarrisDetector = false;
    double k = 0.04;
    /// Copy the source image so markers do not accumulate between runs
    Mat copy = src.clone();
    /// Apply corner detection
    goodFeaturesToTrack( src_gray,
                         corners,
                         maxCorners,
                         qualityLevel,
                         minDistance,
                         Mat(),
                         blockSize,
                         gradientSize,
                         useHarrisDetector,
                         k );
    /// Draw corners detected
    cout << "** Number of corners detected: " << corners.size() << endl;
    int radius = 4;
    for( size_t i = 0; i < corners.size(); i++ )
    {
        // Fix: use the same half-open range [0,256) for all three channels
        // (blue previously used uniform(0,255) and could never reach 255)
        circle( copy, corners[i], radius, Scalar(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256)), FILLED );
    }
    /// Show what you got
    namedWindow( source_window );
    imshow( source_window, copy );
}

View File

@ -0,0 +1,13 @@
<?xml version="1.0"?>
<opencv_storage>
<images>
images/CameraCalibraation/VID5/xx1.jpg
images/CameraCalibraation/VID5/xx2.jpg
images/CameraCalibraation/VID5/xx3.jpg
images/CameraCalibraation/VID5/xx4.jpg
images/CameraCalibraation/VID5/xx5.jpg
images/CameraCalibraation/VID5/xx6.jpg
images/CameraCalibraation/VID5/xx7.jpg
images/CameraCalibraation/VID5/xx8.jpg
</images>
</opencv_storage>

View File

@ -0,0 +1,751 @@
#include <iostream>
#include <sstream>
#include <string>
#include <ctime>
#include <cstdio>
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
using namespace std;
class Settings
{
public:
Settings() : goodInput(false) {}
enum Pattern { NOT_EXISTING, CHESSBOARD, CIRCLES_GRID, ASYMMETRIC_CIRCLES_GRID };
enum InputType { INVALID, CAMERA, VIDEO_FILE, IMAGE_LIST };
void write(FileStorage& fs) const //Write serialization for this class
{
fs << "{"
<< "BoardSize_Width" << boardSize.width
<< "BoardSize_Height" << boardSize.height
<< "Square_Size" << squareSize
<< "Calibrate_Pattern" << patternToUse
<< "Calibrate_NrOfFrameToUse" << nrFrames
<< "Calibrate_FixAspectRatio" << aspectRatio
<< "Calibrate_AssumeZeroTangentialDistortion" << calibZeroTangentDist
<< "Calibrate_FixPrincipalPointAtTheCenter" << calibFixPrincipalPoint
<< "Write_DetectedFeaturePoints" << writePoints
<< "Write_extrinsicParameters" << writeExtrinsics
<< "Write_gridPoints" << writeGrid
<< "Write_outputFileName" << outputFileName
<< "Show_UndistortedImage" << showUndistorted
<< "Input_FlipAroundHorizontalAxis" << flipVertical
<< "Input_Delay" << delay
<< "Input" << input
<< "}";
}
void read(const FileNode& node) //Read serialization for this class
{
node["BoardSize_Width" ] >> boardSize.width;
node["BoardSize_Height"] >> boardSize.height;
node["Calibrate_Pattern"] >> patternToUse;
node["Square_Size"] >> squareSize;
node["Calibrate_NrOfFrameToUse"] >> nrFrames;
node["Calibrate_FixAspectRatio"] >> aspectRatio;
node["Write_DetectedFeaturePoints"] >> writePoints;
node["Write_extrinsicParameters"] >> writeExtrinsics;
node["Write_gridPoints"] >> writeGrid;
node["Write_outputFileName"] >> outputFileName;
node["Calibrate_AssumeZeroTangentialDistortion"] >> calibZeroTangentDist;
node["Calibrate_FixPrincipalPointAtTheCenter"] >> calibFixPrincipalPoint;
node["Calibrate_UseFisheyeModel"] >> useFisheye;
node["Input_FlipAroundHorizontalAxis"] >> flipVertical;
node["Show_UndistortedImage"] >> showUndistorted;
node["Input"] >> input;
node["Input_Delay"] >> delay;
node["Fix_K1"] >> fixK1;
node["Fix_K2"] >> fixK2;
node["Fix_K3"] >> fixK3;
node["Fix_K4"] >> fixK4;
node["Fix_K5"] >> fixK5;
validate();
}
void validate()
{
goodInput = true;
if (boardSize.width <= 0 || boardSize.height <= 0)
{
cerr << "Invalid Board size: " << boardSize.width << " " << boardSize.height << endl;
goodInput = false;
}
if (squareSize <= 10e-6)
{
cerr << "Invalid square size " << squareSize << endl;
goodInput = false;
}
if (nrFrames <= 0)
{
cerr << "Invalid number of frames " << nrFrames << endl;
goodInput = false;
}
if (input.empty()) // Check for valid input
inputType = INVALID;
else
{
if (input[0] >= '0' && input[0] <= '9')
{
stringstream ss(input);
ss >> cameraID;
inputType = CAMERA;
}
else
{
if (isListOfImages(input) && readStringList(input, imageList))
{
inputType = IMAGE_LIST;
nrFrames = (nrFrames < (int)imageList.size()) ? nrFrames : (int)imageList.size();
}
else
inputType = VIDEO_FILE;
}
if (inputType == CAMERA)
inputCapture.open(cameraID);
if (inputType == VIDEO_FILE)
inputCapture.open(input);
if (inputType != IMAGE_LIST && !inputCapture.isOpened())
inputType = INVALID;
}
if (inputType == INVALID)
{
cerr << " Input does not exist: " << input;
goodInput = false;
}
flag = 0;
if(calibFixPrincipalPoint) flag |= CALIB_FIX_PRINCIPAL_POINT;
if(calibZeroTangentDist) flag |= CALIB_ZERO_TANGENT_DIST;
if(aspectRatio) flag |= CALIB_FIX_ASPECT_RATIO;
if(fixK1) flag |= CALIB_FIX_K1;
if(fixK2) flag |= CALIB_FIX_K2;
if(fixK3) flag |= CALIB_FIX_K3;
if(fixK4) flag |= CALIB_FIX_K4;
if(fixK5) flag |= CALIB_FIX_K5;
if (useFisheye) {
// the fisheye model has its own enum, so overwrite the flags
flag = fisheye::CALIB_FIX_SKEW | fisheye::CALIB_RECOMPUTE_EXTRINSIC;
if(fixK1) flag |= fisheye::CALIB_FIX_K1;
if(fixK2) flag |= fisheye::CALIB_FIX_K2;
if(fixK3) flag |= fisheye::CALIB_FIX_K3;
if(fixK4) flag |= fisheye::CALIB_FIX_K4;
if (calibFixPrincipalPoint) flag |= fisheye::CALIB_FIX_PRINCIPAL_POINT;
}
calibrationPattern = NOT_EXISTING;
if (!patternToUse.compare("CHESSBOARD")) calibrationPattern = CHESSBOARD;
if (!patternToUse.compare("CIRCLES_GRID")) calibrationPattern = CIRCLES_GRID;
if (!patternToUse.compare("ASYMMETRIC_CIRCLES_GRID")) calibrationPattern = ASYMMETRIC_CIRCLES_GRID;
if (calibrationPattern == NOT_EXISTING)
{
cerr << " Camera calibration mode does not exist: " << patternToUse << endl;
goodInput = false;
}
atImageList = 0;
}
Mat nextImage()
{
Mat result;
if( inputCapture.isOpened() )
{
Mat view0;
inputCapture >> view0;
view0.copyTo(result);
}
else if( atImageList < imageList.size() )
result = imread(imageList[atImageList++], IMREAD_COLOR);
return result;
}
static bool readStringList( const string& filename, vector<string>& l )
{
l.clear();
FileStorage fs(filename, FileStorage::READ);
if( !fs.isOpened() )
return false;
FileNode n = fs.getFirstTopLevelNode();
if( n.type() != FileNode::SEQ )
return false;
FileNodeIterator it = n.begin(), it_end = n.end();
for( ; it != it_end; ++it )
l.push_back((string)*it);
return true;
}
static bool isListOfImages( const std::string& filename)
{
    // Heuristic: the input names an image-list file when it contains an
    // XML/YAML extension substring anywhere in the name.
    const bool looksLikeList =
        filename.find(".xml") != std::string::npos ||
        filename.find(".yaml") != std::string::npos ||
        filename.find(".yml") != std::string::npos;
    return looksLikeList;
}
public:
Size boardSize;              // The size of the board -> Number of items by width and height
Pattern calibrationPattern;  // One of the Chessboard, circles, or asymmetric circle pattern
float squareSize;            // The size of a square in your defined unit (point, millimeter, etc.)
int nrFrames;                // The number of frames to use from the input for calibration
float aspectRatio;           // The aspect ratio fx/fy to fix when CALIB_FIX_ASPECT_RATIO is set
int delay;                   // In case of a video input: delay between captured samples (ms)
bool writePoints;            // Write detected feature points to the output file
bool writeExtrinsics;        // Write extrinsic parameters to the output file
bool writeGrid;              // Write refined 3D target grid points to the output file
bool calibZeroTangentDist;   // Assume zero tangential distortion
bool calibFixPrincipalPoint; // Fix the principal point at the center
bool flipVertical;           // Flip the captured images around the horizontal axis
string outputFileName;       // The name of the file where to write the calibration results
bool showUndistorted;        // Show undistorted images after calibration
string input;                // The input -> camera ID, video file path, or image-list file path
bool useFisheye;             // Use the fisheye camera model for calibration
bool fixK1;                  // Fix K1 distortion coefficient
bool fixK2;                  // Fix K2 distortion coefficient
bool fixK3;                  // Fix K3 distortion coefficient
bool fixK4;                  // Fix K4 distortion coefficient
bool fixK5;                  // Fix K5 distortion coefficient
int cameraID;                // Camera device ID when the input is a live camera
vector<string> imageList;    // File names when the input is an image list
size_t atImageList;          // Index of the next image to serve from imageList
VideoCapture inputCapture;   // Open capture when the input is a camera or video file
InputType inputType;         // Which of the input kinds was detected
bool goodInput;              // False when any setting failed validation
int flag;                    // Accumulated cv::CALIB_* (or fisheye::CALIB_*) flags
private:
string patternToUse;         // Raw pattern name read from the settings file
};
static inline void read(const FileNode& node, Settings& x, const Settings& default_value = Settings())
{
    // FileStorage ">>" hook for Settings: parse the node when present,
    // otherwise fall back to a default-constructed Settings.
    if (!node.empty())
        x.read(node);
    else
        x = default_value;
}
// Application state: looking for the pattern, collecting samples, or done calibrating.
enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
// Runs the calibration on the collected image points and writes the results; returns success.
bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,
vector<vector<Point2f> > imagePoints, float grid_width, bool release_object);
/**
 * @brief Entry point: reads the settings file, collects pattern detections from
 * the configured input, runs the calibration, and shows (un)distorted results.
 */
int main(int argc, char* argv[])
{
const String keys
= "{help h usage ? | | print this message }"
"{@settings |default.xml| input setting file }"
"{d | | actual distance between top-left and top-right corners of "
"the calibration grid }"
"{winSize | 11 | Half of search window for cornerSubPix }";
CommandLineParser parser(argc, argv, keys);
parser.about("This is a camera calibration sample.\n"
"Usage: camera_calibration [configuration_file -- default ./default.xml]\n"
"Near the sample file you'll find the configuration file, which has detailed help of "
"how to edit it. It may be any OpenCV supported file format XML/YAML.");
if (!parser.check()) {
parser.printErrors();
return 0;
}
if (parser.has("help")) {
parser.printMessage();
return 0;
}
//! [file_read]
Settings s;
const string inputSettingsFile = parser.get<string>(0);
FileStorage fs(inputSettingsFile, FileStorage::READ); // Read the settings
if (!fs.isOpened())
{
cout << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl;
parser.printMessage();
return -1;
}
fs["Settings"] >> s;
fs.release(); // close Settings file
//! [file_read]
//FileStorage fout("settings.yml", FileStorage::WRITE); // write config as YAML
//fout << "Settings" << s;
if (!s.goodInput)
{
cout << "Invalid input detected. Application stopping. " << endl;
return -1;
}
int winSize = parser.get<int>("winSize");
// With -d the caller supplies the measured board width; the "release object"
// calibration variant (calibrateCameraRO) is then used.
float grid_width = s.squareSize * (s.boardSize.width - 1);
bool release_object = false;
if (parser.has("d")) {
grid_width = parser.get<float>("d");
release_object = true;
}
vector<vector<Point2f> > imagePoints;
Mat cameraMatrix, distCoeffs;
Size imageSize;
// Image lists start capturing immediately; live cameras wait for the 'g' key.
int mode = s.inputType == Settings::IMAGE_LIST ? CAPTURING : DETECTION;
clock_t prevTimestamp = 0;
const Scalar RED(0,0,255), GREEN(0,255,0);
const char ESC_KEY = 27;
//! [get_input]
// Main acquisition loop: grab frames, detect the pattern, collect samples,
// and calibrate once enough frames were gathered or the input runs out.
for(;;)
{
Mat view;
bool blinkOutput = false;
view = s.nextImage();
//----- If no more image, or got enough, then stop calibration and show result -------------
if( mode == CAPTURING && imagePoints.size() >= (size_t)s.nrFrames )
{
if(runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints, grid_width,
release_object))
mode = CALIBRATED;
else
mode = DETECTION;
}
if(view.empty()) // If there are no more images stop the loop
{
// if calibration threshold was not reached yet, calibrate now
if( mode != CALIBRATED && !imagePoints.empty() )
runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints, grid_width,
release_object);
break;
}
//! [get_input]
imageSize = view.size(); // Format input image.
if( s.flipVertical ) flip( view, view, 0 );
//! [find_pattern]
vector<Point2f> pointBuf;
bool found;
int chessBoardFlags = CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE;
if(!s.useFisheye) {
// fast check erroneously fails with high distortions like fisheye
chessBoardFlags |= CALIB_CB_FAST_CHECK;
}
switch( s.calibrationPattern ) // Find feature points on the input format
{
case Settings::CHESSBOARD:
found = findChessboardCorners( view, s.boardSize, pointBuf, chessBoardFlags);
break;
case Settings::CIRCLES_GRID:
found = findCirclesGrid( view, s.boardSize, pointBuf );
break;
case Settings::ASYMMETRIC_CIRCLES_GRID:
found = findCirclesGrid( view, s.boardSize, pointBuf, CALIB_CB_ASYMMETRIC_GRID );
break;
default:
found = false;
break;
}
//! [find_pattern]
//! [pattern_found]
if ( found) // If done with success,
{
// improve the found corners' coordinate accuracy for chessboard
if( s.calibrationPattern == Settings::CHESSBOARD)
{
Mat viewGray;
cvtColor(view, viewGray, COLOR_BGR2GRAY);
cornerSubPix( viewGray, pointBuf, Size(winSize,winSize),
Size(-1,-1), TermCriteria( TermCriteria::EPS+TermCriteria::COUNT, 30, 0.0001 ));
}
if( mode == CAPTURING && // For camera only take new samples after delay time
(!s.inputCapture.isOpened() || clock() - prevTimestamp > s.delay*1e-3*CLOCKS_PER_SEC) )
{
imagePoints.push_back(pointBuf);
prevTimestamp = clock();
// Invert the displayed frame once as visual feedback for a captured sample.
blinkOutput = s.inputCapture.isOpened();
}
// Draw the corners.
drawChessboardCorners( view, s.boardSize, Mat(pointBuf), found );
}
//! [pattern_found]
//----------------------------- Output Text ------------------------------------------------
//! [output_text]
string msg = (mode == CAPTURING) ? "100/100" :
mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
int baseLine = 0;
Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
Point textOrigin(view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10);
if( mode == CAPTURING )
{
if(s.showUndistorted)
msg = cv::format( "%d/%d Undist", (int)imagePoints.size(), s.nrFrames );
else
msg = cv::format( "%d/%d", (int)imagePoints.size(), s.nrFrames );
}
putText( view, msg, textOrigin, 1, 1, mode == CALIBRATED ? GREEN : RED);
if( blinkOutput )
bitwise_not(view, view);
//! [output_text]
//------------------------- Video capture output undistorted ------------------------------
//! [output_undistorted]
if( mode == CALIBRATED && s.showUndistorted )
{
Mat temp = view.clone();
if (s.useFisheye)
{
Mat newCamMat;
fisheye::estimateNewCameraMatrixForUndistortRectify(cameraMatrix, distCoeffs, imageSize,
Matx33d::eye(), newCamMat, 1);
cv::fisheye::undistortImage(temp, view, cameraMatrix, distCoeffs, newCamMat);
}
else
undistort(temp, view, cameraMatrix, distCoeffs);
}
//! [output_undistorted]
//------------------------------ Show image and check for input commands -------------------
//! [await_input]
imshow("Image View", view);
char key = (char)waitKey(s.inputCapture.isOpened() ? 50 : s.delay);
if( key == ESC_KEY )
break;
if( key == 'u' && mode == CALIBRATED )
s.showUndistorted = !s.showUndistorted;
if( s.inputCapture.isOpened() && key == 'g' )
{
mode = CAPTURING;
imagePoints.clear();
}
//! [await_input]
}
// -----------------------Show the undistorted image for the image list ------------------------
//! [show_results]
if( s.inputType == Settings::IMAGE_LIST && s.showUndistorted && !cameraMatrix.empty())
{
Mat view, rview, map1, map2;
// Build the undistortion maps once, then remap every list image with them.
if (s.useFisheye)
{
Mat newCamMat;
fisheye::estimateNewCameraMatrixForUndistortRectify(cameraMatrix, distCoeffs, imageSize,
Matx33d::eye(), newCamMat, 1);
fisheye::initUndistortRectifyMap(cameraMatrix, distCoeffs, Matx33d::eye(), newCamMat, imageSize,
CV_16SC2, map1, map2);
}
else
{
initUndistortRectifyMap(
cameraMatrix, distCoeffs, Mat(),
getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0), imageSize,
CV_16SC2, map1, map2);
}
for(size_t i = 0; i < s.imageList.size(); i++ )
{
view = imread(s.imageList[i], IMREAD_COLOR);
if(view.empty())
continue;
remap(view, rview, map1, map2, INTER_LINEAR);
imshow("Image View", rview);
char c = (char)waitKey();
if( c == ESC_KEY || c == 'q' || c == 'Q' )
break;
}
}
//! [show_results]
return 0;
}
//! [compute_errors]
/**
 * @brief Computes the total RMS reprojection error and the per-view RMS errors.
 * @param objectPoints  3D board points, one vector per view.
 * @param imagePoints   Detected 2D points, one vector per view.
 * @param rvecs,tvecs   Per-view extrinsics from the calibration.
 * @param cameraMatrix,distCoeffs  Calibrated intrinsics.
 * @param perViewErrors Output: RMS error of each view (resized to match).
 * @param fisheye       Use the fisheye projection model instead of the pinhole one.
 * @return Overall RMS reprojection error; 0.0 when there are no points at all.
 */
static double computeReprojectionErrors( const vector<vector<Point3f> >& objectPoints,
const vector<vector<Point2f> >& imagePoints,
const vector<Mat>& rvecs, const vector<Mat>& tvecs,
const Mat& cameraMatrix , const Mat& distCoeffs,
vector<float>& perViewErrors, bool fisheye)
{
vector<Point2f> imagePoints2;
size_t totalPoints = 0;
double totalErr = 0, err;
perViewErrors.resize(objectPoints.size());
for(size_t i = 0; i < objectPoints.size(); ++i )
{
if (fisheye)
{
fisheye::projectPoints(objectPoints[i], imagePoints2, rvecs[i], tvecs[i], cameraMatrix,
distCoeffs);
}
else
{
projectPoints(objectPoints[i], rvecs[i], tvecs[i], cameraMatrix, distCoeffs, imagePoints2);
}
err = norm(imagePoints[i], imagePoints2, NORM_L2);
size_t n = objectPoints[i].size();
// Guard: an empty view would otherwise produce a NaN per-view error.
perViewErrors[i] = n ? (float) std::sqrt(err*err/n) : 0.f;
totalErr += err*err;
totalPoints += n;
}
// Guard: with no points at all the original 0.0/0 division yielded NaN.
if (totalPoints == 0)
return 0.0;
return std::sqrt(totalErr/totalPoints);
}
//! [compute_errors]
//! [board_corners]
/**
 * @brief Fills corners with the ideal 3D positions (z = 0 plane) of the
 * calibration target's features for the given pattern type.
 */
static void calcBoardCornerPositions(Size boardSize, float squareSize, vector<Point3f>& corners,
Settings::Pattern patternType /*= Settings::CHESSBOARD*/)
{
corners.clear();
if (patternType == Settings::ASYMMETRIC_CIRCLES_GRID)
{
// Staggered layout: every other row is shifted by one square.
for (int row = 0; row < boardSize.height; ++row)
for (int col = 0; col < boardSize.width; ++col)
corners.push_back(Point3f((2*col + row % 2)*squareSize, row*squareSize, 0));
}
else if (patternType == Settings::CHESSBOARD || patternType == Settings::CIRCLES_GRID)
{
// Regular rectangular grid of squares/circles.
for (int row = 0; row < boardSize.height; ++row)
for (int col = 0; col < boardSize.width; ++col)
corners.push_back(Point3f(col*squareSize, row*squareSize, 0));
}
// Any other pattern value leaves corners empty.
}
//! [board_corners]
/**
 * @brief Runs the intrinsic/extrinsic calibration over the collected image points.
 * Outputs cameraMatrix, distCoeffs, per-view rvecs/tvecs, the (possibly refined)
 * object points, per-view reprojection errors, and the total average error.
 * @return true when the resulting matrices pass checkRange().
 */
static bool runCalibration( Settings& s, Size& imageSize, Mat& cameraMatrix, Mat& distCoeffs,
vector<vector<Point2f> > imagePoints, vector<Mat>& rvecs, vector<Mat>& tvecs,
vector<float>& reprojErrs, double& totalAvgErr, vector<Point3f>& newObjPoints,
float grid_width, bool release_object)
{
//! [fixed_aspect]
cameraMatrix = Mat::eye(3, 3, CV_64F);
if( !s.useFisheye && s.flag & CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = s.aspectRatio;
//! [fixed_aspect]
// The fisheye model has 4 distortion coefficients, the pinhole model up to 8.
if (s.useFisheye) {
distCoeffs = Mat::zeros(4, 1, CV_64F);
} else {
distCoeffs = Mat::zeros(8, 1, CV_64F);
}
vector<vector<Point3f> > objectPoints(1);
calcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[0], s.calibrationPattern);
// Pin the top-right corner to the measured grid width (relevant when -d was given).
objectPoints[0][s.boardSize.width - 1].x = objectPoints[0][0].x + grid_width;
newObjPoints = objectPoints[0];
// Replicate the same board model for every captured view.
objectPoints.resize(imagePoints.size(),objectPoints[0]);
//Find intrinsic and extrinsic camera parameters
double rms;
if (s.useFisheye) {
// fisheye::calibrate returns extrinsics stacked in single Mats; split them per view.
Mat _rvecs, _tvecs;
rms = fisheye::calibrate(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs, _rvecs,
_tvecs, s.flag);
rvecs.reserve(_rvecs.rows);
tvecs.reserve(_tvecs.rows);
for(int i = 0; i < int(objectPoints.size()); i++){
rvecs.push_back(_rvecs.row(i));
tvecs.push_back(_tvecs.row(i));
}
} else {
// iFixedPoint >= 0 enables the "release object" method, which also refines
// the 3D board points (returned in newObjPoints).
int iFixedPoint = -1;
if (release_object)
iFixedPoint = s.boardSize.width - 1;
rms = calibrateCameraRO(objectPoints, imagePoints, imageSize, iFixedPoint,
cameraMatrix, distCoeffs, rvecs, tvecs, newObjPoints,
s.flag | CALIB_USE_LU);
}
if (release_object) {
cout << "New board corners: " << endl;
cout << newObjPoints[0] << endl;
cout << newObjPoints[s.boardSize.width - 1] << endl;
cout << newObjPoints[s.boardSize.width * (s.boardSize.height - 1)] << endl;
cout << newObjPoints.back() << endl;
}
cout << "Re-projection error reported by calibrateCamera: "<< rms << endl;
bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);
// Re-evaluate the error against the (possibly refined) board points.
objectPoints.clear();
objectPoints.resize(imagePoints.size(), newObjPoints);
totalAvgErr = computeReprojectionErrors(objectPoints, imagePoints, rvecs, tvecs, cameraMatrix,
distCoeffs, reprojErrs, s.useFisheye);
return ok;
}
// Print camera parameters to the output file
/**
 * @brief Writes the calibration results (intrinsics, errors, extrinsics,
 * image points, refined grid) to s.outputFileName as XML/YAML.
 * Which optional sections are written is governed by the Settings write* flags.
 */
static void saveCameraParams( Settings& s, Size& imageSize, Mat& cameraMatrix, Mat& distCoeffs,
const vector<Mat>& rvecs, const vector<Mat>& tvecs,
const vector<float>& reprojErrs, const vector<vector<Point2f> >& imagePoints,
double totalAvgErr, const vector<Point3f>& newObjPoints )
{
FileStorage fs( s.outputFileName, FileStorage::WRITE );
// Fix: a failed open used to silently discard every result below.
if (!fs.isOpened())
{
cerr << "Could not open the output file for writing: " << s.outputFileName << endl;
return;
}
time_t tm;
time( &tm );
struct tm *t2 = localtime( &tm );
char buf[1024];
// localtime may return NULL; fall back to an empty timestamp instead of UB.
if (t2)
strftime( buf, sizeof(buf), "%c", t2 );
else
buf[0] = '\0';
fs << "calibration_time" << buf;
if( !rvecs.empty() || !reprojErrs.empty() )
fs << "nr_of_frames" << (int)std::max(rvecs.size(), reprojErrs.size());
fs << "image_width" << imageSize.width;
fs << "image_height" << imageSize.height;
fs << "board_width" << s.boardSize.width;
fs << "board_height" << s.boardSize.height;
fs << "square_size" << s.squareSize;
if( !s.useFisheye && s.flag & CALIB_FIX_ASPECT_RATIO )
fs << "fix_aspect_ratio" << s.aspectRatio;
if (s.flag)
{
// Record the active calibration flags as a human-readable comment.
std::stringstream flagsStringStream;
if (s.useFisheye)
{
flagsStringStream << "flags:"
<< (s.flag & fisheye::CALIB_FIX_SKEW ? " +fix_skew" : "")
<< (s.flag & fisheye::CALIB_FIX_K1 ? " +fix_k1" : "")
<< (s.flag & fisheye::CALIB_FIX_K2 ? " +fix_k2" : "")
<< (s.flag & fisheye::CALIB_FIX_K3 ? " +fix_k3" : "")
<< (s.flag & fisheye::CALIB_FIX_K4 ? " +fix_k4" : "")
<< (s.flag & fisheye::CALIB_RECOMPUTE_EXTRINSIC ? " +recompute_extrinsic" : "");
}
else
{
flagsStringStream << "flags:"
<< (s.flag & CALIB_USE_INTRINSIC_GUESS ? " +use_intrinsic_guess" : "")
<< (s.flag & CALIB_FIX_ASPECT_RATIO ? " +fix_aspectRatio" : "")
<< (s.flag & CALIB_FIX_PRINCIPAL_POINT ? " +fix_principal_point" : "")
<< (s.flag & CALIB_ZERO_TANGENT_DIST ? " +zero_tangent_dist" : "")
<< (s.flag & CALIB_FIX_K1 ? " +fix_k1" : "")
<< (s.flag & CALIB_FIX_K2 ? " +fix_k2" : "")
<< (s.flag & CALIB_FIX_K3 ? " +fix_k3" : "")
<< (s.flag & CALIB_FIX_K4 ? " +fix_k4" : "")
<< (s.flag & CALIB_FIX_K5 ? " +fix_k5" : "");
}
fs.writeComment(flagsStringStream.str());
}
fs << "flags" << s.flag;
fs << "fisheye_model" << s.useFisheye;
fs << "camera_matrix" << cameraMatrix;
fs << "distortion_coefficients" << distCoeffs;
fs << "avg_reprojection_error" << totalAvgErr;
if (s.writeExtrinsics && !reprojErrs.empty())
fs << "per_view_reprojection_errors" << Mat(reprojErrs);
if(s.writeExtrinsics && !rvecs.empty() && !tvecs.empty() )
{
CV_Assert(rvecs[0].type() == tvecs[0].type());
// Pack each view's rotation + translation into one 1x6 row of bigmat.
Mat bigmat((int)rvecs.size(), 6, CV_MAKETYPE(rvecs[0].type(), 1));
bool needReshapeR = rvecs[0].depth() != 1;
bool needReshapeT = tvecs[0].depth() != 1;
for( size_t i = 0; i < rvecs.size(); i++ )
{
Mat r = bigmat(Range(int(i), int(i+1)), Range(0,3));
Mat t = bigmat(Range(int(i), int(i+1)), Range(3,6));
if(needReshapeR)
rvecs[i].reshape(1, 1).copyTo(r);
else
{
//*.t() is MatExpr (not Mat) so we can use assignment operator
CV_Assert(rvecs[i].rows == 3 && rvecs[i].cols == 1);
r = rvecs[i].t();
}
if(needReshapeT)
tvecs[i].reshape(1, 1).copyTo(t);
else
{
CV_Assert(tvecs[i].rows == 3 && tvecs[i].cols == 1);
t = tvecs[i].t();
}
}
fs.writeComment("a set of 6-tuples (rotation vector + translation vector) for each view");
fs << "extrinsic_parameters" << bigmat;
}
if(s.writePoints && !imagePoints.empty() )
{
// One row per view, one 2-channel column per detected point.
Mat imagePtMat((int)imagePoints.size(), (int)imagePoints[0].size(), CV_32FC2);
for( size_t i = 0; i < imagePoints.size(); i++ )
{
Mat r = imagePtMat.row(int(i)).reshape(2, imagePtMat.cols);
Mat imgpti(imagePoints[i]);
imgpti.copyTo(r);
}
fs << "image_points" << imagePtMat;
}
if( s.writeGrid && !newObjPoints.empty() )
{
fs << "grid_points" << newObjPoints;
}
}
//! [run_and_save]
bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,
vector<vector<Point2f> > imagePoints, float grid_width, bool release_object)
{
    // Calibrate, report the outcome on stdout, and persist the parameters on success.
    vector<Mat> rvecs, tvecs;
    vector<float> reprojErrs;
    vector<Point3f> newObjPoints;
    double totalAvgErr = 0;

    const bool ok = runCalibration(s, imageSize, cameraMatrix, distCoeffs, imagePoints, rvecs, tvecs, reprojErrs,
                                   totalAvgErr, newObjPoints, grid_width, release_object);
    cout << (ok ? "Calibration succeeded" : "Calibration failed")
         << ". avg re projection error = " << totalAvgErr << endl;
    if (ok)
        saveCameraParams(s, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, reprojErrs, imagePoints,
                         totalAvgErr, newObjPoints);
    return ok;
}
//! [run_and_save]

View File

@ -0,0 +1,59 @@
<?xml version="1.0"?>
<opencv_storage>
<Settings>
<!-- Number of inner corners per a item row and column. (square, circle) -->
<BoardSize_Width> 9</BoardSize_Width>
<BoardSize_Height>6</BoardSize_Height>
<!-- The size of a square in some user defined metric system (pixel, millimeter)-->
<Square_Size>50</Square_Size>
<!-- The type of input used for camera calibration. One of: CHESSBOARD CIRCLES_GRID ASYMMETRIC_CIRCLES_GRID -->
<Calibrate_Pattern>"CHESSBOARD"</Calibrate_Pattern>
<!-- The input to use for calibration.
To use an input camera -> give the ID of the camera, like "1"
To use an input video -> give the path of the input video, like "/tmp/x.avi"
To use an image list -> give the path to the XML or YAML file containing the list of the images, like "/tmp/circles_list.xml"
-->
<Input>"images/CameraCalibration/VID5/VID5.xml"</Input>
<!-- If true (non-zero) we flip the input images around the horizontal axis.-->
<Input_FlipAroundHorizontalAxis>0</Input_FlipAroundHorizontalAxis>
<!-- Time delay between frames in case of camera. -->
<Input_Delay>100</Input_Delay>
<!-- How many frames to use, for calibration. -->
<Calibrate_NrOfFrameToUse>25</Calibrate_NrOfFrameToUse>
<!-- Consider only fy as a free parameter; the ratio fx/fy stays the same as in the input cameraMatrix.
     0 - false, non-zero - true.-->
<Calibrate_FixAspectRatio> 1 </Calibrate_FixAspectRatio>
<!-- If true (non-zero) tangential distortion coefficients are set to zeros and stay zero.-->
<Calibrate_AssumeZeroTangentialDistortion>1</Calibrate_AssumeZeroTangentialDistortion>
<!-- If true (non-zero) the principal point is not changed during the global optimization.-->
<Calibrate_FixPrincipalPointAtTheCenter> 1 </Calibrate_FixPrincipalPointAtTheCenter>
<!-- The name of the output log file. -->
<Write_outputFileName>"out_camera_data.xml"</Write_outputFileName>
<!-- If true (non-zero) we write to the output file the feature points.-->
<Write_DetectedFeaturePoints>1</Write_DetectedFeaturePoints>
<!-- If true (non-zero) we write to the output file the extrinsic camera parameters.-->
<Write_extrinsicParameters>1</Write_extrinsicParameters>
<!-- If true (non-zero) we write to the output file the refined 3D target grid points.-->
<Write_gridPoints>1</Write_gridPoints>
<!-- If true (non-zero) we show after calibration the undistorted images.-->
<Show_UndistortedImage>1</Show_UndistortedImage>
<!-- If true (non-zero) the fisheye camera model will be used.-->
<Calibrate_UseFisheyeModel>0</Calibrate_UseFisheyeModel>
<!-- If true (non-zero) distortion coefficient k1 will be fixed at zero.-->
<Fix_K1>0</Fix_K1>
<!-- If true (non-zero) distortion coefficient k2 will be fixed at zero.-->
<Fix_K2>0</Fix_K2>
<!-- If true (non-zero) distortion coefficient k3 will be fixed at zero.-->
<Fix_K3>0</Fix_K3>
<!-- If true (non-zero) distortion coefficient k4 will be fixed at zero.-->
<Fix_K4>1</Fix_K4>
<!-- If true (non-zero) distortion coefficient k5 will be fixed at zero.-->
<Fix_K5>1</Fix_K5>
</Settings>
</opencv_storage>

View File

@ -0,0 +1,352 @@
%YAML:1.0
calibration_Time: "08/19/11 20:44:38"
nrOfFrames: 8
image_Width: 640
image_Height: 480
board_Width: 9
board_Height: 6
square_Size: 50.
FixAspectRatio: 1.
# flags: +fix_aspectRatio +fix_principal_point +zero_tangent_dist
flagValue: 14
Camera_Matrix: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 6.5746697810243404e+002, 0., 3.1950000000000000e+002, 0.,
6.5746697810243404e+002, 2.3950000000000000e+002, 0., 0., 1. ]
Distortion_Coefficients: !!opencv-matrix
rows: 5
cols: 1
dt: d
data: [ -4.1802327018241026e-001, 5.0715243805833121e-001, 0., 0.,
-5.7843596847939704e-001 ]
Avg_Reprojection_Error: 3.8441346462381665e-001
Per_View_Reprojection_Errors: !!opencv-matrix
rows: 8
cols: 1
dt: f
data: [ 5.04357755e-001, 4.85754758e-001, 3.99563968e-001,
4.13829178e-001, 3.53570908e-001, 3.21116358e-001,
2.74473161e-001, 2.39761785e-001 ]
# a set of 6-tuples (rotation vector + translation vector) for each view
Extrinsic_Parameters: !!opencv-matrix
rows: 8
cols: 6
dt: d
data: [ -7.8704123655486097e-002, -1.5922384772614945e-001,
3.1166227207451498e+000, 2.4224388101960471e+002,
1.1795590397660339e+002, 6.2576484126093249e+002,
-1.4117480285164308e-001, -1.7917415443804836e-002,
3.1333182268743949e+000, 2.5943034781849354e+002,
1.4039780562976958e+002, 6.3848706527260981e+002,
7.2230525186138789e-002, -7.5445981266787754e-002,
1.5712860749221762e+000, 1.7426560451795339e+002,
-1.9309240362258871e+002, 7.0891416556762647e+002,
2.0367310600105853e-002, 6.8565520026996951e-002,
-5.4313033031644169e-004, -2.0146314940404827e+002,
-1.3305643514116997e+002, 7.4933554744027231e+002,
-3.4468530027734055e-002, 2.1921265175331925e-002,
-1.5731053528054522e+000, -1.1155718744299284e+002,
2.0307615364261443e+002, 8.4915903914333899e+002,
3.7425562109513817e-002, 7.4883169379022230e-002,
-3.6031632305130512e-002, -2.0094505419395196e+002,
-1.1627359108310560e+002, 9.2021583518760133e+002,
6.8105689976949157e-002, 6.4426739692440949e-002,
-7.0967130057087435e-002, -1.9233852871740035e+002,
-1.0334652096641923e+002, 1.0755293563503658e+003,
-5.8017546499862287e-002, -1.6909812666033443e-003,
-1.5876137659782963e+000, -1.0242234847115104e+002,
2.2583088401423066e+002, 1.1125972190244058e+003 ]
Image_points: !!opencv-matrix
rows: 8
cols: 54
dt: "2f"
data: [ 5.58494690e+002, 3.55650085e+002, 5.13314697e+002,
3.59107666e+002, 4.65728333e+002, 3.62133911e+002,
4.15701111e+002, 3.65026459e+002, 3.64399353e+002,
3.67339203e+002, 3.12101196e+002, 3.69211914e+002,
2.59208405e+002, 3.70413513e+002, 2.07456192e+002,
3.71175995e+002, 1.56619507e+002, 3.72176544e+002,
5.60868713e+002, 3.08104828e+002, 5.15191772e+002,
3.10485626e+002, 4.67032959e+002, 3.12660004e+002,
4.16112152e+002, 3.14887177e+002, 3.64010712e+002,
3.16825775e+002, 3.10712372e+002, 3.18640808e+002,
2.56853943e+002, 3.20017365e+002, 2.04168182e+002,
3.20908417e+002, 1.52469528e+002, 3.22105377e+002,
5.62328369e+002, 2.58646881e+002, 5.16396301e+002,
2.59919281e+002, 4.67907654e+002, 2.61257874e+002,
4.16463440e+002, 2.62675537e+002, 3.63546570e+002,
2.64064117e+002, 3.09528137e+002, 2.65489990e+002,
2.54765533e+002, 2.66862030e+002, 2.01299225e+002,
2.67997345e+002, 1.48913437e+002, 2.69627167e+002,
5.63098022e+002, 2.08423523e+002, 5.16782654e+002,
2.08424667e+002, 4.68059296e+002, 2.08661697e+002,
4.16216431e+002, 2.09268982e+002, 3.62888763e+002,
2.10013397e+002, 3.08458557e+002, 2.11074738e+002,
2.53267990e+002, 2.12496582e+002, 1.99121384e+002,
2.14005814e+002, 1.46551376e+002, 2.15851318e+002,
5.62997437e+002, 1.57966492e+002, 5.16406494e+002,
1.56580688e+002, 4.67334900e+002, 1.55756500e+002,
4.15378235e+002, 1.55492874e+002, 3.62096710e+002,
1.55498734e+002, 3.07522827e+002, 1.56133240e+002,
2.52235214e+002, 1.57516571e+002, 1.97876328e+002,
1.59318787e+002, 1.45078247e+002, 1.61638428e+002,
5.62097168e+002, 1.07469536e+002, 5.15766846e+002,
1.04902527e+002, 4.66562866e+002, 1.03045807e+002,
4.14651459e+002, 1.01924713e+002, 3.61240662e+002,
1.01357826e+002, 3.06746613e+002, 1.01582802e+002,
2.51568024e+002, 1.02920105e+002, 1.97343307e+002,
1.04941299e+002, 1.44756821e+002, 1.07737488e+002,
5.68062500e+002, 3.73591125e+002, 5.25272644e+002,
3.77019318e+002, 4.79870941e+002, 3.80086578e+002,
4.31823730e+002, 3.83036652e+002, 3.81995758e+002,
3.85271759e+002, 3.30728729e+002, 3.86998779e+002,
2.78071167e+002, 3.88151031e+002, 2.26231567e+002,
3.88669586e+002, 1.74855331e+002, 3.89197998e+002,
5.69792542e+002, 3.27097382e+002, 5.26866028e+002,
3.29362366e+002, 4.81278229e+002, 3.31532928e+002,
4.32783203e+002, 3.33552185e+002, 3.82408234e+002,
3.35186554e+002, 3.30427399e+002, 3.36404053e+002,
2.77138519e+002, 3.37450958e+002, 2.24525131e+002,
3.37957092e+002, 1.72285507e+002, 3.38503540e+002,
5.70942749e+002, 2.79243713e+002, 5.27789307e+002,
2.80073486e+002, 4.82146576e+002, 2.81226410e+002,
4.33247375e+002, 2.82237427e+002, 3.82503662e+002,
2.83062286e+002, 3.30138885e+002, 2.83794434e+002,
2.76433228e+002, 2.84549286e+002, 2.23158783e+002,
2.84981049e+002, 1.70520218e+002, 2.85720886e+002,
5.71001953e+002, 2.30928329e+002, 5.27846863e+002,
2.30519928e+002, 4.82114563e+002, 2.30268906e+002,
4.33114563e+002, 2.30243515e+002, 3.82384857e+002,
2.30311340e+002, 3.29870392e+002, 2.30454620e+002,
2.76012634e+002, 2.30882156e+002, 2.22529434e+002,
2.31355362e+002, 1.69742065e+002, 2.32063004e+002,
5.70199036e+002, 1.82609772e+002, 5.27030884e+002,
1.80973267e+002, 4.81193573e+002, 1.79573792e+002,
4.32409821e+002, 1.78475616e+002, 3.81855530e+002,
1.77680283e+002, 3.29641937e+002, 1.77092087e+002,
2.75895782e+002, 1.77155502e+002, 2.22438889e+002,
1.77605667e+002, 1.69884583e+002, 1.78365585e+002,
5.69026245e+002, 1.34654831e+002, 5.26171570e+002,
1.31798691e+002, 4.80653503e+002, 1.29171509e+002,
4.31869904e+002, 1.27280067e+002, 3.81419739e+002,
1.25591202e+002, 3.29466644e+002, 1.24407089e+002,
2.76225342e+002, 1.24174736e+002, 2.23024109e+002,
1.24463333e+002, 1.70838898e+002, 1.25398903e+002,
4.73812897e+002, 6.94673386e+001, 4.74245453e+002,
1.12387466e+002, 4.74243347e+002, 1.56034164e+002,
4.73834778e+002, 2.00523651e+002, 4.72891602e+002,
2.44457306e+002, 4.71412811e+002, 2.87981171e+002,
4.69708252e+002, 3.30783173e+002, 4.67558228e+002,
3.71818420e+002, 4.65495667e+002, 4.11996979e+002,
4.31027649e+002, 6.75546722e+001, 4.31269440e+002,
1.10960022e+002, 4.31185486e+002, 1.55113556e+002,
4.30830139e+002, 2.00061066e+002, 4.30168427e+002,
2.44456863e+002, 4.29107544e+002, 2.88479645e+002,
4.27829071e+002, 3.31813507e+002, 4.26131653e+002,
3.73071228e+002, 4.24718811e+002, 4.13476563e+002,
3.86868805e+002, 6.61982269e+001, 3.86895416e+002,
1.09904411e+002, 3.86690216e+002, 1.54396423e+002,
3.86368591e+002, 1.99800369e+002, 3.85792206e+002,
2.44538574e+002, 3.85117279e+002, 2.88826447e+002,
3.84405273e+002, 3.32408020e+002, 3.83303772e+002,
3.74074097e+002, 3.82477448e+002, 4.14638977e+002,
3.41941437e+002, 6.54462357e+001, 3.41628204e+002,
1.09383698e+002, 3.41402344e+002, 1.54105545e+002,
3.41168854e+002, 1.99655045e+002, 3.40816681e+002,
2.44469910e+002, 3.40516937e+002, 2.88975800e+002,
3.40365662e+002, 3.32670990e+002, 3.39935211e+002,
3.74465759e+002, 3.39847626e+002, 4.14742279e+002,
2.96694000e+002, 6.56859589e+001, 2.96075226e+002,
1.09505333e+002, 2.95704895e+002, 1.54202652e+002,
2.95653107e+002, 1.99734131e+002, 2.95589661e+002,
2.44549530e+002, 2.95629547e+002, 2.88889496e+002,
2.96138733e+002, 3.32610931e+002, 2.96520905e+002,
3.74608551e+002, 2.96987091e+002, 4.14774902e+002,
2.51414978e+002, 6.65755463e+001, 2.50681854e+002,
1.10189331e+002, 2.50183380e+002, 1.54658005e+002,
2.50331161e+002, 2.00073761e+002, 2.50590790e+002,
2.44719513e+002, 2.51083817e+002, 2.88868286e+002,
2.52134262e+002, 3.32266937e+002, 2.53097809e+002,
3.74022491e+002, 2.54404007e+002, 4.14018066e+002,
1.49524078e+002, 1.27699501e+002, 1.89511658e+002,
1.25816605e+002, 2.31050888e+002, 1.24260918e+002,
2.74076721e+002, 1.23023209e+002, 3.17643005e+002,
1.22288109e+002, 3.61785889e+002, 1.22105164e+002,
4.06142670e+002, 1.22401566e+002, 4.49623962e+002,
1.23246025e+002, 4.92677216e+002, 1.24087708e+002,
1.48706085e+002, 1.69077423e+002, 1.88827805e+002,
1.67750443e+002, 2.30439865e+002, 1.66769333e+002,
2.73830933e+002, 1.65871170e+002, 3.17596741e+002,
1.65410919e+002, 3.61983459e+002, 1.65327866e+002,
4.06748322e+002, 1.65463974e+002, 4.50450226e+002,
1.66126526e+002, 4.93614655e+002, 1.66970413e+002,
1.48312607e+002, 2.11499451e+002, 1.88574097e+002,
2.10860214e+002, 2.30130676e+002, 2.10261612e+002,
2.73557709e+002, 2.09837143e+002, 3.17542572e+002,
2.09633057e+002, 3.62091248e+002, 2.09732620e+002,
4.06934570e+002, 2.09926758e+002, 4.50914612e+002,
2.10320221e+002, 4.94044495e+002, 2.10900925e+002,
1.48613831e+002, 2.53997177e+002, 1.88797791e+002,
2.53912842e+002, 2.30240204e+002, 2.53975067e+002,
2.73746704e+002, 2.54010208e+002, 3.17718262e+002,
2.54106003e+002, 3.62188965e+002, 2.54205475e+002,
4.06908783e+002, 2.54317505e+002, 4.50824951e+002,
2.54539490e+002, 4.93825714e+002, 2.54753876e+002,
1.49541687e+002, 2.96404175e+002, 1.89357727e+002,
2.97117523e+002, 2.30807007e+002, 2.97805603e+002,
2.74325470e+002, 2.97966522e+002, 3.18042206e+002,
2.98304535e+002, 3.62105774e+002, 2.98552643e+002,
4.06672272e+002, 2.98572418e+002, 4.50363068e+002,
2.98569550e+002, 4.93109894e+002, 2.98516205e+002,
1.50883698e+002, 3.38493195e+002, 1.90633621e+002,
3.39862610e+002, 2.31920990e+002, 3.40869415e+002,
2.74971252e+002, 3.41453766e+002, 3.18235229e+002,
3.41952637e+002, 3.62063477e+002, 3.42314026e+002,
4.06098938e+002, 3.42221802e+002, 4.49477386e+002,
3.42063812e+002, 4.91864716e+002, 3.41727600e+002,
2.36129852e+002, 3.92798004e+002, 2.34999939e+002,
3.56118683e+002, 2.34376099e+002, 3.18607025e+002,
2.33822159e+002, 2.80400696e+002, 2.33565445e+002,
2.42213104e+002, 2.33583069e+002, 2.03937286e+002,
2.34028824e+002, 1.65756607e+002, 2.34613373e+002,
1.28586639e+002, 2.35190308e+002, 9.18279037e+001,
2.73031616e+002, 3.93267242e+002, 2.72295166e+002,
3.56342743e+002, 2.71799347e+002, 3.18847412e+002,
2.71418854e+002, 2.80287872e+002, 2.71161469e+002,
2.41881134e+002, 2.71248962e+002, 2.03348145e+002,
2.71379303e+002, 1.64895874e+002, 2.71946045e+002,
1.27450935e+002, 2.72322418e+002, 9.06900787e+001,
3.10670715e+002, 3.93568848e+002, 3.10389160e+002,
3.56545959e+002, 3.10084625e+002, 3.18814514e+002,
3.09801544e+002, 2.80242737e+002, 3.09678711e+002,
2.41574814e+002, 3.09779663e+002, 2.02989838e+002,
3.09842712e+002, 1.64338043e+002, 3.10076782e+002,
1.26870911e+002, 3.10243286e+002, 8.98413315e+001,
3.48618134e+002, 3.93563202e+002, 3.48617065e+002,
3.56472382e+002, 3.48608795e+002, 3.18855621e+002,
3.48544556e+002, 2.80011017e+002, 3.48556396e+002,
2.41388168e+002, 3.48585388e+002, 2.02692429e+002,
3.48435089e+002, 1.64099731e+002, 3.48442902e+002,
1.26549957e+002, 3.48338043e+002, 8.98002014e+001,
3.86625610e+002, 3.93188599e+002, 3.87047729e+002,
3.56377594e+002, 3.87306274e+002, 3.18714752e+002,
3.87337799e+002, 2.79868896e+002, 3.87402740e+002,
2.41228760e+002, 3.87295166e+002, 2.02695313e+002,
3.87030273e+002, 1.64203415e+002, 3.86741211e+002,
1.26606262e+002, 3.86337311e+002, 8.99655075e+001,
4.24534088e+002, 3.92702545e+002, 4.25310822e+002,
3.55900452e+002, 4.25869019e+002, 3.18160614e+002,
4.25909790e+002, 2.79615753e+002, 4.25977295e+002,
2.41165100e+002, 4.25826477e+002, 2.02876389e+002,
4.25331665e+002, 1.64527618e+002, 4.24775787e+002,
1.27097328e+002, 4.23985138e+002, 9.08176651e+001,
1.79142670e+002, 1.58573654e+002, 2.12791580e+002,
1.56291031e+002, 2.47140106e+002, 1.54265656e+002,
2.82607300e+002, 1.52373688e+002, 3.18175507e+002,
1.50692184e+002, 3.54185852e+002, 1.49404175e+002,
3.90455200e+002, 1.48229370e+002, 4.26106689e+002,
1.47507843e+002, 4.61576141e+002, 1.46712479e+002,
1.80388336e+002, 1.93027603e+002, 2.14026459e+002,
1.91128204e+002, 2.48376541e+002, 1.89414978e+002,
2.83795807e+002, 1.87720856e+002, 3.19472473e+002,
1.86192383e+002, 3.55483826e+002, 1.84929199e+002,
3.91970764e+002, 1.83747040e+002, 4.27654572e+002,
1.82931534e+002, 4.63295227e+002, 1.81977234e+002,
1.81914261e+002, 2.27955460e+002, 2.15291260e+002,
2.26512482e+002, 2.49628265e+002, 2.25067520e+002,
2.85066406e+002, 2.23593185e+002, 3.20846680e+002,
2.22337708e+002, 3.56862885e+002, 2.21191040e+002,
3.93279907e+002, 2.19905640e+002, 4.29202271e+002,
2.18870361e+002, 4.64728424e+002, 2.17972977e+002,
1.83496948e+002, 2.62963226e+002, 2.16930527e+002,
2.61755219e+002, 2.51115829e+002, 2.60777222e+002,
2.86553406e+002, 2.59500336e+002, 3.22299896e+002,
2.58380737e+002, 3.58307648e+002, 2.57236694e+002,
3.94551819e+002, 2.56009125e+002, 4.30358948e+002,
2.54925797e+002, 4.65684998e+002, 2.54021484e+002,
1.85461685e+002, 2.97687378e+002, 2.18712234e+002,
2.96999207e+002, 2.52770218e+002, 2.96270752e+002,
2.88213776e+002, 2.95168213e+002, 3.23698334e+002,
2.94233032e+002, 3.59477722e+002, 2.93170715e+002,
3.95647766e+002, 2.91897400e+002, 4.31309845e+002,
2.90856995e+002, 4.66494110e+002, 2.89726410e+002,
1.87661331e+002, 3.32186188e+002, 2.20767746e+002,
3.31906250e+002, 2.54839096e+002, 3.31398651e+002,
2.89963745e+002, 3.30524139e+002, 3.25207642e+002,
3.29771820e+002, 3.60686035e+002, 3.28762695e+002,
3.96576447e+002, 3.27542206e+002, 4.31994415e+002,
3.26294189e+002, 4.66894653e+002, 3.24949921e+002,
2.03543015e+002, 1.77473557e+002, 2.32777847e+002,
1.74712509e+002, 2.62628723e+002, 1.72331970e+002,
2.93045898e+002, 1.69686768e+002, 3.23527618e+002,
1.67496246e+002, 3.54206787e+002, 1.65446075e+002,
3.85180176e+002, 1.63360580e+002, 4.15484253e+002,
1.61536423e+002, 4.45720947e+002, 1.59896164e+002,
2.05864395e+002, 2.07228104e+002, 2.35242096e+002,
2.04699326e+002, 2.64853973e+002, 2.02407455e+002,
2.95353882e+002, 1.99972321e+002, 3.25811890e+002,
1.97671921e+002, 3.56471252e+002, 1.95763168e+002,
3.87280548e+002, 1.93597977e+002, 4.17615814e+002,
1.91867371e+002, 4.48018677e+002, 1.90067413e+002,
2.08421249e+002, 2.37166977e+002, 2.37513824e+002,
2.34982773e+002, 2.67261261e+002, 2.32802841e+002,
2.97555817e+002, 2.30466080e+002, 3.28118103e+002,
2.28462463e+002, 3.58699707e+002, 2.26417038e+002,
3.89468842e+002, 2.24356827e+002, 4.19895996e+002,
2.22421921e+002, 4.50077850e+002, 2.20683517e+002,
2.11095444e+002, 2.66940186e+002, 2.40241348e+002,
2.64970093e+002, 2.69563019e+002, 2.63153290e+002,
2.99863464e+002, 2.60983551e+002, 3.30282440e+002,
2.58911560e+002, 3.60724792e+002, 2.56935730e+002,
3.91487915e+002, 2.54799423e+002, 4.21789093e+002,
2.52929688e+002, 4.51818481e+002, 2.51059357e+002,
2.13829117e+002, 2.96591217e+002, 2.42742859e+002,
2.94884583e+002, 2.72209076e+002, 2.93215668e+002,
3.02402985e+002, 2.91230591e+002, 3.32536072e+002,
2.89165192e+002, 3.62860901e+002, 2.87413605e+002,
3.93481842e+002, 2.85199615e+002, 4.23728851e+002,
2.83277496e+002, 4.53453094e+002, 2.81229309e+002,
2.16799316e+002, 3.25975220e+002, 2.45605515e+002,
3.24619904e+002, 2.74777344e+002, 3.22958679e+002,
3.04762817e+002, 3.21008057e+002, 3.34797150e+002,
3.19291443e+002, 3.65005798e+002, 3.17295044e+002,
3.95311981e+002, 3.15296021e+002, 4.25312592e+002,
3.13086945e+002, 4.54931152e+002, 3.11027130e+002,
2.60550232e+002, 3.70739563e+002, 2.59674011e+002,
3.42115936e+002, 2.58910492e+002, 3.13278015e+002,
2.58195618e+002, 2.84013580e+002, 2.57727173e+002,
2.55017166e+002, 2.57326263e+002, 2.25760986e+002,
2.57096619e+002, 1.96577972e+002, 2.57031860e+002,
1.68026199e+002, 2.56873383e+002, 1.39550308e+002,
2.89019318e+002, 3.70481354e+002, 2.88355560e+002,
3.41833252e+002, 2.87601471e+002, 3.12872925e+002,
2.87057190e+002, 2.83485535e+002, 2.86599762e+002,
2.54255096e+002, 2.86174438e+002, 2.25023285e+002,
2.85775940e+002, 1.95715347e+002, 2.85577087e+002,
1.66989502e+002, 2.85395477e+002, 1.38597382e+002,
3.18072754e+002, 3.70118317e+002, 3.17432709e+002,
3.41398743e+002, 3.16917267e+002, 3.12476044e+002,
3.16284363e+002, 2.83001587e+002, 3.15799072e+002,
2.53725845e+002, 3.15411957e+002, 2.24337708e+002,
3.15070374e+002, 1.95034119e+002, 3.14736847e+002,
1.66195313e+002, 3.14439789e+002, 1.37797058e+002,
3.47083588e+002, 3.69678101e+002, 3.46717987e+002,
3.40949524e+002, 3.46185303e+002, 3.12009857e+002,
3.45728088e+002, 2.82454071e+002, 3.45226624e+002,
2.53109863e+002, 3.44883606e+002, 2.23839539e+002,
3.44373535e+002, 1.94399933e+002, 3.43879852e+002,
1.65690643e+002, 3.43438629e+002, 1.37252930e+002,
3.76341522e+002, 3.68972321e+002, 3.76086884e+002,
3.40412842e+002, 3.75708893e+002, 3.11398376e+002,
3.75143494e+002, 2.81901520e+002, 3.74762970e+002,
2.52577988e+002, 3.74223969e+002, 2.23348221e+002,
3.73600891e+002, 1.93979538e+002, 3.72983917e+002,
1.65201294e+002, 3.72517273e+002, 1.36871033e+002,
4.05512115e+002, 3.68243225e+002, 4.05366333e+002,
3.39678650e+002, 4.05090027e+002, 3.10679108e+002,
4.04612366e+002, 2.81203522e+002, 4.04152649e+002,
2.52051605e+002, 4.03539703e+002, 2.22930420e+002,
4.02903351e+002, 1.93625381e+002, 4.02272827e+002,
1.65004440e+002, 4.01353333e+002, 1.36796814e+002 ]

View File

@ -0,0 +1,21 @@
# Build the real-time pose estimation tutorial samples
# (pnp_registration and pnp_detection) from a shared helper library.
set(sample_dir ${CMAKE_CURRENT_SOURCE_DIR}/tutorial_code/calib3d/real_time_pose_estimation/src/)
# Common prefix for the generated executable names.
set(target example_tutorial_)
# Helper sources shared by both executables.
set(sample_pnplib
${sample_dir}CsvReader.cpp
${sample_dir}CsvWriter.cpp
${sample_dir}ModelRegistration.cpp
${sample_dir}Mesh.cpp
${sample_dir}Model.cpp
${sample_dir}PnPProblem.cpp
${sample_dir}Utils.cpp
${sample_dir}RobustMatcher.cpp
)
# Make the required OpenCV module headers visible to these targets.
ocv_include_modules_recurse(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})
# One executable registers the model, the other detects/tracks it.
add_executable( ${target}pnp_registration ${sample_dir}main_registration.cpp ${sample_pnplib} )
add_executable( ${target}pnp_detection ${sample_dir}main_detection.cpp ${sample_pnplib} )
ocv_target_link_libraries(${target}pnp_registration PRIVATE ${OPENCV_LINKER_LIBS} ${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})
ocv_target_link_libraries(${target}pnp_detection PRIVATE ${OPENCV_LINKER_LIBS} ${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})

View File

@ -0,0 +1,31 @@
ply
format ascii 1.0
comment made by anonymous
comment this file is a cube
element vertex 8
property float32 x
property float32 y
property float32 z
element face 12
property list uint8 int32 vertex_index
end_header
0 0 0
0 25.8 0
18.9 0 0
18.9 25.8 0
0 0 7.5
0 25.8 7.5
18.9 0 7.5
18.9 25.8 7.5
3 5 1 0
3 5 4 0
3 4 0 2
3 4 6 2
3 7 5 4
3 7 6 4
3 3 2 1
3 1 2 0
3 5 7 1
3 7 1 3
3 7 6 3
3 6 3 2

Binary file not shown.

After

Width:  |  Height:  |  Size: 102 KiB

View File

@ -0,0 +1,79 @@
#include "CsvReader.h"

#include <string>  // std::stof (float vertex-coordinate parsing in readPLY)
/** Construct a reader over the file at @p path; fields are split on @p separator. */
CsvReader::CsvReader(const string &path, char separator)
{
    _separator = separator;
    _file.open(path.c_str(), ifstream::in);
}
/*
 * Read a plain text file with .ply format and fill the given containers.
 *
 * @param list_vertex    - Output: mesh vertices (x, y, z)
 * @param list_triangles - Output: mesh faces as triplets of vertex indices
 */
void CsvReader::readPLY(vector<Point3f> &list_vertex, vector<vector<int> > &list_triangles)
{
    std::string line, tmp_str, n;
    int num_vertex = 0, num_triangles = 0;
    int count = 0;
    bool end_header = false;
    bool end_vertex = false;

    // Read the whole *.ply file
    while (getline(_file, line)) {
        stringstream liness(line);

        // read header: pick up vertex/face counts and detect "end_header"
        if(!end_header)
        {
            getline(liness, tmp_str, _separator);
            if( tmp_str == "element" )
            {
                getline(liness, tmp_str, _separator);
                getline(liness, n);
                if(tmp_str == "vertex") num_vertex = StringToInt(n);
                if(tmp_str == "face") num_triangles = StringToInt(n);
            }
            if(tmp_str == "end_header") end_header = true;
        }
        // read file content
        else if(end_header)
        {
            // read vertex and add into 'list_vertex'
            if(!end_vertex && count < num_vertex)
            {
                string x, y, z;
                getline(liness, x, _separator);
                getline(liness, y, _separator);
                getline(liness, z);

                // Coordinates in the bundled .ply files are floating point
                // (e.g. 25.8), so parse them as floats; the previous integer
                // parse silently truncated the fractional part.
                cv::Point3f tmp_p;
                tmp_p.x = std::stof(x);
                tmp_p.y = std::stof(y);
                tmp_p.z = std::stof(z);
                list_vertex.push_back(tmp_p);

                count++;
                if(count == num_vertex)
                {
                    count = 0;
                    end_vertex = !end_vertex;
                }
            }
            // read faces and add into 'list_triangles'
            else if(end_vertex && count < num_triangles)
            {
                string num_pts_per_face, id0, id1, id2;
                getline(liness, num_pts_per_face, _separator);
                getline(liness, id0, _separator);
                getline(liness, id1, _separator);
                getline(liness, id2);

                std::vector<int> tmp_triangle(3);
                tmp_triangle[0] = StringToInt(id0);
                tmp_triangle[1] = StringToInt(id1);
                tmp_triangle[2] = StringToInt(id2);
                list_triangles.push_back(tmp_triangle);

                count++;
            }
        }
    }
}

View File

@ -0,0 +1,40 @@
/*
 * CsvReader.h - minimal reader for the space-separated *.ply mesh files
 * used by the real-time pose estimation tutorial.
 */
#ifndef CSVREADER_H
#define CSVREADER_H
#include <iostream>
#include <fstream>
#include <opencv2/core/core.hpp>
#include "Utils.h"
using namespace std;
using namespace cv;
class CsvReader {
public:
/**
* The default constructor of the CSV reader Class.
* The default separator is ' ' (empty space)
*
* @param path - The path of the file to read
* @param separator - The separator character between words per line
* @return
*/
CsvReader(const string &path, char separator = ' ');
/**
* Read a plain text file with .ply format
*
* @param list_vertex - The container of the vertices list of the mesh
* @param list_triangles - The container of the triangles list of the mesh
* @return
*/
void readPLY(vector<Point3f> &list_vertex, vector<vector<int> > &list_triangles);
private:
/** The current stream file for the reader */
ifstream _file;
/** The separator character between words for each line */
char _separator;
};
#endif // CSVREADER_H

View File

@ -0,0 +1,45 @@
#include "CsvWriter.h"
/** Open the output file at @p path; fields will be joined with @p separator. */
CsvWriter::CsvWriter(const string &path, const string &separator)
{
    _separator = separator;
    _isFirstTerm = true;
    _file.open(path.c_str(), ofstream::out);
}
/** Flush any buffered output and close the file. */
CsvWriter::~CsvWriter() {
_file.flush();
_file.close();
}
void CsvWriter::writeXYZ(const vector<Point3f> &list_points3d)
{
for(size_t i = 0; i < list_points3d.size(); ++i)
{
string x = FloatToString(list_points3d[i].x);
string y = FloatToString(list_points3d[i].y);
string z = FloatToString(list_points3d[i].z);
_file << x << _separator << y << _separator << z << std::endl;
}
}
void CsvWriter::writeUVXYZ(const vector<Point3f> &list_points3d, const vector<Point2f> &list_points2d, const Mat &descriptors)
{
for(size_t i = 0; i < list_points3d.size(); ++i)
{
string u = FloatToString(list_points2d[i].x);
string v = FloatToString(list_points2d[i].y);
string x = FloatToString(list_points3d[i].x);
string y = FloatToString(list_points3d[i].y);
string z = FloatToString(list_points3d[i].z);
_file << u << _separator << v << _separator << x << _separator << y << _separator << z;
for(int j = 0; j < 32; ++j)
{
string descriptor_str = FloatToString(descriptors.at<float>((int)i,j));
_file << _separator << descriptor_str;
}
_file << std::endl;
}
}

View File

@ -0,0 +1,25 @@
/*
 * CsvWriter.h - writes registered model data (3D points, 2D points and
 * descriptors) as separator-delimited text.
 */
#ifndef CSVWRITER_H
#define CSVWRITER_H
#include <iostream>
#include <fstream>
#include <opencv2/core.hpp>
#include "Utils.h"
using namespace std;
using namespace cv;
class CsvWriter {
public:
/** Open the file at 'path' for writing; fields are joined with 'separator'. */
CsvWriter(const string &path, const string &separator = " ");
/** Flushes and closes the underlying file. */
~CsvWriter();
/** Write one "x y z" line per 3D point. */
void writeXYZ(const vector<Point3f> &list_points3d);
/** Write one "u v x y z d0..dN" line per 2D/3D correspondence. */
void writeUVXYZ(const vector<Point3f> &list_points3d, const vector<Point2f> &list_points2d, const Mat &descriptors);
private:
/** The output stream */
ofstream _file;
/** Field separator inserted between values on a line */
string _separator;
// NOTE(review): never read in the visible implementation; kept for layout compatibility.
bool _isFirstTerm;
};
#endif // CSVWRITER_H

View File

@ -0,0 +1,78 @@
/*
 * Mesh.cpp - Triangle, Ray and Mesh helper classes for ray/mesh intersection.
 *
 * Created on: Apr 9, 2014
 * Author: edgar
 */
#include "Mesh.h"
#include "CsvReader.h"
// --------------------------------------------------- //
// TRIANGLE CLASS //
// --------------------------------------------------- //
/** The custom constructor of the Triangle Class: stores the three vertices. */
Triangle::Triangle(const cv::Point3f& V0, const cv::Point3f& V1, const cv::Point3f& V2) :
v0_(V0), v1_(V1), v2_(V2)
{
}
/** The default destructor of the Class */
Triangle::~Triangle()
{
// TODO Auto-generated destructor stub
}
// --------------------------------------------------- //
// RAY CLASS //
// --------------------------------------------------- //
/** The custom constructor of the Ray Class: P0 is the origin, P1 the direction. */
Ray::Ray(const cv::Point3f& P0, const cv::Point3f& P1) :
p0_(P0), p1_(P1)
{
}
/** The default destructor of the Class */
Ray::~Ray()
{
// TODO Auto-generated destructor stub
}
// --------------------------------------------------- //
// OBJECT MESH CLASS //
// --------------------------------------------------- //
/** The default constructor of the ObjectMesh Class: starts as an empty mesh. */
Mesh::Mesh() : num_vertices_(0), num_triangles_(0),
list_vertex_(0) , list_triangles_(0)
{
}
/** The default destructor of the ObjectMesh Class */
Mesh::~Mesh()
{
// TODO Auto-generated destructor stub
}
/** Populate the mesh from the *.ply file stored at 'path'. */
void Mesh::load(const std::string& path)
{
    // Drop any previously loaded geometry.
    list_vertex_.clear();
    list_triangles_.clear();

    // Parse the .ply file with the CSV reader helper.
    CsvReader csvReader(path);
    csvReader.readPLY(list_vertex_, list_triangles_);

    // Cache the element counts.
    num_vertices_ = (int)list_vertex_.size();
    num_triangles_ = (int)list_triangles_.size();
}

View File

@ -0,0 +1,82 @@
/*
 * Mesh.h - Triangle, Ray and Mesh declarations for ray/mesh intersection.
 *
 * Created on: Apr 9, 2014
 * Author: edgar
 */
#ifndef MESH_H_
#define MESH_H_
#include <iostream>
#include <opencv2/core/core.hpp>
// --------------------------------------------------- //
// TRIANGLE CLASS //
// --------------------------------------------------- //
class Triangle {
public:
explicit Triangle(const cv::Point3f& V0, const cv::Point3f& V1, const cv::Point3f& V2);
virtual ~Triangle();
cv::Point3f getV0() const { return v0_; }
cv::Point3f getV1() const { return v1_; }
cv::Point3f getV2() const { return v2_; }
private:
/** The three vertices that defines the triangle */
cv::Point3f v0_, v1_, v2_;
};
// --------------------------------------------------- //
// RAY CLASS //
// --------------------------------------------------- //
class Ray {
public:
explicit Ray(const cv::Point3f& P0, const cv::Point3f& P1);
virtual ~Ray();
cv::Point3f getP0() { return p0_; }
cv::Point3f getP1() { return p1_; }
private:
/** The two points that defines the ray (origin and direction) */
cv::Point3f p0_, p1_;
};
// --------------------------------------------------- //
// OBJECT MESH CLASS //
// --------------------------------------------------- //
class Mesh
{
public:
Mesh();
virtual ~Mesh();
std::vector<std::vector<int> > getTrianglesList() const { return list_triangles_; }
cv::Point3f getVertex(int pos) const { return list_vertex_[pos]; }
int getNumVertices() const { return num_vertices_; }
/** Populate the mesh from a *.ply file. */
void load(const std::string& path_file);
private:
/** The current number of vertices in the mesh */
int num_vertices_;
/** The current number of triangles in the mesh */
int num_triangles_;
/* The list of vertices of the mesh */
std::vector<cv::Point3f> list_vertex_;
/* The list of triangles of the mesh */
std::vector<std::vector<int> > list_triangles_;
};
#endif /* MESH_H_ */

View File

@ -0,0 +1,83 @@
/*
 * Model.cpp - container for the registered 2D/3D correspondences, keypoints
 * and descriptors of the textured model.
 *
 * Created on: Apr 9, 2014
 * Author: edgar
 */
#include "Model.h"
#include "CsvWriter.h"
/** Default constructor: empty model with no correspondences. */
Model::Model() : n_correspondences_(0), list_points2d_in_(0), list_points2d_out_(0), list_points3d_in_(0), training_img_path_()
{
}
Model::~Model()
{
// TODO Auto-generated destructor stub
}
/** Register a matched 2D image point with its 3D model point. */
void Model::add_correspondence(const cv::Point2f &point2d, const cv::Point3f &point3d)
{
list_points2d_in_.push_back(point2d);
list_points3d_in_.push_back(point3d);
n_correspondences_++;
}
/** Record a 2D point that falls outside the model surface. */
void Model::add_outlier(const cv::Point2f &point2d)
{
list_points2d_out_.push_back(point2d);
}
/** Append one descriptor row to the descriptor matrix. */
void Model::add_descriptor(const cv::Mat &descriptor)
{
descriptors_.push_back(descriptor);
}
/** Append one keypoint to the keypoint list. */
void Model::add_keypoint(const cv::KeyPoint &kp)
{
list_keypoints_.push_back(kp);
}
/** Remember the path of the image the model was trained from. */
void Model::set_trainingImagePath(const std::string &path)
{
training_img_path_ = path;
}
/** Save a YAML file and fill the object mesh */
void Model::save(const std::string &path)
{
cv::Mat points3dmatrix = cv::Mat(list_points3d_in_);
cv::Mat points2dmatrix = cv::Mat(list_points2d_in_);
cv::FileStorage storage(path, cv::FileStorage::WRITE);
storage << "points_3d" << points3dmatrix;
storage << "points_2d" << points2dmatrix;
storage << "keypoints" << list_keypoints_;
storage << "descriptors" << descriptors_;
storage << "training_image_path" << training_img_path_;
storage.release();
}
/** Load a YAML file using OpenCv functions **/
void Model::load(const std::string &path)
{
cv::Mat points3d_mat;
cv::FileStorage storage(path, cv::FileStorage::READ);
storage["points_3d"] >> points3d_mat;
storage["descriptors"] >> descriptors_;
if (!storage["keypoints"].empty())
{
storage["keypoints"] >> list_keypoints_;
}
if (!storage["training_image_path"].empty())
{
storage["training_image_path"] >> training_img_path_;
}
points3d_mat.copyTo(list_points3d_in_);
storage.release();
}

View File

@ -0,0 +1,55 @@
/*
 * Model.h - container for the registered 2D/3D correspondences, keypoints
 * and descriptors of the textured model.
 *
 * Created on: Apr 9, 2014
 * Author: edgar
 */
#ifndef MODEL_H_
#define MODEL_H_
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
class Model
{
public:
Model();
virtual ~Model();
std::vector<cv::Point2f> get_points2d_in() const { return list_points2d_in_; }
std::vector<cv::Point2f> get_points2d_out() const { return list_points2d_out_; }
std::vector<cv::Point3f> get_points3d() const { return list_points3d_in_; }
std::vector<cv::KeyPoint> get_keypoints() const { return list_keypoints_; }
cv::Mat get_descriptors() const { return descriptors_; }
int get_numDescriptors() const { return descriptors_.rows; }
std::string get_trainingImagePath() const { return training_img_path_; }
void add_correspondence(const cv::Point2f &point2d, const cv::Point3f &point3d);
void add_outlier(const cv::Point2f &point2d);
void add_descriptor(const cv::Mat &descriptor);
void add_keypoint(const cv::KeyPoint &kp);
void set_trainingImagePath(const std::string &path);
/** Serialize the model with cv::FileStorage. */
void save(const std::string &path);
/** Deserialize a model written by save(). */
void load(const std::string &path);
private:
/** The current number of correspondences */
int n_correspondences_;
/** The list of keypoints detected on the training image */
std::vector<cv::KeyPoint> list_keypoints_;
/** The list of 2D points on the model surface */
std::vector<cv::Point2f> list_points2d_in_;
/** The list of 2D points outside the model surface */
std::vector<cv::Point2f> list_points2d_out_;
/** The list of 3D points on the model surface */
std::vector<cv::Point3f> list_points3d_in_;
/** The descriptors of the model points (one row per correspondence) */
cv::Mat descriptors_;
/** Path to the training image */
std::string training_img_path_;
};
#endif /* MODEL_H_ */

View File

@ -0,0 +1,34 @@
/*
 * ModelRegistration.cpp - collects user-registered 2D/3D point pairs up to a
 * configurable maximum.
 *
 * Created on: Apr 18, 2014
 * Author: edgar
 */
#include "ModelRegistration.h"
/** Default constructor: no points registered, maximum of zero. */
ModelRegistration::ModelRegistration() : n_registrations_(0), max_registrations_(0),
list_points2d_(), list_points3d_()
{
}
ModelRegistration::~ModelRegistration()
{
// TODO Auto-generated destructor stub
}
/** Store one 2D/3D pair and bump the registration counter. */
void ModelRegistration::registerPoint(const cv::Point2f &point2d, const cv::Point3f &point3d)
{
// add correspondence at the end of the vector
list_points2d_.push_back(point2d);
list_points3d_.push_back(point3d);
n_registrations_++;
}
/** Discard all registered points and reset the counters. */
void ModelRegistration::reset()
{
n_registrations_ = 0;
max_registrations_ = 0;
list_points2d_.clear();
list_points3d_.clear();
}

View File

@ -0,0 +1,42 @@
/*
* ModelRegistration.h
*
* Created on: Apr 18, 2014
* Author: edgar
*/
#ifndef MODELREGISTRATION_H_
#define MODELREGISTRATION_H_
#include <iostream>
#include <opencv2/core.hpp>
class ModelRegistration
{
public:
ModelRegistration();
virtual ~ModelRegistration();
void setNumMax(int n) { max_registrations_ = n; }
std::vector<cv::Point2f> get_points2d() const { return list_points2d_; }
std::vector<cv::Point3f> get_points3d() const { return list_points3d_; }
int getNumMax() const { return max_registrations_; }
int getNumRegist() const { return n_registrations_; }
bool is_registrable() const { return (n_registrations_ < max_registrations_); }
void registerPoint(const cv::Point2f &point2d, const cv::Point3f &point3d);
void reset();
private:
/** The current number of registered points */
int n_registrations_;
/** The total number of points to register */
int max_registrations_;
/** The list of 2D points to register the model */
std::vector<cv::Point2f> list_points2d_;
/** The list of 3D points to register the model */
std::vector<cv::Point3f> list_points3d_;
};
#endif /* MODELREGISTRATION_H_ */

View File

@ -0,0 +1,307 @@
/*
 * PnPProblem.cpp - camera pose estimation from 2D/3D correspondences and
 * ray/mesh backprojection helpers.
 *
 * Created on: Mar 28, 2014
 * Author: Edgar Riba
 */
#include <iostream>
#include <sstream>
#include "PnPProblem.h"
#include "Mesh.h"
#include <opencv2/calib3d/calib3d.hpp>
/* Functions for Möller-Trumbore intersection algorithm */
// Cross product v1 x v2.
static cv::Point3f CROSS(cv::Point3f v1, cv::Point3f v2)
{
cv::Point3f tmp_p;
tmp_p.x = v1.y*v2.z - v1.z*v2.y;
tmp_p.y = v1.z*v2.x - v1.x*v2.z;
tmp_p.z = v1.x*v2.y - v1.y*v2.x;
return tmp_p;
}
// Dot product v1 . v2.
static double DOT(cv::Point3f v1, cv::Point3f v2)
{
return v1.x*v2.x + v1.y*v2.y + v1.z*v2.z;
}
// Component-wise difference v1 - v2.
static cv::Point3f SUB(cv::Point3f v1, cv::Point3f v2)
{
cv::Point3f tmp_p;
tmp_p.x = v1.x - v2.x;
tmp_p.y = v1.y - v2.y;
tmp_p.z = v1.z - v2.z;
return tmp_p;
}
/* End functions for Möller-Trumbore intersection algorithm */
// Return the point in 'points_list' closest (Euclidean distance) to 'origin'.
// Generalized to any non-empty list: the previous version indexed elements
// [0] and [1] unconditionally, reading out of bounds whenever a ray produced
// a single intersection. The caller guarantees the list is non-empty.
static cv::Point3f get_nearest_3D_point(std::vector<cv::Point3f> &points_list, cv::Point3f origin)
{
    size_t best_idx = 0;
    double best_dist2 = 0.0;
    for (size_t i = 0; i < points_list.size(); ++i)
    {
        const double dx = points_list[i].x - origin.x;
        const double dy = points_list[i].y - origin.y;
        const double dz = points_list[i].z - origin.z;
        // Squared distance: same argmin as the true distance, no sqrt needed.
        const double dist2 = dx*dx + dy*dy + dz*dz;
        if (i == 0 || dist2 < best_dist2)
        {
            best_dist2 = dist2;
            best_idx = i;
        }
    }
    return points_list[best_idx];
}
// Custom constructor given the intrinsic camera parameters.
// params = [ fx, fy, cx, cy ] (focal lengths and principal point).
PnPProblem::PnPProblem(const double params[])
{
A_matrix_ = cv::Mat::zeros(3, 3, CV_64FC1); // intrinsic camera matrix
A_matrix_.at<double>(0, 0) = params[0]; // fx    [ fx  0 cx ]
A_matrix_.at<double>(1, 1) = params[1]; // fy    [  0 fy cy ]
A_matrix_.at<double>(0, 2) = params[2]; // cx    [  0  0  1 ]
A_matrix_.at<double>(1, 2) = params[3]; // cy
A_matrix_.at<double>(2, 2) = 1;
R_matrix_ = cv::Mat::zeros(3, 3, CV_64FC1); // rotation matrix
t_matrix_ = cv::Mat::zeros(3, 1, CV_64FC1); // translation matrix
P_matrix_ = cv::Mat::zeros(3, 4, CV_64FC1); // rotation-translation matrix [R|t]
}
PnPProblem::~PnPProblem()
{
// TODO Auto-generated destructor stub
}
/** Build the 3x4 projection matrix P = [R | t] from the given rotation and
 *  translation. */
void PnPProblem::set_P_matrix( const cv::Mat &R_matrix, const cv::Mat &t_matrix)
{
    for (int row = 0; row < 3; ++row)
    {
        // Copy the 3x3 rotation block...
        for (int col = 0; col < 3; ++col)
        {
            P_matrix_.at<double>(row, col) = R_matrix.at<double>(row, col);
        }
        // ...and the 3x1 translation column.
        P_matrix_.at<double>(row, 3) = t_matrix.at<double>(row);
    }
}
/**
 * Estimate the camera pose from 2D/3D correspondences with cv::solvePnP.
 * On success R_matrix_, t_matrix_ and P_matrix_ are updated.
 *
 * @param list_points3d - model points in world coordinates
 * @param list_points2d - matching image points
 * @param flags         - PnP method passed straight to cv::solvePnP
 * @return whether solvePnP reported a solution
 */
bool PnPProblem::estimatePose( const std::vector<cv::Point3f> &list_points3d,
                               const std::vector<cv::Point2f> &list_points2d,
                               int flags)
{
    // No lens distortion is modelled and no extrinsic guess is supplied.
    cv::Mat dist_coeffs = cv::Mat::zeros(4, 1, CV_64FC1);
    cv::Mat rot_vec = cv::Mat::zeros(3, 1, CV_64FC1);
    cv::Mat trans_vec = cv::Mat::zeros(3, 1, CV_64FC1);

    const bool solved = cv::solvePnP( list_points3d, list_points2d, A_matrix_, dist_coeffs,
                                      rot_vec, trans_vec, false, flags);

    // Store the pose: rotation vector -> matrix, then build P = [R|t].
    Rodrigues(rot_vec, R_matrix_);
    t_matrix_ = trans_vec;
    this->set_P_matrix(R_matrix_, t_matrix_);

    return solved;
}
// Estimate the pose given a list of 2D/3D correspondences with RANSAC and the
// method to use. Updates R_matrix_, t_matrix_ and P_matrix_, and fills
// 'inliers' with the indices of the correspondences consistent with the pose.
void PnPProblem::estimatePoseRANSAC( const std::vector<cv::Point3f> &list_points3d, // list with model 3D coordinates
const std::vector<cv::Point2f> &list_points2d, // list with scene 2D coordinates
int flags, cv::Mat &inliers, int iterationsCount, // PnP method; inliers container
float reprojectionError, double confidence ) // Ransac parameters
{
cv::Mat distCoeffs = cv::Mat::zeros(4, 1, CV_64FC1); // vector of distortion coefficients (none modelled)
cv::Mat rvec = cv::Mat::zeros(3, 1, CV_64FC1); // output rotation vector
cv::Mat tvec = cv::Mat::zeros(3, 1, CV_64FC1); // output translation vector
bool useExtrinsicGuess = false; // if true the function uses the provided rvec and tvec values as
// initial approximations of the rotation and translation vectors
cv::solvePnPRansac( list_points3d, list_points2d, A_matrix_, distCoeffs, rvec, tvec,
useExtrinsicGuess, iterationsCount, reprojectionError, confidence,
inliers, flags );
Rodrigues(rvec, R_matrix_); // converts Rotation Vector to Matrix
t_matrix_ = tvec; // set translation matrix
this->set_P_matrix(R_matrix_, t_matrix_); // set rotation-translation matrix
}
// Backproject every mesh vertex to 2D with the current pose; used to verify
// the pose estimation visually.
std::vector<cv::Point2f> PnPProblem::verify_points(Mesh *mesh)
{
    std::vector<cv::Point2f> verified_points_2d;
    const int num_vertices = mesh->getNumVertices();
    for (int idx = 0; idx < num_vertices; ++idx)
    {
        verified_points_2d.push_back(this->backproject3DPoint(mesh->getVertex(idx)));
    }
    return verified_points_2d;
}
// Project a 3D point (world coordinates) into the image using the estimated
// pose: [u v w]' = A * [R|t] * [x y z 1]', then dehomogenize by w.
cv::Point2f PnPProblem::backproject3DPoint(const cv::Point3f &point3d)
{
    // Homogeneous 3D point [x y z 1]'
    cv::Mat point3d_vec = cv::Mat(4, 1, CV_64FC1);
    point3d_vec.at<double>(0) = point3d.x;
    point3d_vec.at<double>(1) = point3d.y;
    point3d_vec.at<double>(2) = point3d.z;
    point3d_vec.at<double>(3) = 1;

    // Homogeneous 2D point [u v w]'. Initialize directly from the product:
    // the previous code allocated a throwaway 4x1 matrix that the assignment
    // immediately replaced with this 3x1 result.
    cv::Mat point2d_vec = A_matrix_ * P_matrix_ * point3d_vec;

    // Normalization of [u v]': divide by the homogeneous coordinate w.
    cv::Point2f point2d;
    point2d.x = (float)(point2d_vec.at<double>(0) / point2d_vec.at<double>(2));
    point2d.y = (float)(point2d_vec.at<double>(1) / point2d_vec.at<double>(2));
    return point2d;
}
// Back project a 2D image point onto the mesh surface. Returns true and fills
// 'point3d' with the nearest ray/mesh intersection when the pixel lies on the
// object, false otherwise.
bool PnPProblem::backproject2DPoint(const Mesh *mesh, const cv::Point2f &point2d, cv::Point3f &point3d)
{
// Triangles list of the object mesh
std::vector<std::vector<int> > triangles_list = mesh->getTrianglesList();
// Arbitrary positive depth scale for the homogeneous image point; any
// positive value places X_c on the same viewing ray.
double lambda = 8;
double u = point2d.x;
double v = point2d.y;
// Point in vector form
cv::Mat point2d_vec = cv::Mat::ones(3, 1, CV_64F); // 3x1
point2d_vec.at<double>(0) = u * lambda;
point2d_vec.at<double>(1) = v * lambda;
point2d_vec.at<double>(2) = lambda;
// Point in camera coordinates: X_c = A^-1 * [u v 1]' * lambda
cv::Mat X_c = A_matrix_.inv() * point2d_vec ; // 3x1
// Point in world coordinates: X_w = R^-1 * (X_c - t)
cv::Mat X_w = R_matrix_.inv() * ( X_c - t_matrix_ ); // 3x1
// Center of projection (camera position in world coordinates): C = -R^-1 * t
cv::Mat C_op = cv::Mat(R_matrix_.inv()).mul(-1) * t_matrix_; // 3x1
// Ray direction vector (normalized)
cv::Mat ray = X_w - C_op; // 3x1
ray = ray / cv::norm(ray); // 3x1
// Set up Ray from the camera center along the viewing direction
Ray R((cv::Point3f)C_op, (cv::Point3f)ray);
// A vector to store the intersections found
std::vector<cv::Point3f> intersections_list;
// Loop for all the triangles and check the intersection
for (unsigned int i = 0; i < triangles_list.size(); i++)
{
cv::Point3f V0 = mesh->getVertex(triangles_list[i][0]);
cv::Point3f V1 = mesh->getVertex(triangles_list[i][1]);
cv::Point3f V2 = mesh->getVertex(triangles_list[i][2]);
Triangle T(V0, V1, V2);
double out;
if(this->intersect_MollerTrumbore(R, T, &out))
{
cv::Point3f tmp_pt = R.getP0() + out*R.getP1(); // P = O + t*D
intersections_list.push_back(tmp_pt);
}
}
// If there are intersections, keep the one nearest to the camera center
if (!intersections_list.empty())
{
point3d = get_nearest_3D_point(intersections_list, R.getP0());
return true;
}
else
{
return false;
}
}
// Möller-Trumbore ray/triangle intersection algorithm.
// Returns true when the ray hits the triangle; '*out' then holds the ray
// parameter t of the hit (P = O + t*D). The triangle is not culled by facing.
bool PnPProblem::intersect_MollerTrumbore(Ray &Ray, Triangle &Triangle, double *out)
{
const double EPSILON = 0.000001;
cv::Point3f e1, e2;
cv::Point3f P, Q, T;
double det, inv_det, u, v;
double t;
cv::Point3f V1 = Triangle.getV0(); // Triangle vertices
cv::Point3f V2 = Triangle.getV1();
cv::Point3f V3 = Triangle.getV2();
cv::Point3f O = Ray.getP0(); // Ray origin
cv::Point3f D = Ray.getP1(); // Ray direction
//Find vectors for two edges sharing V1
e1 = SUB(V2, V1);
e2 = SUB(V3, V1);
// Begin calculation determinant - also used to calculate U parameter
P = CROSS(D, e2);
// If determinant is near zero, ray lies in the plane of the triangle
det = DOT(e1, P);
//NOT CULLING: back-facing triangles are treated the same as front-facing
if(det > -EPSILON && det < EPSILON) return false;
inv_det = 1.f / det;
//calculate distance from V1 to ray origin
T = SUB(O, V1);
//Calculate u parameter (barycentric) and test bound
u = DOT(T, P) * inv_det;
//The intersection lies outside of the triangle
if(u < 0.f || u > 1.f) return false;
//Prepare to test v parameter
Q = CROSS(T, e1);
//Calculate V parameter (barycentric) and test bound
v = DOT(D, Q) * inv_det;
//The intersection lies outside of the triangle
if(v < 0.f || u + v > 1.f) return false;
// Ray parameter of the intersection point
t = DOT(e2, Q) * inv_det;
if(t > EPSILON) { //ray intersection in front of the origin
*out = t;
return true;
}
// No hit, no win
return false;
}

View File

@ -0,0 +1,52 @@
/*
 * PnPProblem.h - camera pose estimation from 2D/3D correspondences and
 * ray/mesh backprojection helpers.
 *
 * Created on: Mar 28, 2014
 * Author: Edgar Riba
 */
#ifndef PNPPROBLEM_H_
#define PNPPROBLEM_H_
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "Mesh.h"
#include "ModelRegistration.h"
class PnPProblem
{
public:
/** Build from intrinsics param = [fx, fy, cx, cy]. */
explicit PnPProblem(const double param[]); // custom constructor
virtual ~PnPProblem();
/** Back project an image point onto the mesh surface. */
bool backproject2DPoint(const Mesh *mesh, const cv::Point2f &point2d, cv::Point3f &point3d);
/** Möller-Trumbore ray/triangle intersection test. */
bool intersect_MollerTrumbore(Ray &R, Triangle &T, double *out);
/** Project all mesh vertices with the current pose (for visual checks). */
std::vector<cv::Point2f> verify_points(Mesh *mesh);
/** Project a single 3D point with the current pose. */
cv::Point2f backproject3DPoint(const cv::Point3f &point3d);
/** Pose estimation with cv::solvePnP. */
bool estimatePose(const std::vector<cv::Point3f> &list_points3d, const std::vector<cv::Point2f> &list_points2d, int flags);
/** Pose estimation with cv::solvePnPRansac; fills the inlier indices. */
void estimatePoseRANSAC( const std::vector<cv::Point3f> &list_points3d, const std::vector<cv::Point2f> &list_points2d,
int flags, cv::Mat &inliers,
int iterationsCount, float reprojectionError, double confidence );
cv::Mat get_A_matrix() const { return A_matrix_; }
cv::Mat get_R_matrix() const { return R_matrix_; }
cv::Mat get_t_matrix() const { return t_matrix_; }
cv::Mat get_P_matrix() const { return P_matrix_; }
/** Compose the 3x4 projection matrix P = [R|t]. */
void set_P_matrix( const cv::Mat &R_matrix, const cv::Mat &t_matrix);
private:
/** The calibration matrix */
cv::Mat A_matrix_;
/** The computed rotation matrix */
cv::Mat R_matrix_;
/** The computed translation matrix */
cv::Mat t_matrix_;
/** The computed projection matrix */
cv::Mat P_matrix_;
};
#endif /* PNPPROBLEM_H_ */

View File

@ -0,0 +1,154 @@
/*
 * RobustMatcher.cpp - feature matching with ratio and symmetry filtering.
 *
 * Created on: Jun 4, 2014
 * Author: eriba
 */
#include "RobustMatcher.h"
#include <time.h>
#include <opencv2/features2d/features2d.hpp>
RobustMatcher::~RobustMatcher()
{
// TODO Auto-generated destructor stub
}
// Detect keypoints in 'image' with the configured feature detector.
void RobustMatcher::computeKeyPoints( const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints)
{
detector_->detect(image, keypoints);
}
// Compute descriptors for the given keypoints with the configured extractor.
void RobustMatcher::computeDescriptors( const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, cv::Mat& descriptors)
{
extractor_->compute(image, keypoints, descriptors);
}
int RobustMatcher::ratioTest(std::vector<std::vector<cv::DMatch> > &matches)
{
int removed = 0;
// for all matches
for ( std::vector<std::vector<cv::DMatch> >::iterator
matchIterator= matches.begin(); matchIterator!= matches.end(); ++matchIterator)
{
// if 2 NN has been identified
if (matchIterator->size() > 1)
{
// check distance ratio
if ((*matchIterator)[0].distance / (*matchIterator)[1].distance > ratio_)
{
matchIterator->clear(); // remove match
removed++;
}
}
else
{ // does not have 2 neighbours
matchIterator->clear(); // remove match
removed++;
}
}
return removed;
}
void RobustMatcher::symmetryTest( const std::vector<std::vector<cv::DMatch> >& matches1,
const std::vector<std::vector<cv::DMatch> >& matches2,
std::vector<cv::DMatch>& symMatches )
{
// for all matches image 1 -> image 2
for (std::vector<std::vector<cv::DMatch> >::const_iterator
matchIterator1 = matches1.begin(); matchIterator1 != matches1.end(); ++matchIterator1)
{
// ignore deleted matches
if (matchIterator1->empty() || matchIterator1->size() < 2)
continue;
// for all matches image 2 -> image 1
for (std::vector<std::vector<cv::DMatch> >::const_iterator
matchIterator2 = matches2.begin(); matchIterator2 != matches2.end(); ++matchIterator2)
{
// ignore deleted matches
if (matchIterator2->empty() || matchIterator2->size() < 2)
continue;
// Match symmetry test
if ((*matchIterator1)[0].queryIdx == (*matchIterator2)[0].trainIdx &&
(*matchIterator2)[0].queryIdx == (*matchIterator1)[0].trainIdx)
{
// add symmetrical match
symMatches.push_back(cv::DMatch((*matchIterator1)[0].queryIdx,
(*matchIterator1)[0].trainIdx, (*matchIterator1)[0].distance));
break; // next match in image 1 -> image 2
}
}
}
}
/**
 * Full robust matching pipeline: detect keypoints on @p frame, compute their
 * descriptors, match against the model descriptors in BOTH directions, then
 * keep only matches passing the ratio test AND the symmetry test.
 * If a training image was set, a match visualisation is stored in
 * img_matching_ (retrievable via getImageMatching()).
 */
void RobustMatcher::robustMatch( const cv::Mat& frame, std::vector<cv::DMatch>& good_matches,
                                 std::vector<cv::KeyPoint>& keypoints_frame, const cv::Mat& descriptors_model,
                                 const std::vector<cv::KeyPoint>& keypoints_model)
{
    // 1a. Detection of the ORB features
    this->computeKeyPoints(frame, keypoints_frame);
    // 1b. Extraction of the ORB descriptors
    cv::Mat descriptors_frame;
    this->computeDescriptors(frame, keypoints_frame, descriptors_frame);
    // 2. Match the two image descriptors
    std::vector<std::vector<cv::DMatch> > matches12, matches21;
    // 2a. From image 1 to image 2
    matcher_->knnMatch(descriptors_frame, descriptors_model, matches12, 2); // return 2 nearest neighbours
    // 2b. From image 2 to image 1
    matcher_->knnMatch(descriptors_model, descriptors_frame, matches21, 2); // return 2 nearest neighbours
    // 3. Remove matches for which NN ratio is > than threshold
    // clean image 1 -> image 2 matches
    ratioTest(matches12);
    // clean image 2 -> image 1 matches
    ratioTest(matches21);
    // 4. Remove non-symmetrical matches
    symmetryTest(matches12, matches21, good_matches);
    // Optional debug visualisation of the final matches
    if (!training_img_.empty() && !keypoints_model.empty())
    {
        cv::drawMatches(frame, keypoints_frame, training_img_, keypoints_model, good_matches, img_matching_);
    }
}
/**
 * Ratio-test-only matching (no symmetry test): faster but less strict than
 * robustMatch(). Clears and fills @p good_matches with the surviving best
 * matches; updates img_matching_ when a training image is available.
 */
void RobustMatcher::fastRobustMatch( const cv::Mat& frame, std::vector<cv::DMatch>& good_matches,
                                     std::vector<cv::KeyPoint>& keypoints_frame,
                                     const cv::Mat& descriptors_model,
                                     const std::vector<cv::KeyPoint>& keypoints_model)
{
    good_matches.clear();

    // Detect keypoints and compute their descriptors on the input frame
    this->computeKeyPoints(frame, keypoints_frame);
    cv::Mat descriptors_frame;
    this->computeDescriptors(frame, keypoints_frame, descriptors_frame);

    // 2-NN matching: frame descriptors against the model descriptors
    std::vector<std::vector<cv::DMatch> > knn_matches;
    matcher_->knnMatch(descriptors_frame, descriptors_model, knn_matches, 2);

    // Discard ambiguous matches via the ratio test (entries are cleared in place)
    ratioTest(knn_matches);

    // Surviving best matches become the output
    for (const std::vector<cv::DMatch>& knn : knn_matches)
    {
        if (!knn.empty())
            good_matches.push_back(knn[0]);
    }

    // Optional debug visualisation of the final matches
    if (!training_img_.empty() && !keypoints_model.empty())
    {
        cv::drawMatches(frame, keypoints_frame, training_img_, keypoints_model, good_matches, img_matching_);
    }
}

View File

@ -0,0 +1,92 @@
/*
* RobustMatcher.h
*
* Created on: Jun 4, 2014
* Author: eriba
*/
#ifndef ROBUSTMATCHER_H_
#define ROBUSTMATCHER_H_
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
/**
 * RobustMatcher: matches feature descriptors between a camera frame and a
 * trained model, filtering matches with a ratio test and (optionally) a
 * symmetry test. Defaults to ORB features with brute-force Hamming matching.
 */
class RobustMatcher {
public:
    RobustMatcher() : detector_(), extractor_(), matcher_(),
                      ratio_(0.8f), training_img_(), img_matching_()
    {
        // ORB is the default feature
        detector_ = cv::ORB::create();
        extractor_ = cv::ORB::create();
        // BruteForce matcher with Norm Hamming is the default matcher
        matcher_ = cv::makePtr<cv::BFMatcher>((int)cv::NORM_HAMMING, false);
    }
    virtual ~RobustMatcher();

    // Set the feature detector
    void setFeatureDetector(const cv::Ptr<cv::FeatureDetector>& detect) { detector_ = detect; }

    // Set the descriptor extractor
    void setDescriptorExtractor(const cv::Ptr<cv::DescriptorExtractor>& desc) { extractor_ = desc; }

    // Set the matcher
    void setDescriptorMatcher(const cv::Ptr<cv::DescriptorMatcher>& match) { matcher_ = match; }

    // Compute the keypoints of an image
    void computeKeyPoints( const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints);

    // Compute the descriptors of an image given its keypoints
    void computeDescriptors( const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, cv::Mat& descriptors);

    // Latest side-by-side match visualisation (empty until a training image is set)
    cv::Mat getImageMatching() const { return img_matching_; }

    // Set ratio parameter for the ratio test
    void setRatio( float rat) { ratio_ = rat; }

    // Training image used only for drawing the match visualisation
    void setTrainingImage(const cv::Mat &img) { training_img_ = img; }

    // Clear matches for which NN ratio is > than threshold
    // return the number of removed points
    // (corresponding entries being cleared,
    // i.e. size will be 0)
    int ratioTest(std::vector<std::vector<cv::DMatch> > &matches);

    // Insert symmetrical matches in symMatches vector
    void symmetryTest( const std::vector<std::vector<cv::DMatch> >& matches1,
                       const std::vector<std::vector<cv::DMatch> >& matches2,
                       std::vector<cv::DMatch>& symMatches );

    // Match feature points using ratio and symmetry test
    void robustMatch( const cv::Mat& frame, std::vector<cv::DMatch>& good_matches,
                      std::vector<cv::KeyPoint>& keypoints_frame,
                      const cv::Mat& descriptors_model,
                      const std::vector<cv::KeyPoint>& keypoints_model);

    // Match feature points using ratio test only (faster, less strict)
    void fastRobustMatch( const cv::Mat& frame, std::vector<cv::DMatch>& good_matches,
                          std::vector<cv::KeyPoint>& keypoints_frame,
                          const cv::Mat& descriptors_model,
                          const std::vector<cv::KeyPoint>& keypoints_model);

private:
    // pointer to the feature point detector object
    cv::Ptr<cv::FeatureDetector> detector_;
    // pointer to the feature descriptor extractor object
    cv::Ptr<cv::DescriptorExtractor> extractor_;
    // pointer to the matcher object
    cv::Ptr<cv::DescriptorMatcher> matcher_;
    // max ratio between 1st and 2nd NN
    float ratio_;
    // training image
    cv::Mat training_img_;
    // matching image
    cv::Mat img_matching_;
};
#endif /* ROBUSTMATCHER_H_ */

View File

@ -0,0 +1,394 @@
/*
* Utils.cpp
*
* Created on: Mar 28, 2014
* Author: Edgar Riba
*/
#include <iostream>
#include "PnPProblem.h"
#include "ModelRegistration.h"
#include "Utils.h"
#include <opencv2/opencv_modules.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/flann.hpp>
#if defined (HAVE_OPENCV_XFEATURES2D)
#include <opencv2/xfeatures2d.hpp>
#endif
// For text
const int fontFace = cv::FONT_ITALIC;
const double fontScale = 0.75;
const int thickness_font = 2;
// For circles
const int lineType = 8;
const int radius = 4;
// Overlay "Where is point (x,y,z) ?" in the top-left corner of the image,
// with the 3D coordinates truncated to integers.
void drawQuestion(cv::Mat image, cv::Point3f point, cv::Scalar color)
{
    std::string text = " Where is point ("
                     + IntToString((int)point.x) + ","
                     + IntToString((int)point.y) + ","
                     + IntToString((int)point.z) + ") ?";
    cv::putText(image, text, cv::Point(25,50), fontFace, fontScale, color, thickness_font, 8);
}
// Draw a status text at the first overlay position (25,50).
void drawText(cv::Mat image, std::string text, cv::Scalar color)
{
    cv::putText(image, text, cv::Point(25,50), fontFace, fontScale, color, thickness_font, 8);
}
// Draw a second status text just below drawText's position, at (25,75).
void drawText2(cv::Mat image, std::string text, cv::Scalar color)
{
    cv::putText(image, text, cv::Point(25,75), fontFace, fontScale, color, thickness_font, 8);
}
// Draw the measured frame rate (two decimals, "FPS" suffix) at (500,50).
void drawFPS(cv::Mat image, double fps, cv::Scalar color)
{
    std::string fps_str = cv::format("%.2f FPS", fps);
    cv::putText(image, fps_str, cv::Point(500,50), fontFace, fontScale, color, thickness_font, 8);
}
// Draw the detection confidence as a truncated integer percentage at (500,75).
void drawConfidence(cv::Mat image, double confidence, cv::Scalar color)
{
    std::string conf_str = IntToString((int)confidence);
    std::string text = conf_str + " %";
    cv::putText(image, text, cv::Point(500,75), fontFace, fontScale, color, thickness_font, 8);
}
// Draw a "n of n_max points" progress counter at (500,50).
void drawCounter(cv::Mat image, int n, int n_max, cv::Scalar color)
{
    std::string n_str = IntToString(n);
    std::string n_max_str = IntToString(n_max);
    std::string text = n_str + " of " + n_max_str + " points";
    cv::putText(image, text, cv::Point(500,50), fontFace, fontScale, color, thickness_font, 8);
}
// Draw each 2D point as a filled circle and annotate it with "P<i> (x,y,z)",
// the (x,y,z) label coming from the corresponding entry of the 3D list.
// The two lists are expected to be the same length.
void drawPoints(cv::Mat image, std::vector<cv::Point2f> &list_points_2d, std::vector<cv::Point3f> &list_points_3d, cv::Scalar color)
{
    for (unsigned int i = 0; i < list_points_2d.size(); ++i)
    {
        cv::Point2f anchor = list_points_2d[i];
        const cv::Point3f &p3 = list_points_3d[i];

        // marker at the 2D location
        cv::circle(image, anchor, radius, color, -1, lineType );

        std::string label = "P" + IntToString(i+1) + " ("
                          + IntToString((int)p3.x) + ","
                          + IntToString((int)p3.y) + ","
                          + IntToString((int)p3.z) + ")";

        // offset the label so it does not overlap the marker
        anchor.x = anchor.x + 10;
        anchor.y = anchor.y - 10;
        cv::putText(image, label, anchor, fontFace, fontScale*0.5, color, thickness_font, 8);
    }
}
// Draw every 2D point of the list as a filled circle of the given color.
void draw2DPoints(cv::Mat image, std::vector<cv::Point2f> &list_points, cv::Scalar color)
{
    for (const cv::Point2f &p : list_points)
    {
        cv::circle(image, p, radius, color, -1, lineType );
    }
}
// Draw an arrow from p to q: a main shaft plus two barbs of length
// arrowMagnitude at +/-45 degrees around the tip q.
// Note: p is taken by value and reused as scratch space for the barb endpoints.
void drawArrow(cv::Mat image, cv::Point2i p, cv::Point2i q, cv::Scalar color, int arrowMagnitude, int thickness, int line_type, int shift)
{
    // main shaft
    cv::line(image, p, q, color, thickness, line_type, shift);

    // orientation of the shaft, measured from the tip q back towards p
    double angle = atan2((double)p.y - q.y, (double)p.x - q.x);

    // first barb, rotated +45 degrees from the shaft direction
    p.x = (int)(q.x + arrowMagnitude * cos(angle + CV_PI/4));
    p.y = (int)(q.y + arrowMagnitude * sin(angle + CV_PI/4));
    cv::line(image, p, q, color, thickness, line_type, shift);

    // second barb, rotated -45 degrees
    p.x = (int)(q.x + arrowMagnitude * cos(angle - CV_PI/4));
    p.y = (int)(q.y + arrowMagnitude * sin(angle - CV_PI/4));
    cv::line(image, p, q, color, thickness, line_type, shift);
}
// Draw the projected 3D coordinate axes: X in red, Y in green, Z in blue,
// with a black dot at the origin. list_points2d must contain, in order,
// the projections of the origin and of the X, Y and Z axis endpoints.
void draw3DCoordinateAxes(cv::Mat image, const std::vector<cv::Point2f> &list_points2d)
{
    cv::Scalar red(0, 0, 255);
    cv::Scalar green(0,255,0);
    cv::Scalar blue(255,0,0);
    cv::Scalar black(0,0,0);

    cv::Point2i origin = list_points2d[0];
    cv::Point2i pointX = list_points2d[1];
    cv::Point2i pointY = list_points2d[2];
    cv::Point2i pointZ = list_points2d[3];

    drawArrow(image, origin, pointX, red, 9, 2);
    drawArrow(image, origin, pointY, green, 9, 2);
    drawArrow(image, origin, pointZ, blue, 9, 2);
    cv::circle(image, origin, radius/2, black, -1, lineType );
}
// Project every triangle of the mesh through the current pose estimate and
// draw its three edges on the image.
void drawObjectMesh(cv::Mat image, const Mesh *mesh, PnPProblem *pnpProblem, cv::Scalar color)
{
    const std::vector<std::vector<int> > triangles = mesh->getTrianglesList();
    for (const std::vector<int> &tri : triangles)
    {
        // back-project the three vertices into image space
        cv::Point2f a = pnpProblem->backproject3DPoint(mesh->getVertex(tri[0]));
        cv::Point2f b = pnpProblem->backproject3DPoint(mesh->getVertex(tri[1]));
        cv::Point2f c = pnpProblem->backproject3DPoint(mesh->getVertex(tri[2]));

        cv::line(image, a, b, color, 1);
        cv::line(image, b, c, color, 1);
        cv::line(image, c, a, color, 1);
    }
}
// L2 norm of the difference between the ground-truth and estimated translations.
double get_translation_error(const cv::Mat &t_true, const cv::Mat &t)
{
    cv::Mat diff = t_true - t;
    return cv::norm(diff);
}
// Computes the norm of the rotation error: the magnitude of the axis-angle
// (Rodrigues) vector of the relative rotation between R_true and R.
// NOTE(review): the leading minus sign makes error_mat = -R_true * R.t(),
// which for 3x3 rotations has det = -1 and is not a proper rotation —
// confirm whether R_true * R.t() was intended before relying on this value.
double get_rotation_error(const cv::Mat &R_true, const cv::Mat &R)
{
    cv::Mat error_vec, error_mat;
    error_mat = -R_true * R.t();
    cv::Rodrigues(error_mat, error_vec);
    return cv::norm(error_vec);
}
// Converts a given Rotation Matrix to Euler angles
// Convention used is Y-Z-X Tait-Bryan angles
// Returns a 3x1 CV_64F vector (bank, attitude, heading), in radians.
// Reference code implementation:
// https://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToEuler/index.htm
cv::Mat rot2euler(const cv::Mat & rotationMatrix)
{
    cv::Mat euler(3,1,CV_64F);

    double m00 = rotationMatrix.at<double>(0,0);
    double m02 = rotationMatrix.at<double>(0,2);
    double m10 = rotationMatrix.at<double>(1,0);
    double m11 = rotationMatrix.at<double>(1,1);
    double m12 = rotationMatrix.at<double>(1,2);
    double m20 = rotationMatrix.at<double>(2,0);
    double m22 = rotationMatrix.at<double>(2,2);

    double bank, attitude, heading;

    // Assuming the angles are in radians.
    // Gimbal-lock handling: near m10 = +/-1 the attitude saturates at +/-pi/2
    // and bank is fixed to 0 (only the sum/difference of the other two angles
    // is observable there).
    if (m10 > 0.998) { // singularity at north pole
        bank = 0;
        attitude = CV_PI/2;
        heading = atan2(m02,m22);
    }
    else if (m10 < -0.998) { // singularity at south pole
        bank = 0;
        attitude = -CV_PI/2;
        heading = atan2(m02,m22);
    }
    else
    {
        bank = atan2(-m12,m11);
        attitude = asin(m10);
        heading = atan2(-m20,m00);
    }

    euler.at<double>(0) = bank;
    euler.at<double>(1) = attitude;
    euler.at<double>(2) = heading;

    return euler;
}
// Converts a given Euler angles to Rotation Matrix
// Convention used is Y-Z-X Tait-Bryan angles
// Input is a 3x1 CV_64F vector (bank, attitude, heading) in radians;
// output is the corresponding 3x3 CV_64F rotation matrix.
// Reference:
// https://www.euclideanspace.com/maths/geometry/rotations/conversions/eulerToMatrix/index.htm
cv::Mat euler2rot(const cv::Mat & euler)
{
    cv::Mat rotationMatrix(3,3,CV_64F);

    double bank = euler.at<double>(0);
    double attitude = euler.at<double>(1);
    double heading = euler.at<double>(2);

    // Assuming the angles are in radians.
    double ch = cos(heading);
    double sh = sin(heading);
    double ca = cos(attitude);
    double sa = sin(attitude);
    double cb = cos(bank);
    double sb = sin(bank);

    // Closed-form product of the three elemental rotations (Y-Z-X order).
    double m00, m01, m02, m10, m11, m12, m20, m21, m22;
    m00 = ch * ca;
    m01 = sh*sb - ch*sa*cb;
    m02 = ch*sa*sb + sh*cb;
    m10 = sa;
    m11 = ca*cb;
    m12 = -ca*sb;
    m20 = -sh*ca;
    m21 = sh*sa*cb + ch*sb;
    m22 = -sh*sa*sb + ch*cb;

    rotationMatrix.at<double>(0,0) = m00;
    rotationMatrix.at<double>(0,1) = m01;
    rotationMatrix.at<double>(0,2) = m02;
    rotationMatrix.at<double>(1,0) = m10;
    rotationMatrix.at<double>(1,1) = m11;
    rotationMatrix.at<double>(1,2) = m12;
    rotationMatrix.at<double>(2,0) = m20;
    rotationMatrix.at<double>(2,1) = m21;
    rotationMatrix.at<double>(2,2) = m22;

    return rotationMatrix;
}
// Parse the leading integer of Text; returns 0 when no integer can be read.
int StringToInt ( const std::string &Text )
{
    std::istringstream stream(Text);
    int value = 0;
    if (stream >> value)
        return value;
    return 0;
}
// Render a float with the default stream formatting (6 significant digits,
// no trailing zeros) — intentionally NOT std::to_string, which always
// prints six decimal places.
std::string FloatToString ( float Number )
{
    std::ostringstream stream;
    stream << Number;
    return stream.str();
}
// Render an integer in base-10 text form (same output as ostringstream << int).
std::string IntToString ( int Number )
{
    return std::to_string(Number);
}
// Instantiate the detector/descriptor pair requested by featureName.
// numKeypoints is only passed to ORB; the other algorithms use their library
// defaults. SURF needs the nonfree xfeatures2d module; BINBOOST and VGG are
// descriptor-only and are paired with a KAZE detector. When the requested
// module is unavailable the function falls back to ORB. An unrecognised
// featureName leaves both output pointers unmodified.
void createFeatures(const std::string &featureName, int numKeypoints, cv::Ptr<cv::Feature2D> &detector, cv::Ptr<cv::Feature2D> &descriptor)
{
    if (featureName == "ORB")
    {
        detector = cv::ORB::create(numKeypoints);
        descriptor = cv::ORB::create(numKeypoints);
    }
    else if (featureName == "KAZE")
    {
        detector = cv::KAZE::create();
        descriptor = cv::KAZE::create();
    }
    else if (featureName == "AKAZE")
    {
        detector = cv::AKAZE::create();
        descriptor = cv::AKAZE::create();
    }
    else if (featureName == "BRISK")
    {
        detector = cv::BRISK::create();
        descriptor = cv::BRISK::create();
    }
    else if (featureName == "SIFT")
    {
        detector = cv::SIFT::create();
        descriptor = cv::SIFT::create();
    }
    else if (featureName == "SURF")
    {
#if defined (OPENCV_ENABLE_NONFREE) && defined (HAVE_OPENCV_XFEATURES2D)
        detector = cv::xfeatures2d::SURF::create(100, 4, 3, true); //extended=true
        descriptor = cv::xfeatures2d::SURF::create(100, 4, 3, true); //extended=true
#else
        std::cout << "xfeatures2d module is not available or nonfree is not enabled." << std::endl;
        std::cout << "Default to ORB." << std::endl;
        detector = cv::ORB::create(numKeypoints);
        descriptor = cv::ORB::create(numKeypoints);
#endif
    }
    else if (featureName == "BINBOOST")
    {
#if defined (HAVE_OPENCV_XFEATURES2D)
        detector = cv::KAZE::create();
        descriptor = cv::xfeatures2d::BoostDesc::create();
#else
        std::cout << "xfeatures2d module is not available." << std::endl;
        std::cout << "Default to ORB." << std::endl;
        detector = cv::ORB::create(numKeypoints);
        descriptor = cv::ORB::create(numKeypoints);
#endif
    }
    else if (featureName == "VGG")
    {
#if defined (HAVE_OPENCV_XFEATURES2D)
        detector = cv::KAZE::create();
        descriptor = cv::xfeatures2d::VGG::create();
#else
        std::cout << "xfeatures2d module is not available." << std::endl;
        std::cout << "Default to ORB." << std::endl;
        detector = cv::ORB::create(numKeypoints);
        descriptor = cv::ORB::create(numKeypoints);
#endif
    }
}
// Build a descriptor matcher appropriate for the given feature type.
// Binary descriptors (ORB/BRISK/AKAZE/BINBOOST) use Hamming distance —
// or an LSH index when FLANN is requested; the remaining (float) descriptors
// use plain brute force or FLANN's default KD-tree index.
cv::Ptr<cv::DescriptorMatcher> createMatcher(const std::string &featureName, bool useFLANN)
{
    const bool binaryDescriptor = (featureName == "ORB" || featureName == "BRISK" ||
                                   featureName == "AKAZE" || featureName == "BINBOOST");

    if (binaryDescriptor)
    {
        if (!useFLANN)
            return cv::DescriptorMatcher::create("BruteForce-Hamming");

        // LSH index parameters: 6 hash tables, 12-bit keys, multi-probe level 1
        cv::Ptr<cv::flann::IndexParams> indexParams = cv::makePtr<cv::flann::LshIndexParams>(6, 12, 1);
        cv::Ptr<cv::flann::SearchParams> searchParams = cv::makePtr<cv::flann::SearchParams>(50);
        return cv::makePtr<cv::FlannBasedMatcher>(indexParams, searchParams);
    }

    return useFLANN ? cv::DescriptorMatcher::create("FlannBased")
                    : cv::DescriptorMatcher::create("BruteForce");
}

View File

@ -0,0 +1,74 @@
/*
* Utils.h
*
* Created on: Mar 28, 2014
* Author: Edgar Riba
*/
#ifndef UTILS_H_
#define UTILS_H_
#include <iostream>
#include <opencv2/features2d.hpp>
#include "PnPProblem.h"
// Draw a text with the question point
void drawQuestion(cv::Mat image, cv::Point3f point, cv::Scalar color);
// Draw a text with the number of entered points
void drawText(cv::Mat image, std::string text, cv::Scalar color);
// Draw a text with the number of entered points
void drawText2(cv::Mat image, std::string text, cv::Scalar color);
// Draw a text with the frame ratio
void drawFPS(cv::Mat image, double fps, cv::Scalar color);
// Draw a text with the frame ratio
void drawConfidence(cv::Mat image, double confidence, cv::Scalar color);
// Draw a text with the number of entered points
void drawCounter(cv::Mat image, int n, int n_max, cv::Scalar color);
// Draw the points and the coordinates
void drawPoints(cv::Mat image, std::vector<cv::Point2f> &list_points_2d, std::vector<cv::Point3f> &list_points_3d, cv::Scalar color);
// Draw only the 2D points
void draw2DPoints(cv::Mat image, std::vector<cv::Point2f> &list_points, cv::Scalar color);
// Draw an arrow into the image
void drawArrow(cv::Mat image, cv::Point2i p, cv::Point2i q, cv::Scalar color, int arrowMagnitude = 9, int thickness=1, int line_type=8, int shift=0);
// Draw the 3D coordinate axes
void draw3DCoordinateAxes(cv::Mat image, const std::vector<cv::Point2f> &list_points2d);
// Draw the object mesh
void drawObjectMesh(cv::Mat image, const Mesh *mesh, PnPProblem *pnpProblem, cv::Scalar color);
// Computes the norm of the translation error
double get_translation_error(const cv::Mat &t_true, const cv::Mat &t);
// Computes the norm of the rotation error
double get_rotation_error(const cv::Mat &R_true, const cv::Mat &R);
// Converts a given Rotation Matrix to Euler angles
cv::Mat rot2euler(const cv::Mat & rotationMatrix);
// Converts a given Euler angles to Rotation Matrix
cv::Mat euler2rot(const cv::Mat & euler);
// Converts a given string to an integer
int StringToInt ( const std::string &Text );
// Converts a given float to a string
std::string FloatToString ( float Number );
// Converts a given integer to a string
std::string IntToString ( int Number );
void createFeatures(const std::string &featureName, int numKeypoints, cv::Ptr<cv::Feature2D> &detector, cv::Ptr<cv::Feature2D> &descriptor);
cv::Ptr<cv::DescriptorMatcher> createMatcher(const std::string &featureName, bool useFLANN);
#endif /* UTILS_H_ */

View File

@ -0,0 +1,501 @@
// C++
#include <iostream>
// OpenCV
#include <opencv2/core.hpp>
#include <opencv2/core/utils/filesystem.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/video/tracking.hpp>
// PnP Tutorial
#include "Mesh.h"
#include "Model.h"
#include "PnPProblem.h"
#include "RobustMatcher.h"
#include "ModelRegistration.h"
#include "Utils.h"
/** GLOBAL VARIABLES **/
using namespace cv;
using namespace std;
/** Functions headers **/
void help();
void initKalmanFilter( KalmanFilter &KF, int nStates, int nMeasurements, int nInputs, double dt);
void predictKalmanFilter( KalmanFilter &KF, Mat &translation_predicted, Mat &rotation_predicted );
void updateKalmanFilter( KalmanFilter &KF, Mat &measurements,
Mat &translation_estimated, Mat &rotation_estimated );
void fillMeasurements( Mat &measurements,
const Mat &translation_measured, const Mat &rotation_measured);
/** Main program **/
/**
 * Real-time object detection demo: for every video frame, matches features
 * against a trained 3D textured model, estimates the camera pose with
 * solvePnPRansac and stabilises it with a linear Kalman filter.
 * Press ESC to quit.
 *
 * Fixes vs. original: guards the detection-ratio computation against a
 * division by zero when a frame yields no good matches, and uses a float
 * literal for the float RANSAC reprojection threshold.
 */
int main(int argc, char *argv[])
{
    help();

    const String keys =
            "{help h | | print this message }"
            "{video v | | path to recorded video }"
            "{model | | path to yml model }"
            "{mesh | | path to ply mesh }"
            "{keypoints k |2000 | number of keypoints to detect }"
            "{ratio r |0.7 | threshold for ratio test }"
            "{iterations it |500 | RANSAC maximum iterations count }"
            "{error e |6.0 | RANSAC reprojection error }"
            "{confidence c |0.99 | RANSAC confidence }"
            "{inliers in |30 | minimum inliers for Kalman update }"
            "{method pnp |0 | PnP method: (0) ITERATIVE - (1) EPNP - (2) P3P - (3) DLS - (5) AP3P}"
            "{fast f |true | use of robust fast match }"
            "{feature |ORB | feature name (ORB, KAZE, AKAZE, BRISK, SIFT, SURF, BINBOOST, VGG) }"
            "{FLANN |false | use FLANN library for descriptors matching }"
            "{save | | path to the directory where to save the image results }"
            "{displayFiltered |false | display filtered pose (from Kalman filter) }"
            ;
    CommandLineParser parser(argc, argv, keys);

    // Default input data shipped with the tutorial
    string video_read_path = samples::findFile("samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/Data/box.mp4"); // recorded video
    string yml_read_path = samples::findFile("samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/Data/cookies_ORB.yml"); // 3dpts + descriptors
    string ply_read_path = samples::findFile("samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/Data/box.ply"); // mesh

    // Intrinsic camera parameters: UVC WEBCAM
    double f = 55; // focal length in mm
    double sx = 22.3, sy = 14.9; // sensor size
    double width = 640, height = 480; // image size
    double params_WEBCAM[] = { width*f/sx, // fx
                               height*f/sy, // fy
                               width/2, // cx
                               height/2}; // cy

    // Some basic colors
    Scalar red(0, 0, 255);
    Scalar green(0,255,0);
    Scalar blue(255,0,0);
    Scalar yellow(0,255,255);

    // Robust Matcher parameters
    int numKeyPoints = 2000; // number of detected keypoints
    float ratioTest = 0.70f; // ratio test
    bool fast_match = true; // fastRobustMatch() or robustMatch()

    // RANSAC parameters
    int iterationsCount = 500; // number of Ransac iterations.
    float reprojectionError = 6.0f; // maximum allowed distance to consider it an inlier.
    double confidence = 0.99; // ransac successful confidence.

    // Kalman Filter parameters
    int minInliersKalman = 30; // Kalman threshold updating

    // PnP parameters
    int pnpMethod = SOLVEPNP_ITERATIVE;
    string featureName = "ORB";
    bool useFLANN = false;

    // Save results
    string saveDirectory = "";
    Mat frameSave;
    int frameCount = 0;

    bool displayFilteredPose = false;

    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }
    else
    {
        // command-line values override the defaults above
        video_read_path = parser.get<string>("video").size() > 0 ? parser.get<string>("video") : video_read_path;
        yml_read_path = parser.get<string>("model").size() > 0 ? parser.get<string>("model") : yml_read_path;
        ply_read_path = parser.get<string>("mesh").size() > 0 ? parser.get<string>("mesh") : ply_read_path;
        numKeyPoints = parser.has("keypoints") ? parser.get<int>("keypoints") : numKeyPoints;
        ratioTest = parser.has("ratio") ? parser.get<float>("ratio") : ratioTest;
        fast_match = parser.has("fast") ? parser.get<bool>("fast") : fast_match;
        iterationsCount = parser.has("iterations") ? parser.get<int>("iterations") : iterationsCount;
        reprojectionError = parser.has("error") ? parser.get<float>("error") : reprojectionError;
        confidence = parser.has("confidence") ? parser.get<float>("confidence") : confidence;
        minInliersKalman = parser.has("inliers") ? parser.get<int>("inliers") : minInliersKalman;
        pnpMethod = parser.has("method") ? parser.get<int>("method") : pnpMethod;
        featureName = parser.has("feature") ? parser.get<string>("feature") : featureName;
        useFLANN = parser.has("FLANN") ? parser.get<bool>("FLANN") : useFLANN;
        saveDirectory = parser.has("save") ? parser.get<string>("save") : saveDirectory;
        displayFilteredPose = parser.has("displayFiltered") ? parser.get<bool>("displayFiltered") : displayFilteredPose;
    }

    // Echo the effective configuration
    std::cout << "Video: " << video_read_path << std::endl;
    std::cout << "Training data: " << yml_read_path << std::endl;
    std::cout << "CAD model: " << ply_read_path << std::endl;
    std::cout << "Ratio test threshold: " << ratioTest << std::endl;
    std::cout << "Fast match(no symmetry test)?: " << fast_match << std::endl;
    std::cout << "RANSAC number of iterations: " << iterationsCount << std::endl;
    std::cout << "RANSAC reprojection error: " << reprojectionError << std::endl;
    std::cout << "RANSAC confidence threshold: " << confidence << std::endl;
    std::cout << "Kalman number of inliers: " << minInliersKalman << std::endl;
    std::cout << "PnP method: " << pnpMethod << std::endl;
    std::cout << "Feature: " << featureName << std::endl;
    std::cout << "Number of keypoints for ORB: " << numKeyPoints << std::endl;
    std::cout << "Use FLANN-based matching? " << useFLANN << std::endl;
    std::cout << "Save directory: " << saveDirectory << std::endl;
    std::cout << "Display filtered pose from Kalman filter? " << displayFilteredPose << std::endl;

    PnPProblem pnp_detection(params_WEBCAM); // pose measured directly from RANSAC
    PnPProblem pnp_detection_est(params_WEBCAM); // pose after Kalman filtering

    Model model; // instantiate Model object
    model.load(yml_read_path); // load a 3D textured object model

    Mesh mesh; // instantiate Mesh object
    mesh.load(ply_read_path); // load an object mesh

    RobustMatcher rmatcher; // instantiate RobustMatcher

    Ptr<FeatureDetector> detector, descriptor;
    createFeatures(featureName, numKeyPoints, detector, descriptor);
    rmatcher.setFeatureDetector(detector); // set feature detector
    rmatcher.setDescriptorExtractor(descriptor); // set descriptor extractor
    rmatcher.setDescriptorMatcher(createMatcher(featureName, useFLANN)); // set matcher
    rmatcher.setRatio(ratioTest); // set ratio test parameter
    if (!model.get_trainingImagePath().empty())
    {
        Mat trainingImg = imread(model.get_trainingImagePath());
        rmatcher.setTrainingImage(trainingImg);
    }

    KalmanFilter KF; // instantiate Kalman Filter
    int nStates = 18; // the number of states
    int nMeasurements = 6; // the number of measured states
    int nInputs = 0; // the number of control actions
    double dt = 0.125; // time between measurements (1/FPS)

    initKalmanFilter(KF, nStates, nMeasurements, nInputs, dt); // init function
    Mat measurements(nMeasurements, 1, CV_64FC1); measurements.setTo(Scalar(0));
    bool good_measurement = false;

    // Get the MODEL INFO
    vector<Point3f> list_points3d_model = model.get_points3d(); // list with model 3D coordinates
    Mat descriptors_model = model.get_descriptors(); // list with descriptors of each 3D coordinate
    vector<KeyPoint> keypoints_model = model.get_keypoints();

    // Create & Open Window
    namedWindow("REAL TIME DEMO", WINDOW_KEEPRATIO);

    VideoCapture cap; // instantiate VideoCapture
    cap.open(video_read_path); // open a recorded video

    if(!cap.isOpened()) // check if we succeeded
    {
        cout << "Could not open the camera device" << endl;
        return -1;
    }

    if (!saveDirectory.empty())
    {
        if (!cv::utils::fs::exists(saveDirectory))
        {
            std::cout << "Create directory: " << saveDirectory << std::endl;
            cv::utils::fs::createDirectories(saveDirectory);
        }
    }

    // Measure elapsed time
    TickMeter tm;

    Mat frame, frame_vis, frame_matching;
    while(cap.read(frame) && (char)waitKey(30) != 27) // capture frame until ESC is pressed
    {
        tm.reset();
        tm.start();
        frame_vis = frame.clone(); // refresh visualisation frame

        // -- Step 1: Robust matching between model descriptors and scene descriptors
        vector<DMatch> good_matches; // to obtain the 3D points of the model
        vector<KeyPoint> keypoints_scene; // to obtain the 2D points of the scene

        if(fast_match)
        {
            rmatcher.fastRobustMatch(frame, good_matches, keypoints_scene, descriptors_model, keypoints_model);
        }
        else
        {
            rmatcher.robustMatch(frame, good_matches, keypoints_scene, descriptors_model, keypoints_model);
        }

        frame_matching = rmatcher.getImageMatching();
        if (!frame_matching.empty())
        {
            imshow("Keypoints matching", frame_matching);
        }

        // -- Step 2: Find out the 2D/3D correspondences
        vector<Point3f> list_points3d_model_match; // container for the model 3D coordinates found in the scene
        vector<Point2f> list_points2d_scene_match; // container for the model 2D coordinates found in the scene

        for(unsigned int match_index = 0; match_index < good_matches.size(); ++match_index)
        {
            Point3f point3d_model = list_points3d_model[ good_matches[match_index].trainIdx ]; // 3D point from model
            Point2f point2d_scene = keypoints_scene[ good_matches[match_index].queryIdx ].pt; // 2D point from the scene
            list_points3d_model_match.push_back(point3d_model); // add 3D point
            list_points2d_scene_match.push_back(point2d_scene); // add 2D point
        }

        // Draw outliers
        draw2DPoints(frame_vis, list_points2d_scene_match, red);

        Mat inliers_idx;
        vector<Point2f> list_points2d_inliers;

        // Instantiate estimated translation and rotation
        good_measurement = false;

        if(good_matches.size() >= 4) // OpenCV requires solvePnPRANSAC to minimally have 4 set of points
        {
            // -- Step 3: Estimate the pose using RANSAC approach
            pnp_detection.estimatePoseRANSAC( list_points3d_model_match, list_points2d_scene_match,
                                              pnpMethod, inliers_idx,
                                              iterationsCount, reprojectionError, confidence );

            // -- Step 4: Catch the inliers keypoints to draw
            for(int inliers_index = 0; inliers_index < inliers_idx.rows; ++inliers_index)
            {
                int n = inliers_idx.at<int>(inliers_index); // i-inlier
                Point2f point2d = list_points2d_scene_match[n]; // i-inlier point 2D
                list_points2d_inliers.push_back(point2d); // add i-inlier to list
            }
            // Draw inliers points 2D
            draw2DPoints(frame_vis, list_points2d_inliers, blue);

            // -- Step 5: Kalman Filter

            // GOOD MEASUREMENT
            if( inliers_idx.rows >= minInliersKalman )
            {
                // Get the measured translation
                Mat translation_measured = pnp_detection.get_t_matrix();
                // Get the measured rotation
                Mat rotation_measured = pnp_detection.get_R_matrix();
                // fill the measurements vector
                fillMeasurements(measurements, translation_measured, rotation_measured);
                good_measurement = true;
            }

            // update the Kalman filter with good measurements, otherwise with previous valid measurements
            Mat translation_estimated(3, 1, CV_64FC1);
            Mat rotation_estimated(3, 3, CV_64FC1);
            updateKalmanFilter( KF, measurements,
                                translation_estimated, rotation_estimated);

            // -- Step 6: Set estimated projection matrix
            pnp_detection_est.set_P_matrix(rotation_estimated, translation_estimated);
        }

        // -- Step X: Draw pose and coordinate frame
        float l = 5;
        vector<Point2f> pose_points2d;
        if (!good_measurement || displayFilteredPose)
        {
            drawObjectMesh(frame_vis, &mesh, &pnp_detection_est, yellow); // draw estimated pose
            pose_points2d.push_back(pnp_detection_est.backproject3DPoint(Point3f(0,0,0))); // axis center
            pose_points2d.push_back(pnp_detection_est.backproject3DPoint(Point3f(l,0,0))); // axis x
            pose_points2d.push_back(pnp_detection_est.backproject3DPoint(Point3f(0,l,0))); // axis y
            pose_points2d.push_back(pnp_detection_est.backproject3DPoint(Point3f(0,0,l))); // axis z
            draw3DCoordinateAxes(frame_vis, pose_points2d); // draw axes
        }
        else
        {
            drawObjectMesh(frame_vis, &mesh, &pnp_detection, green); // draw current pose
            pose_points2d.push_back(pnp_detection.backproject3DPoint(Point3f(0,0,0))); // axis center
            pose_points2d.push_back(pnp_detection.backproject3DPoint(Point3f(l,0,0))); // axis x
            pose_points2d.push_back(pnp_detection.backproject3DPoint(Point3f(0,l,0))); // axis y
            pose_points2d.push_back(pnp_detection.backproject3DPoint(Point3f(0,0,l))); // axis z
            draw3DCoordinateAxes(frame_vis, pose_points2d); // draw axes
        }

        // FRAME RATE
        // see how much time has elapsed
        tm.stop();
        // calculate current FPS
        double fps = 1.0 / tm.getTimeSec();
        drawFPS(frame_vis, fps, yellow); // frame ratio

        // Detection confidence: percentage of RANSAC inliers among the good
        // matches. Guard against division by zero when no matches were found.
        double detection_ratio = good_matches.empty() ? 0.0
                : ((double)inliers_idx.rows/(double)good_matches.size())*100;
        drawConfidence(frame_vis, detection_ratio, yellow);

        // -- Step X: Draw some debugging text
        // Draw some debug text
        int inliers_int = inliers_idx.rows;
        int outliers_int = (int)good_matches.size() - inliers_int;
        string inliers_str = IntToString(inliers_int);
        string outliers_str = IntToString(outliers_int);
        string n = IntToString((int)good_matches.size());
        string text = "Found " + inliers_str + " of " + n + " matches";
        string text2 = "Inliers: " + inliers_str + " - Outliers: " + outliers_str;
        drawText(frame_vis, text, green);
        drawText2(frame_vis, text2, red);

        imshow("REAL TIME DEMO", frame_vis);

        if (!saveDirectory.empty())
        {
            // Stack the pose view on top of the matching view (when present)
            const int widthSave = !frame_matching.empty() ? frame_matching.cols : frame_vis.cols;
            const int heightSave = !frame_matching.empty() ? frame_matching.rows + frame_vis.rows : frame_vis.rows;
            frameSave = Mat::zeros(heightSave, widthSave, CV_8UC3);
            if (!frame_matching.empty())
            {
                int startX = (int)((widthSave - frame_vis.cols) / 2.0);
                Mat roi = frameSave(Rect(startX, 0, frame_vis.cols, frame_vis.rows));
                frame_vis.copyTo(roi);
                roi = frameSave(Rect(0, frame_vis.rows, frame_matching.cols, frame_matching.rows));
                frame_matching.copyTo(roi);
            }
            else
            {
                frame_vis.copyTo(frameSave);
            }
            string saveFilename = cv::format(string(saveDirectory + "/image_%04d.png").c_str(), frameCount);
            imwrite(saveFilename, frameSave);
            frameCount++;
        }
    }

    // Close and Destroy Window
    destroyWindow("REAL TIME DEMO");
    cout << "GOODBYE ..." << endl;
}
/**********************************************************************************************************/
/**
 * Print usage information for the real-time pose estimation demo.
 */
void help()
{
    cout << "--------------------------------------------------------------------------" << endl;
    cout << "This program shows how to detect an object given its 3D textured model. You can choose to "
            "use a recorded video or the webcam." << endl;
    cout << "Usage:" << endl;
    cout << "./cpp-tutorial-pnp_detection -help" << endl;
    cout << "Keys:" << endl;
    cout << "'esc' - to quit." << endl;
    cout << "--------------------------------------------------------------------------" << endl;
    cout << endl;
}
/**********************************************************************************************************/
/**
 * Initialise the Kalman filter used to smooth the estimated camera pose.
 *
 * State layout (18 states): indices 0-8 hold position x,y,z plus its first
 * and second derivatives; indices 9-17 hold roll,pitch,yaw plus their first
 * and second derivatives. Measurements (6) are x,y,z,roll,pitch,yaw.
 *
 * @param KF             filter to initialise (CV_64F)
 * @param nStates        number of state variables (18)
 * @param nMeasurements  number of measured variables (6)
 * @param nInputs        number of control inputs (0)
 * @param dt             time step between frames
 */
void initKalmanFilter(KalmanFilter &KF, int nStates, int nMeasurements, int nInputs, double dt)
{
    KF.init(nStates, nMeasurements, nInputs, CV_64F); // init Kalman Filter

    setIdentity(KF.processNoiseCov, Scalar::all(1e-5));     // set process noise
    setIdentity(KF.measurementNoiseCov, Scalar::all(1e-2)); // set measurement noise
    setIdentity(KF.errorCovPost, Scalar::all(1));           // error covariance

    /* DYNAMIC MODEL: constant-acceleration kinematics, applied twice --
       once to the position block (rows 0-8) and once to the orientation
       block (rows 9-17). transitionMatrix starts as identity, so only the
       off-diagonal dt and 0.5*dt^2 couplings need to be filled in. */
    const double dt2 = 0.5 * dt * dt;
    for (int i = 0; i < 6; ++i)
    {
        // value += dt * first derivative (position rows 0-5, orientation rows 9-14)
        KF.transitionMatrix.at<double>(i, i + 3) = dt;
        KF.transitionMatrix.at<double>(i + 9, i + 12) = dt;
    }
    for (int i = 0; i < 3; ++i)
    {
        // value += 0.5*dt^2 * second derivative (acceleration terms)
        KF.transitionMatrix.at<double>(i, i + 6) = dt2;
        KF.transitionMatrix.at<double>(i + 9, i + 15) = dt2;
    }

    /* MEASUREMENT MODEL: we directly observe x,y,z (states 0-2) and
       roll,pitch,yaw (states 9-11). */
    for (int i = 0; i < 3; ++i)
    {
        KF.measurementMatrix.at<double>(i, i) = 1;         // x, y, z
        KF.measurementMatrix.at<double>(i + 3, i + 9) = 1; // roll, pitch, yaw
    }
}
/**********************************************************************************************************/
/**
 * Run one predict/correct cycle of the Kalman filter and extract the
 * smoothed pose from the corrected state.
 *
 * @param KF                     the (already initialised) Kalman filter
 * @param measurement            6x1 measurement vector [x,y,z,roll,pitch,yaw]
 * @param translation_estimated  output 3x1 translation (written in place)
 * @param rotation_estimated     output 3x3 rotation matrix (reassigned)
 */
void updateKalmanFilter( KalmanFilter &KF, Mat &measurement,
                         Mat &translation_estimated, Mat &rotation_estimated )
{
    // Predict first, refreshing the internal statePre variable
    Mat prediction = KF.predict();

    // Correct the prediction with the current measurement
    Mat estimated = KF.correct(measurement);

    // Translation occupies state entries 0..2
    for (int i = 0; i < 3; ++i)
        translation_estimated.at<double>(i) = estimated.at<double>(i);

    // Euler angles occupy state entries 9..11
    Mat eulers_estimated(3, 1, CV_64F);
    for (int i = 0; i < 3; ++i)
        eulers_estimated.at<double>(i) = estimated.at<double>(9 + i);

    // Convert the estimated euler angles back to a rotation matrix
    rotation_estimated = euler2rot(eulers_estimated);
}
/**********************************************************************************************************/
/**
 * Pack a measured pose into the Kalman measurement vector
 * [x, y, z, roll, pitch, yaw] (indices 0-5 of a CV_64F Mat).
 *
 * @param measurements          output vector, written in place
 * @param translation_measured  3x1 translation from the PnP solution
 * @param rotation_measured     3x3 rotation matrix from the PnP solution
 */
void fillMeasurements( Mat &measurements,
                       const Mat &translation_measured, const Mat &rotation_measured)
{
    // Convert rotation matrix to euler angles.
    // Fix: initialise directly from rot2euler() -- the previous code
    // allocated a 3x1 matrix and immediately overwrote it by assignment.
    Mat measured_eulers = rot2euler(rotation_measured);

    // Set measurement to predict
    measurements.at<double>(0) = translation_measured.at<double>(0); // x
    measurements.at<double>(1) = translation_measured.at<double>(1); // y
    measurements.at<double>(2) = translation_measured.at<double>(2); // z
    measurements.at<double>(3) = measured_eulers.at<double>(0);      // roll
    measurements.at<double>(4) = measured_eulers.at<double>(1);      // pitch
    measurements.at<double>(5) = measured_eulers.at<double>(2);      // yaw
}

View File

@ -0,0 +1,293 @@
// C++
#include <iostream>
// OpenCV
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/features2d.hpp>
// PnP Tutorial
#include "Mesh.h"
#include "Model.h"
#include "PnPProblem.h"
#include "RobustMatcher.h"
#include "ModelRegistration.h"
#include "Utils.h"
using namespace cv;
using namespace std;
/** GLOBAL VARIABLES **/
// Boolean the know if the registration it's done
bool end_registration = false;
// Intrinsic camera parameters: UVC WEBCAM
const double f = 45; // focal length in mm
const double sx = 22.3, sy = 14.9;
const double width = 2592, height = 1944;
const double params_CANON[] = { width*f/sx, // fx
height*f/sy, // fy
width/2, // cx
height/2}; // cy
// Setup the points to register in the image
// In the order of the *.ply file and starting at 1
const int n = 8;
const int pts[] = {1, 2, 3, 4, 5, 6, 7, 8}; // 3 -> 4
/*
* CREATE MODEL REGISTRATION OBJECT
* CREATE OBJECT MESH
* CREATE OBJECT MODEL
* CREATE PNP OBJECT
*/
ModelRegistration registration;
Model model;
Mesh mesh;
PnPProblem pnp_registration(params_CANON);
/**********************************************************************************************************/
/**
 * Print usage information for the model registration demo.
 */
static void help()
{
    cout << "--------------------------------------------------------------------------" << endl;
    cout << "This program shows how to create your 3D textured model. " << endl;
    cout << "Usage:" << endl;
    cout << "./cpp-tutorial-pnp_registration" << endl;
    cout << "--------------------------------------------------------------------------" << endl;
    cout << endl;
}
// Mouse events for model registration
static void onMouseModelRegistration( int event, int x, int y, int, void* )
{
if ( event == EVENT_LBUTTONUP )
{
bool is_registrable = registration.is_registrable();
if (is_registrable)
{
int n_regist = registration.getNumRegist();
int n_vertex = pts[n_regist];
Point2f point_2d = Point2f((float)x,(float)y);
Point3f point_3d = mesh.getVertex(n_vertex-1);
registration.registerPoint(point_2d, point_3d);
if( registration.getNumRegist() == registration.getNumMax() ) end_registration = true;
}
}
}
/**
 * Main program: build a textured 3D model for later real-time pose detection.
 *
 * Pipeline:
 *  1. Parse command-line options (all paths default to the bundled samples).
 *  2. Interactive registration: the user clicks the 8 box corners in the
 *     image; each click is paired with the matching mesh vertex.
 *  3. Estimate the camera pose from those 2D-3D correspondences (PnP).
 *  4. Detect keypoints/descriptors, keep those that back-project onto the
 *     mesh surface, and save the resulting model to a *.yml file.
 */
int main(int argc, char *argv[])
{
    help();

    const String keys =
            "{help h | | print this message }"
            "{image i | | path to input image }"
            "{model | | path to output yml model }"
            "{mesh | | path to ply mesh }"
            "{keypoints k |2000 | number of keypoints to detect (only for ORB) }"
            "{feature |ORB | feature name (ORB, KAZE, AKAZE, BRISK, SIFT, SURF, BINBOOST, VGG) }"
            ;
    CommandLineParser parser(argc, argv, keys);

    // Defaults point at the sample data shipped with OpenCV.
    string img_path = samples::findFile("samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/Data/resized_IMG_3875.JPG"); // image to register
    string ply_read_path = samples::findFile("samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/Data/box.ply"); // object mesh
    string write_path = samples::findFile("samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/Data/cookies_ORB.yml"); // output file
    int numKeyPoints = 2000;
    string featureName = "ORB";

    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }
    else
    {
        // Each option overrides its default only when actually supplied.
        img_path = parser.get<string>("image").size() > 0 ? parser.get<string>("image") : img_path;
        ply_read_path = parser.get<string>("mesh").size() > 0 ? parser.get<string>("mesh") : ply_read_path;
        write_path = parser.get<string>("model").size() > 0 ? parser.get<string>("model") : write_path;
        numKeyPoints = parser.has("keypoints") ? parser.get<int>("keypoints") : numKeyPoints;
        featureName = parser.has("feature") ? parser.get<string>("feature") : featureName;
    }
    std::cout << "Input image: " << img_path << std::endl;
    std::cout << "CAD model: " << ply_read_path << std::endl;
    std::cout << "Output training file: " << write_path << std::endl;
    std::cout << "Feature: " << featureName << std::endl;
    std::cout << "Number of keypoints for ORB: " << numKeyPoints << std::endl;

    // load a mesh given the *.ply file path
    mesh.load(ply_read_path);

    // Instantiate robust matcher: detector, extractor, matcher
    RobustMatcher rmatcher;
    Ptr<Feature2D> detector, descriptor;
    createFeatures(featureName, numKeyPoints, detector, descriptor);
    rmatcher.setFeatureDetector(detector);
    rmatcher.setDescriptorExtractor(descriptor);

    /** GROUND TRUTH OF THE FIRST IMAGE **/
    // Create & Open Window
    namedWindow("MODEL REGISTRATION", WINDOW_KEEPRATIO);
    // Set up the mouse events: clicks are recorded by onMouseModelRegistration
    setMouseCallback("MODEL REGISTRATION", onMouseModelRegistration, 0);

    // Open the image to register
    Mat img_in = imread(img_path, IMREAD_COLOR);
    Mat img_vis;

    if (img_in.empty()) {
        cout << "Could not open or find the image" << endl;
        return -1;
    }

    // Set the number of points to register
    int num_registrations = n;
    registration.setNumMax(num_registrations);

    cout << "Click the box corners ..." << endl;
    cout << "Waiting ..." << endl;

    // Some basic colors
    const Scalar red(0, 0, 255);
    const Scalar green(0,255,0);
    const Scalar blue(255,0,0);
    const Scalar yellow(0,255,255);

    // Loop until all the points are registered (note: pressing any key also
    // leaves the loop, since waitKey returns >= 0 then)
    while ( waitKey(30) < 0 )
    {
        // Refresh debug image
        img_vis = img_in.clone();

        // Current registered points
        vector<Point2f> list_points2d = registration.get_points2d();
        vector<Point3f> list_points3d = registration.get_points3d();

        // Draw current registered points
        drawPoints(img_vis, list_points2d, list_points3d, red);

        // If the registration is not finished, draw which 3D point we have to register.
        // If the registration is finished, breaks the loop.
        if (!end_registration)
        {
            // Draw debug text
            int n_regist = registration.getNumRegist();
            int n_vertex = pts[n_regist];
            Point3f current_poin3d = mesh.getVertex(n_vertex-1);

            drawQuestion(img_vis, current_poin3d, green);
            drawCounter(img_vis, registration.getNumRegist(), registration.getNumMax(), red);
        }
        else
        {
            // Draw debug text
            drawText(img_vis, "END REGISTRATION", green);
            drawCounter(img_vis, registration.getNumRegist(), registration.getNumMax(), green);
            break;
        }

        // Show the image
        imshow("MODEL REGISTRATION", img_vis);
    }

    /** COMPUTE CAMERA POSE **/
    cout << "COMPUTING POSE ..." << endl;

    // The list of registered points
    vector<Point2f> list_points2d = registration.get_points2d();
    vector<Point3f> list_points3d = registration.get_points3d();

    // Estimate pose given the registered points
    bool is_correspondence = pnp_registration.estimatePose(list_points3d, list_points2d, SOLVEPNP_ITERATIVE);
    if ( is_correspondence )
    {
        cout << "Correspondence found" << endl;

        // Compute all the 2D points of the mesh to verify the algorithm and draw it
        vector<Point2f> list_points2d_mesh = pnp_registration.verify_points(&mesh);
        draw2DPoints(img_vis, list_points2d_mesh, green);
    } else {
        cout << "Correspondence not found" << endl << endl;
    }

    // Show the image
    imshow("MODEL REGISTRATION", img_vis);

    // Show image until ESC pressed
    waitKey(0);

    /** COMPUTE 3D of the image Keypoints **/
    // Containers for keypoints and descriptors of the model
    vector<KeyPoint> keypoints_model;
    Mat descriptors;

    // Compute keypoints and descriptors
    rmatcher.computeKeyPoints(img_in, keypoints_model);
    rmatcher.computeDescriptors(img_in, keypoints_model, descriptors);

    // Check if keypoints are on the surface of the registration image and add to the model
    for (unsigned int i = 0; i < keypoints_model.size(); ++i) {
        Point2f point2d(keypoints_model[i].pt);
        Point3f point3d;
        // only keypoints whose ray hits the mesh get a 3D coordinate
        bool on_surface = pnp_registration.backproject2DPoint(&mesh, point2d, point3d);
        if (on_surface)
        {
            model.add_correspondence(point2d, point3d);
            model.add_descriptor(descriptors.row(i));
            model.add_keypoint(keypoints_model[i]);
        }
        else
        {
            model.add_outlier(point2d);
        }
    }
    model.set_trainingImagePath(img_path);
    // save the model into a *.yaml file
    model.save(write_path);

    // Out image
    img_vis = img_in.clone();

    // The list of the points2d of the model
    vector<Point2f> list_points_in = model.get_points2d_in();
    vector<Point2f> list_points_out = model.get_points2d_out();

    // Draw some debug text
    string num = IntToString((int)list_points_in.size());
    string text = "There are " + num + " inliers";
    drawText(img_vis, text, green);

    // Draw some debug text
    num = IntToString((int)list_points_out.size());
    text = "There are " + num + " outliers";
    drawText2(img_vis, text, red);

    // Draw the object mesh
    drawObjectMesh(img_vis, &mesh, &pnp_registration, blue);

    // Draw found keypoints depending on if are or not on the surface
    draw2DPoints(img_vis, list_points_in, green);
    draw2DPoints(img_vis, list_points_out, red);

    // Show the image
    imshow("MODEL REGISTRATION", img_vis);

    // Wait until ESC pressed
    waitKey(0);

    // Close and Destroy Window
    destroyWindow("MODEL REGISTRATION");

    cout << "GOODBYE" << endl;
}

View File

@ -0,0 +1,19 @@
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs/legacy/constants_c.h>
#include <opencv2/videoio/legacy/constants_c.h>
#include <opencv2/photo/legacy/constants_c.h>
#include <opencv2/video/legacy/constants_c.h>
using namespace cv;
/**
 * Compile/run smoke test: reference a few legacy constants from the
 * *_c.h compatibility headers so the build verifies they still resolve.
 */
int main(int /*argc*/, const char** /*argv*/)
{
    std::cout << (int)CV_LOAD_IMAGE_GRAYSCALE;
    std::cout << (int)CV_CAP_FFMPEG;
    std::cout << std::endl;
    return 0;
}

View File

@ -0,0 +1,57 @@
/**
* @file AddingImages.cpp
* @brief Simple linear blender ( dst = alpha*src1 + beta*src2 )
* @author OpenCV team
*/
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace cv;
// we're NOT "using namespace std;" here, to avoid collisions between the beta variable and std::beta in c++17
using std::cin;
using std::cout;
using std::endl;
/**
 * @function main
 * @brief Blend two images with a user-chosen alpha: dst = alpha*src1 + beta*src2
 * @return EXIT_SUCCESS on success, EXIT_FAILURE if an input image cannot be loaded
 */
int main( void )
{
    double alpha = 0.5; double beta; double input;

    Mat src1, src2, dst;

    /// Ask the user enter alpha
    cout << " Simple Linear Blender " << endl;
    cout << "-----------------------" << endl;
    cout << "* Enter alpha [0.0-1.0]: ";
    cin >> input;

    // We use the alpha provided by the user if it is between 0 and 1.
    // Fix: also require the extraction itself to succeed -- a failed
    // `cin >> input` stores 0.0 (since C++11), which previously forced
    // alpha = 0 for non-numeric input; now the 0.5 default is kept.
    if( cin && input >= 0 && input <= 1 )
    { alpha = input; }

    //![load]
    /// Read images ( both have to be of the same size and type )
    src1 = imread( samples::findFile("LinuxLogo.jpg") );
    src2 = imread( samples::findFile("WindowsLogo.jpg") );
    //![load]

    if( src1.empty() ) { cout << "Error loading src1" << endl; return EXIT_FAILURE; }
    if( src2.empty() ) { cout << "Error loading src2" << endl; return EXIT_FAILURE; }

    //![blend_images]
    beta = ( 1.0 - alpha );
    addWeighted( src1, alpha, src2, beta, 0.0, dst);
    //![blend_images]

    //![display]
    imshow( "Linear Blend", dst );
    waitKey(0);
    //![display]

    return 0;
}

View File

@ -0,0 +1,95 @@
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
/**
 * Print a short description and the command-line usage of the DFT demo.
 */
static void help(char ** argv)
{
    cout << endl;
    cout << "This program demonstrated the use of the discrete Fourier transform (DFT). " << endl;
    cout << "The dft of an image is taken and it's power spectrum is displayed." << endl << endl;
    cout << "Usage:" << endl;
    cout << argv[0] << " [image_name -- default lena.jpg]" << endl << endl;
}
/**
 * Demo: compute the DFT of a grayscale image and show its log power spectrum
 * with the zero frequency shifted to the image centre.
 */
int main(int argc, char ** argv)
{
    help(argv);

    // Input image (read as grayscale); defaults to the bundled lena.jpg sample
    const char* filename = argc >=2 ? argv[1] : "lena.jpg";

    Mat I = imread( samples::findFile( filename ), IMREAD_GRAYSCALE);
    if( I.empty()){
        cout << "Error opening image" << endl;
        return EXIT_FAILURE;
    }

    //! [expand]
    Mat padded;                            //expand input image to optimal size
    int m = getOptimalDFTSize( I.rows );
    int n = getOptimalDFTSize( I.cols ); // on the border add zero values
    copyMakeBorder(I, padded, 0, m - I.rows, 0, n - I.cols, BORDER_CONSTANT, Scalar::all(0));
    //! [expand]

    //! [complex_and_real]
    Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
    Mat complexI;
    merge(planes, 2, complexI);         // Add to the expanded another plane with zeros
    //! [complex_and_real]

    //! [dft]
    dft(complexI, complexI);            // this way the result may fit in the source matrix
    //! [dft]

    // compute the magnitude and switch to logarithmic scale
    // => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2))
    //! [magnitude]
    split(complexI, planes);                   // planes[0] = Re(DFT(I), planes[1] = Im(DFT(I))
    magnitude(planes[0], planes[1], planes[0]);// planes[0] = magnitude
    Mat magI = planes[0];
    //! [magnitude]

    //! [log]
    magI += Scalar::all(1);                    // switch to logarithmic scale
    log(magI, magI);
    //! [log]

    //! [crop_rearrange]
    // crop the spectrum, if it has an odd number of rows or columns
    magI = magI(Rect(0, 0, magI.cols & -2, magI.rows & -2));

    // rearrange the quadrants of Fourier image so that the origin is at the image center
    int cx = magI.cols/2;
    int cy = magI.rows/2;

    Mat q0(magI, Rect(0, 0, cx, cy));   // Top-Left - Create a ROI per quadrant
    Mat q1(magI, Rect(cx, 0, cx, cy));  // Top-Right
    Mat q2(magI, Rect(0, cy, cx, cy));  // Bottom-Left
    Mat q3(magI, Rect(cx, cy, cx, cy)); // Bottom-Right

    Mat tmp;                            // swap quadrants (Top-Left with Bottom-Right)
    q0.copyTo(tmp);
    q3.copyTo(q0);
    tmp.copyTo(q3);

    q1.copyTo(tmp);                     // swap quadrant (Top-Right with Bottom-Left)
    q2.copyTo(q1);
    tmp.copyTo(q2);
    //! [crop_rearrange]

    //! [normalize]
    normalize(magI, magI, 0, 1, NORM_MINMAX); // Transform the matrix with float values into a
                                              // viewable image form (float between values 0 and 1).
    //! [normalize]

    imshow("Input Image"       , I   );    // Show the result
    imshow("spectrum magnitude", magI);
    waitKey();

    return EXIT_SUCCESS;
}

View File

@ -0,0 +1,191 @@
#include <opencv2/core.hpp>
#include <iostream>
#include <string>
using namespace cv;
using namespace std;
/**
 * Print a short description and the command-line usage of the
 * FileStorage serialization demo.
 */
static void help(char** av)
{
    cout << endl;
    cout << av[0] << " shows the usage of the OpenCV serialization functionality." << endl;
    cout << "usage: " << endl;
    cout << av[0] << " outputfile.yml.gz" << endl;
    cout << "The output file may be either XML (xml) or YAML (yml/yaml). You can even compress it by "
            "specifying this in its extension like xml.gz yaml.gz etc... " << endl;
    cout << "With FileStorage you can serialize objects in OpenCV by using the << and >> operators" << endl;
    cout << "For example: - create a class and have it serialized" << endl;
    cout << " - use it to read and write matrices." << endl;
}
/**
 * Demo payload class showing how to (de)serialize a custom type with
 * cv::FileStorage: the type provides write()/read() members, and the free
 * write()/read() functions below hook them into the << and >> operators.
 */
class MyData
{
public:
    MyData() : A(0), X(0), id()
    {}
    explicit MyData(int) : A(97), X(CV_PI), id("mydata1234") // explicit to avoid implicit conversion
    {}
    //! [inside]
    void write(FileStorage& fs) const                        //Write serialization for this class
    {
        fs << "{" << "A" << A << "X" << X << "id" << id << "}";
    }
    void read(const FileNode& node)                          //Read serialization for this class
    {
        A = (int)node["A"];
        X = (double)node["X"];
        id = (string)node["id"];
    }
    //! [inside]
public:   // Data Members
    int A;      // demo integer field
    double X;   // demo floating-point field
    string id;  // demo string field
};
//These write and read functions must be defined for the serialization in FileStorage to work
//! [outside]
// FileStorage serialization hook: delegates to MyData::write so that
// `fs << "MyData" << m;` works for our custom type. The unused string
// parameter is the node name, required by the FileStorage convention.
static void write(FileStorage& fs, const std::string&, const MyData& x)
{
    x.write(fs);
}
// FileStorage deserialization hook for MyData; a missing node yields
// `default_value` instead of an error.
static void read(const FileNode& node, MyData& x, const MyData& default_value = MyData()){
    if (!node.empty())
        x.read(node);
    else
        x = default_value;
}
//! [outside]
// Stream MyData in a readable "{ id = ..., X = ..., A = ...}" form
// for the console output of the demo.
static ostream& operator<<(ostream& out, const MyData& m)
{
    return out << "{ id = " << m.id << ", "
               << "X = " << m.X << ", "
               << "A = " << m.A << "}";
}
/**
 * Demonstrates cv::FileStorage round-tripping: writes numbers, a string
 * sequence, a mapping, cv::Mat and a custom MyData object to the given
 * XML/YAML file, then reads everything back and prints it.
 * av[1] is the output file; its extension selects the format.
 */
int main(int ac, char** av)
{
    if (ac != 2)
    {
        help(av);
        return 1;
    }

    string filename = av[1];
    { //write
        //! [iomati]
        Mat R = Mat_<uchar>::eye(3, 3),
            T = Mat_<double>::zeros(3, 1);
        //! [iomati]
        //! [customIOi]
        MyData m(1);
        //! [customIOi]

        //! [open]
        FileStorage fs(filename, FileStorage::WRITE);
        // or:
        // FileStorage fs;
        // fs.open(filename, FileStorage::WRITE);
        //! [open]

        //! [writeNum]
        fs << "iterationNr" << 100;
        //! [writeNum]
        //! [writeStr]
        fs << "strings" << "[";                              // text - string sequence
        fs << "image1.jpg" << "Awesomeness" << "../data/baboon.jpg";
        fs << "]";                                           // close sequence
        //! [writeStr]

        //! [writeMap]
        fs << "Mapping";                              // text - mapping
        fs << "{" << "One" << 1;
        fs << "Two" << 2 << "}";
        //! [writeMap]

        //! [iomatw]
        fs << "R" << R;                                      // cv::Mat
        fs << "T" << T;
        //! [iomatw]

        //! [customIOw]
        fs << "MyData" << m;                                 // your own data structures
        //! [customIOw]

        //! [close]
        fs.release();                                        // explicit close
        //! [close]
        cout << "Write Done." << endl;
    }

    {//read
        cout << endl << "Reading: " << endl;
        FileStorage fs;
        fs.open(filename, FileStorage::READ);

        //! [readNum]
        int itNr;
        //fs["iterationNr"] >> itNr;
        itNr = (int) fs["iterationNr"];
        //! [readNum]
        cout << itNr;

        // Note: the open-failure check runs after the first read in this
        // demo; reading from a closed FileStorage just yields empty nodes.
        if (!fs.isOpened())
        {
            cerr << "Failed to open " << filename << endl;
            help(av);
            return 1;
        }

        //! [readStr]
        FileNode n = fs["strings"];                         // Read string sequence - Get node
        if (n.type() != FileNode::SEQ)
        {
            cerr << "strings is not a sequence! FAIL" << endl;
            return 1;
        }

        FileNodeIterator it = n.begin(), it_end = n.end(); // Go through the node
        for (; it != it_end; ++it)
            cout << (string)*it << endl;
        //! [readStr]

        //! [readMap]
        n = fs["Mapping"];                                // Read mappings from a sequence
        cout << "Two " << (int)(n["Two"]) << "; ";
        cout << "One " << (int)(n["One"]) << endl << endl;
        //! [readMap]

        MyData m;
        Mat R, T;

        //! [iomat]
        fs["R"] >> R;                                     // Read cv::Mat
        fs["T"] >> T;
        //! [iomat]
        //! [customIO]
        fs["MyData"] >> m;                                // Read your own structure_
        //! [customIO]

        cout << endl
            << "R = " << R << endl;
        cout << "T = " << T << endl << endl;
        cout << "MyData = " << endl << m << endl << endl;

        //Show default behavior for non existing nodes
        //! [nonexist]
        cout << "Attempt to read NonExisting (should initialize the data structure with its default).";
        fs["NonExisting"] >> m;
        cout << endl << "NonExisting = " << endl << m << endl;
        //! [nonexist]
    }

    cout << endl
        << "Tip: Open up " << filename << " with a text editor to see the serialized data." << endl;

    return 0;
}

View File

@ -0,0 +1,230 @@
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include "opencv2/imgcodecs.hpp"
#include <opencv2/highgui.hpp>
#include <iostream>
#include <sstream>
using namespace std;
using namespace cv;
/**
 * Print a short description and the command-line usage of the
 * image-scanning benchmark.
 */
static void help()
{
    cout << "\n--------------------------------------------------------------------------" << endl;
    cout << "This program shows how to scan image objects in OpenCV (cv::Mat). As use case"
            " we take an input image and divide the native color palette (255) with the " << endl;
    cout << "input. Shows C operator[] method, iterators and at function for on-the-fly item address calculation."<< endl;
    cout << "Usage:" << endl;
    cout << "./how_to_scan_images <imageNameToUse> <divideWith> [G]" << endl;
    cout << "if you add a G parameter the image is processed in gray scale" << endl;
    cout << "--------------------------------------------------------------------------" << endl;
    cout << endl;
}
Mat& ScanImageAndReduceC(Mat& I, const uchar* table);
Mat& ScanImageAndReduceIterator(Mat& I, const uchar* table);
Mat& ScanImageAndReduceRandomAccess(Mat& I, const uchar * table);
/**
 * Benchmark three ways of scanning a cv::Mat (C pointer, iterator, at<>)
 * against cv::LUT while applying a color-space reduction lookup table.
 * argv: [1] image path, [2] divisor, optional [3] "G" for grayscale.
 */
int main( int argc, char* argv[])
{
    help();
    if (argc < 3)
    {
        cout << "Not enough parameters" << endl;
        return -1;
    }

    Mat I, J;
    if( argc == 4 && !strcmp(argv[3],"G") )
        I = imread(argv[1], IMREAD_GRAYSCALE);
    else
        I = imread(argv[1], IMREAD_COLOR);

    if (I.empty())
    {
        cout << "The image" << argv[1] << " could not be loaded." << endl;
        return -1;
    }

    //! [dividewith]
    int divideWith = 0; // convert our input string to number - C++ style
    stringstream s;
    s << argv[2];
    s >> divideWith;
    if (!s || !divideWith)
    {
        cout << "Invalid number entered for dividing. " << endl;
        return -1;
    }

    // lookup table: every intensity is snapped down to a multiple of divideWith
    uchar table[256];
    for (int i = 0; i < 256; ++i)
        table[i] = (uchar)(divideWith * (i/divideWith));
    //! [dividewith]

    const int times = 100; // each method is averaged over this many runs
    double t;

    t = (double)getTickCount();

    for (int i = 0; i < times; ++i)
    {
        // clone so each run scans untouched data (the scan mutates in place)
        cv::Mat clone_i = I.clone();
        J = ScanImageAndReduceC(clone_i, table);
    }

    t = 1000*((double)getTickCount() - t)/getTickFrequency();
    t /= times;

    cout << "Time of reducing with the C operator [] (averaged for "
        << times << " runs): " << t << " milliseconds."<< endl;

    t = (double)getTickCount();

    for (int i = 0; i < times; ++i)
    {
        cv::Mat clone_i = I.clone();
        J = ScanImageAndReduceIterator(clone_i, table);
    }

    t = 1000*((double)getTickCount() - t)/getTickFrequency();
    t /= times;

    cout << "Time of reducing with the iterator (averaged for "
        << times << " runs): " << t << " milliseconds."<< endl;

    t = (double)getTickCount();

    for (int i = 0; i < times; ++i)
    {
        cv::Mat clone_i = I.clone();
        ScanImageAndReduceRandomAccess(clone_i, table);
    }

    t = 1000*((double)getTickCount() - t)/getTickFrequency();
    t /= times;

    cout << "Time of reducing with the on-the-fly address generation - at function (averaged for "
        << times << " runs): " << t << " milliseconds."<< endl;

    //! [table-init]
    Mat lookUpTable(1, 256, CV_8U);
    uchar* p = lookUpTable.ptr();
    for( int i = 0; i < 256; ++i)
        p[i] = table[i];
    //! [table-init]

    t = (double)getTickCount();

    for (int i = 0; i < times; ++i)
        //! [table-use]
        LUT(I, lookUpTable, J);
        //! [table-use]

    t = 1000*((double)getTickCount() - t)/getTickFrequency();
    t /= times;

    cout << "Time of reducing with the LUT function (averaged for "
        << times << " runs): " << t << " milliseconds."<< endl;
    return 0;
}
//! [scan-c]
/**
 * Apply the reduction lookup table with raw C-style row pointers.
 * Continuous matrices are treated as one long row to avoid the
 * per-row pointer fetch. `I` is modified in place and returned.
 */
Mat& ScanImageAndReduceC(Mat& I, const uchar* const table)
{
    // accept only char type matrices
    CV_Assert(I.depth() == CV_8U);

    const int channels = I.channels();
    int rowCount = I.rows;
    int colCount = I.cols * channels;

    if (I.isContinuous())
    {
        // one contiguous buffer: collapse the scan into a single row
        colCount *= rowCount;
        rowCount = 1;
    }

    for (int r = 0; r < rowCount; ++r)
    {
        uchar* row = I.ptr<uchar>(r);
        for (int c = 0; c < colCount; ++c)
            row[c] = table[row[c]];
    }
    return I;
}
//! [scan-c]
//! [scan-iterator]
/**
 * Apply the reduction lookup table using MatIterator_, the safest scanning
 * method. Supports 1- and 3-channel 8-bit matrices (others pass through
 * unchanged). `I` is modified in place and returned.
 */
Mat& ScanImageAndReduceIterator(Mat& I, const uchar* const table)
{
    // accept only char type matrices
    CV_Assert(I.depth() == CV_8U);

    switch (I.channels())
    {
    case 1:
    {
        const MatIterator_<uchar> last = I.end<uchar>();
        for (MatIterator_<uchar> it = I.begin<uchar>(); it != last; ++it)
            *it = table[*it];
        break;
    }
    case 3:
    {
        const MatIterator_<Vec3b> last = I.end<Vec3b>();
        for (MatIterator_<Vec3b> it = I.begin<Vec3b>(); it != last; ++it)
        {
            Vec3b& px = *it;
            px[0] = table[px[0]];
            px[1] = table[px[1]];
            px[2] = table[px[2]];
        }
        break;
    }
    }
    return I;
}
//! [scan-iterator]
//! [scan-random]
/**
 * Apply the reduction lookup table using on-the-fly address computation
 * (Mat::at / Mat_ operator()). Supports 1- and 3-channel 8-bit matrices
 * (others pass through unchanged). `I` is modified in place and returned.
 */
Mat& ScanImageAndReduceRandomAccess(Mat& I, const uchar* const table)
{
    // accept only char type matrices
    CV_Assert(I.depth() == CV_8U);

    const int rowCount = I.rows;
    const int colCount = I.cols;

    switch (I.channels())
    {
    case 1:
        for (int r = 0; r < rowCount; ++r)
            for (int c = 0; c < colCount; ++c)
                I.at<uchar>(r, c) = table[I.at<uchar>(r, c)];
        break;
    case 3:
    {
        // Mat_ shares the data with I, so writes below hit I directly
        Mat_<Vec3b> _I = I;
        for (int r = 0; r < rowCount; ++r)
            for (int c = 0; c < colCount; ++c)
            {
                Vec3b& pix = _I(r, c);
                pix[0] = table[pix[0]];
                pix[1] = table[pix[1]];
                pix[2] = table[pix[2]];
            }
        I = _I;
        break;
    }
    }
    return I;
}
//! [scan-random]

View File

@ -0,0 +1,147 @@
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
using namespace std;
using namespace cv;
namespace
{
//! [mandelbrot-escape-time-algorithm]
/**
 * Escape-time iteration for the Mandelbrot set.
 * @param z0  sample point in the complex plane
 * @param max iteration cap
 * @return number of iterations before |z|^2 exceeds 4, or `max` if the
 *         point never escaped within the cap.
 */
int mandelbrot(const complex<float> &z0, const int max)
{
    complex<float> z = z0;
    int t = 0;
    while (t < max && z.real()*z.real() + z.imag()*z.imag() <= 4.0f)
    {
        z = z*z + z0;
        ++t;
    }
    return t;
}
//! [mandelbrot-escape-time-algorithm]
//! [mandelbrot-grayscale-value]
/// Map the escape time of `z0` to a grayscale value in [0, 255];
/// points that never escape (inside the set) are rendered black.
int mandelbrotFormula(const complex<float> &z0, const int maxIter=500) {
    const int escape = mandelbrot(z0, maxIter);
    if (escape == maxIter)
    {
        return 0;
    }
    return cvRound(sqrt(escape / (float) maxIter) * 255);
}
//! [mandelbrot-grayscale-value]
//! [mandelbrot-parallel]
/**
 * ParallelLoopBody functor for cv::parallel_for_: renders the Mandelbrot
 * set into a shared grayscale image, one flattened pixel range per worker.
 */
class ParallelMandelbrot : public ParallelLoopBody
{
public:
    /// @param img    destination grayscale image (written in place)
    /// @param x1,y1  top-left corner of the viewport in the complex plane
    /// @param scaleX,scaleY  pixels per unit along each axis
    ParallelMandelbrot (Mat &img, const float x1, const float y1, const float scaleX, const float scaleY)
        : m_img(img), m_x1(x1), m_y1(y1), m_scaleX(scaleX), m_scaleY(scaleY)
    {
    }

    /// Body executed by parallel_for_: `range` spans flattened pixel indices.
    virtual void operator ()(const Range& range) const CV_OVERRIDE
    {
        for (int r = range.start; r < range.end; r++)
        {
            // recover (row, col) from the flat index, then map to the plane
            int i = r / m_img.cols;
            int j = r % m_img.cols;

            float x0 = j / m_scaleX + m_x1;
            float y0 = i / m_scaleY + m_y1;

            complex<float> z0(x0, y0);
            uchar value = (uchar) mandelbrotFormula(z0);
            m_img.ptr<uchar>(i)[j] = value;
        }
    }

    // No-op assignment: the members include a reference, which cannot be
    // reseated; defined only so the functor type is formally assignable.
    ParallelMandelbrot& operator=(const ParallelMandelbrot &) {
        return *this;
    };

private:
    Mat &m_img;      // shared output image; each worker writes disjoint pixels
    float m_x1;
    float m_y1;
    float m_scaleX;
    float m_scaleY;
};
//! [mandelbrot-parallel]
//! [mandelbrot-sequential]
/**
 * Single-threaded Mandelbrot rendering used as the timing baseline.
 * @param img    destination grayscale image (written in place)
 * @param x1,y1  top-left corner of the viewport in the complex plane
 * @param scaleX,scaleY  pixels per unit along each axis
 */
void sequentialMandelbrot(Mat &img, const float x1, const float y1, const float scaleX, const float scaleY)
{
    for (int row = 0; row < img.rows; row++)
    {
        uchar* line = img.ptr<uchar>(row);
        const float y0 = row / scaleY + y1;  // constant along a row
        for (int col = 0; col < img.cols; col++)
        {
            const float x0 = col / scaleX + x1;
            line[col] = (uchar) mandelbrotFormula(complex<float>(x0, y0));
        }
    }
}
//! [mandelbrot-sequential]
}
/**
 * Render the Mandelbrot set twice -- with parallel_for_ and sequentially --
 * report both timings and the speed-up, and write each result to a PNG.
 */
int main()
{
    //! [mandelbrot-transformation]
    Mat mandelbrotImg(4800, 5400, CV_8U);
    float x1 = -2.1f, x2 = 0.6f;
    float y1 = -1.2f, y2 = 1.2f;
    float scaleX = mandelbrotImg.cols / (x2 - x1);
    float scaleY = mandelbrotImg.rows / (y2 - y1);
    //! [mandelbrot-transformation]

    double t1 = (double) getTickCount();

#ifdef CV_CXX11

    //! [mandelbrot-parallel-call-cxx11]
    parallel_for_(Range(0, mandelbrotImg.rows*mandelbrotImg.cols), [&](const Range& range){
        for (int r = range.start; r < range.end; r++)
        {
            // map the flat index back to (row, col), then to the complex plane
            int i = r / mandelbrotImg.cols;
            int j = r % mandelbrotImg.cols;

            float x0 = j / scaleX + x1;
            float y0 = i / scaleY + y1;

            complex<float> z0(x0, y0);
            uchar value = (uchar) mandelbrotFormula(z0);
            mandelbrotImg.ptr<uchar>(i)[j] = value;
        }
    });
    //! [mandelbrot-parallel-call-cxx11]

#else

    //! [mandelbrot-parallel-call]
    ParallelMandelbrot parallelMandelbrot(mandelbrotImg, x1, y1, scaleX, scaleY);
    parallel_for_(Range(0, mandelbrotImg.rows*mandelbrotImg.cols), parallelMandelbrot);
    //! [mandelbrot-parallel-call]

#endif

    t1 = ((double) getTickCount() - t1) / getTickFrequency();
    cout << "Parallel Mandelbrot: " << t1 << " s" << endl;

    Mat mandelbrotImgSequential(4800, 5400, CV_8U);
    double t2 = (double) getTickCount();
    sequentialMandelbrot(mandelbrotImgSequential, x1, y1, scaleX, scaleY);
    t2 = ((double) getTickCount() - t2) / getTickFrequency();
    cout << "Sequential Mandelbrot: " << t2 << " s" << endl;
    cout << "Speed-up: " << t2/t1 << " X" << endl;

    imwrite("Mandelbrot_parallel.png", mandelbrotImg);
    imwrite("Mandelbrot_sequential.png", mandelbrotImgSequential);

    return EXIT_SUCCESS;
}

View File

@ -0,0 +1,332 @@
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
using namespace std;
using namespace cv;
namespace
{
//! [convolution-sequential]
/**
 * Sequential 2-D convolution (baseline for the parallel versions).
 * @param src    input 8-bit single-channel image (passed by value; the local
 *               copy is enlarged with a replicated border)
 * @param dst    output image, same size/type as the original src
 * @param kernel square CV_64F convolution kernel with odd side length
 */
void conv_seq(Mat src, Mat &dst, Mat kernel)
{
    //![convolution-make-borders]
    int rows = src.rows, cols = src.cols;
    dst = Mat(rows, cols, src.type());
    // Taking care of edge values
    // Make border = kernel.rows / 2;
    int sz = kernel.rows / 2;
    copyMakeBorder(src, src, sz, sz, sz, sz, BORDER_REPLICATE);
    //![convolution-make-borders]
    //! [convolution-kernel-loop]
    for (int i = 0; i < rows; i++)
    {
        uchar *dptr = dst.ptr(i);
        for (int j = 0; j < cols; j++)
        {
            double value = 0;
            for (int k = -sz; k <= sz; k++)
            {
                // slightly faster results when we create a ptr due to more efficient memory access.
                uchar *sptr = src.ptr(i + sz + k);
                for (int l = -sz; l <= sz; l++)
                {
                    // (i+sz, j+sz) is the pixel's position in the bordered image.
                    value += kernel.ptr<double>(k + sz)[l + sz] * sptr[j + sz + l];
                }
            }
            // Clamp the accumulated double to the 0..255 uchar range.
            dptr[j] = saturate_cast<uchar>(value);
        }
    }
    //! [convolution-kernel-loop]
}
//! [convolution-sequential]
#ifdef CV_CXX11
/**
 * Parallel 2-D convolution (C++11 lambda form): the rows*cols pixels are
 * flattened into one Range and split among worker threads by parallel_for_.
 * @param src    input 8-bit single-channel image (local copy gets a border)
 * @param dst    output CV_8UC1 image, same size as the original src
 * @param kernel square CV_64F kernel with odd side length
 */
void conv_parallel(Mat src, Mat &dst, Mat kernel)
{
    int rows = src.rows, cols = src.cols;
    dst = Mat(rows, cols, CV_8UC1, Scalar(0));
    // Taking care of edge values
    // Make border = kernel.rows / 2;
    int sz = kernel.rows / 2;
    copyMakeBorder(src, src, sz, sz, sz, sz, BORDER_REPLICATE);
    //! [convolution-parallel-cxx11]
    parallel_for_(Range(0, rows * cols), [&](const Range &range)
    {
        for (int r = range.start; r < range.end; r++)
        {
            // Recover the 2-D pixel position from the linear index.
            int i = r / cols, j = r % cols;
            double value = 0;
            for (int k = -sz; k <= sz; k++)
            {
                uchar *sptr = src.ptr(i + sz + k);
                for (int l = -sz; l <= sz; l++)
                {
                    value += kernel.ptr<double>(k + sz)[l + sz] * sptr[j + sz + l];
                }
            }
            dst.ptr(i)[j] = saturate_cast<uchar>(value);
        }
    });
    //! [convolution-parallel-cxx11]
}
/**
 * Parallel 2-D convolution split by rows instead of by pixels: each worker
 * processes whole rows, which gives better memory locality than the
 * pixel-flattened variant above.
 * @param src    input 8-bit single-channel image (local copy gets a border)
 * @param dst    output CV_8UC1 image, same size as the original src
 * @param kernel square CV_64F kernel with odd side length
 */
void conv_parallel_row_split(Mat src, Mat &dst, Mat kernel)
{
    int rows = src.rows, cols = src.cols;
    dst = Mat(rows, cols, CV_8UC1, Scalar(0));
    // Taking care of edge values
    // Make border = kernel.rows / 2;
    int sz = kernel.rows / 2;
    copyMakeBorder(src, src, sz, sz, sz, sz, BORDER_REPLICATE);
    //! [convolution-parallel-cxx11-row-split]
    parallel_for_(Range(0, rows), [&](const Range &range)
    {
        for (int i = range.start; i < range.end; i++)
        {
            uchar *dptr = dst.ptr(i);
            for (int j = 0; j < cols; j++)
            {
                double value = 0;
                for (int k = -sz; k <= sz; k++)
                {
                    uchar *sptr = src.ptr(i + sz + k);
                    for (int l = -sz; l <= sz; l++)
                    {
                        value += kernel.ptr<double>(k + sz)[l + sz] * sptr[j + sz + l];
                    }
                }
                dptr[j] = saturate_cast<uchar>(value);
            }
        }
    });
    //! [convolution-parallel-cxx11-row-split]
}
#else
//! [convolution-parallel]
/**
 * Pre-C++11 functor equivalent of the pixel-flattened parallel convolution:
 * parallel_for_ invokes operator() on sub-ranges of the flattened pixel index.
 * m_src is expected to already carry the replicated border added by the caller.
 */
class parallelConvolution : public ParallelLoopBody
{
private:
    Mat m_src, &m_dst;   // m_dst is a reference: results are written back to the caller's image
    Mat m_kernel;
    int sz;              // kernel half-size (border width)

public:
    parallelConvolution(Mat src, Mat &dst, Mat kernel)
        : m_src(src), m_dst(dst), m_kernel(kernel)
    {
        sz = kernel.rows / 2;
    }

    //! [overload-full]
    /** Convolve the pixels whose flattened indices fall inside range. */
    virtual void operator()(const Range &range) const CV_OVERRIDE
    {
        for (int r = range.start; r < range.end; r++)
        {
            int i = r / m_src.cols, j = r % m_src.cols;
            double value = 0;
            for (int k = -sz; k <= sz; k++)
            {
                uchar *sptr = m_src.ptr(i + sz + k);
                for (int l = -sz; l <= sz; l++)
                {
                    value += m_kernel.ptr<double>(k + sz)[l + sz] * sptr[j + sz + l];
                }
            }
            m_dst.ptr(i)[j] = saturate_cast<uchar>(value);
        }
    }
    //! [overload-full]
};
//! [convolution-parallel]
/**
 * Pre-C++11 parallel convolution: borders the input, then hands the work to
 * the parallelConvolution functor over the flattened pixel range.
 * @param src    input 8-bit single-channel image (local copy gets a border)
 * @param dst    output CV_8UC1 image, same size as the original src
 * @param kernel square CV_64F kernel with odd side length
 */
void conv_parallel(Mat src, Mat &dst, Mat kernel)
{
    int rows = src.rows, cols = src.cols;
    dst = Mat(rows, cols, CV_8UC1, Scalar(0));
    // Taking care of edge values
    // Make border = kernel.rows / 2;
    int sz = kernel.rows / 2;
    copyMakeBorder(src, src, sz, sz, sz, sz, BORDER_REPLICATE);
    //! [convolution-parallel-function]
    parallelConvolution obj(src, dst, kernel);
    parallel_for_(Range(0, rows * cols), obj);
    //! [convolution-parallel-function]
}
//! [conv-parallel-row-split]
/**
 * Pre-C++11 functor for the row-split parallel convolution: parallel_for_
 * invokes operator() on sub-ranges of image rows. m_src is expected to
 * already carry the replicated border added by the caller.
 *
 * Fix: operator() previously referenced `dst`, `cols`, `src` and `kernel`,
 * none of which exist in this class (the members are m_dst, m_src, m_kernel),
 * so this fallback branch did not compile. It now mirrors the C++11
 * row-split lambda above, using m_dst.cols as the un-bordered column count.
 */
class parallelConvolutionRowSplit : public ParallelLoopBody
{
private:
    Mat m_src, &m_dst;   // m_dst is a reference: results go back to the caller's image
    Mat m_kernel;
    int sz;              // kernel half-size (border width)

public:
    parallelConvolutionRowSplit(Mat src, Mat &dst, Mat kernel)
        : m_src(src), m_dst(dst), m_kernel(kernel)
    {
        sz = kernel.rows / 2;
    }

    //! [overload-row-split]
    /** Convolve the full rows whose indices fall inside range. */
    virtual void operator()(const Range &range) const CV_OVERRIDE
    {
        for (int i = range.start; i < range.end; i++)
        {
            uchar *dptr = m_dst.ptr(i);
            for (int j = 0; j < m_dst.cols; j++)
            {
                double value = 0;
                for (int k = -sz; k <= sz; k++)
                {
                    uchar *sptr = m_src.ptr(i + sz + k);
                    for (int l = -sz; l <= sz; l++)
                    {
                        value += m_kernel.ptr<double>(k + sz)[l + sz] * sptr[j + sz + l];
                    }
                }
                dptr[j] = saturate_cast<uchar>(value);
            }
        }
    }
    //! [overload-row-split]
};
//! [conv-parallel-row-split]
/**
 * Pre-C++11 row-split parallel convolution: borders the input, then hands
 * whole rows to the parallelConvolutionRowSplit functor.
 * @param src    input 8-bit single-channel image (local copy gets a border)
 * @param dst    output CV_8UC1 image, same size as the original src
 * @param kernel square CV_64F kernel with odd side length
 */
void conv_parallel_row_split(Mat src, Mat &dst, Mat kernel)
{
    int rows = src.rows, cols = src.cols;
    dst = Mat(rows, cols, CV_8UC1, Scalar(0));
    // Taking care of edge values
    // Make border = kernel.rows / 2;
    int sz = kernel.rows / 2;
    copyMakeBorder(src, src, sz, sz, sz, sz, BORDER_REPLICATE);
    //! [convolution-parallel-function-row]
    parallelConvolutionRowSplit obj(src, dst, kernel);
    parallel_for_(Range(0, rows), obj);
    //! [convolution-parallel-function-row]
}
#endif
/**
 * Print a short usage banner for this sample.
 * @param progName executable name as passed in argv[0]
 */
static void help(char *progName)
{
    // Adjacent string literals are concatenated by the compiler; the printed
    // text is identical to the original multi-insertion form.
    cout << endl
         << " This program shows how to use the OpenCV parallel_for_ function and \n"
            " compares the performance of the sequential and parallel implementations for a \n"
            " convolution operation\n"
            " Usage:\n "
         << progName << " [image_path -- default lena.jpg] " << endl
         << endl;
}
}
/**
 * Entry point: loads a grayscale image and times the sequential, the
 * pixel-parallel and the row-split parallel convolution on the same kernel,
 * showing each result in its own window.
 */
int main(int argc, char *argv[])
{
    help(argv[0]);
    const char *filepath = argc >= 2 ? argv[1] : "../../../../data/lena.jpg";
    Mat src, dst, kernel;
    src = imread(filepath, IMREAD_GRAYSCALE);
    if (src.empty())
    {
        cerr << "Can't open [" << filepath << "]" << endl;
        return EXIT_FAILURE;
    }
    namedWindow("Input", 1);
    namedWindow("Output1", 1);
    namedWindow("Output2", 1);
    namedWindow("Output3", 1);
    imshow("Input", src);
    // 3x3 horizontal-edge kernel used for the timing comparison.
    kernel = (Mat_<double>(3, 3) << 1, 0, -1,
              1, 0, -1,
              1, 0, -1);
    /*
    Uncomment the kernels you want to use or write your own kernels to test out
    performance.
    */
    /*
    kernel = (Mat_<double>(5, 5) << 1, 1, 1, 1, 1,
              1, 1, 1, 1, 1,
              1, 1, 1, 1, 1,
              1, 1, 1, 1, 1,
              1, 1, 1, 1, 1);
    kernel /= 100;
    */
    /*
    kernel = (Mat_<double>(3, 3) << 1, 1, 1,
              0, 0, 0,
              -1, -1, -1);
    */
    // Time the sequential baseline.
    double t = (double)getTickCount();
    conv_seq(src, dst, kernel);
    t = ((double)getTickCount() - t) / getTickFrequency();
    cout << " Sequential implementation: " << t << "s" << endl;
    imshow("Output1", dst);
    waitKey(0);
    // Time the pixel-flattened parallel version.
    t = (double)getTickCount();
    conv_parallel(src, dst, kernel);
    t = ((double)getTickCount() - t) / getTickFrequency();
    cout << " Parallel Implementation: " << t << "s" << endl;
    imshow("Output2", dst);
    waitKey(0);
    // Time the row-split parallel version.
    t = (double)getTickCount();
    conv_parallel_row_split(src, dst, kernel);
    t = ((double)getTickCount() - t) / getTickFrequency();
    cout << " Parallel Implementation(Row Split): " << t << "s" << endl
         << endl;
    imshow("Output3", dst);
    waitKey(0);
    // imwrite("src.png", src);
    // imwrite("dst.png", dst);
    return 0;
}

View File

@ -0,0 +1,108 @@
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
using namespace std;
using namespace cv;
/**
 * Print a short usage banner for the mask-filtering sample.
 * @param progName executable name as passed in argv[0]
 */
static void help(char* progName)
{
    // Adjacent literals concatenate; the printed text matches the original.
    cout << endl
         << "This program shows how to filter images with mask: the write it yourself and the"
            "filter2d way. " << endl
         << "Usage:" << endl
         << progName << " [image_path -- default lena.jpg] [G -- grayscale] " << endl << endl;
}
void Sharpen(const Mat& myImage,Mat& Result);
/**
 * Entry point: loads an image (optionally grayscale), sharpens it once with
 * the hand-written Sharpen() and once with filter2D using the equivalent
 * kernel, and prints the time taken by each approach.
 */
int main( int argc, char* argv[])
{
    help(argv[0]);
    const char* filename = argc >=2 ? argv[1] : "lena.jpg";

    Mat src, dst0, dst1;

    // "G" as the second extra argument selects grayscale loading.
    if (argc >= 3 && !strcmp("G", argv[2]))
        src = imread( samples::findFile( filename ), IMREAD_GRAYSCALE);
    else
        src = imread( samples::findFile( filename ), IMREAD_COLOR);

    if (src.empty())
    {
        cerr << "Can't open image [" << filename << "]" << endl;
        return EXIT_FAILURE;
    }

    namedWindow("Input", WINDOW_AUTOSIZE);
    namedWindow("Output", WINDOW_AUTOSIZE);

    imshow( "Input", src );
    double t = (double)getTickCount();

    Sharpen( src, dst0 );

    t = ((double)getTickCount() - t)/getTickFrequency();
    cout << "Hand written function time passed in seconds: " << t << endl;

    imshow( "Output", dst0 );
    waitKey();

    //![kern]
    // Same sharpening operation expressed as a filter2D kernel.
    Mat kernel = (Mat_<char>(3,3) << 0, -1, 0,
                  -1, 5, -1,
                  0, -1, 0);
    //![kern]

    t = (double)getTickCount();

    //![filter2D]
    filter2D( src, dst1, src.depth(), kernel );
    //![filter2D]

    t = ((double)getTickCount() - t)/getTickFrequency();
    cout << "Built-in filter2D time passed in seconds: " << t << endl;

    imshow( "Output", dst1 );

    waitKey();
    return EXIT_SUCCESS;
}
//! [basic_method]
/**
 * Hand-written sharpening filter: for every interior pixel computes
 * 5*center - (left + right + up + down), channel by channel, and clamps to
 * the uchar range. Border rows/columns of the result are set to zero.
 * @param myImage input 8-bit image (any channel count)
 * @param Result  output image, same size/type as myImage
 */
void Sharpen(const Mat& myImage,Mat& Result)
{
    //! [8_bit]
    CV_Assert(myImage.depth() == CV_8U);  // accept only uchar images
    //! [8_bit]

    //! [create_channels]
    const int nChannels = myImage.channels();
    Result.create(myImage.size(),myImage.type());
    //! [create_channels]

    //! [basic_method_loop]
    // Iterate over interior rows, keeping pointers to the previous, current
    // and next row so each neighbour is a simple indexed read.
    for(int j = 1 ; j < myImage.rows-1; ++j)
    {
        const uchar* previous = myImage.ptr<uchar>(j - 1);
        const uchar* current  = myImage.ptr<uchar>(j    );
        const uchar* next     = myImage.ptr<uchar>(j + 1);

        uchar* output = Result.ptr<uchar>(j);

        // Offsets of +/- nChannels address the same channel of the
        // horizontally adjacent pixels in the interleaved row buffer.
        for(int i= nChannels;i < nChannels*(myImage.cols-1); ++i)
        {
            *output++ = saturate_cast<uchar>(5*current[i]
                        -current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]);
        }
    }
    //! [basic_method_loop]

    //! [borders]
    Result.row(0).setTo(Scalar(0));
    Result.row(Result.rows-1).setTo(Scalar(0));
    Result.col(0).setTo(Scalar(0));
    Result.col(Result.cols-1).setTo(Scalar(0));
    //! [borders]
}
//! [basic_method]

View File

@ -0,0 +1,170 @@
/* Snippet code for Operations with images tutorial (not intended to be run but should built successfully) */
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
/**
 * Snippet collection for the "Operations with images" tutorial: each scoped
 * block demonstrates one documented idiom (I/O, pixel access, reference
 * counting, ROIs, conversions, visualization). Not intended as a runnable
 * demo -- `filename` is empty, so the imread/imwrite snippets are only here
 * to be extracted into the documentation.
 */
int main(int,char**)
{
    std::string filename = "";
    // Input/Output
    {
        //! [Load an image from a file]
        Mat img = imread(filename);
        //! [Load an image from a file]
        CV_UNUSED(img);
    }
    {
        //! [Load an image from a file in grayscale]
        Mat img = imread(filename, IMREAD_GRAYSCALE);
        //! [Load an image from a file in grayscale]
        CV_UNUSED(img);
    }
    {
        Mat img(4,4,CV_8U);
        //! [Save image]
        imwrite(filename, img);
        //! [Save image]
    }
    // Accessing pixel intensity values
    {
        Mat img(4,4,CV_8U);
        int y = 0, x = 0;
        {
            //! [Pixel access 1]
            Scalar intensity = img.at<uchar>(y, x);
            //! [Pixel access 1]
            CV_UNUSED(intensity);
        }
        {
            //! [Pixel access 2]
            // Note: Point takes (x, y) while at<>(row, col) takes (y, x).
            Scalar intensity = img.at<uchar>(Point(x, y));
            //! [Pixel access 2]
            CV_UNUSED(intensity);
        }
        {
            //! [Pixel access 3]
            // 3-channel 8-bit access: OpenCV stores channels as B, G, R.
            Vec3b intensity = img.at<Vec3b>(y, x);
            uchar blue = intensity.val[0];
            uchar green = intensity.val[1];
            uchar red = intensity.val[2];
            //! [Pixel access 3]
            CV_UNUSED(blue);
            CV_UNUSED(green);
            CV_UNUSED(red);
        }
        {
            //! [Pixel access 4]
            // Same access pattern for a 3-channel float image.
            Vec3f intensity = img.at<Vec3f>(y, x);
            float blue = intensity.val[0];
            float green = intensity.val[1];
            float red = intensity.val[2];
            //! [Pixel access 4]
            CV_UNUSED(blue);
            CV_UNUSED(green);
            CV_UNUSED(red);
        }
        {
            //! [Pixel access 5]
            img.at<uchar>(y, x) = 128;
            //! [Pixel access 5]
        }
        {
            int i = 0;
            //! [Mat from points vector]
            vector<Point2f> points;
            //... fill the array
            Mat pointsMat = Mat(points);
            //! [Mat from points vector]

            //! [Point access]
            Point2f point = pointsMat.at<Point2f>(i, 0);
            //! [Point access]
            CV_UNUSED(point);
        }
    }
    // Memory management and reference counting
    {
        //! [Reference counting 1]
        // reshape(1) views the Nx1 3-channel point matrix as Nx3 single-channel.
        std::vector<Point3f> points;
        // .. fill the array
        Mat pointsMat = Mat(points).reshape(1);
        //! [Reference counting 1]
        CV_UNUSED(pointsMat);
    }
    {
        //! [Reference counting 2]
        // clone() makes a deep copy; plain assignment would share the buffer.
        Mat img = imread("image.jpg");
        Mat img1 = img.clone();
        //! [Reference counting 2]
        CV_UNUSED(img1);
    }
    {
        //! [Reference counting 3]
        Mat img = imread("image.jpg");
        Mat sobelx;
        Sobel(img, sobelx, CV_32F, 1, 0);
        //! [Reference counting 3]
    }
    // Primitive operations
    {
        Mat img;
        {
            //! [Set image to black]
            img = Scalar(0);
            //! [Set image to black]
        }
        {
            //! [Select ROI]
            // The ROI shares storage with img -- no pixels are copied.
            Rect r(10, 10, 100, 100);
            Mat smallImg = img(r);
            //! [Select ROI]
            CV_UNUSED(smallImg);
        }
    }
    {
        //! [BGR to Gray]
        Mat img = imread("image.jpg"); // loading a 8UC3 image
        Mat grey;
        cvtColor(img, grey, COLOR_BGR2GRAY);
        //! [BGR to Gray]
    }
    {
        Mat dst, src;
        //! [Convert to CV_32F]
        src.convertTo(dst, CV_32F);
        //! [Convert to CV_32F]
    }
    // Visualizing images
    {
        //! [imshow 1]
        Mat img = imread("image.jpg");
        namedWindow("image", WINDOW_AUTOSIZE);
        imshow("image", img);
        waitKey();
        //! [imshow 1]
    }
    {
        //! [imshow 2]
        // A 32F Sobel result must be rescaled into 0..255 before display.
        Mat img = imread("image.jpg");
        Mat grey;
        cvtColor(img, grey, COLOR_BGR2GRAY);
        Mat sobelx;
        Sobel(grey, sobelx, CV_32F, 1, 0);
        double minVal, maxVal;
        minMaxLoc(sobelx, &minVal, &maxVal); //find minimum and maximum intensities
        Mat draw;
        sobelx.convertTo(draw, CV_8U, 255.0/(maxVal - minVal), -minVal * 255.0/(maxVal - minVal));
        namedWindow("image", WINDOW_AUTOSIZE);
        imshow("image", draw);
        waitKey();
        //! [imshow 2]
    }
    return 0;
}

View File

@ -0,0 +1,119 @@
/* For description look into the help() function. */
#include "opencv2/core.hpp"
#include <iostream>
using namespace std;
using namespace cv;
/** Print the description and usage banner for the cv::Mat basics sample. */
static void help()
{
    // Adjacent literals concatenate; the printed text matches the original.
    cout << "\n---------------------------------------------------------------------------" << endl
         << "This program shows how to create matrices(cv::Mat) in OpenCV and its serial"
            " out capabilities" << endl
         << "That is, cv::Mat M(...); M.create and cout << M. " << endl
         << "Shows how output can be formatted to OpenCV, python, numpy, csv and C styles." << endl
         << "Usage:" << endl
         << "./mat_the_basic_image_container" << endl
         << "-----------------------------------------------------------------------------" << endl
         << endl;
}
/**
 * Demonstrates the ways to construct cv::Mat (constructor, create(),
 * MATLAB-style initializers, comma initializer, clone, random fill) and the
 * available output formats for Mat and related types.
 */
int main(int,char**)
{
    help();
    // create by using the constructor
    //! [constructor]
    Mat M(2,2, CV_8UC3, Scalar(0,0,255));
    cout << "M = " << endl << " " << M << endl << endl;
    //! [constructor]

    // create by using the create function()
    //! [create]
    M.create(4,4, CV_8UC(2));
    cout << "M = "<< endl << " "  << M << endl << endl;
    //! [create]

    // create multidimensional matrices
    //! [init]
    int sz[3] = {2,2,2};
    Mat L(3,sz, CV_8UC(1), Scalar::all(0));
    //! [init]
    // Cannot print via operator <<

    // Create using MATLAB style eye, ones or zero matrix
    //! [matlab]
    Mat E = Mat::eye(4, 4, CV_64F);
    cout << "E = " << endl << " " << E << endl << endl;
    Mat O = Mat::ones(2, 2, CV_32F);
    cout << "O = " << endl << " " << O << endl << endl;
    Mat Z = Mat::zeros(3,3, CV_8UC1);
    cout << "Z = " << endl << " " << Z << endl << endl;
    //! [matlab]

    // create a 3x3 double-precision identity matrix
    //! [comma]
    Mat C = (Mat_<double>(3,3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
    cout << "C = " << endl << " " << C << endl << endl;
    //! [comma]
    // do the same with initializer_list
#ifdef CV_CXX11
    //! [list]
    // reshape(3) turns the 9x1 list into the same 3x3 matrix as above.
    C = (Mat_<double>({0, -1, 0, -1, 5, -1, 0, -1, 0})).reshape(3);
    cout << "C = " << endl << " " << C << endl << endl;
    //! [list]
#endif
    //! [clone]
    // clone() copies the row; a plain Mat assignment would only share it.
    Mat RowClone = C.row(1).clone();
    cout << "RowClone = " << endl << " " << RowClone << endl << endl;
    //! [clone]

    // Fill a matrix with random values
    //! [random]
    Mat R = Mat(3, 2, CV_8UC3);
    randu(R, Scalar::all(0), Scalar::all(255));
    //! [random]

    // Demonstrate the output formatting options
    //! [out-default]
    cout << "R (default) = " << endl << R << endl << endl;
    //! [out-default]
    //! [out-python]
    cout << "R (python)  = " << endl << format(R, Formatter::FMT_PYTHON) << endl << endl;
    //! [out-python]
    //! [out-numpy]
    cout << "R (numpy)   = " << endl << format(R, Formatter::FMT_NUMPY ) << endl << endl;
    //! [out-numpy]
    //! [out-csv]
    cout << "R (csv)     = " << endl << format(R, Formatter::FMT_CSV   ) << endl << endl;
    //! [out-csv]
    //! [out-c]
    cout << "R (c)       = " << endl << format(R, Formatter::FMT_C     ) << endl << endl;
    //! [out-c]

    //! [out-point2]
    Point2f P(5, 1);
    cout << "Point (2D) = " << P << endl << endl;
    //! [out-point2]

    //! [out-point3]
    Point3f P3f(2, 6, 7);
    cout << "Point (3D) = " << P3f << endl << endl;
    //! [out-point3]

    //! [out-vector]
    vector<float> v;
    v.push_back( (float)CV_PI);   v.push_back(2);    v.push_back(3.01f);
    cout << "Vector of floats via Mat = " << Mat(v) << endl << endl;
    //! [out-vector]

    //! [out-vector-points]
    vector<Point2f> vPoints(20);
    for (size_t i = 0; i < vPoints.size(); ++i)
        vPoints[i] = Point2f((float)(i * 5), (float)(i % 7));
    cout << "A vector of 2D Points = " << vPoints << endl << endl;
    //! [out-vector-points]
    return 0;
}

View File

@ -0,0 +1,26 @@
# Build scripts for the parallel-backend examples. Each example is only
# configured when its backend (OpenMP or TBB) is found on the system.
cmake_minimum_required(VERSION 3.9)

find_package(OpenCV REQUIRED COMPONENTS opencv_core)

# OpenMP-backed parallel_for_ example (built only if OpenMP is available).
find_package(OpenMP)
if(OpenMP_FOUND)
  project(opencv_example_openmp_backend)
  add_executable(opencv_example_openmp_backend example-openmp.cpp)
  target_link_libraries(opencv_example_openmp_backend PRIVATE
      opencv_core
      OpenMP::OpenMP_CXX
  )
endif()

# TBB-backed parallel_for_ example. TBB is located manually until a proper
# find_package(TBB) config is wired in.
# TODO: find_package(TBB)
find_path(TBB_INCLUDE_DIR NAMES "tbb/tbb.h")
find_library(TBB_LIBRARY NAMES "tbb")
if(TBB_INCLUDE_DIR AND TBB_LIBRARY AND NOT OPENCV_EXAMPLE_SKIP_TBB)
  project(opencv_example_tbb_backend)
  add_executable(opencv_example_tbb_backend example-tbb.cpp)
  target_include_directories(opencv_example_tbb_backend SYSTEM PRIVATE ${TBB_INCLUDE_DIR})
  target_link_libraries(opencv_example_tbb_backend PRIVATE
      opencv_core
      ${TBB_LIBRARY}
  )
endif()

View File

@ -0,0 +1,44 @@
#include "opencv2/core.hpp"
#include <iostream>
#include <chrono>
#include <thread>
//! [openmp_include]
#include "opencv2/core/parallel/backend/parallel_for.openmp.hpp"
//! [openmp_include]
namespace cv { // private.hpp
CV_EXPORTS const char* currentParallelFramework();
}
/**
 * Name of the currently active parallel framework, or an empty string when
 * cv::currentParallelFramework() returns a null pointer (no framework built in).
 */
static
std::string currentParallelFrameworkSafe()
{
    const char* framework = cv::currentParallelFramework();
    return framework ? std::string(framework) : std::string();
}
using namespace cv;
/**
 * Demonstrates switching OpenCV's parallel_for_ implementation to the OpenMP
 * backend at runtime, then runs a parallel loop that reports which thread
 * handled each sub-range.
 */
int main()
{
    std::cout << "OpenCV builtin parallel framework: '" << currentParallelFrameworkSafe() << "' (nthreads=" << getNumThreads() << ")" << std::endl;

    //! [openmp_backend]
    //omp_set_dynamic(1);
    cv::parallel::setParallelForBackend(std::make_shared<cv::parallel::openmp::ParallelForBackend>());
    //! [openmp_backend]

    // Fixed: the original chained << "'" << "' (nthreads=" and printed a
    // doubled quote after the backend name. Range is now taken by const
    // reference instead of by value.
    std::cout << "New parallel backend: '" << currentParallelFrameworkSafe() << "' (nthreads=" << getNumThreads() << ")" << std::endl;

    parallel_for_(Range(0, 20), [&](const Range& range)
    {
        // Buffer the whole line so concurrent threads don't interleave output.
        std::ostringstream out;
        out << "Thread " << getThreadNum() << "(opencv=" << utils::getThreadID() << "): range " << range.start << "-" << range.end << std::endl;
        std::cout << out.str() << std::flush;
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
    });
}

View File

@ -0,0 +1,43 @@
#include "opencv2/core.hpp"
#include <iostream>
#include <chrono>
#include <thread>
//! [tbb_include]
#include "opencv2/core/parallel/backend/parallel_for.tbb.hpp"
//! [tbb_include]
namespace cv { // private.hpp
CV_EXPORTS const char* currentParallelFramework();
}
/**
 * Name of the currently active parallel framework, or an empty string when
 * cv::currentParallelFramework() returns a null pointer (no framework built in).
 */
static
std::string currentParallelFrameworkSafe()
{
    const char* framework = cv::currentParallelFramework();
    return framework ? std::string(framework) : std::string();
}
using namespace cv;
/**
 * Demonstrates switching OpenCV's parallel_for_ implementation to the TBB
 * backend at runtime, then runs a parallel loop that reports which thread
 * handled each sub-range.
 */
int main()
{
    std::cout << "OpenCV builtin parallel framework: '" << currentParallelFrameworkSafe() << "' (nthreads=" << getNumThreads() << ")" << std::endl;

    //! [tbb_backend]
    cv::parallel::setParallelForBackend(std::make_shared<cv::parallel::tbb::ParallelForBackend>());
    //! [tbb_backend]

    // Fixed: the original chained << "'" << "' (nthreads=" and printed a
    // doubled quote after the backend name. Range is now taken by const
    // reference instead of by value.
    std::cout << "New parallel backend: '" << currentParallelFrameworkSafe() << "' (nthreads=" << getNumThreads() << ")" << std::endl;

    parallel_for_(Range(0, 20), [&](const Range& range)
    {
        // Buffer the whole line so concurrent threads don't interleave output.
        std::ostringstream out;
        out << "Thread " << getThreadNum() << "(opencv=" << utils::getThreadID() << "): range " << range.start << "-" << range.end << std::endl;
        std::cout << out.str() << std::flush;
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
    });
}

View File

@ -0,0 +1,230 @@
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core/simd_intrinsics.hpp>
using namespace std;
using namespace cv;
const int N = 100005, K = 2000;
namespace
{
/**
 * Scalar (non-SIMD) 2-D convolution used as the baseline for the vectorized
 * implementation below.
 * @param src    input 8-bit single-channel image (local copy gets a border)
 * @param dst    output CV_8UC1 image, same size as the original src
 * @param kernel square CV_32F kernel with odd side length
 */
void conv_seq(Mat src, Mat &dst, Mat kernel)
{
    int rows = src.rows, cols = src.cols;

    dst = Mat(rows, cols, CV_8UC1);

    // Edge handling: replicate a border of kernel.rows / 2 pixels.
    int sz = kernel.rows / 2;
    copyMakeBorder(src, src, sz, sz, sz, sz, BORDER_REPLICATE);

    for (int i = 0; i < rows; i++)
    {
        uchar *dptr = dst.ptr<uchar>(i);
        for (int j = 0; j < cols; j++)
        {
            float value = 0;

            for (int k = -sz; k <= sz; k++)
            {
                // slightly faster results when we create a ptr due to more efficient memory access.
                uchar *sptr = src.ptr<uchar>(i + sz + k);
                for (int l = -sz; l <= sz; l++)
                {
                    value += kernel.ptr<float>(k + sz)[l + sz] * sptr[j + sz + l];
                }
            }
            // Clamp the float accumulator to the 0..255 uchar range.
            dptr[j] = saturate_cast<uchar>(value);
        }
    }
}
//! [convolution-1D-scalar]
/**
 * Scalar 1-D convolution of a single row.
 * @param src    1xN CV_8UC1 row vector (local copy gets a replicated border)
 * @param dst    output 1xN CV_8UC1 row vector
 * @param kernel 1xK CV_32F row kernel with odd length
 */
void conv1d(Mat src, Mat &dst, Mat kernel)
{
    //! [convolution-1D-border]
    int len = src.cols;
    dst = Mat(1, len, CV_8UC1);

    // Edge handling: replicate kernel.cols / 2 samples on each side.
    int sz = kernel.cols / 2;
    copyMakeBorder(src, src, 0, 0, sz, sz, BORDER_REPLICATE);
    //! [convolution-1D-border]

    //! [convolution-1D-scalar-main]
    for (int i = 0; i < len; i++)
    {
        double value = 0;
        for (int k = -sz; k <= sz; k++)
            value += src.ptr<uchar>(0)[i + k + sz] * kernel.ptr<float>(0)[k + sz];

        dst.ptr<uchar>(0)[i] = saturate_cast<uchar>(value);
    }
    //! [convolution-1D-scalar-main]
}
//! [convolution-1D-scalar]
//! [convolution-1D-vector]
/**
 * Vectorized 1-D convolution using OpenCV universal intrinsics.
 * Accumulates kernel-weighted samples of one image row into the float
 * array `ans`; the caller is responsible for zero-initializing `ans`.
 * @param src  input 8-bit image (row `row` is convolved)
 * @param kernel CV_32F kernel matrix (row `rowk` is used)
 * @param ans  output accumulator of at least `len` floats, added to in place
 * @param row  image row to convolve (default 0)
 * @param rowk kernel row to use (default 0)
 * @param len  number of output samples; -1 (default) means src.cols
 */
void conv1dsimd(Mat src, Mat kernel, float *ans, int row = 0, int rowk = 0, int len = -1)
{
    if (len == -1)
        len = src.cols;

    //! [convolution-1D-convert]
    // Widen the 8-bit samples to float so they can be loaded into v_float32.
    Mat src_32, kernel_32;
    const int alpha = 1;
    src.convertTo(src_32, CV_32FC1, alpha);

    int ksize = kernel.cols, sz = kernel.cols / 2;
    copyMakeBorder(src_32, src_32, 0, 0, sz, sz, BORDER_REPLICATE);
    //! [convolution-1D-convert]

    //! [convolution-1D-main]
    //! [convolution-1D-main-h1]
    // step = number of float lanes the target's SIMD registers hold.
    int step = v_float32().nlanes;
    float *sptr = src_32.ptr<float>(row), *kptr = kernel.ptr<float>(rowk);
    for (int k = 0; k < ksize; k++)
    {
        //! [convolution-1D-main-h1]
        //! [convolution-1D-main-h2]
        // Broadcast one kernel tap across all lanes, then fused
        // load-multiply-accumulate over full vectors.
        v_float32 kernel_wide = vx_setall_f32(kptr[k]);
        int i;
        for (i = 0; i + step < len; i += step)
        {
            v_float32 window = vx_load(sptr + i + k);
            v_float32 sum = vx_load(ans + i) + kernel_wide * window;
            v_store(ans + i, sum);
        }
        //! [convolution-1D-main-h2]

        //! [convolution-1D-main-h3]
        // Scalar tail for the samples that don't fill a whole vector.
        for (; i < len; i++)
        {
            *(ans + i) += sptr[i + k]*kptr[k];
        }
        //! [convolution-1D-main-h3]
    }
    //! [convolution-1D-main]
}
//! [convolution-1D-vector]
//! [convolution-2D]
/**
 * Vectorized 2-D convolution: decomposes the 2-D kernel into per-row 1-D
 * convolutions (conv1dsimd) and accumulates them into a float image, which
 * is finally converted back to 8-bit.
 * @param src    input 8-bit single-channel image (local copy gets a
 *               top/bottom border; conv1dsimd adds the horizontal border)
 * @param dst    output CV_8UC1 image, same size as the original src
 * @param kernel square CV_32F kernel with odd side length
 */
void convolute_simd(Mat src, Mat &dst, Mat kernel)
{
    //! [convolution-2D-init]
    int rows = src.rows, cols = src.cols;
    int ksize = kernel.rows, sz = ksize / 2;
    dst = Mat(rows, cols, CV_32FC1);

    copyMakeBorder(src, src, sz, sz, 0, 0, BORDER_REPLICATE);

    int step = v_float32().nlanes;
    //! [convolution-2D-init]

    //! [convolution-2D-main]
    for (int i = 0; i < rows; i++)
    {
        for (int k = 0; k < ksize; k++)
        {
            // Row i+k of the bordered source convolved with kernel row k.
            // N-sized scratch buffer (N is the file-level capacity constant).
            float ans[N] = {0};
            conv1dsimd(src, kernel, ans, i + k, k, cols);

            // Vectorized accumulation into the output row, scalar tail after.
            int j;
            for (j = 0; j + step < cols; j += step)
            {
                v_float32 sum = vx_load(&dst.ptr<float>(i)[j]) + vx_load(&ans[j]);
                v_store(&dst.ptr<float>(i)[j], sum);
            }

            for (; j < cols; j++)
                dst.ptr<float>(i)[j] += ans[j];
        }
    }
    //! [convolution-2D-main]

    //! [convolution-2D-conv]
    const int alpha = 1;
    dst.convertTo(dst, CV_8UC1, alpha);
    //! [convolution-2D-conv]
}
//! [convolution-2D]
/**
 * Print a short usage banner for this sample.
 * @param progName executable name as passed in argv[0]
 */
static void help(char *progName)
{
    // Adjacent string literals are concatenated by the compiler; the printed
    // text is identical to the original multi-insertion form.
    cout << endl
         << " This program shows how to use the OpenCV parallel_for_ function and \n"
            " compares the performance of the sequential and parallel implementations for a \n"
            " convolution operation\n"
            " Usage:\n "
         << progName << " [image_path -- default lena.jpg] " << endl
         << endl;
}
}
/**
 * Entry point: benchmarks scalar vs universal-intrinsics convolution, first
 * in 1-D on random data, then in 2-D on a grayscale image with a Sobel-like
 * kernel, displaying both 2-D results.
 */
int main(int argc, char *argv[])
{
    //  1-D Convolution  //
    // Random 1xN source row and 1xK float kernel.
    Mat vsrc(1, N, CV_8UC1), k(1, K, CV_32FC1), vdst;
    RNG rng(time(0));
    rng.RNG::fill(vsrc, RNG::UNIFORM, Scalar(0), Scalar(255));
    rng.RNG::fill(k, RNG::UNIFORM, Scalar(-50), Scalar(50));

    double t = (double)getTickCount();
    conv1d(vsrc, vdst, k);
    t = ((double)getTickCount() - t) / getTickFrequency();
    cout << " Sequential 1-D convolution implementation: " << t << "s" << endl;

    t = (double)getTickCount();
    // conv1dsimd accumulates into ans, so it must start zeroed.
    float ans[N] = {0};
    conv1dsimd(vsrc, k, ans);
    t = ((double)getTickCount() - t) / getTickFrequency();
    cout << " Vectorized 1-D convolution implementation: " << t << "s" << endl;

    //  2-D Convolution  //
    help(argv[0]);

    const char *filepath = argc >= 2 ? argv[1] : "../../../../data/lena.jpg";

    Mat src, dst1, dst2, kernel;
    src = imread(filepath, IMREAD_GRAYSCALE);

    if (src.empty())
    {
        cerr << "Can't open [" << filepath << "]" << endl;
        return EXIT_FAILURE;
    }
    namedWindow("Input", 1);
    namedWindow("Output", 1);
    imshow("Input", src);

    // 3x3 Sobel-style horizontal-edge kernel.
    kernel = (Mat_<float>(3, 3) << 1, 0, -1,
              2, 0, -2,
              1, 0, -1);

    t = (double)getTickCount();
    conv_seq(src, dst1, kernel);
    t = ((double)getTickCount() - t) / getTickFrequency();
    cout << " Sequential 2-D convolution implementation: " << t << "s" << endl;

    imshow("Output", dst1);
    waitKey(0);

    t = (double)getTickCount();
    convolute_simd(src, dst2, kernel);
    t = ((double)getTickCount() - t) / getTickFrequency();
    cout << " Vectorized 2-D convolution implementation: " << t << "s" << endl
         << endl;

    imshow("Output", dst2);
    waitKey(0);

    return 0;
}

View File

@ -0,0 +1,98 @@
#include <opencv2/features2d.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
using namespace std;
using namespace cv;
const float inlier_threshold = 2.5f; // Distance threshold to identify inliers with homography check
const float nn_match_ratio = 0.8f; // Nearest neighbor matching ratio
/**
 * A-KAZE matching demo: detects A-KAZE keypoints in two images, matches them
 * with 2-NN Hamming brute force, filters by the ratio test, verifies matches
 * against a known ground-truth homography, and reports/draws the inliers.
 */
int main(int argc, char* argv[])
{
    //! [load]
    CommandLineParser parser(argc, argv,
                             "{@img1 | graf1.png | input image 1}"
                             "{@img2 | graf3.png | input image 2}"
                             "{@homography | H1to3p.xml | homography matrix}");
    Mat img1 = imread( samples::findFile( parser.get<String>("@img1") ), IMREAD_GRAYSCALE);
    Mat img2 = imread( samples::findFile( parser.get<String>("@img2") ), IMREAD_GRAYSCALE);

    // Ground-truth homography mapping img1 coordinates into img2.
    Mat homography;
    FileStorage fs( samples::findFile( parser.get<String>("@homography") ), FileStorage::READ);
    fs.getFirstTopLevelNode() >> homography;
    //! [load]

    //! [AKAZE]
    vector<KeyPoint> kpts1, kpts2;
    Mat desc1, desc2;

    Ptr<AKAZE> akaze = AKAZE::create();
    akaze->detectAndCompute(img1, noArray(), kpts1, desc1);
    akaze->detectAndCompute(img2, noArray(), kpts2, desc2);
    //! [AKAZE]

    //! [2-nn matching]
    BFMatcher matcher(NORM_HAMMING);
    vector< vector<DMatch> > nn_matches;
    matcher.knnMatch(desc1, desc2, nn_matches, 2);
    //! [2-nn matching]

    //! [ratio test filtering]
    // Lowe's ratio test: keep a match only if its best distance is clearly
    // smaller than the second-best one.
    vector<KeyPoint> matched1, matched2;
    for(size_t i = 0; i < nn_matches.size(); i++) {
        DMatch first = nn_matches[i][0];
        float dist1 = nn_matches[i][0].distance;
        float dist2 = nn_matches[i][1].distance;

        if(dist1 < nn_match_ratio * dist2) {
            matched1.push_back(kpts1[first.queryIdx]);
            matched2.push_back(kpts2[first.trainIdx]);
        }
    }
    //! [ratio test filtering]

    //! [homography check]
    // Project each img1 keypoint through the ground-truth homography and keep
    // the pair only if it lands within inlier_threshold pixels of its match.
    vector<DMatch> good_matches;
    vector<KeyPoint> inliers1, inliers2;
    for(size_t i = 0; i < matched1.size(); i++) {
        Mat col = Mat::ones(3, 1, CV_64F);
        col.at<double>(0) = matched1[i].pt.x;
        col.at<double>(1) = matched1[i].pt.y;

        col = homography * col;
        col /= col.at<double>(2);  // back from homogeneous coordinates
        double dist = sqrt( pow(col.at<double>(0) - matched2[i].pt.x, 2) +
                            pow(col.at<double>(1) - matched2[i].pt.y, 2));

        if(dist < inlier_threshold) {
            int new_i = static_cast<int>(inliers1.size());
            inliers1.push_back(matched1[i]);
            inliers2.push_back(matched2[i]);
            good_matches.push_back(DMatch(new_i, new_i, 0));
        }
    }
    //! [homography check]

    //! [draw final matches]
    Mat res;
    drawMatches(img1, inliers1, img2, inliers2, good_matches, res);
    imwrite("akaze_result.png", res);

    double inlier_ratio = inliers1.size() / (double) matched1.size();
    cout << "A-KAZE Matching Results" << endl;
    cout << "*******************************" << endl;
    cout << "# Keypoints 1:                        \t" << kpts1.size() << endl;
    cout << "# Keypoints 2:                        \t" << kpts2.size() << endl;
    cout << "# Matches:                            \t" << matched1.size() << endl;
    cout << "# Inliers:                            \t" << inliers1.size() << endl;
    cout << "# Inliers Ratio:                      \t" << inlier_ratio << endl;
    cout << endl;

    imshow("result", res);
    waitKey();
    //! [draw final matches]

    return 0;
}

View File

@ -0,0 +1,213 @@
#include <opencv2/features2d.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/highgui.hpp> //for imshow
#include <vector>
#include <iostream>
#include <iomanip>
#include "stats.h" // Stats structure definition
#include "utils.h" // Drawing and printing functions
using namespace std;
using namespace cv;
const double akaze_thresh = 3e-4; // AKAZE detection threshold set to locate about 1000 keypoints
const double ransac_thresh = 2.5f; // RANSAC inlier threshold
const double nn_match_ratio = 0.8f; // Nearest-neighbour matching ratio
const int bb_min_inliers = 100; // Minimal number of inliers to draw bounding box
const int stats_update_period = 10; // On-screen statistics are updated every 10 frames
namespace example {
/**
 * Tracks a user-selected planar object across video frames: a reference
 * frame is captured once (setFirstFrame), then every new frame is matched
 * against it (process) using the supplied detector/matcher pair.
 */
class Tracker
{
public:
    Tracker(Ptr<Feature2D> _detector, Ptr<DescriptorMatcher> _matcher) :
        detector(_detector),
        matcher(_matcher)
    {}

    // Capture the reference frame and detect features inside the bounding box.
    void setFirstFrame(const Mat frame, vector<Point2f> bb, string title, Stats& stats);
    // Match a new frame against the reference; returns a side-by-side visualization.
    Mat process(const Mat frame, Stats& stats);
    Ptr<Feature2D> getDetector() {
        return detector;
    }

protected:
    Ptr<Feature2D> detector;
    Ptr<DescriptorMatcher> matcher;
    Mat first_frame, first_desc;      // reference frame and its descriptors
    vector<KeyPoint> first_kp;        // reference keypoints
    vector<Point2f> object_bb;        // object bounding box in the reference frame
};
void Tracker::setFirstFrame(const Mat frame, vector<Point2f> bb, string title, Stats& stats)
{
cv::Point *ptMask = new cv::Point[bb.size()];
const Point* ptContain = { &ptMask[0] };
int iSize = static_cast<int>(bb.size());
for (size_t i=0; i<bb.size(); i++) {
ptMask[i].x = static_cast<int>(bb[i].x);
ptMask[i].y = static_cast<int>(bb[i].y);
}
first_frame = frame.clone();
cv::Mat matMask = cv::Mat::zeros(frame.size(), CV_8UC1);
cv::fillPoly(matMask, &ptContain, &iSize, 1, cv::Scalar::all(255));
detector->detectAndCompute(first_frame, matMask, first_kp, first_desc);
stats.keypoints = (int)first_kp.size();
drawBoundingBox(first_frame, bb);
putText(first_frame, title, Point(0, 60), FONT_HERSHEY_PLAIN, 5, Scalar::all(0), 4);
object_bb = bb;
delete[] ptMask;
}
/**
 * Match one video frame against the stored reference frame, estimate the
 * object's homography with RANSAC, and return a drawMatches visualization
 * (reference on the left, current frame with bounding box on the right).
 * @param frame current video frame
 * @param stats updated with keypoints, matches, inliers, inlier ratio and fps
 * @return side-by-side match visualization image
 */
Mat Tracker::process(const Mat frame, Stats& stats)
{
    TickMeter tm;
    vector<KeyPoint> kp;
    Mat desc;

    tm.start();
    detector->detectAndCompute(frame, noArray(), kp, desc);
    stats.keypoints = (int)kp.size();

    // 2-NN matching + Lowe's ratio test against the reference descriptors.
    vector< vector<DMatch> > matches;
    vector<KeyPoint> matched1, matched2;
    matcher->knnMatch(first_desc, desc, matches, 2);
    for(unsigned i = 0; i < matches.size(); i++) {
        if(matches[i][0].distance < nn_match_ratio * matches[i][1].distance) {
            matched1.push_back(first_kp[matches[i][0].queryIdx]);
            matched2.push_back(      kp[matches[i][0].trainIdx]);
        }
    }
    stats.matches = (int)matched1.size();

    Mat inlier_mask, homography;
    vector<KeyPoint> inliers1, inliers2;
    vector<DMatch> inlier_matches;
    // findHomography needs at least 4 point correspondences.
    if(matched1.size() >= 4) {
        homography = findHomography(Points(matched1), Points(matched2),
                                    RANSAC, ransac_thresh, inlier_mask);
    }
    tm.stop();
    stats.fps = 1. / tm.getTimeSec();

    // Tracking failed: show the two frames side by side with no matches.
    if(matched1.size() < 4 || homography.empty()) {
        Mat res;
        hconcat(first_frame, frame, res);
        stats.inliers = 0;
        stats.ratio = 0;
        return res;
    }
    // Keep only the RANSAC inliers for drawing and statistics.
    for(unsigned i = 0; i < matched1.size(); i++) {
        if(inlier_mask.at<uchar>(i)) {
            int new_i = static_cast<int>(inliers1.size());
            inliers1.push_back(matched1[i]);
            inliers2.push_back(matched2[i]);
            inlier_matches.push_back(DMatch(new_i, new_i, 0));
        }
    }
    stats.inliers = (int)inliers1.size();
    stats.ratio = stats.inliers * 1.0 / stats.matches;

    // Project the reference bounding box into the current frame; only draw it
    // when enough inliers support the homography.
    vector<Point2f> new_bb;
    perspectiveTransform(object_bb, new_bb, homography);
    Mat frame_with_bb = frame.clone();
    if(stats.inliers >= bb_min_inliers) {
        drawBoundingBox(frame_with_bb, new_bb);
    }
    Mat res;
    drawMatches(first_frame, inliers1, frame_with_bb, inliers2,
                inlier_matches, res,
                Scalar(255, 0, 0), Scalar(255, 0, 0));
    return res;
}
}
/**
 * @brief Entry point: runs AKAZE and ORB trackers side by side on a camera or
 * video input, then prints averaged statistics for both.
 *
 * Fixes over the original:
 *  - the preview loop now stops cleanly if the stream ends before a key press
 *    (the original displayed an empty Mat);
 *  - the final averaging guards against division by zero when no frame was
 *    processed (the original computed `stats /= i - 1` with i possibly 1).
 */
int main(int argc, char **argv)
{
    CommandLineParser parser(argc, argv, "{@input_path |0|input path can be a camera id, like 0,1,2 or a video filename}");
    parser.printMessage();
    string input_path = parser.get<string>(0);
    string video_name = input_path;
    VideoCapture video_in;

    // A single digit is interpreted as a camera index, anything else as a filename.
    if ( ( isdigit(input_path[0]) && input_path.size() == 1 ) )
    {
        int camera_no = input_path[0] - '0';
        video_in.open( camera_no );
    }
    else {
        video_in.open(video_name);
    }

    if(!video_in.isOpened()) {
        cerr << "Couldn't open " << video_name << endl;
        return 1;
    }

    Stats stats, akaze_stats, orb_stats;
    Ptr<AKAZE> akaze = AKAZE::create();
    akaze->setThreshold(akaze_thresh);
    Ptr<ORB> orb = ORB::create();
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
    example::Tracker akaze_tracker(akaze, matcher);
    example::Tracker orb_tracker(orb, matcher);

    Mat frame;
    namedWindow(video_name, WINDOW_NORMAL);
    cout << "\nPress any key to stop the video and select a bounding box" << endl;

    // Preview loop: show frames until the user presses a key.
    // Bail out if the stream runs dry so we never pass an empty Mat to imshow.
    while ( waitKey(1) < 1 )
    {
        video_in >> frame;
        if(frame.empty()) {
            cerr << "Couldn't read a frame from " << video_name << endl;
            return 1;
        }
        cv::resizeWindow(video_name, frame.size());
        imshow(video_name, frame);
    }

    // Let the user pick the object; convert the rect to 4 corner points.
    vector<Point2f> bb;
    cv::Rect uBox = cv::selectROI(video_name, frame);
    bb.push_back(cv::Point2f(static_cast<float>(uBox.x), static_cast<float>(uBox.y)));
    bb.push_back(cv::Point2f(static_cast<float>(uBox.x+uBox.width), static_cast<float>(uBox.y)));
    bb.push_back(cv::Point2f(static_cast<float>(uBox.x+uBox.width), static_cast<float>(uBox.y+uBox.height)));
    bb.push_back(cv::Point2f(static_cast<float>(uBox.x), static_cast<float>(uBox.y+uBox.height)));

    akaze_tracker.setFirstFrame(frame, bb, "AKAZE", stats);
    orb_tracker.setFirstFrame(frame, bb, "ORB", stats);

    Stats akaze_draw_stats, orb_draw_stats;
    Mat akaze_res, orb_res, res_frame;
    int i = 0;
    for(;;) {
        i++;
        // Refresh the on-screen numbers only every stats_update_period frames.
        bool update_stats = (i % stats_update_period == 0);
        video_in >> frame;
        // stop the program if no more images
        if(frame.empty()) break;

        akaze_res = akaze_tracker.process(frame, stats);
        akaze_stats += stats;
        if(update_stats) {
            akaze_draw_stats = stats;
        }

        // Limit ORB to the same keypoint budget AKAZE used for a fair comparison.
        orb->setMaxFeatures(stats.keypoints);
        orb_res = orb_tracker.process(frame, stats);
        orb_stats += stats;
        if(update_stats) {
            orb_draw_stats = stats;
        }

        drawStatistics(akaze_res, akaze_draw_stats);
        drawStatistics(orb_res, orb_draw_stats);
        vconcat(akaze_res, orb_res, res_frame);
        cv::imshow(video_name, res_frame);
        if(waitKey(1)==27) break; //quit on ESC button
    }

    // i was incremented once for the empty frame that ended the loop, so the
    // number of processed frames is i - 1; guard against dividing by zero.
    int processed_frames = i - 1;
    if(processed_frames > 0) {
        akaze_stats /= processed_frames;
        orb_stats /= processed_frames;
    }
    printStatistics("AKAZE", akaze_stats);
    printStatistics("ORB", orb_stats);
    return 0;
}

View File

@ -0,0 +1,38 @@
#ifndef STATS_H
#define STATS_H
/**
 * @brief Aggregated per-frame tracking statistics.
 *
 * Supports accumulation over frames with operator+= and averaging with
 * operator/= (callers must pass a non-zero divisor).
 */
struct Stats
{
    int matches = 0;    // descriptor matches that passed the ratio test
    int inliers = 0;    // matches kept by RANSAC
    double ratio = 0;   // inliers / matches
    int keypoints = 0;  // keypoints detected in the frame
    double fps = 0.;    // processing speed of the frame

    Stats() = default;

    /// Element-wise accumulation of another sample.
    Stats& operator+=(const Stats& op) {
        matches += op.matches;
        inliers += op.inliers;
        ratio += op.ratio;
        keypoints += op.keypoints;
        fps += op.fps;
        return *this;
    }

    /// Element-wise division, used to average accumulated stats.
    /// Integer fields use integer division; num must be non-zero.
    Stats& operator/=(int num)
    {
        matches /= num;
        inliers /= num;
        ratio /= num;
        keypoints /= num;
        fps /= num;
        return *this;
    }
};
#endif // STATS_H

View File

@ -0,0 +1,62 @@
#ifndef UTILS_H
#define UTILS_H
#include <opencv2/core.hpp>
#include <vector>
#include "stats.h"
using namespace std;
using namespace cv;
void drawBoundingBox(Mat image, vector<Point2f> bb);
void drawStatistics(Mat image, const Stats& stats);
void printStatistics(string name, Stats stats);
vector<Point2f> Points(vector<KeyPoint> keypoints);
Rect2d selectROI(const String &video_name, const Mat &frame);
/**
 * @brief Draws a closed red polygon through the given vertices.
 *
 * A polygon needs at least two vertices. The original code indexed
 * bb[bb.size() - 1] and looped to bb.size() - 1 with an unsigned counter,
 * which wraps around and reads out of bounds when bb is empty; this version
 * guards against that.
 *
 * @param image image to draw into (modified in place; Mat header passed by value)
 * @param bb    polygon vertices in drawing order
 */
void drawBoundingBox(Mat image, vector<Point2f> bb)
{
    if(bb.size() < 2) {
        return; // nothing to draw; avoids unsigned wrap-around on bb.size() - 1
    }
    for(size_t i = 0; i < bb.size() - 1; i++) {
        line(image, bb[i], bb[i + 1], Scalar(0, 0, 255), 2);
    }
    // close the polygon: last vertex back to the first
    line(image, bb[bb.size() - 1], bb[0], Scalar(0, 0, 255), 2);
}
/**
 * @brief Overlays the tracking statistics in the bottom-left corner of an image.
 *
 * Renders four lines (matches, inliers, inlier ratio, FPS), 30 pixels apart,
 * starting 120 pixels above the bottom edge.
 *
 * @param image image to annotate (modified in place)
 * @param stats statistics to display
 */
void drawStatistics(Mat image, const Stats& stats)
{
    static const int font = FONT_HERSHEY_PLAIN;

    stringstream matches_line, inliers_line, ratio_line, fps_line;
    matches_line << "Matches: " << stats.matches;
    inliers_line << "Inliers: " << stats.inliers;
    ratio_line << "Inlier ratio: " << setprecision(2) << stats.ratio;
    fps_line << "FPS: " << std::fixed << setprecision(2) << stats.fps;

    // One table of lines, drawn top to bottom with a fixed vertical step.
    const string lines[4] = { matches_line.str(), inliers_line.str(),
                              ratio_line.str(), fps_line.str() };
    int baseline_y = image.rows - 120;
    for (int idx = 0; idx < 4; idx++, baseline_y += 30) {
        putText(image, lines[idx], Point(0, baseline_y), font, 2, Scalar::all(255), 3);
    }
}
/**
 * @brief Prints a labeled summary of the statistics to standard output.
 *
 * Note: like the original, this leaves std::fixed and the precision flag set
 * on cout after the FPS line.
 *
 * @param name  heading printed above the values (e.g. "AKAZE")
 * @param stats statistics to print (taken by value, unmodified)
 */
void printStatistics(string name, Stats stats)
{
    ostream& out = cout;

    // Heading
    out << name << endl;
    out << "----------" << endl;

    // Values, one per line, in the same order the tracker fills them.
    out << "Matches " << stats.matches << endl;
    out << "Inliers " << stats.inliers << endl;
    out << "Inlier ratio " << setprecision(2) << stats.ratio << endl;
    out << "Keypoints " << stats.keypoints << endl;
    out << "FPS " << std::fixed << setprecision(2) << stats.fps << endl;
    out << endl;
}
/**
 * @brief Extracts the pixel coordinates (KeyPoint::pt) from a list of keypoints.
 *
 * @param keypoints input keypoints (by value, to match the forward declaration)
 * @return vector of 2D points, one per keypoint, in the same order
 */
vector<Point2f> Points(vector<KeyPoint> keypoints)
{
    vector<Point2f> res;
    res.reserve(keypoints.size()); // single allocation instead of repeated growth
    for(size_t i = 0; i < keypoints.size(); i++) {
        res.push_back(keypoints[i].pt);
    }
    return res;
}
#endif // UTILS_H

View File

@ -0,0 +1,185 @@
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/calib3d.hpp>
using namespace std;
using namespace cv;
namespace
{
enum Pattern { CHESSBOARD, CIRCLES_GRID, ASYMMETRIC_CIRCLES_GRID };
/**
 * @brief Fills 'corners' with the 3D object points of a planar calibration pattern.
 *
 * All points lie on the z = 0 plane. For CHESSBOARD / CIRCLES_GRID the points
 * form a regular grid spaced by squareSize; for ASYMMETRIC_CIRCLES_GRID every
 * other row is shifted by one square.
 *
 * @param corners output point list (cleared first)
 */
void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners, Pattern patternType = CHESSBOARD)
{
    corners.resize(0);

    if (patternType == CHESSBOARD || patternType == CIRCLES_GRID)
    {
        // Regular grid: evenly spaced in both directions.
        for (int row = 0; row < boardSize.height; row++)
            for (int col = 0; col < boardSize.width; col++)
                corners.push_back(Point3f(float(col*squareSize),
                                          float(row*squareSize), 0));
    }
    else if (patternType == ASYMMETRIC_CIRCLES_GRID)
    {
        // Staggered grid: odd rows offset horizontally by one square.
        for (int row = 0; row < boardSize.height; row++)
            for (int col = 0; col < boardSize.width; col++)
                corners.push_back(Point3f(float((2*col + row % 2)*squareSize),
                                          float(row*squareSize), 0));
    }
    else
    {
        CV_Error(Error::StsBadArg, "Unknown pattern type\n");
    }
}
/**
 * @brief Euclidean homography induced by a plane between two camera frames:
 * H = R_1to2 + (1/d) * t_1to2 * n^T, where n and 1/d describe the plane
 * (normal and inverse distance) in the first camera frame.
 */
Mat computeHomography(const Mat &R_1to2, const Mat &tvec_1to2, const double d_inv, const Mat &normal)
{
    // Rotation part plus rank-one translation term.
    return R_1to2 + d_inv * tvec_1to2 * normal.t();
}
/**
 * @brief Computes the relative transformation from camera frame 1 to camera
 * frame 2, given both camera poses expressed in the same object frame.
 *
 * @param R1,tvec1   pose of the object in camera 1 (c1Mo)
 * @param R2,tvec2   pose of the object in camera 2 (c2Mo)
 * @param R_1to2,tvec_1to2 output relative rotation and translation (c2Mc1)
 */
void computeC2MC1(const Mat &R1, const Mat &tvec1, const Mat &R2, const Mat &tvec2,
Mat &R_1to2, Mat &tvec_1to2)
{
//c2Mc1 = c2Mo * oMc1 = c2Mo * c1Mo.inv()
R_1to2 = R2 * R1.t();
tvec_1to2 = R2 * (-R1.t()*tvec1) + tvec2;
}
/**
 * @brief Demonstrates decomposing a homography into rotation, translation and
 * plane normal with decomposeHomographyMat().
 *
 * Estimates the chessboard pose in both images with solvePnP, derives the
 * camera displacement and the induced homography, then compares the
 * decomposition of (a) that homography and (b) the homography estimated from
 * point correspondences by findHomography() against the known ground truth.
 *
 * Note: the //! [tag] comments are snippet anchors for the OpenCV tutorial
 * documentation and must be kept as-is.
 *
 * @param img1Path        first chessboard image
 * @param img2Path        second chessboard image
 * @param patternSize     inner-corner count of the chessboard (width x height)
 * @param squareSize      side length of one square (unit of the translations)
 * @param intrinsicsPath  YAML file with camera_matrix / distortion_coefficients
 */
void decomposeHomography(const string &img1Path, const string &img2Path, const Size &patternSize,
const float squareSize, const string &intrinsicsPath)
{
Mat img1 = imread( samples::findFile( img1Path) );
Mat img2 = imread( samples::findFile( img2Path) );
vector<Point2f> corners1, corners2;
bool found1 = findChessboardCorners(img1, patternSize, corners1);
bool found2 = findChessboardCorners(img2, patternSize, corners2);
if (!found1 || !found2)
{
cout << "Error, cannot find the chessboard corners in both images." << endl;
return;
}
//! [compute-poses]
vector<Point3f> objectPoints;
calcChessboardCorners(patternSize, squareSize, objectPoints);
// Load the camera intrinsics, then recover both chessboard poses with PnP.
FileStorage fs( samples::findFile( intrinsicsPath ), FileStorage::READ);
Mat cameraMatrix, distCoeffs;
fs["camera_matrix"] >> cameraMatrix;
fs["distortion_coefficients"] >> distCoeffs;
Mat rvec1, tvec1;
solvePnP(objectPoints, corners1, cameraMatrix, distCoeffs, rvec1, tvec1);
Mat rvec2, tvec2;
solvePnP(objectPoints, corners2, cameraMatrix, distCoeffs, rvec2, tvec2);
//! [compute-poses]
//! [compute-camera-displacement]
Mat R1, R2;
Rodrigues(rvec1, R1);
Rodrigues(rvec2, R2);
Mat R_1to2, t_1to2;
computeC2MC1(R1, tvec1, R2, tvec2, R_1to2, t_1to2);
Mat rvec_1to2;
Rodrigues(R_1to2, rvec_1to2);
//! [compute-camera-displacement]
//! [compute-plane-normal-at-camera-pose-1]
// Chessboard normal is +Z in the object frame; rotate it into camera 1's frame.
Mat normal = (Mat_<double>(3,1) << 0, 0, 1);
Mat normal1 = R1*normal;
//! [compute-plane-normal-at-camera-pose-1]
//! [compute-plane-distance-to-the-camera-frame-1]
// Plane distance d: projection of the (transformed) plane origin on the normal.
Mat origin(3, 1, CV_64F, Scalar(0));
Mat origin1 = R1*origin + tvec1;
double d_inv1 = 1.0 / normal1.dot(origin1);
//! [compute-plane-distance-to-the-camera-frame-1]
//! [compute-homography-from-camera-displacement]
// H = R + (1/d) t n^T in Euclidean space, then conjugate by K for pixel space
// and normalize so H(2,2) == 1.
Mat homography_euclidean = computeHomography(R_1to2, t_1to2, d_inv1, normal1);
Mat homography = cameraMatrix * homography_euclidean * cameraMatrix.inv();
homography /= homography.at<double>(2,2);
homography_euclidean /= homography_euclidean.at<double>(2,2);
//! [compute-homography-from-camera-displacement]
//! [decompose-homography-from-camera-displacement]
vector<Mat> Rs_decomp, ts_decomp, normals_decomp;
int solutions = decomposeHomographyMat(homography, cameraMatrix, Rs_decomp, ts_decomp, normals_decomp);
cout << "Decompose homography matrix computed from the camera displacement:" << endl << endl;
for (int i = 0; i < solutions; i++)
{
// decomposeHomographyMat() returns t scaled by 1/d; rescale to compare.
double factor_d1 = 1.0 / d_inv1;
Mat rvec_decomp;
Rodrigues(Rs_decomp[i], rvec_decomp);
cout << "Solution " << i << ":" << endl;
cout << "rvec from homography decomposition: " << rvec_decomp.t() << endl;
cout << "rvec from camera displacement: " << rvec_1to2.t() << endl;
cout << "tvec from homography decomposition: " << ts_decomp[i].t() << " and scaled by d: " << factor_d1 * ts_decomp[i].t() << endl;
cout << "tvec from camera displacement: " << t_1to2.t() << endl;
cout << "plane normal from homography decomposition: " << normals_decomp[i].t() << endl;
cout << "plane normal at camera 1 pose: " << normal1.t() << endl << endl;
}
//! [decompose-homography-from-camera-displacement]
//! [estimate homography]
Mat H = findHomography(corners1, corners2);
//! [estimate homography]
//! [decompose-homography-estimated-by-findHomography]
// Same comparison, this time for the homography estimated from point matches.
solutions = decomposeHomographyMat(H, cameraMatrix, Rs_decomp, ts_decomp, normals_decomp);
cout << "Decompose homography matrix estimated by findHomography():" << endl << endl;
for (int i = 0; i < solutions; i++)
{
double factor_d1 = 1.0 / d_inv1;
Mat rvec_decomp;
Rodrigues(Rs_decomp[i], rvec_decomp);
cout << "Solution " << i << ":" << endl;
cout << "rvec from homography decomposition: " << rvec_decomp.t() << endl;
cout << "rvec from camera displacement: " << rvec_1to2.t() << endl;
cout << "tvec from homography decomposition: " << ts_decomp[i].t() << " and scaled by d: " << factor_d1 * ts_decomp[i].t() << endl;
cout << "tvec from camera displacement: " << t_1to2.t() << endl;
cout << "plane normal from homography decomposition: " << normals_decomp[i].t() << endl;
cout << "plane normal at camera 1 pose: " << normal1.t() << endl << endl;
}
//! [decompose-homography-estimated-by-findHomography]
}
const char* params
= "{ help h | | print usage }"
"{ image1 | left02.jpg | path to the source chessboard image }"
"{ image2 | left01.jpg | path to the desired chessboard image }"
"{ intrinsics | left_intrinsics.yml | path to camera intrinsics }"
"{ width bw | 9 | chessboard width }"
"{ height bh | 6 | chessboard height }"
"{ square_size | 0.025 | chessboard square size }";
}
int main(int argc, char *argv[])
{
CommandLineParser parser(argc, argv, params);
if ( parser.has("help") )
{
parser.about( "Code for homography tutorial.\n"
"Example 4: decompose the homography matrix.\n" );
parser.printMessage();
return 0;
}
Size patternSize(parser.get<int>("width"), parser.get<int>("height"));
float squareSize = (float) parser.get<double>("square_size");
decomposeHomography(parser.get<String>("image1"),
parser.get<String>("image2"),
patternSize, squareSize,
parser.get<String>("intrinsics"));
return 0;
}

View File

@ -0,0 +1,201 @@
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/calib3d.hpp>
using namespace std;
using namespace cv;
namespace
{
enum Pattern { CHESSBOARD, CIRCLES_GRID, ASYMMETRIC_CIRCLES_GRID };
/**
 * @brief Fills 'corners' with the 3D object points of a planar calibration
 * pattern; all points lie on the z = 0 plane.
 */
void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners, Pattern patternType = CHESSBOARD)
{
corners.resize(0);
switch (patternType)
{
case CHESSBOARD:
case CIRCLES_GRID:
// Regular grid, spaced by squareSize in x and y.
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
corners.push_back(Point3f(float(j*squareSize),
float(i*squareSize), 0));
break;
case ASYMMETRIC_CIRCLES_GRID:
// Staggered grid: odd rows offset horizontally by one square.
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
corners.push_back(Point3f(float((2*j + i % 2)*squareSize),
float(i*squareSize), 0));
break;
default:
CV_Error(Error::StsBadArg, "Unknown pattern type\n");
}
}
//! [compute-homography]
// Euclidean homography induced by the plane (normal, 1/distance in frame 1):
// H = R_1to2 + (1/d) * t_1to2 * n^T
Mat computeHomography(const Mat &R_1to2, const Mat &tvec_1to2, const double d_inv, const Mat &normal)
{
Mat homography = R_1to2 + d_inv * tvec_1to2*normal.t();
return homography;
}
//! [compute-homography]
// Same homography computed directly from the two absolute camera poses
// (expanded form of R_1to2 and t_1to2); used below as a cross-check.
Mat computeHomography(const Mat &R1, const Mat &tvec1, const Mat &R2, const Mat &tvec2,
const double d_inv, const Mat &normal)
{
Mat homography = R2 * R1.t() + d_inv * (-R2 * R1.t() * tvec1 + tvec2) * normal.t();
return homography;
}
//! [compute-c2Mc1]
// Computes the relative transformation from camera frame 1 to camera frame 2
// (c2Mc1) given both camera poses expressed in the object frame.
void computeC2MC1(const Mat &R1, const Mat &tvec1, const Mat &R2, const Mat &tvec2,
Mat &R_1to2, Mat &tvec_1to2)
{
//c2Mc1 = c2Mo * oMc1 = c2Mo * c1Mo.inv()
R_1to2 = R2 * R1.t();
tvec_1to2 = R2 * (-R1.t()*tvec1) + tvec2;
}
//! [compute-c2Mc1]
/**
 * @brief Computes the homography induced by the chessboard plane from the
 * known camera displacement and compares it with findHomography() output.
 *
 * Both chessboard poses are recovered with solvePnP, the camera displacement
 * is derived from them, and the resulting plane-induced homography is checked
 * against (a) the one built from absolute poses and (b) the one estimated
 * from point correspondences; the warped images are shown for comparison.
 *
 * Note: the //! [tag] comments are snippet anchors for the OpenCV tutorial
 * documentation and must be kept as-is.
 */
void homographyFromCameraDisplacement(const string &img1Path, const string &img2Path, const Size &patternSize,
const float squareSize, const string &intrinsicsPath)
{
Mat img1 = imread( samples::findFile( img1Path ) );
Mat img2 = imread( samples::findFile( img2Path ) );
//! [compute-poses]
vector<Point2f> corners1, corners2;
bool found1 = findChessboardCorners(img1, patternSize, corners1);
bool found2 = findChessboardCorners(img2, patternSize, corners2);
if (!found1 || !found2)
{
cout << "Error, cannot find the chessboard corners in both images." << endl;
return;
}
vector<Point3f> objectPoints;
calcChessboardCorners(patternSize, squareSize, objectPoints);
FileStorage fs( samples::findFile( intrinsicsPath ), FileStorage::READ);
Mat cameraMatrix, distCoeffs;
fs["camera_matrix"] >> cameraMatrix;
fs["distortion_coefficients"] >> distCoeffs;
Mat rvec1, tvec1;
solvePnP(objectPoints, corners1, cameraMatrix, distCoeffs, rvec1, tvec1);
Mat rvec2, tvec2;
solvePnP(objectPoints, corners2, cameraMatrix, distCoeffs, rvec2, tvec2);
//! [compute-poses]
// Visualize both estimated poses side by side.
Mat img1_copy_pose = img1.clone(), img2_copy_pose = img2.clone();
Mat img_draw_poses;
drawFrameAxes(img1_copy_pose, cameraMatrix, distCoeffs, rvec1, tvec1, 2*squareSize);
drawFrameAxes(img2_copy_pose, cameraMatrix, distCoeffs, rvec2, tvec2, 2*squareSize);
hconcat(img1_copy_pose, img2_copy_pose, img_draw_poses);
imshow("Chessboard poses", img_draw_poses);
//! [compute-camera-displacement]
Mat R1, R2;
Rodrigues(rvec1, R1);
Rodrigues(rvec2, R2);
Mat R_1to2, t_1to2;
computeC2MC1(R1, tvec1, R2, tvec2, R_1to2, t_1to2);
Mat rvec_1to2;
Rodrigues(R_1to2, rvec_1to2);
//! [compute-camera-displacement]
//! [compute-plane-normal-at-camera-pose-1]
// Chessboard normal is +Z in the object frame; rotate it into camera 1's frame.
Mat normal = (Mat_<double>(3,1) << 0, 0, 1);
Mat normal1 = R1*normal;
//! [compute-plane-normal-at-camera-pose-1]
//! [compute-plane-distance-to-the-camera-frame-1]
// Plane distance d: projection of the (transformed) plane origin on the normal.
Mat origin(3, 1, CV_64F, Scalar(0));
Mat origin1 = R1*origin + tvec1;
double d_inv1 = 1.0 / normal1.dot(origin1);
//! [compute-plane-distance-to-the-camera-frame-1]
//! [compute-homography-from-camera-displacement]
// Euclidean homography, then conjugate by K for pixel space and normalize.
Mat homography_euclidean = computeHomography(R_1to2, t_1to2, d_inv1, normal1);
Mat homography = cameraMatrix * homography_euclidean * cameraMatrix.inv();
homography /= homography.at<double>(2,2);
homography_euclidean /= homography_euclidean.at<double>(2,2);
//! [compute-homography-from-camera-displacement]
//Same but using absolute camera poses instead of camera displacement, just for check
Mat homography_euclidean2 = computeHomography(R1, tvec1, R2, tvec2, d_inv1, normal1);
Mat homography2 = cameraMatrix * homography_euclidean2 * cameraMatrix.inv();
homography_euclidean2 /= homography_euclidean2.at<double>(2,2);
homography2 /= homography2.at<double>(2,2);
cout << "\nEuclidean Homography:\n" << homography_euclidean << endl;
cout << "Euclidean Homography 2:\n" << homography_euclidean2 << endl << endl;
//! [estimate-homography]
Mat H = findHomography(corners1, corners2);
cout << "\nfindHomography H:\n" << H << endl;
//! [estimate-homography]
cout << "homography from camera displacement:\n" << homography << endl;
cout << "homography from absolute camera poses:\n" << homography2 << endl << endl;
//! [warp-chessboard]
Mat img1_warp;
warpPerspective(img1, img1_warp, H, img1.size());
//! [warp-chessboard]
// Warp with the displacement-based homography and compare visually.
Mat img1_warp_custom;
warpPerspective(img1, img1_warp_custom, homography, img1.size());
imshow("Warped image using homography computed from camera displacement", img1_warp_custom);
Mat img_draw_compare;
hconcat(img1_warp, img1_warp_custom, img_draw_compare);
imshow("Warped images comparison", img_draw_compare);
Mat img1_warp_custom2;
warpPerspective(img1, img1_warp_custom2, homography2, img1.size());
imshow("Warped image using homography computed from absolute camera poses", img1_warp_custom2);
waitKey();
}
const char* params
= "{ help h | | print usage }"
"{ image1 | left02.jpg | path to the source chessboard image }"
"{ image2 | left01.jpg | path to the desired chessboard image }"
"{ intrinsics | left_intrinsics.yml | path to camera intrinsics }"
"{ width bw | 9 | chessboard width }"
"{ height bh | 6 | chessboard height }"
"{ square_size | 0.025 | chessboard square size }";
}
int main(int argc, char *argv[])
{
CommandLineParser parser(argc, argv, params);
if (parser.has("help"))
{
parser.about("Code for homography tutorial.\n"
"Example 3: homography from the camera displacement.\n");
parser.printMessage();
return 0;
}
Size patternSize(parser.get<int>("width"), parser.get<int>("height"));
float squareSize = (float) parser.get<double>("square_size");
homographyFromCameraDisplacement(parser.get<String>("image1"),
parser.get<String>("image2"),
patternSize, squareSize,
parser.get<String>("intrinsics"));
return 0;
}

View File

@ -0,0 +1,90 @@
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
using namespace std;
using namespace cv;
namespace
{
/**
 * @brief Stitches two renders of the same scene taken by a purely rotating
 * camera, using the rotation-only homography H = K * R_2to1 * K^-1.
 *
 * The camera poses and intrinsics are hard-coded values exported from Blender.
 * The //! [tag] comments are snippet anchors for the OpenCV tutorial
 * documentation and must be kept as-is.
 */
void basicPanoramaStitching(const string &img1Path, const string &img2Path)
{
Mat img1 = imread( samples::findFile( img1Path ) );
Mat img2 = imread( samples::findFile( img2Path ) );
//! [camera-pose-from-Blender-at-location-1]
// 4x4 pose of the world in camera 1 (c1Mo), exported from Blender.
Mat c1Mo = (Mat_<double>(4,4) << 0.9659258723258972, 0.2588190734386444, 0.0, 1.5529145002365112,
0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443,
-0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654,
0, 0, 0, 1);
//! [camera-pose-from-Blender-at-location-1]
//! [camera-pose-from-Blender-at-location-2]
// 4x4 pose of the world in camera 2 (c2Mo), exported from Blender.
Mat c2Mo = (Mat_<double>(4,4) << 0.9659258723258972, -0.2588190734386444, 0.0, -1.5529145002365112,
-0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443,
0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654,
0, 0, 0, 1);
//! [camera-pose-from-Blender-at-location-2]
//! [camera-intrinsics-from-Blender]
Mat cameraMatrix = (Mat_<double>(3,3) << 700.0, 0.0, 320.0,
0.0, 700.0, 240.0,
0, 0, 1);
//! [camera-intrinsics-from-Blender]
//! [extract-rotation]
// Only the rotation part matters for a rotating camera.
Mat R1 = c1Mo(Range(0,3), Range(0,3));
Mat R2 = c2Mo(Range(0,3), Range(0,3));
//! [extract-rotation]
//! [compute-rotation-displacement]
//c1Mo * oMc2
Mat R_2to1 = R1*R2.t();
//! [compute-rotation-displacement]
//! [compute-homography]
// Rotation-only homography in pixel space, normalized so H(2,2) == 1.
Mat H = cameraMatrix * R_2to1 * cameraMatrix.inv();
H /= H.at<double>(2,2);
cout << "H:\n" << H << endl;
//! [compute-homography]
//! [stitch]
// Warp image 2 into image 1's frame, then paste image 1 over the left half.
Mat img_stitch;
warpPerspective(img2, img_stitch, H, Size(img2.cols*2, img2.rows));
Mat half = img_stitch(Rect(0, 0, img1.cols, img1.rows));
img1.copyTo(half);
//! [stitch]
// Show the two inputs side by side (with a spacer) and the stitched result.
Mat img_compare;
Mat img_space = Mat::zeros(Size(50, img1.rows), CV_8UC3);
hconcat(img1, img_space, img_compare);
hconcat(img_compare, img2, img_compare);
imshow("Compare images", img_compare);
imshow("Panorama stitching", img_stitch);
waitKey();
}
const char* params
= "{ help h | | print usage }"
"{ image1 | Blender_Suzanne1.jpg | path to the first Blender image }"
"{ image2 | Blender_Suzanne2.jpg | path to the second Blender image }";
}
/**
 * @brief Parses the command line and runs the basic panorama-stitching demo.
 */
int main(int argc, char *argv[])
{
    CommandLineParser parser(argc, argv, params);

    // --help: print usage and leave without doing any work.
    if (parser.has("help"))
    {
        parser.about( "Code for homography tutorial.\n"
                      "Example 5: basic panorama stitching from a rotating camera.\n" );
        parser.printMessage();
        return 0;
    }

    basicPanoramaStitching(parser.get<String>("image1"), parser.get<String>("image2"));
    return 0;
}

View File

@ -0,0 +1,96 @@
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/highgui.hpp>
using namespace std;
using namespace cv;
namespace
{
enum Pattern { CHESSBOARD, CIRCLES_GRID, ASYMMETRIC_CIRCLES_GRID };
/**
 * @brief Produces a pseudo-random BGR color by slicing one RNG draw
 * into three separate bytes.
 */
Scalar randomColor( RNG& rng )
{
    const int bits = (unsigned int) rng; // one draw; implicit RNG -> unsigned conversion
    const int blue = bits & 255;
    const int green = (bits >> 8) & 255;
    const int red = (bits >> 16) & 255;
    return Scalar( blue, green, red );
}
/**
 * @brief Warps the source chessboard view onto the desired view using the
 * homography estimated from corresponding chessboard corners, and draws the
 * corner correspondences.
 *
 * The //! [tag] comments are snippet anchors for the OpenCV tutorial
 * documentation and must be kept as-is.
 *
 * @param rng random generator used to pick a color per correspondence line
 */
void perspectiveCorrection(const string &img1Path, const string &img2Path, const Size &patternSize, RNG &rng)
{
Mat img1 = imread( samples::findFile(img1Path) );
Mat img2 = imread( samples::findFile(img2Path) );
//! [find-corners]
vector<Point2f> corners1, corners2;
bool found1 = findChessboardCorners(img1, patternSize, corners1);
bool found2 = findChessboardCorners(img2, patternSize, corners2);
//! [find-corners]
if (!found1 || !found2)
{
cout << "Error, cannot find the chessboard corners in both images." << endl;
return;
}
//! [estimate-homography]
Mat H = findHomography(corners1, corners2);
cout << "H:\n" << H << endl;
//! [estimate-homography]
//! [warp-chessboard]
Mat img1_warp;
warpPerspective(img1, img1_warp, H, img1.size());
//! [warp-chessboard]
Mat img_draw_warp;
hconcat(img2, img1_warp, img_draw_warp);
imshow("Desired chessboard view / Warped source chessboard view", img_draw_warp);
//! [compute-transformed-corners]
// For each source corner, apply H in homogeneous coordinates and draw a line
// to its projection in the (right-hand) desired image.
Mat img_draw_matches;
hconcat(img1, img2, img_draw_matches);
for (size_t i = 0; i < corners1.size(); i++)
{
Mat pt1 = (Mat_<double>(3,1) << corners1[i].x, corners1[i].y, 1);
Mat pt2 = H * pt1;
pt2 /= pt2.at<double>(2);
// Shift x by img1.cols because img2 sits to the right of img1 in the concat.
Point end( (int) (img1.cols + pt2.at<double>(0)), (int) pt2.at<double>(1) );
line(img_draw_matches, corners1[i], end, randomColor(rng), 2);
}
imshow("Draw matches", img_draw_matches);
waitKey();
//! [compute-transformed-corners]
}
const char* params
= "{ help h | | print usage }"
"{ image1 | left02.jpg | path to the source chessboard image }"
"{ image2 | left01.jpg | path to the desired chessboard image }"
"{ width bw | 9 | chessboard width }"
"{ height bh | 6 | chessboard height }";
}
int main(int argc, char *argv[])
{
cv::RNG rng( 0xFFFFFFFF );
CommandLineParser parser(argc, argv, params);
if (parser.has("help"))
{
parser.about("Code for homography tutorial.\n"
"Example 2: perspective correction.\n");
parser.printMessage();
return 0;
}
Size patternSize(parser.get<int>("width"), parser.get<int>("height"));
perspectiveCorrection(parser.get<String>("image1"),
parser.get<String>("image2"),
patternSize, rng);
return 0;
}

View File

@ -0,0 +1,155 @@
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/highgui.hpp>
using namespace std;
using namespace cv;
namespace
{
enum Pattern { CHESSBOARD, CIRCLES_GRID, ASYMMETRIC_CIRCLES_GRID };
/**
 * @brief Fills 'corners' with the 3D object points of a planar calibration
 * pattern; all points lie on the z = 0 plane.
 * The //! [tag] comments are tutorial snippet anchors; keep as-is.
 */
void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners, Pattern patternType = CHESSBOARD)
{
corners.resize(0);
switch (patternType)
{
case CHESSBOARD:
case CIRCLES_GRID:
//! [compute-chessboard-object-points]
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
corners.push_back(Point3f(float(j*squareSize),
float(i*squareSize), 0));
//! [compute-chessboard-object-points]
break;
case ASYMMETRIC_CIRCLES_GRID:
// Staggered grid: odd rows offset horizontally by one square.
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
corners.push_back(Point3f(float((2*j + i % 2)*squareSize),
float(i*squareSize), 0));
break;
default:
CV_Error(Error::StsBadArg, "Unknown pattern type\n");
}
}
/**
 * @brief Estimates the camera pose from coplanar chessboard points via a
 * homography, then orthonormalizes the rotation with a polar decomposition.
 *
 * The homography between the planar object points and the normalized image
 * points encodes [c1 c2 t]; c3 is recovered as c1 x c2 and the resulting R is
 * projected onto SO(3) with an SVD. The //! [tag] comments are snippet
 * anchors for the OpenCV tutorial documentation and must be kept as-is.
 */
void poseEstimationFromCoplanarPoints(const string &imgPath, const string &intrinsicsPath, const Size &patternSize,
const float squareSize)
{
Mat img = imread( samples::findFile( imgPath) );
Mat img_corners = img.clone(), img_pose = img.clone();
//! [find-chessboard-corners]
vector<Point2f> corners;
bool found = findChessboardCorners(img, patternSize, corners);
//! [find-chessboard-corners]
if (!found)
{
cout << "Cannot find chessboard corners." << endl;
return;
}
drawChessboardCorners(img_corners, patternSize, corners, found);
imshow("Chessboard corners detection", img_corners);
//! [compute-object-points]
// All object points share z = 0, so drop z to get planar 2D points.
vector<Point3f> objectPoints;
calcChessboardCorners(patternSize, squareSize, objectPoints);
vector<Point2f> objectPointsPlanar;
for (size_t i = 0; i < objectPoints.size(); i++)
{
objectPointsPlanar.push_back(Point2f(objectPoints[i].x, objectPoints[i].y));
}
//! [compute-object-points]
//! [load-intrinsics]
FileStorage fs( samples::findFile( intrinsicsPath ), FileStorage::READ);
Mat cameraMatrix, distCoeffs;
fs["camera_matrix"] >> cameraMatrix;
fs["distortion_coefficients"] >> distCoeffs;
//! [load-intrinsics]
//! [compute-image-points]
// Undistort and normalize, so the homography maps plane points to the
// normalized camera frame (intrinsics removed).
vector<Point2f> imagePoints;
undistortPoints(corners, imagePoints, cameraMatrix, distCoeffs);
//! [compute-image-points]
//! [estimate-homography]
Mat H = findHomography(objectPointsPlanar, imagePoints);
cout << "H:\n" << H << endl;
//! [estimate-homography]
//! [pose-from-homography]
// Normalization to ensure that ||c1|| = 1
double norm = sqrt(H.at<double>(0,0)*H.at<double>(0,0) +
H.at<double>(1,0)*H.at<double>(1,0) +
H.at<double>(2,0)*H.at<double>(2,0));
H /= norm;
// Columns of the scaled homography: first two rotation columns and translation.
Mat c1 = H.col(0);
Mat c2 = H.col(1);
Mat c3 = c1.cross(c2);
Mat tvec = H.col(2);
Mat R(3, 3, CV_64F);
for (int i = 0; i < 3; i++)
{
R.at<double>(i,0) = c1.at<double>(i,0);
R.at<double>(i,1) = c2.at<double>(i,0);
R.at<double>(i,2) = c3.at<double>(i,0);
}
//! [pose-from-homography]
//! [polar-decomposition-of-the-rotation-matrix]
// Project R onto the closest true rotation matrix: R = U * V^T from its SVD.
cout << "R (before polar decomposition):\n" << R << "\ndet(R): " << determinant(R) << endl;
Mat W, U, Vt;
SVDecomp(R, W, U, Vt);
R = U*Vt;
cout << "R (after polar decomposition):\n" << R << "\ndet(R): " << determinant(R) << endl;
//! [polar-decomposition-of-the-rotation-matrix]
//! [display-pose]
Mat rvec;
Rodrigues(R, rvec);
drawFrameAxes(img_pose, cameraMatrix, distCoeffs, rvec, tvec, 2*squareSize);
imshow("Pose from coplanar points", img_pose);
waitKey();
//! [display-pose]
}
const char* params
= "{ help h | | print usage }"
"{ image | left04.jpg | path to a chessboard image }"
"{ intrinsics | left_intrinsics.yml | path to camera intrinsics }"
"{ width bw | 9 | chessboard width }"
"{ height bh | 6 | chessboard height }"
"{ square_size | 0.025 | chessboard square size }";
}
int main(int argc, char *argv[])
{
CommandLineParser parser(argc, argv, params);
if (parser.has("help"))
{
parser.about("Code for homography tutorial.\n"
"Example 1: pose from homography with coplanar points.\n");
parser.printMessage();
return 0;
}
Size patternSize(parser.get<int>("width"), parser.get<int>("height"));
float squareSize = (float) parser.get<double>("square_size");
poseEstimationFromCoplanarPoints(parser.get<String>("image"),
parser.get<String>("intrinsics"),
patternSize, squareSize);
return 0;
}

View File

@ -0,0 +1,60 @@
#include <iostream>
#include "opencv2/core.hpp"
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
using std::cout;
using std::endl;
const char* keys =
"{ help h | | Print help message. }"
"{ input1 | box.png | Path to input image 1. }"
"{ input2 | box_in_scene.png | Path to input image 2. }";
int main( int argc, char* argv[] )
{
CommandLineParser parser( argc, argv, keys );
Mat img1 = imread( samples::findFile( parser.get<String>("input1") ), IMREAD_GRAYSCALE );
Mat img2 = imread( samples::findFile( parser.get<String>("input2") ), IMREAD_GRAYSCALE );
if ( img1.empty() || img2.empty() )
{
cout << "Could not open or find the image!\n" << endl;
parser.printMessage();
return -1;
}
//-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
int minHessian = 400;
Ptr<SURF> detector = SURF::create( minHessian );
std::vector<KeyPoint> keypoints1, keypoints2;
Mat descriptors1, descriptors2;
detector->detectAndCompute( img1, noArray(), keypoints1, descriptors1 );
detector->detectAndCompute( img2, noArray(), keypoints2, descriptors2 );
//-- Step 2: Matching descriptor vectors with a brute force matcher
// Since SURF is a floating-point descriptor NORM_L2 is used
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::BRUTEFORCE);
std::vector< DMatch > matches;
matcher->match( descriptors1, descriptors2, matches );
//-- Draw matches
Mat img_matches;
drawMatches( img1, keypoints1, img2, keypoints2, matches, img_matches );
//-- Show detected matches
imshow("Matches", img_matches );
waitKey();
return 0;
}
#else
// Fallback entry point for builds without the xfeatures2d contrib module
// (SURF lives in opencv_contrib, so this sample cannot run without it).
int main()
{
std::cout << "This tutorial code needs the xfeatures2d contrib module to be run." << std::endl;
return 0;
}
#endif

View File

@ -0,0 +1,46 @@
#include <iostream>
#include "opencv2/core.hpp"
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
using std::cout;
using std::endl;
int main( int argc, char* argv[] )
{
CommandLineParser parser( argc, argv, "{@input | box.png | input image}" );
Mat src = imread( samples::findFile( parser.get<String>( "@input" ) ), IMREAD_GRAYSCALE );
if ( src.empty() )
{
cout << "Could not open or find the image!\n" << endl;
cout << "Usage: " << argv[0] << " <Input image>" << endl;
return -1;
}
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
Ptr<SURF> detector = SURF::create( minHessian );
std::vector<KeyPoint> keypoints;
detector->detect( src, keypoints );
//-- Draw keypoints
Mat img_keypoints;
drawKeypoints( src, keypoints, img_keypoints );
//-- Show detected (drawn) keypoints
imshow("SURF Keypoints", img_keypoints );
waitKey();
return 0;
}
#else
// Fallback entry point for builds without the xfeatures2d contrib module
// (SURF lives in opencv_contrib, so this sample cannot run without it).
int main()
{
std::cout << "This tutorial code needs the xfeatures2d contrib module to be run." << std::endl;
return 0;
}
#endif

View File

@ -0,0 +1,72 @@
#include <iostream>
#include "opencv2/core.hpp"
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
using std::cout;
using std::endl;
const char* keys =
"{ help h | | Print help message. }"
"{ input1 | box.png | Path to input image 1. }"
"{ input2 | box_in_scene.png | Path to input image 2. }";
int main( int argc, char* argv[] )
{
CommandLineParser parser( argc, argv, keys );
Mat img1 = imread( samples::findFile( parser.get<String>("input1") ), IMREAD_GRAYSCALE );
Mat img2 = imread( samples::findFile( parser.get<String>("input2") ), IMREAD_GRAYSCALE );
if ( img1.empty() || img2.empty() )
{
cout << "Could not open or find the image!\n" << endl;
parser.printMessage();
return -1;
}
//-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
int minHessian = 400;
Ptr<SURF> detector = SURF::create( minHessian );
std::vector<KeyPoint> keypoints1, keypoints2;
Mat descriptors1, descriptors2;
detector->detectAndCompute( img1, noArray(), keypoints1, descriptors1 );
detector->detectAndCompute( img2, noArray(), keypoints2, descriptors2 );
//-- Step 2: Matching descriptor vectors with a FLANN based matcher
// Since SURF is a floating-point descriptor NORM_L2 is used
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
std::vector< std::vector<DMatch> > knn_matches;
matcher->knnMatch( descriptors1, descriptors2, knn_matches, 2 );
//-- Filter matches using the Lowe's ratio test
const float ratio_thresh = 0.7f;
std::vector<DMatch> good_matches;
for (size_t i = 0; i < knn_matches.size(); i++)
{
if (knn_matches[i][0].distance < ratio_thresh * knn_matches[i][1].distance)
{
good_matches.push_back(knn_matches[i][0]);
}
}
//-- Draw matches
Mat img_matches;
drawMatches( img1, keypoints1, img2, keypoints2, good_matches, img_matches, Scalar::all(-1),
Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
imshow("Good Matches", img_matches );
waitKey();
return 0;
}
#else
// Fallback entry point for builds without the xfeatures2d contrib module
// (SURF lives in opencv_contrib, so this sample cannot run without it).
int main()
{
std::cout << "This tutorial code needs the xfeatures2d contrib module to be run." << std::endl;
return 0;
}
#endif

View File

@ -0,0 +1,107 @@
#include <iostream>
#include "opencv2/core.hpp"
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/calib3d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
using std::cout;
using std::endl;
// Command-line options: an optional help flag plus the paths of the object
// image (input1) and the scene image (input2); paths are resolved in main()
// via samples::findFile, so the bundled OpenCV sample images are found.
const char* keys =
    "{ help h | | Print help message. }"
    "{ input1 | box.png | Path to input image 1. }"
    "{ input2 | box_in_scene.png | Path to input image 2. }";
/**
 * @function main
 * @brief Finds a known planar object inside a scene image: SURF keypoint
 *        detection, FLANN-based descriptor matching, Lowe's ratio filtering,
 *        RANSAC homography estimation, and drawing of the projected object
 *        outline over the combined match image.
 */
int main( int argc, char* argv[] )
{
    CommandLineParser parser( argc, argv, keys );
    // Both images are loaded as single-channel grayscale for SURF.
    Mat img_object = imread( samples::findFile( parser.get<String>("input1") ), IMREAD_GRAYSCALE );
    Mat img_scene = imread( samples::findFile( parser.get<String>("input2") ), IMREAD_GRAYSCALE );
    if ( img_object.empty() || img_scene.empty() )
    {
        cout << "Could not open or find the image!\n" << endl;
        parser.printMessage();
        return -1;
    }

    //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
    int minHessian = 400;  // SURF Hessian response threshold for keypoint detection
    Ptr<SURF> detector = SURF::create( minHessian );
    std::vector<KeyPoint> keypoints_object, keypoints_scene;
    Mat descriptors_object, descriptors_scene;
    detector->detectAndCompute( img_object, noArray(), keypoints_object, descriptors_object );
    detector->detectAndCompute( img_scene, noArray(), keypoints_scene, descriptors_scene );

    //-- Step 2: Matching descriptor vectors with a FLANN based matcher
    // Since SURF is a floating-point descriptor NORM_L2 is used.
    // Request the 2 nearest neighbours per query descriptor for the ratio test.
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
    std::vector< std::vector<DMatch> > knn_matches;
    matcher->knnMatch( descriptors_object, descriptors_scene, knn_matches, 2 );

    //-- Filter matches using the Lowe's ratio test: accept a match only when
    //   the best neighbour is clearly closer than the second best.
    const float ratio_thresh = 0.75f;
    std::vector<DMatch> good_matches;
    for (size_t i = 0; i < knn_matches.size(); i++)
    {
        if (knn_matches[i][0].distance < ratio_thresh * knn_matches[i][1].distance)
        {
            good_matches.push_back(knn_matches[i][0]);
        }
    }

    //-- Draw matches (object image on the left, scene image on the right)
    Mat img_matches;
    drawMatches( img_object, keypoints_object, img_scene, keypoints_scene, good_matches, img_matches, Scalar::all(-1),
                 Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    //-- Localize the object: collect the matched point pairs in both images
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    for( size_t i = 0; i < good_matches.size(); i++ )
    {
        //-- Get the keypoints from the good matches
        obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }
    // RANSAC rejects remaining outlier correspondences while estimating
    // the object-to-scene perspective transform.
    Mat H = findHomography( obj, scene, RANSAC );

    //-- Get the corners from the image_1 ( the object to be "detected" )
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = Point2f(0, 0);
    obj_corners[1] = Point2f( (float)img_object.cols, 0 );
    obj_corners[2] = Point2f( (float)img_object.cols, (float)img_object.rows );
    obj_corners[3] = Point2f( 0, (float)img_object.rows );
    std::vector<Point2f> scene_corners(4);
    // Map the object's corners into scene coordinates through the homography.
    perspectiveTransform( obj_corners, scene_corners, H);

    //-- Draw lines between the corners (the mapped object in the scene - image_2 ).
    // The x-offset of img_object.cols shifts the outline onto the scene half of
    // img_matches, since the scene image sits to the right of the object image.
    line( img_matches, scene_corners[0] + Point2f((float)img_object.cols, 0),
          scene_corners[1] + Point2f((float)img_object.cols, 0), Scalar(0, 255, 0), 4 );
    line( img_matches, scene_corners[1] + Point2f((float)img_object.cols, 0),
          scene_corners[2] + Point2f((float)img_object.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[2] + Point2f((float)img_object.cols, 0),
          scene_corners[3] + Point2f((float)img_object.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[3] + Point2f((float)img_object.cols, 0),
          scene_corners[0] + Point2f((float)img_object.cols, 0), Scalar( 0, 255, 0), 4 );

    //-- Show detected matches
    imshow("Good Matches & Object detection", img_matches );
    waitKey();
    return 0;
}
#else
/**
 * @function main
 * @brief Fallback entry point used when OpenCV was built without the
 *        xfeatures2d contrib module (SURF unavailable).
 */
int main()
{
    std::cout << "This tutorial code needs the xfeatures2d contrib module to be run.";
    std::cout << std::endl;
    return 0;
}
#endif

Some files were not shown because too many files have changed in this diff Show More