feat: 切换后端至PaddleOCR-NCNN,切换工程为CMake
1.项目后端整体迁移至PaddleOCR-NCNN算法,已通过基本的兼容性测试 2.工程改为使用CMake组织,后续为了更好地兼容第三方库,不再提供QMake工程 3.重整权利声明文件,重整代码工程,确保最小化侵权风险 Log: 切换后端至PaddleOCR-NCNN,切换工程为CMake Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
This commit is contained in:
74
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/BasicLinearTransforms.cpp
vendored
Normal file
74
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/BasicLinearTransforms.cpp
vendored
Normal file
@ -0,0 +1,74 @@
|
||||
/**
|
||||
* @file BasicLinearTransforms.cpp
|
||||
* @brief Simple program to change contrast and brightness
|
||||
* @author OpenCV team
|
||||
*/
|
||||
|
||||
#include "opencv2/imgcodecs.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include <iostream>
|
||||
|
||||
// we're NOT "using namespace std;" here, to avoid collisions between the beta variable and std::beta in c++17
|
||||
using std::cin;
|
||||
using std::cout;
|
||||
using std::endl;
|
||||
using namespace cv;
|
||||
|
||||
/**
|
||||
* @function main
|
||||
* @brief Main function
|
||||
*/
|
||||
int main( int argc, char** argv )
|
||||
{
|
||||
/// Read image given by user
|
||||
//! [basic-linear-transform-load]
|
||||
CommandLineParser parser( argc, argv, "{@input | lena.jpg | input image}" );
|
||||
Mat image = imread( samples::findFile( parser.get<String>( "@input" ) ) );
|
||||
if( image.empty() )
|
||||
{
|
||||
cout << "Could not open or find the image!\n" << endl;
|
||||
cout << "Usage: " << argv[0] << " <Input image>" << endl;
|
||||
return -1;
|
||||
}
|
||||
//! [basic-linear-transform-load]
|
||||
|
||||
//! [basic-linear-transform-output]
|
||||
Mat new_image = Mat::zeros( image.size(), image.type() );
|
||||
//! [basic-linear-transform-output]
|
||||
|
||||
//! [basic-linear-transform-parameters]
|
||||
double alpha = 1.0; /*< Simple contrast control */
|
||||
int beta = 0; /*< Simple brightness control */
|
||||
|
||||
/// Initialize values
|
||||
cout << " Basic Linear Transforms " << endl;
|
||||
cout << "-------------------------" << endl;
|
||||
cout << "* Enter the alpha value [1.0-3.0]: "; cin >> alpha;
|
||||
cout << "* Enter the beta value [0-100]: "; cin >> beta;
|
||||
//! [basic-linear-transform-parameters]
|
||||
|
||||
/// Do the operation new_image(i,j) = alpha*image(i,j) + beta
|
||||
/// Instead of these 'for' loops we could have used simply:
|
||||
/// image.convertTo(new_image, -1, alpha, beta);
|
||||
/// but we wanted to show you how to access the pixels :)
|
||||
//! [basic-linear-transform-operation]
|
||||
for( int y = 0; y < image.rows; y++ ) {
|
||||
for( int x = 0; x < image.cols; x++ ) {
|
||||
for( int c = 0; c < image.channels(); c++ ) {
|
||||
new_image.at<Vec3b>(y,x)[c] =
|
||||
saturate_cast<uchar>( alpha*image.at<Vec3b>(y,x)[c] + beta );
|
||||
}
|
||||
}
|
||||
}
|
||||
//! [basic-linear-transform-operation]
|
||||
|
||||
//! [basic-linear-transform-display]
|
||||
/// Show stuff
|
||||
imshow("Original Image", image);
|
||||
imshow("New Image", new_image);
|
||||
|
||||
/// Wait until the user press a key
|
||||
waitKey();
|
||||
//! [basic-linear-transform-display]
|
||||
return 0;
|
||||
}
|
44
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/HitMiss/HitMiss.cpp
vendored
Normal file
44
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/HitMiss/HitMiss.cpp
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
#include <opencv2/core.hpp>
|
||||
#include <opencv2/imgproc.hpp>
|
||||
#include <opencv2/highgui.hpp>
|
||||
|
||||
using namespace cv;
|
||||
|
||||
int main(){
|
||||
Mat input_image = (Mat_<uchar>(8, 8) <<
|
||||
0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 255, 255, 255, 0, 0, 0, 255,
|
||||
0, 255, 255, 255, 0, 0, 0, 0,
|
||||
0, 255, 255, 255, 0, 255, 0, 0,
|
||||
0, 0, 255, 0, 0, 0, 0, 0,
|
||||
0, 0, 255, 0, 0, 255, 255, 0,
|
||||
0, 255, 0, 255, 0, 0, 255, 0,
|
||||
0, 255, 255, 255, 0, 0, 0, 0);
|
||||
|
||||
Mat kernel = (Mat_<int>(3, 3) <<
|
||||
0, 1, 0,
|
||||
1, -1, 1,
|
||||
0, 1, 0);
|
||||
|
||||
Mat output_image;
|
||||
morphologyEx(input_image, output_image, MORPH_HITMISS, kernel);
|
||||
|
||||
const int rate = 50;
|
||||
kernel = (kernel + 1) * 127;
|
||||
kernel.convertTo(kernel, CV_8U);
|
||||
|
||||
resize(kernel, kernel, Size(), rate, rate, INTER_NEAREST);
|
||||
imshow("kernel", kernel);
|
||||
moveWindow("kernel", 0, 0);
|
||||
|
||||
resize(input_image, input_image, Size(), rate, rate, INTER_NEAREST);
|
||||
imshow("Original", input_image);
|
||||
moveWindow("Original", 0, 200);
|
||||
|
||||
resize(output_image, output_image, Size(), rate, rate, INTER_NEAREST);
|
||||
imshow("Hit or Miss", output_image);
|
||||
moveWindow("Hit or Miss", 500, 200);
|
||||
|
||||
waitKey(0);
|
||||
return 0;
|
||||
}
|
118
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp
vendored
Normal file
118
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp
vendored
Normal file
@ -0,0 +1,118 @@
|
||||
/**
|
||||
* @file Morphology_1.cpp
|
||||
* @brief Erosion and Dilation sample code
|
||||
* @author OpenCV team
|
||||
*/
|
||||
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include <iostream>
|
||||
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
|
||||
/// Global variables
|
||||
Mat src, erosion_dst, dilation_dst;
|
||||
|
||||
int erosion_elem = 0;
|
||||
int erosion_size = 0;
|
||||
int dilation_elem = 0;
|
||||
int dilation_size = 0;
|
||||
int const max_elem = 2;
|
||||
int const max_kernel_size = 21;
|
||||
|
||||
/** Function Headers */
|
||||
void Erosion( int, void* );
|
||||
void Dilation( int, void* );
|
||||
|
||||
//![main]
|
||||
/**
|
||||
* @function main
|
||||
*/
|
||||
int main( int argc, char** argv )
|
||||
{
|
||||
/// Load an image
|
||||
CommandLineParser parser( argc, argv, "{@input | LinuxLogo.jpg | input image}" );
|
||||
src = imread( samples::findFile( parser.get<String>( "@input" ) ), IMREAD_COLOR );
|
||||
if( src.empty() )
|
||||
{
|
||||
cout << "Could not open or find the image!\n" << endl;
|
||||
cout << "Usage: " << argv[0] << " <Input image>" << endl;
|
||||
return -1;
|
||||
}
|
||||
|
||||
/// Create windows
|
||||
namedWindow( "Erosion Demo", WINDOW_AUTOSIZE );
|
||||
namedWindow( "Dilation Demo", WINDOW_AUTOSIZE );
|
||||
moveWindow( "Dilation Demo", src.cols, 0 );
|
||||
|
||||
/// Create Erosion Trackbar
|
||||
createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Erosion Demo",
|
||||
&erosion_elem, max_elem,
|
||||
Erosion );
|
||||
|
||||
createTrackbar( "Kernel size:\n 2n +1", "Erosion Demo",
|
||||
&erosion_size, max_kernel_size,
|
||||
Erosion );
|
||||
|
||||
/// Create Dilation Trackbar
|
||||
createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Dilation Demo",
|
||||
&dilation_elem, max_elem,
|
||||
Dilation );
|
||||
|
||||
createTrackbar( "Kernel size:\n 2n +1", "Dilation Demo",
|
||||
&dilation_size, max_kernel_size,
|
||||
Dilation );
|
||||
|
||||
/// Default start
|
||||
Erosion( 0, 0 );
|
||||
Dilation( 0, 0 );
|
||||
|
||||
waitKey(0);
|
||||
return 0;
|
||||
}
|
||||
//![main]
|
||||
|
||||
//![erosion]
|
||||
/**
|
||||
* @function Erosion
|
||||
*/
|
||||
void Erosion( int, void* )
|
||||
{
|
||||
int erosion_type = 0;
|
||||
if( erosion_elem == 0 ){ erosion_type = MORPH_RECT; }
|
||||
else if( erosion_elem == 1 ){ erosion_type = MORPH_CROSS; }
|
||||
else if( erosion_elem == 2) { erosion_type = MORPH_ELLIPSE; }
|
||||
|
||||
//![kernel]
|
||||
Mat element = getStructuringElement( erosion_type,
|
||||
Size( 2*erosion_size + 1, 2*erosion_size+1 ),
|
||||
Point( erosion_size, erosion_size ) );
|
||||
//![kernel]
|
||||
|
||||
/// Apply the erosion operation
|
||||
erode( src, erosion_dst, element );
|
||||
imshow( "Erosion Demo", erosion_dst );
|
||||
}
|
||||
//![erosion]
|
||||
|
||||
//![dilation]
|
||||
/**
|
||||
* @function Dilation
|
||||
*/
|
||||
void Dilation( int, void* )
|
||||
{
|
||||
int dilation_type = 0;
|
||||
if( dilation_elem == 0 ){ dilation_type = MORPH_RECT; }
|
||||
else if( dilation_elem == 1 ){ dilation_type = MORPH_CROSS; }
|
||||
else if( dilation_elem == 2) { dilation_type = MORPH_ELLIPSE; }
|
||||
|
||||
Mat element = getStructuringElement( dilation_type,
|
||||
Size( 2*dilation_size + 1, 2*dilation_size+1 ),
|
||||
Point( dilation_size, dilation_size ) );
|
||||
|
||||
/// Apply the dilation operation
|
||||
dilate( src, dilation_dst, element );
|
||||
imshow( "Dilation Demo", dilation_dst );
|
||||
}
|
||||
//![dilation]
|
93
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp
vendored
Normal file
93
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp
vendored
Normal file
@ -0,0 +1,93 @@
|
||||
/**
|
||||
* @file Morphology_2.cpp
|
||||
* @brief Advanced morphology Transformations sample code
|
||||
* @author OpenCV team
|
||||
*/
|
||||
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/imgcodecs.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include <iostream>
|
||||
|
||||
using namespace cv;
|
||||
|
||||
/// Global variables
|
||||
Mat src, dst;
|
||||
|
||||
int morph_elem = 0;
|
||||
int morph_size = 0;
|
||||
int morph_operator = 0;
|
||||
int const max_operator = 4;
|
||||
int const max_elem = 2;
|
||||
int const max_kernel_size = 21;
|
||||
|
||||
const char* window_name = "Morphology Transformations Demo";
|
||||
|
||||
|
||||
/** Function Headers */
|
||||
void Morphology_Operations( int, void* );
|
||||
|
||||
/**
|
||||
* @function main
|
||||
*/
|
||||
int main( int argc, char** argv )
|
||||
{
|
||||
//![load]
|
||||
CommandLineParser parser( argc, argv, "{@input | baboon.jpg | input image}" );
|
||||
src = imread( samples::findFile( parser.get<String>( "@input" ) ), IMREAD_COLOR );
|
||||
if (src.empty())
|
||||
{
|
||||
std::cout << "Could not open or find the image!\n" << std::endl;
|
||||
std::cout << "Usage: " << argv[0] << " <Input image>" << std::endl;
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
//![load]
|
||||
|
||||
//![window]
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE ); // Create window
|
||||
//![window]
|
||||
|
||||
//![create_trackbar1]
|
||||
/// Create Trackbar to select Morphology operation
|
||||
createTrackbar("Operator:\n 0: Opening - 1: Closing \n 2: Gradient - 3: Top Hat \n 4: Black Hat", window_name, &morph_operator, max_operator, Morphology_Operations );
|
||||
//![create_trackbar1]
|
||||
|
||||
//![create_trackbar2]
|
||||
/// Create Trackbar to select kernel type
|
||||
createTrackbar( "Element:\n 0: Rect - 1: Cross - 2: Ellipse", window_name,
|
||||
&morph_elem, max_elem,
|
||||
Morphology_Operations );
|
||||
//![create_trackbar2]
|
||||
|
||||
//![create_trackbar3]
|
||||
/// Create Trackbar to choose kernel size
|
||||
createTrackbar( "Kernel size:\n 2n +1", window_name,
|
||||
&morph_size, max_kernel_size,
|
||||
Morphology_Operations );
|
||||
//![create_trackbar3]
|
||||
|
||||
/// Default start
|
||||
Morphology_Operations( 0, 0 );
|
||||
|
||||
waitKey(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
//![morphology_operations]
|
||||
/**
|
||||
* @function Morphology_Operations
|
||||
*/
|
||||
void Morphology_Operations( int, void* )
|
||||
{
|
||||
// Since MORPH_X : 2,3,4,5 and 6
|
||||
//![operation]
|
||||
int operation = morph_operator + 2;
|
||||
//![operation]
|
||||
|
||||
Mat element = getStructuringElement( morph_elem, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );
|
||||
|
||||
/// Apply the specified morphology operation
|
||||
morphologyEx( src, dst, operation, element );
|
||||
imshow( window_name, dst );
|
||||
}
|
||||
//![morphology_operations]
|
69
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/Pyramids/Pyramids.cpp
vendored
Normal file
69
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/Pyramids/Pyramids.cpp
vendored
Normal file
@ -0,0 +1,69 @@
|
||||
/**
|
||||
* @file Pyramids.cpp
|
||||
* @brief Sample code of image pyramids (pyrDown and pyrUp)
|
||||
* @author OpenCV team
|
||||
*/
|
||||
|
||||
#include "iostream"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/imgcodecs.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
const char* window_name = "Pyramids Demo";
|
||||
|
||||
/**
|
||||
* @function main
|
||||
*/
|
||||
int main( int argc, char** argv )
|
||||
{
|
||||
/// General instructions
|
||||
cout << "\n Zoom In-Out demo \n "
|
||||
"------------------ \n"
|
||||
" * [i] -> Zoom in \n"
|
||||
" * [o] -> Zoom out \n"
|
||||
" * [ESC] -> Close program \n" << endl;
|
||||
|
||||
//![load]
|
||||
const char* filename = argc >=2 ? argv[1] : "chicky_512.png";
|
||||
|
||||
// Loads an image
|
||||
Mat src = imread( samples::findFile( filename ) );
|
||||
|
||||
// Check if image is loaded fine
|
||||
if(src.empty()){
|
||||
printf(" Error opening image\n");
|
||||
printf(" Program Arguments: [image_name -- default chicky_512.png] \n");
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
//![load]
|
||||
|
||||
//![loop]
|
||||
for(;;)
|
||||
{
|
||||
//![show_image]
|
||||
imshow( window_name, src );
|
||||
//![show_image]
|
||||
char c = (char)waitKey(0);
|
||||
|
||||
if( c == 27 )
|
||||
{ break; }
|
||||
//![pyrup]
|
||||
else if( c == 'i' )
|
||||
{ pyrUp( src, src, Size( src.cols*2, src.rows*2 ) );
|
||||
printf( "** Zoom In: Image x 2 \n" );
|
||||
}
|
||||
//![pyrup]
|
||||
//![pyrdown]
|
||||
else if( c == 'o' )
|
||||
{ pyrDown( src, src, Size( src.cols/2, src.rows/2 ) );
|
||||
printf( "** Zoom Out: Image / 2 \n" );
|
||||
}
|
||||
//![pyrdown]
|
||||
}
|
||||
//![loop]
|
||||
|
||||
return EXIT_SUCCESS;
|
||||
}
|
153
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/Smoothing/Smoothing.cpp
vendored
Normal file
153
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/Smoothing/Smoothing.cpp
vendored
Normal file
@ -0,0 +1,153 @@
|
||||
/**
|
||||
* file Smoothing.cpp
|
||||
* brief Sample code for simple filters
|
||||
* author OpenCV team
|
||||
*/
|
||||
|
||||
#include <iostream>
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/imgcodecs.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
/// Global Variables
|
||||
int DELAY_CAPTION = 1500;
|
||||
int DELAY_BLUR = 100;
|
||||
int MAX_KERNEL_LENGTH = 31;
|
||||
|
||||
Mat src; Mat dst;
|
||||
char window_name[] = "Smoothing Demo";
|
||||
|
||||
/// Function headers
|
||||
int display_caption( const char* caption );
|
||||
int display_dst( int delay );
|
||||
|
||||
|
||||
/**
|
||||
* function main
|
||||
*/
|
||||
int main( int argc, char ** argv )
|
||||
{
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE );
|
||||
|
||||
/// Load the source image
|
||||
const char* filename = argc >=2 ? argv[1] : "lena.jpg";
|
||||
|
||||
src = imread( samples::findFile( filename ), IMREAD_COLOR );
|
||||
if (src.empty())
|
||||
{
|
||||
printf(" Error opening image\n");
|
||||
printf(" Usage:\n %s [image_name-- default lena.jpg] \n", argv[0]);
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
if( display_caption( "Original Image" ) != 0 )
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
dst = src.clone();
|
||||
if( display_dst( DELAY_CAPTION ) != 0 )
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// Applying Homogeneous blur
|
||||
if( display_caption( "Homogeneous Blur" ) != 0 )
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
//![blur]
|
||||
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
|
||||
{
|
||||
blur( src, dst, Size( i, i ), Point(-1,-1) );
|
||||
if( display_dst( DELAY_BLUR ) != 0 )
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
//![blur]
|
||||
|
||||
/// Applying Gaussian blur
|
||||
if( display_caption( "Gaussian Blur" ) != 0 )
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
//![gaussianblur]
|
||||
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
|
||||
{
|
||||
GaussianBlur( src, dst, Size( i, i ), 0, 0 );
|
||||
if( display_dst( DELAY_BLUR ) != 0 )
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
//![gaussianblur]
|
||||
|
||||
/// Applying Median blur
|
||||
if( display_caption( "Median Blur" ) != 0 )
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
//![medianblur]
|
||||
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
|
||||
{
|
||||
medianBlur ( src, dst, i );
|
||||
if( display_dst( DELAY_BLUR ) != 0 )
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
//![medianblur]
|
||||
|
||||
/// Applying Bilateral Filter
|
||||
if( display_caption( "Bilateral Blur" ) != 0 )
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
//![bilateralfilter]
|
||||
for ( int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
|
||||
{
|
||||
bilateralFilter ( src, dst, i, i*2, i/2 );
|
||||
if( display_dst( DELAY_BLUR ) != 0 )
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
//![bilateralfilter]
|
||||
|
||||
/// Done
|
||||
display_caption( "Done!" );
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* @function display_caption
|
||||
*/
|
||||
int display_caption( const char* caption )
|
||||
{
|
||||
dst = Mat::zeros( src.size(), src.type() );
|
||||
putText( dst, caption,
|
||||
Point( src.cols/4, src.rows/2),
|
||||
FONT_HERSHEY_COMPLEX, 1, Scalar(255, 255, 255) );
|
||||
|
||||
return display_dst(DELAY_CAPTION);
|
||||
}
|
||||
|
||||
/**
|
||||
* @function display_dst
|
||||
*/
|
||||
int display_dst( int delay )
|
||||
{
|
||||
imshow( window_name, dst );
|
||||
int c = waitKey ( delay );
|
||||
if( c >= 0 ) { return -1; }
|
||||
return 0;
|
||||
}
|
87
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/Threshold.cpp
vendored
Normal file
87
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/Threshold.cpp
vendored
Normal file
@ -0,0 +1,87 @@
|
||||
/**
|
||||
* @file Threshold.cpp
|
||||
* @brief Sample code that shows how to use the diverse threshold options offered by OpenCV
|
||||
* @author OpenCV team
|
||||
*/
|
||||
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/imgcodecs.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include <iostream>
|
||||
|
||||
using namespace cv;
|
||||
using std::cout;
|
||||
|
||||
/// Global variables
|
||||
|
||||
int threshold_value = 0;
|
||||
int threshold_type = 3;
|
||||
int const max_value = 255;
|
||||
int const max_type = 4;
|
||||
int const max_binary_value = 255;
|
||||
|
||||
Mat src, src_gray, dst;
|
||||
const char* window_name = "Threshold Demo";
|
||||
|
||||
const char* trackbar_type = "Type: \n 0: Binary \n 1: Binary Inverted \n 2: Truncate \n 3: To Zero \n 4: To Zero Inverted";
|
||||
const char* trackbar_value = "Value";
|
||||
|
||||
//![Threshold_Demo]
|
||||
/**
|
||||
* @function Threshold_Demo
|
||||
*/
|
||||
static void Threshold_Demo( int, void* )
|
||||
{
|
||||
/* 0: Binary
|
||||
1: Binary Inverted
|
||||
2: Threshold Truncated
|
||||
3: Threshold to Zero
|
||||
4: Threshold to Zero Inverted
|
||||
*/
|
||||
threshold( src_gray, dst, threshold_value, max_binary_value, threshold_type );
|
||||
imshow( window_name, dst );
|
||||
}
|
||||
//![Threshold_Demo]
|
||||
|
||||
/**
|
||||
* @function main
|
||||
*/
|
||||
int main( int argc, char** argv )
|
||||
{
|
||||
//! [load]
|
||||
String imageName("stuff.jpg"); // by default
|
||||
if (argc > 1)
|
||||
{
|
||||
imageName = argv[1];
|
||||
}
|
||||
src = imread( samples::findFile( imageName ), IMREAD_COLOR ); // Load an image
|
||||
|
||||
if (src.empty())
|
||||
{
|
||||
cout << "Cannot read the image: " << imageName << std::endl;
|
||||
return -1;
|
||||
}
|
||||
|
||||
cvtColor( src, src_gray, COLOR_BGR2GRAY ); // Convert the image to Gray
|
||||
//! [load]
|
||||
|
||||
//! [window]
|
||||
namedWindow( window_name, WINDOW_AUTOSIZE ); // Create a window to display results
|
||||
//! [window]
|
||||
|
||||
//! [trackbar]
|
||||
createTrackbar( trackbar_type,
|
||||
window_name, &threshold_type,
|
||||
max_type, Threshold_Demo ); // Create a Trackbar to choose type of Threshold
|
||||
|
||||
createTrackbar( trackbar_value,
|
||||
window_name, &threshold_value,
|
||||
max_value, Threshold_Demo ); // Create a Trackbar to choose Threshold value
|
||||
//! [trackbar]
|
||||
|
||||
Threshold_Demo( 0, 0 ); // Call the function to initialize
|
||||
|
||||
/// Wait until the user finishes the program
|
||||
waitKey();
|
||||
return 0;
|
||||
}
|
105
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/Threshold_inRange.cpp
vendored
Normal file
105
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/Threshold_inRange.cpp
vendored
Normal file
@ -0,0 +1,105 @@
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include "opencv2/videoio.hpp"
|
||||
#include <iostream>
|
||||
|
||||
using namespace cv;
|
||||
|
||||
/** Global Variables */
|
||||
const int max_value_H = 360/2;
|
||||
const int max_value = 255;
|
||||
const String window_capture_name = "Video Capture";
|
||||
const String window_detection_name = "Object Detection";
|
||||
int low_H = 0, low_S = 0, low_V = 0;
|
||||
int high_H = max_value_H, high_S = max_value, high_V = max_value;
|
||||
|
||||
//! [low]
|
||||
static void on_low_H_thresh_trackbar(int, void *)
|
||||
{
|
||||
low_H = min(high_H-1, low_H);
|
||||
setTrackbarPos("Low H", window_detection_name, low_H);
|
||||
}
|
||||
//! [low]
|
||||
|
||||
//! [high]
|
||||
static void on_high_H_thresh_trackbar(int, void *)
|
||||
{
|
||||
high_H = max(high_H, low_H+1);
|
||||
setTrackbarPos("High H", window_detection_name, high_H);
|
||||
}
|
||||
|
||||
//! [high]
|
||||
static void on_low_S_thresh_trackbar(int, void *)
|
||||
{
|
||||
low_S = min(high_S-1, low_S);
|
||||
setTrackbarPos("Low S", window_detection_name, low_S);
|
||||
}
|
||||
|
||||
static void on_high_S_thresh_trackbar(int, void *)
|
||||
{
|
||||
high_S = max(high_S, low_S+1);
|
||||
setTrackbarPos("High S", window_detection_name, high_S);
|
||||
}
|
||||
|
||||
static void on_low_V_thresh_trackbar(int, void *)
|
||||
{
|
||||
low_V = min(high_V-1, low_V);
|
||||
setTrackbarPos("Low V", window_detection_name, low_V);
|
||||
}
|
||||
|
||||
static void on_high_V_thresh_trackbar(int, void *)
|
||||
{
|
||||
high_V = max(high_V, low_V+1);
|
||||
setTrackbarPos("High V", window_detection_name, high_V);
|
||||
}
|
||||
|
||||
int main(int argc, char* argv[])
|
||||
{
|
||||
//! [cap]
|
||||
VideoCapture cap(argc > 1 ? atoi(argv[1]) : 0);
|
||||
//! [cap]
|
||||
|
||||
//! [window]
|
||||
namedWindow(window_capture_name);
|
||||
namedWindow(window_detection_name);
|
||||
//! [window]
|
||||
|
||||
//! [trackbar]
|
||||
// Trackbars to set thresholds for HSV values
|
||||
createTrackbar("Low H", window_detection_name, &low_H, max_value_H, on_low_H_thresh_trackbar);
|
||||
createTrackbar("High H", window_detection_name, &high_H, max_value_H, on_high_H_thresh_trackbar);
|
||||
createTrackbar("Low S", window_detection_name, &low_S, max_value, on_low_S_thresh_trackbar);
|
||||
createTrackbar("High S", window_detection_name, &high_S, max_value, on_high_S_thresh_trackbar);
|
||||
createTrackbar("Low V", window_detection_name, &low_V, max_value, on_low_V_thresh_trackbar);
|
||||
createTrackbar("High V", window_detection_name, &high_V, max_value, on_high_V_thresh_trackbar);
|
||||
//! [trackbar]
|
||||
|
||||
Mat frame, frame_HSV, frame_threshold;
|
||||
while (true) {
|
||||
//! [while]
|
||||
cap >> frame;
|
||||
if(frame.empty())
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
// Convert from BGR to HSV colorspace
|
||||
cvtColor(frame, frame_HSV, COLOR_BGR2HSV);
|
||||
// Detect the object based on HSV Range Values
|
||||
inRange(frame_HSV, Scalar(low_H, low_S, low_V), Scalar(high_H, high_S, high_V), frame_threshold);
|
||||
//! [while]
|
||||
|
||||
//! [show]
|
||||
// Show the frames
|
||||
imshow(window_capture_name, frame);
|
||||
imshow(window_detection_name, frame_threshold);
|
||||
//! [show]
|
||||
|
||||
char key = (char) waitKey(30);
|
||||
if (key == 'q' || key == 27)
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
@ -0,0 +1,113 @@
|
||||
/**
|
||||
* @brief You will learn how to segment an anisotropic image with a single local orientation by a gradient structure tensor (GST)
|
||||
* @author Karpushin Vladislav, karpushin@ngs.ru, https://github.com/VladKarpushin
|
||||
*/
|
||||
|
||||
#include <iostream>
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/imgcodecs.hpp"
|
||||
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
|
||||
//! [calcGST_proto]
|
||||
void calcGST(const Mat& inputImg, Mat& imgCoherencyOut, Mat& imgOrientationOut, int w);
|
||||
//! [calcGST_proto]
|
||||
|
||||
int main()
|
||||
{
|
||||
int W = 52; // window size is WxW
|
||||
double C_Thr = 0.43; // threshold for coherency
|
||||
int LowThr = 35; // threshold1 for orientation, it ranges from 0 to 180
|
||||
int HighThr = 57; // threshold2 for orientation, it ranges from 0 to 180
|
||||
|
||||
Mat imgIn = imread("input.jpg", IMREAD_GRAYSCALE);
|
||||
if (imgIn.empty()) //check whether the image is loaded or not
|
||||
{
|
||||
cout << "ERROR : Image cannot be loaded..!!" << endl;
|
||||
return -1;
|
||||
}
|
||||
|
||||
//! [main_extra]
|
||||
//! [main]
|
||||
Mat imgCoherency, imgOrientation;
|
||||
calcGST(imgIn, imgCoherency, imgOrientation, W);
|
||||
|
||||
//! [thresholding]
|
||||
Mat imgCoherencyBin;
|
||||
imgCoherencyBin = imgCoherency > C_Thr;
|
||||
Mat imgOrientationBin;
|
||||
inRange(imgOrientation, Scalar(LowThr), Scalar(HighThr), imgOrientationBin);
|
||||
//! [thresholding]
|
||||
|
||||
//! [combining]
|
||||
Mat imgBin;
|
||||
imgBin = imgCoherencyBin & imgOrientationBin;
|
||||
//! [combining]
|
||||
//! [main]
|
||||
|
||||
normalize(imgCoherency, imgCoherency, 0, 255, NORM_MINMAX);
|
||||
normalize(imgOrientation, imgOrientation, 0, 255, NORM_MINMAX);
|
||||
|
||||
imwrite("result.jpg", 0.5*(imgIn + imgBin));
|
||||
imwrite("Coherency.jpg", imgCoherency);
|
||||
imwrite("Orientation.jpg", imgOrientation);
|
||||
//! [main_extra]
|
||||
return 0;
|
||||
}
|
||||
//! [calcGST]
|
||||
//! [calcJ_header]
|
||||
void calcGST(const Mat& inputImg, Mat& imgCoherencyOut, Mat& imgOrientationOut, int w)
|
||||
{
|
||||
Mat img;
|
||||
inputImg.convertTo(img, CV_32F);
|
||||
|
||||
// GST components calculation (start)
|
||||
// J = (J11 J12; J12 J22) - GST
|
||||
Mat imgDiffX, imgDiffY, imgDiffXY;
|
||||
Sobel(img, imgDiffX, CV_32F, 1, 0, 3);
|
||||
Sobel(img, imgDiffY, CV_32F, 0, 1, 3);
|
||||
multiply(imgDiffX, imgDiffY, imgDiffXY);
|
||||
//! [calcJ_header]
|
||||
|
||||
Mat imgDiffXX, imgDiffYY;
|
||||
multiply(imgDiffX, imgDiffX, imgDiffXX);
|
||||
multiply(imgDiffY, imgDiffY, imgDiffYY);
|
||||
|
||||
Mat J11, J22, J12; // J11, J22 and J12 are GST components
|
||||
boxFilter(imgDiffXX, J11, CV_32F, Size(w, w));
|
||||
boxFilter(imgDiffYY, J22, CV_32F, Size(w, w));
|
||||
boxFilter(imgDiffXY, J12, CV_32F, Size(w, w));
|
||||
// GST components calculation (stop)
|
||||
|
||||
// eigenvalue calculation (start)
|
||||
// lambda1 = 0.5*(J11 + J22 + sqrt((J11-J22)^2 + 4*J12^2))
|
||||
// lambda2 = 0.5*(J11 + J22 - sqrt((J11-J22)^2 + 4*J12^2))
|
||||
Mat tmp1, tmp2, tmp3, tmp4;
|
||||
tmp1 = J11 + J22;
|
||||
tmp2 = J11 - J22;
|
||||
multiply(tmp2, tmp2, tmp2);
|
||||
multiply(J12, J12, tmp3);
|
||||
sqrt(tmp2 + 4.0 * tmp3, tmp4);
|
||||
|
||||
Mat lambda1, lambda2;
|
||||
lambda1 = tmp1 + tmp4;
|
||||
lambda1 = 0.5*lambda1; // biggest eigenvalue
|
||||
lambda2 = tmp1 - tmp4;
|
||||
lambda2 = 0.5*lambda2; // smallest eigenvalue
|
||||
// eigenvalue calculation (stop)
|
||||
|
||||
// Coherency calculation (start)
|
||||
// Coherency = (lambda1 - lambda2)/(lambda1 + lambda2)) - measure of anisotropism
|
||||
// Coherency is anisotropy degree (consistency of local orientation)
|
||||
divide(lambda1 - lambda2, lambda1 + lambda2, imgCoherencyOut);
|
||||
// Coherency calculation (stop)
|
||||
|
||||
// orientation angle calculation (start)
|
||||
// tan(2*Alpha) = 2*J12/(J22 - J11)
|
||||
// Alpha = 0.5 atan2(2*J12/(J22 - J11))
|
||||
phase(J22 - J11, 2.0*J12, imgOrientationOut, true);
|
||||
imgOrientationOut = 0.5*imgOrientationOut;
|
||||
// orientation angle calculation (stop)
|
||||
}
|
||||
//! [calcGST]
|
185
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/basic_drawing/Drawing_1.cpp
vendored
Normal file
185
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/basic_drawing/Drawing_1.cpp
vendored
Normal file
@ -0,0 +1,185 @@
|
||||
/**
|
||||
* @file Drawing_1.cpp
|
||||
* @brief Simple geometric drawing
|
||||
* @author OpenCV team
|
||||
*/
|
||||
#include <opencv2/core.hpp>
|
||||
#include <opencv2/imgproc.hpp>
|
||||
#include <opencv2/highgui.hpp>
|
||||
|
||||
#define w 400
|
||||
|
||||
using namespace cv;
|
||||
|
||||
/// Function headers
|
||||
void MyEllipse( Mat img, double angle );
|
||||
void MyFilledCircle( Mat img, Point center );
|
||||
void MyPolygon( Mat img );
|
||||
void MyLine( Mat img, Point start, Point end );
|
||||
|
||||
/**
|
||||
* @function main
|
||||
* @brief Main function
|
||||
*/
|
||||
int main( void ){
|
||||
|
||||
//![create_images]
|
||||
/// Windows names
|
||||
char atom_window[] = "Drawing 1: Atom";
|
||||
char rook_window[] = "Drawing 2: Rook";
|
||||
|
||||
/// Create black empty images
|
||||
Mat atom_image = Mat::zeros( w, w, CV_8UC3 );
|
||||
Mat rook_image = Mat::zeros( w, w, CV_8UC3 );
|
||||
//![create_images]
|
||||
|
||||
/// 1. Draw a simple atom:
|
||||
/// -----------------------
|
||||
|
||||
//![draw_atom]
|
||||
/// 1.a. Creating ellipses
|
||||
MyEllipse( atom_image, 90 );
|
||||
MyEllipse( atom_image, 0 );
|
||||
MyEllipse( atom_image, 45 );
|
||||
MyEllipse( atom_image, -45 );
|
||||
|
||||
/// 1.b. Creating circles
|
||||
MyFilledCircle( atom_image, Point( w/2, w/2) );
|
||||
//![draw_atom]
|
||||
|
||||
/// 2. Draw a rook
|
||||
/// ------------------
|
||||
|
||||
//![draw_rook]
|
||||
/// 2.a. Create a convex polygon
|
||||
MyPolygon( rook_image );
|
||||
|
||||
//![rectangle]
|
||||
/// 2.b. Creating rectangles
|
||||
rectangle( rook_image,
|
||||
Point( 0, 7*w/8 ),
|
||||
Point( w, w),
|
||||
Scalar( 0, 255, 255 ),
|
||||
FILLED,
|
||||
LINE_8 );
|
||||
//![rectangle]
|
||||
|
||||
/// 2.c. Create a few lines
|
||||
MyLine( rook_image, Point( 0, 15*w/16 ), Point( w, 15*w/16 ) );
|
||||
MyLine( rook_image, Point( w/4, 7*w/8 ), Point( w/4, w ) );
|
||||
MyLine( rook_image, Point( w/2, 7*w/8 ), Point( w/2, w ) );
|
||||
MyLine( rook_image, Point( 3*w/4, 7*w/8 ), Point( 3*w/4, w ) );
|
||||
//![draw_rook]
|
||||
|
||||
/// 3. Display your stuff!
|
||||
imshow( atom_window, atom_image );
|
||||
moveWindow( atom_window, 0, 200 );
|
||||
imshow( rook_window, rook_image );
|
||||
moveWindow( rook_window, w, 200 );
|
||||
|
||||
waitKey( 0 );
|
||||
return(0);
|
||||
}
|
||||
|
||||
/// Function Declaration
|
||||
|
||||
/**
|
||||
* @function MyEllipse
|
||||
* @brief Draw a fixed-size ellipse with different angles
|
||||
*/
|
||||
//![my_ellipse]
|
||||
void MyEllipse( Mat img, double angle )
|
||||
{
|
||||
int thickness = 2;
|
||||
int lineType = 8;
|
||||
|
||||
ellipse( img,
|
||||
Point( w/2, w/2 ),
|
||||
Size( w/4, w/16 ),
|
||||
angle,
|
||||
0,
|
||||
360,
|
||||
Scalar( 255, 0, 0 ),
|
||||
thickness,
|
||||
lineType );
|
||||
}
|
||||
//![my_ellipse]
|
||||
|
||||
/**
|
||||
* @function MyFilledCircle
|
||||
* @brief Draw a fixed-size filled circle
|
||||
*/
|
||||
//![my_filled_circle]
|
||||
void MyFilledCircle( Mat img, Point center )
|
||||
{
|
||||
circle( img,
|
||||
center,
|
||||
w/32,
|
||||
Scalar( 0, 0, 255 ),
|
||||
FILLED,
|
||||
LINE_8 );
|
||||
}
|
||||
//![my_filled_circle]
|
||||
|
||||
/**
|
||||
* @function MyPolygon
|
||||
* @brief Draw a simple concave polygon (rook)
|
||||
*/
|
||||
//![my_polygon]
|
||||
void MyPolygon( Mat img )
|
||||
{
|
||||
int lineType = LINE_8;
|
||||
|
||||
/** Create some points */
|
||||
Point rook_points[1][20];
|
||||
rook_points[0][0] = Point( w/4, 7*w/8 );
|
||||
rook_points[0][1] = Point( 3*w/4, 7*w/8 );
|
||||
rook_points[0][2] = Point( 3*w/4, 13*w/16 );
|
||||
rook_points[0][3] = Point( 11*w/16, 13*w/16 );
|
||||
rook_points[0][4] = Point( 19*w/32, 3*w/8 );
|
||||
rook_points[0][5] = Point( 3*w/4, 3*w/8 );
|
||||
rook_points[0][6] = Point( 3*w/4, w/8 );
|
||||
rook_points[0][7] = Point( 26*w/40, w/8 );
|
||||
rook_points[0][8] = Point( 26*w/40, w/4 );
|
||||
rook_points[0][9] = Point( 22*w/40, w/4 );
|
||||
rook_points[0][10] = Point( 22*w/40, w/8 );
|
||||
rook_points[0][11] = Point( 18*w/40, w/8 );
|
||||
rook_points[0][12] = Point( 18*w/40, w/4 );
|
||||
rook_points[0][13] = Point( 14*w/40, w/4 );
|
||||
rook_points[0][14] = Point( 14*w/40, w/8 );
|
||||
rook_points[0][15] = Point( w/4, w/8 );
|
||||
rook_points[0][16] = Point( w/4, 3*w/8 );
|
||||
rook_points[0][17] = Point( 13*w/32, 3*w/8 );
|
||||
rook_points[0][18] = Point( 5*w/16, 13*w/16 );
|
||||
rook_points[0][19] = Point( w/4, 13*w/16 );
|
||||
|
||||
const Point* ppt[1] = { rook_points[0] };
|
||||
int npt[] = { 20 };
|
||||
|
||||
fillPoly( img,
|
||||
ppt,
|
||||
npt,
|
||||
1,
|
||||
Scalar( 255, 255, 255 ),
|
||||
lineType );
|
||||
}
|
||||
//![my_polygon]
|
||||
|
||||
/**
|
||||
* @function MyLine
|
||||
* @brief Draw a simple line
|
||||
*/
|
||||
//![my_line]
|
||||
void MyLine( Mat img, Point start, Point end )
|
||||
{
|
||||
int thickness = 2;
|
||||
int lineType = LINE_8;
|
||||
|
||||
line( img,
|
||||
start,
|
||||
end,
|
||||
Scalar( 0, 0, 0 ),
|
||||
thickness,
|
||||
lineType );
|
||||
}
|
||||
//![my_line]
|
326
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/basic_drawing/Drawing_2.cpp
vendored
Normal file
326
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/basic_drawing/Drawing_2.cpp
vendored
Normal file
@ -0,0 +1,326 @@
|
||||
/**
|
||||
* @file Drawing_2.cpp
|
||||
* @brief Simple sample code
|
||||
*/
|
||||
|
||||
#include <opencv2/core.hpp>
|
||||
#include <opencv2/imgproc.hpp>
|
||||
#include <opencv2/highgui.hpp>
|
||||
#include <iostream>
|
||||
#include <stdio.h>
|
||||
|
||||
using namespace cv;
|
||||
|
||||
/// Global Variables
|
||||
const int NUMBER = 100;
|
||||
const int DELAY = 5;
|
||||
|
||||
const int window_width = 900;
|
||||
const int window_height = 600;
|
||||
int x_1 = -window_width/2;
|
||||
int x_2 = window_width*3/2;
|
||||
int y_1 = -window_width/2;
|
||||
int y_2 = window_width*3/2;
|
||||
|
||||
/// Function headers
|
||||
static Scalar randomColor( RNG& rng );
|
||||
int Drawing_Random_Lines( Mat image, char* window_name, RNG rng );
|
||||
int Drawing_Random_Rectangles( Mat image, char* window_name, RNG rng );
|
||||
int Drawing_Random_Ellipses( Mat image, char* window_name, RNG rng );
|
||||
int Drawing_Random_Polylines( Mat image, char* window_name, RNG rng );
|
||||
int Drawing_Random_Filled_Polygons( Mat image, char* window_name, RNG rng );
|
||||
int Drawing_Random_Circles( Mat image, char* window_name, RNG rng );
|
||||
int Displaying_Random_Text( Mat image, char* window_name, RNG rng );
|
||||
int Displaying_Big_End( Mat image, char* window_name, RNG rng );
|
||||
|
||||
|
||||
/**
|
||||
* @function main
|
||||
*/
|
||||
int main( void )
|
||||
{
|
||||
int c;
|
||||
|
||||
/// Start creating a window
|
||||
char window_name[] = "Drawing_2 Tutorial";
|
||||
|
||||
/// Also create a random object (RNG)
|
||||
RNG rng( 0xFFFFFFFF );
|
||||
|
||||
/// Initialize a matrix filled with zeros
|
||||
Mat image = Mat::zeros( window_height, window_width, CV_8UC3 );
|
||||
/// Show it in a window during DELAY ms
|
||||
imshow( window_name, image );
|
||||
waitKey( DELAY );
|
||||
|
||||
/// Now, let's draw some lines
|
||||
c = Drawing_Random_Lines(image, window_name, rng);
|
||||
if( c != 0 ) return 0;
|
||||
|
||||
/// Go on drawing, this time nice rectangles
|
||||
c = Drawing_Random_Rectangles(image, window_name, rng);
|
||||
if( c != 0 ) return 0;
|
||||
|
||||
/// Draw some ellipses
|
||||
c = Drawing_Random_Ellipses( image, window_name, rng );
|
||||
if( c != 0 ) return 0;
|
||||
|
||||
/// Now some polylines
|
||||
c = Drawing_Random_Polylines( image, window_name, rng );
|
||||
if( c != 0 ) return 0;
|
||||
|
||||
/// Draw filled polygons
|
||||
c = Drawing_Random_Filled_Polygons( image, window_name, rng );
|
||||
if( c != 0 ) return 0;
|
||||
|
||||
/// Draw circles
|
||||
c = Drawing_Random_Circles( image, window_name, rng );
|
||||
if( c != 0 ) return 0;
|
||||
|
||||
/// Display text in random positions
|
||||
c = Displaying_Random_Text( image, window_name, rng );
|
||||
if( c != 0 ) return 0;
|
||||
|
||||
/// Displaying the big end!
|
||||
c = Displaying_Big_End( image, window_name, rng );
|
||||
if( c != 0 ) return 0;
|
||||
|
||||
waitKey(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// Function definitions
|
||||
|
||||
/**
|
||||
* @function randomColor
|
||||
* @brief Produces a random color given a random object
|
||||
*/
|
||||
static Scalar randomColor( RNG& rng )
|
||||
{
|
||||
int icolor = (unsigned) rng;
|
||||
return Scalar( icolor&255, (icolor>>8)&255, (icolor>>16)&255 );
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @function Drawing_Random_Lines
|
||||
*/
|
||||
int Drawing_Random_Lines( Mat image, char* window_name, RNG rng )
|
||||
{
|
||||
Point pt1, pt2;
|
||||
|
||||
for( int i = 0; i < NUMBER; i++ )
|
||||
{
|
||||
pt1.x = rng.uniform( x_1, x_2 );
|
||||
pt1.y = rng.uniform( y_1, y_2 );
|
||||
pt2.x = rng.uniform( x_1, x_2 );
|
||||
pt2.y = rng.uniform( y_1, y_2 );
|
||||
|
||||
line( image, pt1, pt2, randomColor(rng), rng.uniform(1, 10), 8 );
|
||||
imshow( window_name, image );
|
||||
if( waitKey( DELAY ) >= 0 )
|
||||
{ return -1; }
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* @function Drawing_Rectangles
|
||||
*/
|
||||
int Drawing_Random_Rectangles( Mat image, char* window_name, RNG rng )
|
||||
{
|
||||
Point pt1, pt2;
|
||||
int lineType = 8;
|
||||
int thickness = rng.uniform( -3, 10 );
|
||||
|
||||
for( int i = 0; i < NUMBER; i++ )
|
||||
{
|
||||
pt1.x = rng.uniform( x_1, x_2 );
|
||||
pt1.y = rng.uniform( y_1, y_2 );
|
||||
pt2.x = rng.uniform( x_1, x_2 );
|
||||
pt2.y = rng.uniform( y_1, y_2 );
|
||||
|
||||
rectangle( image, pt1, pt2, randomColor(rng), MAX( thickness, -1 ), lineType );
|
||||
|
||||
imshow( window_name, image );
|
||||
if( waitKey( DELAY ) >= 0 )
|
||||
{ return -1; }
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* @function Drawing_Random_Ellipses
|
||||
*/
|
||||
int Drawing_Random_Ellipses( Mat image, char* window_name, RNG rng )
|
||||
{
|
||||
int lineType = 8;
|
||||
|
||||
for ( int i = 0; i < NUMBER; i++ )
|
||||
{
|
||||
Point center;
|
||||
center.x = rng.uniform(x_1, x_2);
|
||||
center.y = rng.uniform(y_1, y_2);
|
||||
|
||||
Size axes;
|
||||
axes.width = rng.uniform(0, 200);
|
||||
axes.height = rng.uniform(0, 200);
|
||||
|
||||
double angle = rng.uniform(0, 180);
|
||||
|
||||
ellipse( image, center, axes, angle, angle - 100, angle + 200,
|
||||
randomColor(rng), rng.uniform(-1,9), lineType );
|
||||
|
||||
imshow( window_name, image );
|
||||
|
||||
if( waitKey(DELAY) >= 0 )
|
||||
{ return -1; }
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* @function Drawing_Random_Polylines
|
||||
*/
|
||||
int Drawing_Random_Polylines( Mat image, char* window_name, RNG rng )
|
||||
{
|
||||
int lineType = 8;
|
||||
|
||||
for( int i = 0; i< NUMBER; i++ )
|
||||
{
|
||||
Point pt[2][3];
|
||||
pt[0][0].x = rng.uniform(x_1, x_2);
|
||||
pt[0][0].y = rng.uniform(y_1, y_2);
|
||||
pt[0][1].x = rng.uniform(x_1, x_2);
|
||||
pt[0][1].y = rng.uniform(y_1, y_2);
|
||||
pt[0][2].x = rng.uniform(x_1, x_2);
|
||||
pt[0][2].y = rng.uniform(y_1, y_2);
|
||||
pt[1][0].x = rng.uniform(x_1, x_2);
|
||||
pt[1][0].y = rng.uniform(y_1, y_2);
|
||||
pt[1][1].x = rng.uniform(x_1, x_2);
|
||||
pt[1][1].y = rng.uniform(y_1, y_2);
|
||||
pt[1][2].x = rng.uniform(x_1, x_2);
|
||||
pt[1][2].y = rng.uniform(y_1, y_2);
|
||||
|
||||
const Point* ppt[2] = {pt[0], pt[1]};
|
||||
int npt[] = {3, 3};
|
||||
|
||||
polylines(image, ppt, npt, 2, true, randomColor(rng), rng.uniform(1,10), lineType);
|
||||
|
||||
imshow( window_name, image );
|
||||
if( waitKey(DELAY) >= 0 )
|
||||
{ return -1; }
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* @function Drawing_Random_Filled_Polygons
|
||||
*/
|
||||
int Drawing_Random_Filled_Polygons( Mat image, char* window_name, RNG rng )
|
||||
{
|
||||
int lineType = 8;
|
||||
|
||||
for ( int i = 0; i < NUMBER; i++ )
|
||||
{
|
||||
Point pt[2][3];
|
||||
pt[0][0].x = rng.uniform(x_1, x_2);
|
||||
pt[0][0].y = rng.uniform(y_1, y_2);
|
||||
pt[0][1].x = rng.uniform(x_1, x_2);
|
||||
pt[0][1].y = rng.uniform(y_1, y_2);
|
||||
pt[0][2].x = rng.uniform(x_1, x_2);
|
||||
pt[0][2].y = rng.uniform(y_1, y_2);
|
||||
pt[1][0].x = rng.uniform(x_1, x_2);
|
||||
pt[1][0].y = rng.uniform(y_1, y_2);
|
||||
pt[1][1].x = rng.uniform(x_1, x_2);
|
||||
pt[1][1].y = rng.uniform(y_1, y_2);
|
||||
pt[1][2].x = rng.uniform(x_1, x_2);
|
||||
pt[1][2].y = rng.uniform(y_1, y_2);
|
||||
|
||||
const Point* ppt[2] = {pt[0], pt[1]};
|
||||
int npt[] = {3, 3};
|
||||
|
||||
fillPoly( image, ppt, npt, 2, randomColor(rng), lineType );
|
||||
|
||||
imshow( window_name, image );
|
||||
if( waitKey(DELAY) >= 0 )
|
||||
{ return -1; }
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* @function Drawing_Random_Circles
|
||||
*/
|
||||
int Drawing_Random_Circles( Mat image, char* window_name, RNG rng )
|
||||
{
|
||||
int lineType = 8;
|
||||
|
||||
for (int i = 0; i < NUMBER; i++)
|
||||
{
|
||||
Point center;
|
||||
center.x = rng.uniform(x_1, x_2);
|
||||
center.y = rng.uniform(y_1, y_2);
|
||||
|
||||
circle( image, center, rng.uniform(0, 300), randomColor(rng),
|
||||
rng.uniform(-1, 9), lineType );
|
||||
|
||||
imshow( window_name, image );
|
||||
if( waitKey(DELAY) >= 0 )
|
||||
{ return -1; }
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* @function Displaying_Random_Text
|
||||
*/
|
||||
int Displaying_Random_Text( Mat image, char* window_name, RNG rng )
|
||||
{
|
||||
int lineType = 8;
|
||||
|
||||
for ( int i = 1; i < NUMBER; i++ )
|
||||
{
|
||||
Point org;
|
||||
org.x = rng.uniform(x_1, x_2);
|
||||
org.y = rng.uniform(y_1, y_2);
|
||||
|
||||
putText( image, "Testing text rendering", org, rng.uniform(0,8),
|
||||
rng.uniform(0,100)*0.05+0.1, randomColor(rng), rng.uniform(1, 10), lineType);
|
||||
|
||||
imshow( window_name, image );
|
||||
if( waitKey(DELAY) >= 0 )
|
||||
{ return -1; }
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* @function Displaying_Big_End
|
||||
*/
|
||||
int Displaying_Big_End( Mat image, char* window_name, RNG )
|
||||
{
|
||||
Size textsize = getTextSize("OpenCV forever!", FONT_HERSHEY_COMPLEX, 3, 5, 0);
|
||||
Point org((window_width - textsize.width)/2, (window_height - textsize.height)/2);
|
||||
int lineType = 8;
|
||||
|
||||
Mat image2;
|
||||
|
||||
for( int i = 0; i < 255; i += 2 )
|
||||
{
|
||||
image2 = image - Scalar::all(i);
|
||||
putText( image2, "OpenCV forever!", org, FONT_HERSHEY_COMPLEX, 3,
|
||||
Scalar(i, i, 255), 5, lineType );
|
||||
|
||||
imshow( window_name, image2 );
|
||||
if( waitKey(DELAY) >= 0 )
|
||||
{ return -1; }
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
@ -0,0 +1,98 @@
|
||||
#include <iostream>
|
||||
#include "opencv2/imgcodecs.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
|
||||
// we're NOT "using namespace std;" here, to avoid collisions between the beta variable and std::beta in c++17
|
||||
using std::cout;
|
||||
using std::endl;
|
||||
using namespace cv;
|
||||
|
||||
namespace
|
||||
{
|
||||
/** Global Variables */
|
||||
int alpha = 100;
|
||||
int beta = 100;
|
||||
int gamma_cor = 100;
|
||||
Mat img_original, img_corrected, img_gamma_corrected;
|
||||
|
||||
void basicLinearTransform(const Mat &img, const double alpha_, const int beta_)
|
||||
{
|
||||
Mat res;
|
||||
img.convertTo(res, -1, alpha_, beta_);
|
||||
|
||||
hconcat(img, res, img_corrected);
|
||||
imshow("Brightness and contrast adjustments", img_corrected);
|
||||
}
|
||||
|
||||
void gammaCorrection(const Mat &img, const double gamma_)
|
||||
{
|
||||
CV_Assert(gamma_ >= 0);
|
||||
//! [changing-contrast-brightness-gamma-correction]
|
||||
Mat lookUpTable(1, 256, CV_8U);
|
||||
uchar* p = lookUpTable.ptr();
|
||||
for( int i = 0; i < 256; ++i)
|
||||
p[i] = saturate_cast<uchar>(pow(i / 255.0, gamma_) * 255.0);
|
||||
|
||||
Mat res = img.clone();
|
||||
LUT(img, lookUpTable, res);
|
||||
//! [changing-contrast-brightness-gamma-correction]
|
||||
|
||||
hconcat(img, res, img_gamma_corrected);
|
||||
imshow("Gamma correction", img_gamma_corrected);
|
||||
}
|
||||
|
||||
void on_linear_transform_alpha_trackbar(int, void *)
|
||||
{
|
||||
double alpha_value = alpha / 100.0;
|
||||
int beta_value = beta - 100;
|
||||
basicLinearTransform(img_original, alpha_value, beta_value);
|
||||
}
|
||||
|
||||
void on_linear_transform_beta_trackbar(int, void *)
|
||||
{
|
||||
double alpha_value = alpha / 100.0;
|
||||
int beta_value = beta - 100;
|
||||
basicLinearTransform(img_original, alpha_value, beta_value);
|
||||
}
|
||||
|
||||
void on_gamma_correction_trackbar(int, void *)
|
||||
{
|
||||
double gamma_value = gamma_cor / 100.0;
|
||||
gammaCorrection(img_original, gamma_value);
|
||||
}
|
||||
}
|
||||
|
||||
int main( int argc, char** argv )
|
||||
{
|
||||
CommandLineParser parser( argc, argv, "{@input | lena.jpg | input image}" );
|
||||
img_original = imread( samples::findFile( parser.get<String>( "@input" ) ) );
|
||||
if( img_original.empty() )
|
||||
{
|
||||
cout << "Could not open or find the image!\n" << endl;
|
||||
cout << "Usage: " << argv[0] << " <Input image>" << endl;
|
||||
return -1;
|
||||
}
|
||||
|
||||
img_corrected = Mat(img_original.rows, img_original.cols*2, img_original.type());
|
||||
img_gamma_corrected = Mat(img_original.rows, img_original.cols*2, img_original.type());
|
||||
|
||||
hconcat(img_original, img_original, img_corrected);
|
||||
hconcat(img_original, img_original, img_gamma_corrected);
|
||||
|
||||
namedWindow("Brightness and contrast adjustments");
|
||||
namedWindow("Gamma correction");
|
||||
|
||||
createTrackbar("Alpha gain (contrast)", "Brightness and contrast adjustments", &alpha, 500, on_linear_transform_alpha_trackbar);
|
||||
createTrackbar("Beta bias (brightness)", "Brightness and contrast adjustments", &beta, 200, on_linear_transform_beta_trackbar);
|
||||
createTrackbar("Gamma correction", "Gamma correction", &gamma_cor, 200, on_gamma_correction_trackbar);
|
||||
|
||||
on_linear_transform_alpha_trackbar(0, 0);
|
||||
on_gamma_correction_trackbar(0, 0);
|
||||
|
||||
waitKey();
|
||||
|
||||
imwrite("linear_transform_correction.png", img_corrected);
|
||||
imwrite("gamma_correction.png", img_gamma_corrected);
|
||||
|
||||
return 0;
|
||||
}
|
139
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.cpp
vendored
Normal file
139
3rdparty/opencv-4.5.4/samples/cpp/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.cpp
vendored
Normal file
@ -0,0 +1,139 @@
|
||||
/**
|
||||
* @file Morphology_3(Extract_Lines).cpp
|
||||
* @brief Use morphology transformations for extracting horizontal and vertical lines sample code
|
||||
* @author OpenCV team
|
||||
*/
|
||||
|
||||
#include <opencv2/core.hpp>
|
||||
#include <opencv2/imgproc.hpp>
|
||||
#include <opencv2/highgui.hpp>
|
||||
#include <iostream>
|
||||
|
||||
void show_wait_destroy(const char* winname, cv::Mat img);
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
//! [load_image]
|
||||
CommandLineParser parser(argc, argv, "{@input | notes.png | input image}");
|
||||
Mat src = imread( samples::findFile( parser.get<String>("@input") ), IMREAD_COLOR);
|
||||
if (src.empty())
|
||||
{
|
||||
cout << "Could not open or find the image!\n" << endl;
|
||||
cout << "Usage: " << argv[0] << " <Input image>" << endl;
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Show source image
|
||||
imshow("src", src);
|
||||
//! [load_image]
|
||||
|
||||
//! [gray]
|
||||
// Transform source image to gray if it is not already
|
||||
Mat gray;
|
||||
|
||||
if (src.channels() == 3)
|
||||
{
|
||||
cvtColor(src, gray, COLOR_BGR2GRAY);
|
||||
}
|
||||
else
|
||||
{
|
||||
gray = src;
|
||||
}
|
||||
|
||||
// Show gray image
|
||||
show_wait_destroy("gray", gray);
|
||||
//! [gray]
|
||||
|
||||
//! [bin]
|
||||
// Apply adaptiveThreshold at the bitwise_not of gray, notice the ~ symbol
|
||||
Mat bw;
|
||||
adaptiveThreshold(~gray, bw, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 15, -2);
|
||||
|
||||
// Show binary image
|
||||
show_wait_destroy("binary", bw);
|
||||
//! [bin]
|
||||
|
||||
//! [init]
|
||||
// Create the images that will use to extract the horizontal and vertical lines
|
||||
Mat horizontal = bw.clone();
|
||||
Mat vertical = bw.clone();
|
||||
//! [init]
|
||||
|
||||
//! [horiz]
|
||||
// Specify size on horizontal axis
|
||||
int horizontal_size = horizontal.cols / 30;
|
||||
|
||||
// Create structure element for extracting horizontal lines through morphology operations
|
||||
Mat horizontalStructure = getStructuringElement(MORPH_RECT, Size(horizontal_size, 1));
|
||||
|
||||
// Apply morphology operations
|
||||
erode(horizontal, horizontal, horizontalStructure, Point(-1, -1));
|
||||
dilate(horizontal, horizontal, horizontalStructure, Point(-1, -1));
|
||||
|
||||
// Show extracted horizontal lines
|
||||
show_wait_destroy("horizontal", horizontal);
|
||||
//! [horiz]
|
||||
|
||||
//! [vert]
|
||||
// Specify size on vertical axis
|
||||
int vertical_size = vertical.rows / 30;
|
||||
|
||||
// Create structure element for extracting vertical lines through morphology operations
|
||||
Mat verticalStructure = getStructuringElement(MORPH_RECT, Size(1, vertical_size));
|
||||
|
||||
// Apply morphology operations
|
||||
erode(vertical, vertical, verticalStructure, Point(-1, -1));
|
||||
dilate(vertical, vertical, verticalStructure, Point(-1, -1));
|
||||
|
||||
// Show extracted vertical lines
|
||||
show_wait_destroy("vertical", vertical);
|
||||
//! [vert]
|
||||
|
||||
//! [smooth]
|
||||
// Inverse vertical image
|
||||
bitwise_not(vertical, vertical);
|
||||
show_wait_destroy("vertical_bit", vertical);
|
||||
|
||||
// Extract edges and smooth image according to the logic
|
||||
// 1. extract edges
|
||||
// 2. dilate(edges)
|
||||
// 3. src.copyTo(smooth)
|
||||
// 4. blur smooth img
|
||||
// 5. smooth.copyTo(src, edges)
|
||||
|
||||
// Step 1
|
||||
Mat edges;
|
||||
adaptiveThreshold(vertical, edges, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 3, -2);
|
||||
show_wait_destroy("edges", edges);
|
||||
|
||||
// Step 2
|
||||
Mat kernel = Mat::ones(2, 2, CV_8UC1);
|
||||
dilate(edges, edges, kernel);
|
||||
show_wait_destroy("dilate", edges);
|
||||
|
||||
// Step 3
|
||||
Mat smooth;
|
||||
vertical.copyTo(smooth);
|
||||
|
||||
// Step 4
|
||||
blur(smooth, smooth, Size(2, 2));
|
||||
|
||||
// Step 5
|
||||
smooth.copyTo(vertical, edges);
|
||||
|
||||
// Show final result
|
||||
show_wait_destroy("smooth - final", vertical);
|
||||
//! [smooth]
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void show_wait_destroy(const char* winname, cv::Mat img) {
|
||||
imshow(winname, img);
|
||||
moveWindow(winname, 500, 0);
|
||||
waitKey(0);
|
||||
destroyWindow(winname);
|
||||
}
|
@ -0,0 +1,184 @@
|
||||
/**
|
||||
* @brief You will learn how to recover an image with motion blur distortion using a Wiener filter
|
||||
* @author Karpushin Vladislav, karpushin@ngs.ru, https://github.com/VladKarpushin
|
||||
*/
|
||||
#include <iostream>
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/imgcodecs.hpp"
|
||||
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
|
||||
void help();
|
||||
void calcPSF(Mat& outputImg, Size filterSize, int len, double theta);
|
||||
void fftshift(const Mat& inputImg, Mat& outputImg);
|
||||
void filter2DFreq(const Mat& inputImg, Mat& outputImg, const Mat& H);
|
||||
void calcWnrFilter(const Mat& input_h_PSF, Mat& output_G, double nsr);
|
||||
void edgetaper(const Mat& inputImg, Mat& outputImg, double gamma = 5.0, double beta = 0.2);
|
||||
|
||||
const String keys =
|
||||
"{help h usage ? | | print this message }"
|
||||
"{image |input.png | input image name }"
|
||||
"{LEN |125 | length of a motion }"
|
||||
"{THETA |0 | angle of a motion in degrees }"
|
||||
"{SNR |700 | signal to noise ratio }"
|
||||
;
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
help();
|
||||
CommandLineParser parser(argc, argv, keys);
|
||||
if (parser.has("help"))
|
||||
{
|
||||
parser.printMessage();
|
||||
return 0;
|
||||
}
|
||||
|
||||
int LEN = parser.get<int>("LEN");
|
||||
double THETA = parser.get<double>("THETA");
|
||||
int snr = parser.get<int>("SNR");
|
||||
string strInFileName = parser.get<String>("image");
|
||||
|
||||
if (!parser.check())
|
||||
{
|
||||
parser.printErrors();
|
||||
return 0;
|
||||
}
|
||||
|
||||
Mat imgIn;
|
||||
imgIn = imread(strInFileName, IMREAD_GRAYSCALE);
|
||||
if (imgIn.empty()) //check whether the image is loaded or not
|
||||
{
|
||||
cout << "ERROR : Image cannot be loaded..!!" << endl;
|
||||
return -1;
|
||||
}
|
||||
|
||||
Mat imgOut;
|
||||
|
||||
//! [main]
|
||||
// it needs to process even image only
|
||||
Rect roi = Rect(0, 0, imgIn.cols & -2, imgIn.rows & -2);
|
||||
|
||||
//Hw calculation (start)
|
||||
Mat Hw, h;
|
||||
calcPSF(h, roi.size(), LEN, THETA);
|
||||
calcWnrFilter(h, Hw, 1.0 / double(snr));
|
||||
//Hw calculation (stop)
|
||||
|
||||
imgIn.convertTo(imgIn, CV_32F);
|
||||
edgetaper(imgIn, imgIn);
|
||||
|
||||
// filtering (start)
|
||||
filter2DFreq(imgIn(roi), imgOut, Hw);
|
||||
// filtering (stop)
|
||||
//! [main]
|
||||
|
||||
imgOut.convertTo(imgOut, CV_8U);
|
||||
normalize(imgOut, imgOut, 0, 255, NORM_MINMAX);
|
||||
imwrite("result.jpg", imgOut);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void help()
|
||||
{
|
||||
cout << "2018-08-14" << endl;
|
||||
cout << "Motion_deblur_v2" << endl;
|
||||
cout << "You will learn how to recover an image with motion blur distortion using a Wiener filter" << endl;
|
||||
}
|
||||
|
||||
//! [calcPSF]
|
||||
void calcPSF(Mat& outputImg, Size filterSize, int len, double theta)
|
||||
{
|
||||
Mat h(filterSize, CV_32F, Scalar(0));
|
||||
Point point(filterSize.width / 2, filterSize.height / 2);
|
||||
ellipse(h, point, Size(0, cvRound(float(len) / 2.0)), 90.0 - theta, 0, 360, Scalar(255), FILLED);
|
||||
Scalar summa = sum(h);
|
||||
outputImg = h / summa[0];
|
||||
}
|
||||
//! [calcPSF]
|
||||
|
||||
//! [fftshift]
|
||||
void fftshift(const Mat& inputImg, Mat& outputImg)
|
||||
{
|
||||
outputImg = inputImg.clone();
|
||||
int cx = outputImg.cols / 2;
|
||||
int cy = outputImg.rows / 2;
|
||||
Mat q0(outputImg, Rect(0, 0, cx, cy));
|
||||
Mat q1(outputImg, Rect(cx, 0, cx, cy));
|
||||
Mat q2(outputImg, Rect(0, cy, cx, cy));
|
||||
Mat q3(outputImg, Rect(cx, cy, cx, cy));
|
||||
Mat tmp;
|
||||
q0.copyTo(tmp);
|
||||
q3.copyTo(q0);
|
||||
tmp.copyTo(q3);
|
||||
q1.copyTo(tmp);
|
||||
q2.copyTo(q1);
|
||||
tmp.copyTo(q2);
|
||||
}
|
||||
//! [fftshift]
|
||||
|
||||
//! [filter2DFreq]
|
||||
void filter2DFreq(const Mat& inputImg, Mat& outputImg, const Mat& H)
|
||||
{
|
||||
Mat planes[2] = { Mat_<float>(inputImg.clone()), Mat::zeros(inputImg.size(), CV_32F) };
|
||||
Mat complexI;
|
||||
merge(planes, 2, complexI);
|
||||
dft(complexI, complexI, DFT_SCALE);
|
||||
|
||||
Mat planesH[2] = { Mat_<float>(H.clone()), Mat::zeros(H.size(), CV_32F) };
|
||||
Mat complexH;
|
||||
merge(planesH, 2, complexH);
|
||||
Mat complexIH;
|
||||
mulSpectrums(complexI, complexH, complexIH, 0);
|
||||
|
||||
idft(complexIH, complexIH);
|
||||
split(complexIH, planes);
|
||||
outputImg = planes[0];
|
||||
}
|
||||
//! [filter2DFreq]
|
||||
|
||||
//! [calcWnrFilter]
|
||||
void calcWnrFilter(const Mat& input_h_PSF, Mat& output_G, double nsr)
|
||||
{
|
||||
Mat h_PSF_shifted;
|
||||
fftshift(input_h_PSF, h_PSF_shifted);
|
||||
Mat planes[2] = { Mat_<float>(h_PSF_shifted.clone()), Mat::zeros(h_PSF_shifted.size(), CV_32F) };
|
||||
Mat complexI;
|
||||
merge(planes, 2, complexI);
|
||||
dft(complexI, complexI);
|
||||
split(complexI, planes);
|
||||
Mat denom;
|
||||
pow(abs(planes[0]), 2, denom);
|
||||
denom += nsr;
|
||||
divide(planes[0], denom, output_G);
|
||||
}
|
||||
//! [calcWnrFilter]
|
||||
|
||||
//! [edgetaper]
void edgetaper(const Mat& inputImg, Mat& outputImg, double gamma, double beta)
{
    int Nx = inputImg.cols;
    int Ny = inputImg.rows;
    Mat w1(1, Nx, CV_32F, Scalar(0));
    Mat w2(Ny, 1, CV_32F, Scalar(0));

    float* p1 = w1.ptr<float>(0);
    float* p2 = w2.ptr<float>(0);
    float dx = float(2.0 * CV_PI / Nx);
    float x = float(-CV_PI);
    for (int i = 0; i < Nx; i++)
    {
        p1[i] = float(0.5 * (tanh((x + gamma / 2) / beta) - tanh((x - gamma / 2) / beta)));
        x += dx;
    }
    float dy = float(2.0 * CV_PI / Ny);
    float y = float(-CV_PI);
    for (int i = 0; i < Ny; i++)
    {
        p2[i] = float(0.5 * (tanh((y + gamma / 2) / beta) - tanh((y - gamma / 2) / beta)));
        y += dy;
    }
    Mat w = w2 * w1;
    multiply(inputImg, w, outputImg);
}
//! [edgetaper]
@ -0,0 +1,149 @@
/**
* @brief You will learn how to recover an out-of-focus image with a Wiener filter
* @author Karpushin Vladislav, karpushin@ngs.ru, https://github.com/VladKarpushin
*/
#include <iostream>
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"

using namespace cv;
using namespace std;

void help();
void calcPSF(Mat& outputImg, Size filterSize, int R);
void fftshift(const Mat& inputImg, Mat& outputImg);
void filter2DFreq(const Mat& inputImg, Mat& outputImg, const Mat& H);
void calcWnrFilter(const Mat& input_h_PSF, Mat& output_G, double nsr);

const String keys =
"{help h usage ? | | print this message }"
"{image |original.JPG | input image name }"
"{R |53 | radius }"
"{SNR |5200 | signal to noise ratio}"
;

int main(int argc, char *argv[])
{
    help();
    CommandLineParser parser(argc, argv, keys);
    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }

    int R = parser.get<int>("R");
    int snr = parser.get<int>("SNR");
    string strInFileName = parser.get<String>("image");

    if (!parser.check())
    {
        parser.printErrors();
        return 0;
    }

    Mat imgIn;
    imgIn = imread(strInFileName, IMREAD_GRAYSCALE);
    if (imgIn.empty()) // check whether the image was loaded
    {
        cout << "ERROR: image cannot be loaded!" << endl;
        return -1;
    }

    Mat imgOut;

    //! [main]
    // process only an even-sized part of the image (crop to even width and height)
    Rect roi = Rect(0, 0, imgIn.cols & -2, imgIn.rows & -2);

    //Hw calculation (start)
    Mat Hw, h;
    calcPSF(h, roi.size(), R);
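    // the defocus PSF produced above is a uniform disk of radius R (see calcPSF
    // below); the Wiener filter Hw is then built from it using the given SNR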
    calcWnrFilter(h, Hw, 1.0 / double(snr));
    //Hw calculation (stop)

    // filtering (start)
    filter2DFreq(imgIn(roi), imgOut, Hw);
    // filtering (stop)
    //! [main]

    imgOut.convertTo(imgOut, CV_8U);
    normalize(imgOut, imgOut, 0, 255, NORM_MINMAX);
    imwrite("result.jpg", imgOut);
    return 0;
}

void help()
{
    cout << "2018-07-12" << endl;
    cout << "DeBlur_v8" << endl;
    cout << "You will learn how to recover an out-of-focus image with a Wiener filter" << endl;
}

//! [calcPSF]
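// calcPSF models defocus blur: the PSF is a filled circle of radius R centred in the
// filter plane, normalized so that its elements sum to 1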
void calcPSF(Mat& outputImg, Size filterSize, int R)
{
    Mat h(filterSize, CV_32F, Scalar(0));
    Point point(filterSize.width / 2, filterSize.height / 2);
    circle(h, point, R, 255, -1, 8);
    Scalar summa = sum(h);
    outputImg = h / summa[0];
}
//! [calcPSF]

//! [fftshift]
void fftshift(const Mat& inputImg, Mat& outputImg)
{
    outputImg = inputImg.clone();
    int cx = outputImg.cols / 2;
    int cy = outputImg.rows / 2;
    Mat q0(outputImg, Rect(0, 0, cx, cy));
    Mat q1(outputImg, Rect(cx, 0, cx, cy));
    Mat q2(outputImg, Rect(0, cy, cx, cy));
    Mat q3(outputImg, Rect(cx, cy, cx, cy));
    Mat tmp;
    q0.copyTo(tmp);
    q3.copyTo(q0);
    tmp.copyTo(q3);
    q1.copyTo(tmp);
    q2.copyTo(q1);
    tmp.copyTo(q2);
}
//! [fftshift]

//! [filter2DFreq]
void filter2DFreq(const Mat& inputImg, Mat& outputImg, const Mat& H)
{
    Mat planes[2] = { Mat_<float>(inputImg.clone()), Mat::zeros(inputImg.size(), CV_32F) };
    Mat complexI;
    merge(planes, 2, complexI);
    dft(complexI, complexI, DFT_SCALE);

    Mat planesH[2] = { Mat_<float>(H.clone()), Mat::zeros(H.size(), CV_32F) };
    Mat complexH;
    merge(planesH, 2, complexH);
    Mat complexIH;
    mulSpectrums(complexI, complexH, complexIH, 0);

    idft(complexIH, complexIH);
    split(complexIH, planes);
    outputImg = planes[0];
}
//! [filter2DFreq]

//! [calcWnrFilter]
void calcWnrFilter(const Mat& input_h_PSF, Mat& output_G, double nsr)
{
    Mat h_PSF_shifted;
    fftshift(input_h_PSF, h_PSF_shifted);
    Mat planes[2] = { Mat_<float>(h_PSF_shifted.clone()), Mat::zeros(h_PSF_shifted.size(), CV_32F) };
    Mat complexI;
    merge(planes, 2, complexI);
    dft(complexI, complexI);
    split(complexI, planes);
    Mat denom;
    pow(abs(planes[0]), 2, denom);
    denom += nsr;
    divide(planes[0], denom, output_G);
}
//! [calcWnrFilter]
@ -0,0 +1,148 @@
/**
* @brief You will learn how to remove periodic noise in the Fourier domain
* @author Karpushin Vladislav, karpushin@ngs.ru, https://github.com/VladKarpushin
*/
#include <iostream>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

using namespace cv;
using namespace std;

void fftshift(const Mat& inputImg, Mat& outputImg);
void filter2DFreq(const Mat& inputImg, Mat& outputImg, const Mat& H);
void synthesizeFilterH(Mat& inputOutput_H, Point center, int radius);
void calcPSD(const Mat& inputImg, Mat& outputImg, int flag = 0);

int main()
{
    Mat imgIn = imread("input.jpg", IMREAD_GRAYSCALE);
    if (imgIn.empty()) // check whether the image was loaded
    {
        cout << "ERROR: image cannot be loaded!" << endl;
        return -1;
    }

    imgIn.convertTo(imgIn, CV_32F);

    //! [main]
    // process only an even-sized part of the image (crop to even width and height)
    Rect roi = Rect(0, 0, imgIn.cols & -2, imgIn.rows & -2);
    imgIn = imgIn(roi);

    // PSD calculation (start)
    Mat imgPSD;
    calcPSD(imgIn, imgPSD);
    fftshift(imgPSD, imgPSD);
    normalize(imgPSD, imgPSD, 0, 255, NORM_MINMAX);
    // PSD calculation (stop)

    //H calculation (start)
    Mat H = Mat(roi.size(), CV_32F, Scalar(1));
    const int r = 21;
    synthesizeFilterH(H, Point(705, 458), r);
    synthesizeFilterH(H, Point(850, 391), r);
    synthesizeFilterH(H, Point(993, 325), r);
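    // the three notch centres above correspond to the bright periodic-noise peaks
    // visible in the computed PSD (in its fftshift-ed, centred layout); they are
    // specific to this input image and must be re-measured for other images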
    //H calculation (stop)
    // filtering (start)
    Mat imgOut;
    fftshift(H, H);
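    // H was drawn in the centred (fftshift-ed) spectrum layout, so it is shifted back
    // here before being applied, because filter2DFreq works on the unshifted spectrum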
    filter2DFreq(imgIn, imgOut, H);
    // filtering (stop)
    //! [main]

    imgOut.convertTo(imgOut, CV_8U);
    normalize(imgOut, imgOut, 0, 255, NORM_MINMAX);
    imwrite("result.jpg", imgOut);
    imwrite("PSD.jpg", imgPSD);
    fftshift(H, H);
    normalize(H, H, 0, 255, NORM_MINMAX);
    imwrite("filter.jpg", H);
    return 0;
}

//! [fftshift]
void fftshift(const Mat& inputImg, Mat& outputImg)
{
    outputImg = inputImg.clone();
    int cx = outputImg.cols / 2;
    int cy = outputImg.rows / 2;
    Mat q0(outputImg, Rect(0, 0, cx, cy));
    Mat q1(outputImg, Rect(cx, 0, cx, cy));
    Mat q2(outputImg, Rect(0, cy, cx, cy));
    Mat q3(outputImg, Rect(cx, cy, cx, cy));
    Mat tmp;
    q0.copyTo(tmp);
    q3.copyTo(q0);
    tmp.copyTo(q3);
    q1.copyTo(tmp);
    q2.copyTo(q1);
    tmp.copyTo(q2);
}
//! [fftshift]

//! [filter2DFreq]
void filter2DFreq(const Mat& inputImg, Mat& outputImg, const Mat& H)
{
    Mat planes[2] = { Mat_<float>(inputImg.clone()), Mat::zeros(inputImg.size(), CV_32F) };
    Mat complexI;
    merge(planes, 2, complexI);
    dft(complexI, complexI, DFT_SCALE);

    Mat planesH[2] = { Mat_<float>(H.clone()), Mat::zeros(H.size(), CV_32F) };
    Mat complexH;
    merge(planesH, 2, complexH);
    Mat complexIH;
    mulSpectrums(complexI, complexH, complexIH, 0);

    idft(complexIH, complexIH);
    split(complexIH, planes);
    outputImg = planes[0];
}
//! [filter2DFreq]

//! [synthesizeFilterH]
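// synthesizeFilterH builds a notch-reject filter: it zeroes a circular region of the
// given radius around the specified spectral peak and around its three mirrored
// counterparts, since the spectrum of a real image is symmetric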
void synthesizeFilterH(Mat& inputOutput_H, Point center, int radius)
{
    Point c2 = center, c3 = center, c4 = center;
    c2.y = inputOutput_H.rows - center.y;
    c3.x = inputOutput_H.cols - center.x;
    c4 = Point(c3.x, c2.y);
    circle(inputOutput_H, center, radius, 0, -1, 8);
    circle(inputOutput_H, c2, radius, 0, -1, 8);
    circle(inputOutput_H, c3, radius, 0, -1, 8);
    circle(inputOutput_H, c4, radius, 0, -1, 8);
}
//! [synthesizeFilterH]

// Calculates the PSD (power spectral density) of the image via the DFT, with two modes:
// flag = 0 returns the PSD
// flag = 1 returns log(PSD)
//! [calcPSD]
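// calcPSD computes PSD(u, v) = |DFT(I)(u, v)|^2 = Re^2 + Im^2; the DC component is
// zeroed so that it does not dominate the dynamic range of the result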
void calcPSD(const Mat& inputImg, Mat& outputImg, int flag)
{
    Mat planes[2] = { Mat_<float>(inputImg.clone()), Mat::zeros(inputImg.size(), CV_32F) };
    Mat complexI;
    merge(planes, 2, complexI);
    dft(complexI, complexI);
    split(complexI, planes);                    // planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))

    planes[0].at<float>(0) = 0;
    planes[1].at<float>(0) = 0;

    // compute the PSD = Re(DFT(I))^2 + Im(DFT(I))^2
    Mat imgPSD;
    magnitude(planes[0], planes[1], imgPSD);    // imgPSD = |DFT(I)| = sqrt(PSD)
    pow(imgPSD, 2, imgPSD);                     // square the magnitude to obtain the PSD
    outputImg = imgPSD;

    // logPSD = log(1 + PSD)
    if (flag)
    {
        Mat imglogPSD;
        imglogPSD = imgPSD + Scalar::all(1);
        log(imglogPSD, imglogPSD);
        outputImg = imglogPSD;
    }
}
//! [calcPSD]