feat: switch backend to PaddleOCR-NCNN; switch the project to CMake

1. The project backend has been fully migrated to the PaddleOCR-NCNN algorithm and has passed basic compatibility testing.
2. The project is now organized with CMake; to better accommodate third-party libraries, the QMake project is no longer provided.
3. Reorganized the rights-declaration files and the code tree to minimize the risk of copyright infringement.

Log: switch backend to PaddleOCR-NCNN; switch the project to CMake
Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
Author: wangzhengyang
Date:   2022-05-10 09:54:44 +08:00
Parent: ecdd171c6f
Commit: 718c41634f

10018 changed files with 3593797 additions and 186748 deletions
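Context for item 1 of the message: the OCR backend now runs on Tencent's ncnn inference runtime. The sketch below shows roughly what loading and running a converted PaddleOCR detection model through ncnn looks like; it is illustrative only, and the file names (ppocr_det.param/.bin), the blob names ("input"/"output"), and the normalization constants are assumptions, not values taken from this commit.

// Minimal sketch: running a PaddleOCR text-detection model through ncnn.
// All file and blob names here are hypothetical placeholders.
#include <ncnn/net.h>

int run_detection(const unsigned char* bgr, int w, int h)
{
    ncnn::Net net;
    if (net.load_param("ppocr_det.param") != 0) return -1; // hypothetical paths
    if (net.load_model("ppocr_det.bin") != 0) return -1;

    ncnn::Mat in = ncnn::Mat::from_pixels(bgr, ncnn::Mat::PIXEL_BGR, w, h);
    const float mean[3] = {123.675f, 116.28f, 103.53f};    // commonly used PP-OCR values
    const float norm[3] = {1 / 58.395f, 1 / 57.12f, 1 / 57.375f};
    in.substract_mean_normalize(mean, norm);               // ncnn's spelling of the API

    ncnn::Extractor ex = net.create_extractor();
    ex.input("input", in);                                 // blob names depend on the converted model
    ncnn::Mat out;
    ex.extract("output", out);                             // text-region probability map
    return 0;
}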


@@ -0,0 +1,9 @@
set(the_description "2D Features Framework")
ocv_add_dispatched_file(sift SSE4_1 AVX2 AVX512_SKX)
set(debug_modules "")
if(DEBUG_opencv_features2d)
list(APPEND debug_modules opencv_highgui)
endif()
ocv_define_module(features2d opencv_imgproc ${debug_modules} OPTIONAL opencv_flann WRAP java objc python js)
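Two build-system macros carry this file: ocv_add_dispatched_file compiles separate SSE4.1/AVX2/AVX-512 variants of the SIFT kernels and selects one at run time from the host CPU's capabilities, and ocv_define_module declares the opencv_features2d library, its dependencies, and the wrapper languages to generate. The CPU-feature flags that dispatch consults can be queried directly; a small sketch (AVX512_SKX above is a composite target, so this checks the AVX-512F base flag instead):

#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    // Same family of CPU-feature flags the dispatched SIFT code keys off.
    std::printf("SSE4.1:  %d\n", cv::checkHardwareSupport(CV_CPU_SSE4_1));
    std::printf("AVX2:    %d\n", cv::checkHardwareSupport(CV_CPU_AVX2));
    std::printf("AVX512F: %d\n", cv::checkHardwareSupport(CV_CPU_AVX_512F));
    return 0;
}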

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,284 @@
#!/usr/bin/perl
use strict;
use warnings;
use autodie; # die if problem reading or writing a file
my $filein = "./agast.txt";
my $fileout = "./agast_new.txt";
my $i1=1;
my $i2=1;
my $i3=1;
my $tmp;
my $ifcount0=0;
my $ifcount1=0;
my $ifcount2=0;
my $ifcount3=0;
my $ifcount4=0;
my $elsecount;
my $myfirstline = $ARGV[0];
my $mylastline = $ARGV[1];
my $tablename = $ARGV[2];
my @array0 = ();
my @array1 = ();
my @array2 = ();
my @array3 = ();
my $homogeneous;
my $success_homogeneous;
my $structured;
my $success_structured;
open(my $in1, "<", $filein) or die "Can't open $filein: $!";
open(my $out, ">", $fileout) or die "Can't open $fileout: $!";
$array0[0] = 0;
$i1=1;
while (my $line1 = <$in1>)
{
chomp $line1;
$array0[$i1] = 0;
if (($i1>=$myfirstline)&&($i1<=$mylastline))
{
if($line1=~/if\(ptr\[offset(\d+)/)
{
if($line1=~/if\(ptr\[offset(\d+).*\>.*cb/)
{
$tmp=$1;
}
else
{
if($line1=~/if\(ptr\[offset(\d+).*\<.*c\_b/)
{
$tmp=$1+128;
}
else
{
die "invalid array index!"
}
}
$array1[$ifcount1] = $tmp;
$array0[$ifcount1] = $i1;
$ifcount1++;
}
else
{
}
}
$i1++;
}
$homogeneous=$ifcount1;
$success_homogeneous=$ifcount1+1;
$structured=$ifcount1+2;
$success_structured=$ifcount1+3;
close $in1 or die "Can't close $filein: $!";
open($in1, "<", $filein) or die "Can't open $filein: $!";
$i1=1;
while (my $line1 = <$in1>)
{
chomp $line1;
if (($i1>=$myfirstline)&&($i1<=$mylastline))
{
if ($array0[$ifcount2] == $i1)
{
$array2[$ifcount2]=0;
$array3[$ifcount2]=0;
if ($array0[$ifcount2+1] == ($i1+1))
{
$array2[$ifcount2]=($ifcount2+1);
}
else
{
open(my $in2, "<", $filein) or die "Can't open $filein: $!";
$i2=1;
while (my $line2 = <$in2>)
{
chomp $line2;
if ($i2 == $i1)
{
last;
}
$i2++;
}
my $line2 = <$in2>;
chomp $line2;
if ($line2=~/goto (\w+)/)
{
$tmp=$1;
if ($tmp eq "homogeneous")
{
$array2[$ifcount2]=$homogeneous;
}
if ($tmp eq "success_homogeneous")
{
$array2[$ifcount2]=$success_homogeneous;
}
if ($tmp eq "structured")
{
$array2[$ifcount2]=$structured;
}
if ($tmp eq "success_structured")
{
$array2[$ifcount2]=$success_structured;
}
}
else
{
die "goto expected: $!";
}
close $in2 or die "Can't close $filein: $!";
}
#find next else and interpret it
open(my $in3, "<", $filein) or die "Can't open $filein: $!";
$i3=1;
$ifcount3=0;
$elsecount=0;
while (my $line3 = <$in3>)
{
chomp $line3;
$i3++;
if ($i3 == $i1)
{
last;
}
}
while (my $line3 = <$in3>)
{
chomp $line3;
$ifcount3++;
if (($elsecount==0)&&($i3>$i1))
{
if ($line3=~/goto (\w+)/)
{
$tmp=$1;
if ($tmp eq "homogeneous")
{
$array3[$ifcount2]=$homogeneous;
}
if ($tmp eq "success_homogeneous")
{
$array3[$ifcount2]=$success_homogeneous;
}
if ($tmp eq "structured")
{
$array3[$ifcount2]=$structured;
}
if ($tmp eq "success_structured")
{
$array3[$ifcount2]=$success_structured;
}
}
else
{
if ($line3=~/if\(ptr\[offset/)
{
$ifcount4=0;
while ($array0[$ifcount4]!=$i3)
{
$ifcount4++;
if ($ifcount4==$ifcount1)
{
die "if else match expected: $!";
}
$array3[$ifcount2]=$ifcount4;
}
}
else
{
die "elseif or elsegoto match expected: $!";
}
}
last;
}
else
{
if ($line3=~/if\(ptr\[offset/)
{
$elsecount++;
}
else
{
if ($line3=~/else/)
{
$elsecount--;
}
}
}
$i3++;
}
printf("%3d [%3d][0x%08x]\n", $array0[$ifcount2], $ifcount2, (($array1[$ifcount2]&15)<<28)|($array2[$ifcount2]<<16)|(($array1[$ifcount2]&128)<<5)|($array3[$ifcount2]));
close $in3 or die "Can't close $filein: $!";
$ifcount2++;
}
else
{
}
}
$i1++;
}
printf(" [%3d][0x%08x]\n", $homogeneous, 252);
printf(" [%3d][0x%08x]\n", $success_homogeneous, 253);
printf(" [%3d][0x%08x]\n", $structured, 254);
printf(" [%3d][0x%08x]\n", $success_structured, 255);
close $in1 or die "Can't close $filein: $!";
$ifcount0=0;
$ifcount2=0;
printf $out " static const unsigned long %s[] = {\n ", $tablename;
while ($ifcount0 < $ifcount1)
{
printf $out "0x%08x, ", (($array1[$ifcount0]&15)<<28)|($array2[$ifcount0]<<16)|(($array1[$ifcount0]&128)<<5)|($array3[$ifcount0]);
$ifcount0++;
$ifcount2++;
if ($ifcount2==8)
{
$ifcount2=0;
printf $out "\n";
printf $out " ";
}
}
printf $out "0x%08x, ", 252;
$ifcount0++;
$ifcount2++;
if ($ifcount2==8)
{
$ifcount2=0;
printf $out "\n";
printf $out " ";
}
printf $out "0x%08x, ", 253;
$ifcount0++;
$ifcount2++;
if ($ifcount2==8)
{
$ifcount2=0;
printf $out "\n";
printf $out " ";
}
printf $out "0x%08x, ", 254;
$ifcount0++;
$ifcount2++;
if ($ifcount2==8)
{
$ifcount2=0;
printf $out "\n";
printf $out " ";
}
printf $out "0x%08x\n", 255;
$ifcount0++;
$ifcount2++;
printf $out " };\n\n";
$#array0 = -1;
$#array1 = -1;
$#array2 = -1;
$#array3 = -1;
close $out or die "Can't close $fileout: $!";
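As the printf calls show, this generator packs each if-node of the unrolled AGAST decision tree into one 32-bit word: bits 31:28 hold the low four bits of the pixel-offset index, bits 27:16 the successor state when the comparison holds, bit 12 the brighter/darker flag (bit 7 of the raw code, shifted left by 5), and bits 11:0 the successor when it fails; entries 252-255 are the terminal states appended at the end. A C++ decoder for that layout, derived from the formula above rather than taken from the commit:

// Decode one table entry emitted by the generator above.
struct AgastNode {
    unsigned offset;   // pixel-offset index, bits 31:28
    bool     darker;   // bit 12: compare ptr[offset] < c_b instead of > cb
    unsigned onTrue;   // bits 27:16: next state if the comparison holds
    unsigned onFalse;  // bits 11:0: next state otherwise
};

static AgastNode decodeNode(unsigned long item)
{
    AgastNode n;
    n.offset  = (item >> 28) & 15u;
    n.darker  = ((item >> 12) & 1u) != 0;
    n.onTrue  = (item >> 16) & 0xFFFu;
    n.onFalse = item & 0xFFFu;
    return n;
}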


@@ -0,0 +1,244 @@
#!/usr/bin/perl
use strict;
use warnings;
use autodie; # die if problem reading or writing a file
my $filein = "./agast_score.txt";
my $fileout = "./agast_new.txt";
my $i1=1;
my $i2=1;
my $i3=1;
my $tmp;
my $ifcount0=0;
my $ifcount1=0;
my $ifcount2=0;
my $ifcount3=0;
my $ifcount4=0;
my $elsecount;
my $myfirstline = $ARGV[0];
my $mylastline = $ARGV[1];
my $tablename = $ARGV[2];
my @array0 = ();
my @array1 = ();
my @array2 = ();
my @array3 = ();
my $is_not_a_corner;
my $is_a_corner;
open(my $in1, "<", $filein) or die "Can't open $filein: $!";
open(my $out, ">", $fileout) or die "Can't open $fileout: $!";
$array0[0] = 0;
$i1=1;
while (my $line1 = <$in1>)
{
chomp $line1;
$array0[$i1] = 0;
if (($i1>=$myfirstline)&&($i1<=$mylastline))
{
if($line1=~/if\(ptr\[offset(\d+)/)
{
if($line1=~/if\(ptr\[offset(\d+).*\>.*cb/)
{
$tmp=$1;
}
else
{
if($line1=~/if\(ptr\[offset(\d+).*\<.*c\_b/)
{
$tmp=$1+128;
}
else
{
die "invalid array index!"
}
}
$array1[$ifcount1] = $tmp;
$array0[$ifcount1] = $i1;
$ifcount1++;
}
else
{
}
}
$i1++;
}
$is_not_a_corner=$ifcount1;
$is_a_corner=$ifcount1+1;
close $in1 or die "Can't close $filein: $!";
open($in1, "<", $filein) or die "Can't open $filein: $!";
$i1=1;
while (my $line1 = <$in1>)
{
chomp $line1;
if (($i1>=$myfirstline)&&($i1<=$mylastline))
{
if ($array0[$ifcount2] == $i1)
{
$array2[$ifcount2]=0;
$array3[$ifcount2]=0;
if ($array0[$ifcount2+1] == ($i1+1))
{
$array2[$ifcount2]=($ifcount2+1);
}
else
{
open(my $in2, "<", $filein) or die "Can't open $filein: $!";
$i2=1;
while (my $line2 = <$in2>)
{
chomp $line2;
if ($i2 == $i1)
{
last;
}
$i2++;
}
my $line2 = <$in2>;
chomp $line2;
if ($line2=~/goto (\w+)/)
{
$tmp=$1;
if ($tmp eq "is_not_a_corner")
{
$array2[$ifcount2]=$is_not_a_corner;
}
if ($tmp eq "is_a_corner")
{
$array2[$ifcount2]=$is_a_corner;
}
}
else
{
die "goto expected: $!";
}
close $in2 or die "Can't close $filein: $!";
}
#find next else and interpret it
open(my $in3, "<", $filein) or die "Can't open $filein: $!";
$i3=1;
$ifcount3=0;
$elsecount=0;
while (my $line3 = <$in3>)
{
chomp $line3;
$i3++;
if ($i3 == $i1)
{
last;
}
}
while (my $line3 = <$in3>)
{
chomp $line3;
$ifcount3++;
if (($elsecount==0)&&($i3>$i1))
{
if ($line3=~/goto (\w+)/)
{
$tmp=$1;
if ($tmp eq "is_not_a_corner")
{
$array3[$ifcount2]=$is_not_a_corner;
}
if ($tmp eq "is_a_corner")
{
$array3[$ifcount2]=$is_a_corner;
}
}
else
{
if ($line3=~/if\(ptr\[offset/)
{
$ifcount4=0;
while ($array0[$ifcount4]!=$i3)
{
$ifcount4++;
if ($ifcount4==$ifcount1)
{
die "if else match expected: $!";
}
$array3[$ifcount2]=$ifcount4;
}
}
else
{
die "elseif or elsegoto match expected: $!";
}
}
last;
}
else
{
if ($line3=~/if\(ptr\[offset/)
{
$elsecount++;
}
else
{
if ($line3=~/else/)
{
$elsecount--;
}
}
}
$i3++;
}
printf("%3d [%3d][0x%08x]\n", $array0[$ifcount2], $ifcount2, (($array1[$ifcount2]&15)<<28)|($array2[$ifcount2]<<16)|(($array1[$ifcount2]&128)<<5)|($array3[$ifcount2]));
close $in3 or die "Can't close $filein: $!";
$ifcount2++;
}
else
{
}
}
$i1++;
}
printf(" [%3d][0x%08x]\n", $is_not_a_corner, 254);
printf(" [%3d][0x%08x]\n", $is_a_corner, 255);
close $in1 or die "Can't close $filein: $!";
$ifcount0=0;
$ifcount2=0;
printf $out " static const unsigned long %s[] = {\n ", $tablename;
while ($ifcount0 < $ifcount1)
{
printf $out "0x%08x, ", (($array1[$ifcount0]&15)<<28)|($array2[$ifcount0]<<16)|(($array1[$ifcount0]&128)<<5)|($array3[$ifcount0]);
$ifcount0++;
$ifcount2++;
if ($ifcount2==8)
{
$ifcount2=0;
printf $out "\n";
printf $out " ";
}
}
printf $out "0x%08x, ", 254;
$ifcount0++;
$ifcount2++;
if ($ifcount2==8)
{
$ifcount2=0;
printf $out "\n";
printf $out " ";
}
printf $out "0x%08x\n", 255;
$ifcount0++;
$ifcount2++;
printf $out " };\n\n";
$#array0 = -1;
$#array1 = -1;
$#array2 = -1;
$#array3 = -1;
close $out or die "Can't close $fileout: $!";
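This second script is the scoring variant of the same generator: instead of the four continuation labels above (homogeneous, structured, and their success states) it recognizes only the two terminals is_not_a_corner and is_a_corner, appended as entries 254 and 255, and so produces the corner-score tables rather than the detection tables.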


@@ -0,0 +1,32 @@
perl read_file_score32.pl 9059 9385 table_5_8_corner_struct
move agast_new.txt agast_score_table.txt
perl read_file_score32.pl 2215 3387 table_7_12d_corner_struct
copy /A agast_score_table.txt + agast_new.txt agast_score_table.txt
del agast_new.txt
perl read_file_score32.pl 3428 9022 table_7_12s_corner_struct
copy /A agast_score_table.txt + agast_new.txt agast_score_table.txt
del agast_new.txt
perl read_file_score32.pl 118 2174 table_9_16_corner_struct
copy /A agast_score_table.txt + agast_new.txt agast_score_table.txt
del agast_new.txt
perl read_file_nondiff32.pl 103 430 table_5_8_struct1
move agast_new.txt agast_table.txt
perl read_file_nondiff32.pl 440 779 table_5_8_struct2
copy /A agast_table.txt + agast_new.txt agast_table.txt
del agast_new.txt
perl read_file_nondiff32.pl 869 2042 table_7_12d_struct1
copy /A agast_table.txt + agast_new.txt agast_table.txt
del agast_new.txt
perl read_file_nondiff32.pl 2052 3225 table_7_12d_struct2
copy /A agast_table.txt + agast_new.txt agast_table.txt
del agast_new.txt
perl read_file_nondiff32.pl 3315 4344 table_7_12s_struct1
copy /A agast_table.txt + agast_new.txt agast_table.txt
del agast_new.txt
perl read_file_nondiff32.pl 4354 5308 table_7_12s_struct2
copy /A agast_table.txt + agast_new.txt agast_table.txt
del agast_new.txt
perl read_file_nondiff32.pl 5400 7454 table_9_16_struct
copy /A agast_table.txt + agast_new.txt agast_table.txt
del agast_new.txt
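This batch file drives both generators. Each perl invocation extracts one line range from the dumped C sources (agast.txt or agast_score.txt) and writes a single table to agast_new.txt; the Windows move, copy /A (text-mode concatenation), and del commands then stitch the per-table outputs into agast_score_table.txt and agast_table.txt.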

File diff suppressed because it is too large.


@@ -0,0 +1,48 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifdef __OPENCV_BUILD
#error this is a compatibility header which should not be used inside the OpenCV library
#endif
#include "opencv2/features2d.hpp"


@@ -0,0 +1,33 @@
#ifndef OPENCV_FEATURE2D_HAL_INTERFACE_H
#define OPENCV_FEATURE2D_HAL_INTERFACE_H
#include "opencv2/core/cvdef.h"
//! @addtogroup features2d_hal_interface
//! @{
//! @name Fast feature detector types
//! @sa cv::FastFeatureDetector
//! @{
#define CV_HAL_TYPE_5_8 0
#define CV_HAL_TYPE_7_12 1
#define CV_HAL_TYPE_9_16 2
//! @}
//! @name Key point
//! @sa cv::KeyPoint
//! @{
struct CV_EXPORTS cvhalKeyPoint
{
float x;
float y;
float size;
float angle;
float response;
int octave;
int class_id;
};
//! @}
//! @}
#endif
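cvhalKeyPoint is a plain C struct so HAL backends can be written without OpenCV's C++ types; mapping it to cv::KeyPoint is a field-for-field copy. A minimal sketch, assuming the header above is included:

#include <opencv2/core.hpp>

// Field-for-field conversion from the HAL POD type to cv::KeyPoint.
inline cv::KeyPoint toKeyPoint(const cvhalKeyPoint& kp)
{
    return cv::KeyPoint(kp.x, kp.y, kp.size, kp.angle,
                        kp.response, kp.octave, kp.class_id);
}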


@@ -0,0 +1 @@
include/opencv2/features2d.hpp


@@ -0,0 +1 @@
misc/java/src/cpp/features2d_converters.hpp


@@ -0,0 +1,12 @@
{
"type_dict" : {
"Feature2D": {
"j_type": "Feature2D",
"jn_type": "long",
"jni_type": "jlong",
"jni_var": "Feature2D %(n)s",
"suffix": "J",
"j_import": "org.opencv.features2d.Feature2D"
}
}
}
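This fragment configures OpenCV's Java binding generator: a native cv::Feature2D crosses JNI as a raw pointer (jlong), appears in generated Java signatures as a long handle wrapped by org.opencv.features2d.Feature2D, and the listed import is added wherever the type occurs.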


@@ -0,0 +1,112 @@
#define LOG_TAG "org.opencv.utils.Converters"
#include "common.h"
#include "features2d_converters.hpp"
using namespace cv;
#define CHECK_MAT(cond) if(!(cond)){ LOGD("FAILED: " #cond); return; }
//vector_KeyPoint
void Mat_to_vector_KeyPoint(Mat& mat, std::vector<KeyPoint>& v_kp)
{
v_kp.clear();
CHECK_MAT(mat.type()==CV_32FC(7) && mat.cols==1);
for(int i=0; i<mat.rows; i++)
{
Vec<float, 7> v = mat.at< Vec<float, 7> >(i, 0);
KeyPoint kp(v[0], v[1], v[2], v[3], v[4], (int)v[5], (int)v[6]);
v_kp.push_back(kp);
}
return;
}
void vector_KeyPoint_to_Mat(std::vector<KeyPoint>& v_kp, Mat& mat)
{
int count = (int)v_kp.size();
mat.create(count, 1, CV_32FC(7));
for(int i=0; i<count; i++)
{
KeyPoint kp = v_kp[i];
mat.at< Vec<float, 7> >(i, 0) = Vec<float, 7>(kp.pt.x, kp.pt.y, kp.size, kp.angle, kp.response, (float)kp.octave, (float)kp.class_id);
}
}
//vector_DMatch
void Mat_to_vector_DMatch(Mat& mat, std::vector<DMatch>& v_dm)
{
v_dm.clear();
CHECK_MAT(mat.type()==CV_32FC4 && mat.cols==1);
for(int i=0; i<mat.rows; i++)
{
Vec<float, 4> v = mat.at< Vec<float, 4> >(i, 0);
DMatch dm((int)v[0], (int)v[1], (int)v[2], v[3]);
v_dm.push_back(dm);
}
return;
}
void vector_DMatch_to_Mat(std::vector<DMatch>& v_dm, Mat& mat)
{
int count = (int)v_dm.size();
mat.create(count, 1, CV_32FC4);
for(int i=0; i<count; i++)
{
DMatch dm = v_dm[i];
mat.at< Vec<float, 4> >(i, 0) = Vec<float, 4>((float)dm.queryIdx, (float)dm.trainIdx, (float)dm.imgIdx, dm.distance);
}
}
void Mat_to_vector_vector_KeyPoint(Mat& mat, std::vector< std::vector< KeyPoint > >& vv_kp)
{
std::vector<Mat> vm;
vm.reserve( mat.rows );
Mat_to_vector_Mat(mat, vm);
for(size_t i=0; i<vm.size(); i++)
{
std::vector<KeyPoint> vkp;
Mat_to_vector_KeyPoint(vm[i], vkp);
vv_kp.push_back(vkp);
}
}
void vector_vector_KeyPoint_to_Mat(std::vector< std::vector< KeyPoint > >& vv_kp, Mat& mat)
{
std::vector<Mat> vm;
vm.reserve( vv_kp.size() );
for(size_t i=0; i<vv_kp.size(); i++)
{
Mat m;
vector_KeyPoint_to_Mat(vv_kp[i], m);
vm.push_back(m);
}
vector_Mat_to_Mat(vm, mat);
}
void Mat_to_vector_vector_DMatch(Mat& mat, std::vector< std::vector< DMatch > >& vv_dm)
{
std::vector<Mat> vm;
vm.reserve( mat.rows );
Mat_to_vector_Mat(mat, vm);
for(size_t i=0; i<vm.size(); i++)
{
std::vector<DMatch> vdm;
Mat_to_vector_DMatch(vm[i], vdm);
vv_dm.push_back(vdm);
}
}
void vector_vector_DMatch_to_Mat(std::vector< std::vector< DMatch > >& vv_dm, Mat& mat)
{
std::vector<Mat> vm;
vm.reserve( vv_dm.size() );
for(size_t i=0; i<vv_dm.size(); i++)
{
Mat m;
vector_DMatch_to_Mat(vv_dm[i], m);
vm.push_back(m);
}
vector_Mat_to_Mat(vm, mat);
}
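These converters define the wire format between Java and native code: each KeyPoint becomes one row of a single-column CV_32FC(7) Mat with channels ordered (x, y, size, angle, response, octave, class_id), and each DMatch one row of a CV_32FC4 Mat. A round-trip sketch using the functions above:

// Pack keypoints into the 7-channel float format and read them back.
std::vector<cv::KeyPoint> kps = { cv::KeyPoint(10.f, 20.f, 4.f) };
cv::Mat packed;
vector_KeyPoint_to_Mat(kps, packed);       // packed: kps.size() x 1, CV_32FC(7)

std::vector<cv::KeyPoint> restored;
Mat_to_vector_KeyPoint(packed, restored);  // same keypoints, field for field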


@@ -0,0 +1,21 @@
#ifndef __FEATURES2D_CONVERTERS_HPP__
#define __FEATURES2D_CONVERTERS_HPP__
#include "opencv2/opencv_modules.hpp"
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
void Mat_to_vector_KeyPoint(cv::Mat& mat, std::vector<cv::KeyPoint>& v_kp);
void vector_KeyPoint_to_Mat(std::vector<cv::KeyPoint>& v_kp, cv::Mat& mat);
void Mat_to_vector_DMatch(cv::Mat& mat, std::vector<cv::DMatch>& v_dm);
void vector_DMatch_to_Mat(std::vector<cv::DMatch>& v_dm, cv::Mat& mat);
void Mat_to_vector_vector_KeyPoint(cv::Mat& mat, std::vector< std::vector< cv::KeyPoint > >& vv_kp);
void vector_vector_KeyPoint_to_Mat(std::vector< std::vector< cv::KeyPoint > >& vv_kp, cv::Mat& mat);
void Mat_to_vector_vector_DMatch(cv::Mat& mat, std::vector< std::vector< cv::DMatch > >& vv_dm);
void vector_vector_DMatch_to_Mat(std::vector< std::vector< cv::DMatch > >& vv_dm, cv::Mat& mat);
#endif


@@ -0,0 +1,102 @@
package org.opencv.test.features2d;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
import org.opencv.imgproc.Imgproc;
import org.opencv.features2d.Feature2D;
public class BRIEFDescriptorExtractorTest extends OpenCVTestCase {
Feature2D extractor;
int matSize;
private Mat getTestImg() {
Mat cross = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Imgproc.line(cross, new Point(20, matSize / 2), new Point(matSize - 21, matSize / 2), new Scalar(100), 2);
Imgproc.line(cross, new Point(matSize / 2, 20), new Point(matSize / 2, matSize - 21), new Scalar(100), 2);
return cross;
}
@Override
protected void setUp() throws Exception {
super.setUp();
extractor = createClassInstance(XFEATURES2D+"BriefDescriptorExtractor", DEFAULT_FACTORY, null, null);
matSize = 100;
}
public void testComputeListOfMatListOfListOfKeyPointListOfMat() {
fail("Not yet implemented");
}
public void testComputeMatListOfKeyPointMat() {
KeyPoint point = new KeyPoint(55.775577545166016f, 44.224422454833984f, 16, 9.754629f, 8617.863f, 1, -1);
MatOfKeyPoint keypoints = new MatOfKeyPoint(point);
Mat img = getTestImg();
Mat descriptors = new Mat();
extractor.compute(img, keypoints, descriptors);
Mat truth = new Mat(1, 32, CvType.CV_8UC1) {
{
put(0, 0, 96, 0, 76, 24, 47, 182, 68, 137,
149, 195, 67, 16, 187, 224, 74, 8,
82, 169, 87, 70, 44, 4, 192, 56,
13, 128, 44, 106, 146, 72, 194, 245);
}
};
assertMatEqual(truth, descriptors);
}
public void testCreate() {
assertNotNull(extractor);
}
public void testDescriptorSize() {
assertEquals(32, extractor.descriptorSize());
}
public void testDescriptorType() {
assertEquals(CvType.CV_8U, extractor.descriptorType());
}
public void testEmpty() {
// assertFalse(extractor.empty());
fail("Not yet implemented"); // BRIEF does not override empty() method
}
public void testRead() {
String filename = OpenCVTestRunner.getTempFileName("yml");
writeFile(filename, "%YAML:1.0\n---\ndescriptorSize: 64\n");
extractor.read(filename);
assertEquals(64, extractor.descriptorSize());
}
public void testWrite() {
String filename = OpenCVTestRunner.getTempFileName("xml");
extractor.write(filename);
String truth = "<?xml version=\"1.0\"?>\n<opencv_storage>\n<descriptorSize>32</descriptorSize>\n</opencv_storage>\n";
assertEquals(truth, readFile(filename));
}
public void testWriteYml() {
String filename = OpenCVTestRunner.getTempFileName("yml");
extractor.write(filename);
String truth = "%YAML:1.0\n---\ndescriptorSize: 32\n";
assertEquals(truth, readFile(filename));
}
}


@@ -0,0 +1,304 @@
package org.opencv.test.features2d;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.DMatch;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.BFMatcher;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
import org.opencv.imgproc.Imgproc;
import org.opencv.features2d.Feature2D;
public class BruteForceDescriptorMatcherTest extends OpenCVTestCase {
DescriptorMatcher matcher;
int matSize;
DMatch[] truth;
private Mat getMaskImg() {
return new Mat(5, 2, CvType.CV_8U, new Scalar(0)) {
{
put(0, 0, 1, 1, 1, 1);
}
};
}
private Mat getQueryDescriptors() {
Mat img = getQueryImg();
MatOfKeyPoint keypoints = new MatOfKeyPoint();
Mat descriptors = new Mat();
Feature2D detector = createClassInstance(XFEATURES2D+"SURF", DEFAULT_FACTORY, null, null);
Feature2D extractor = createClassInstance(XFEATURES2D+"SURF", DEFAULT_FACTORY, null, null);
setProperty(detector, "hessianThreshold", "double", 8000);
setProperty(detector, "nOctaves", "int", 3);
setProperty(detector, "nOctaveLayers", "int", 4);
setProperty(detector, "upright", "boolean", false);
detector.detect(img, keypoints);
extractor.compute(img, keypoints, descriptors);
return descriptors;
}
private Mat getQueryImg() {
Mat cross = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Imgproc.line(cross, new Point(30, matSize / 2), new Point(matSize - 31, matSize / 2), new Scalar(100), 3);
Imgproc.line(cross, new Point(matSize / 2, 30), new Point(matSize / 2, matSize - 31), new Scalar(100), 3);
return cross;
}
private Mat getTrainDescriptors() {
Mat img = getTrainImg();
MatOfKeyPoint keypoints = new MatOfKeyPoint(new KeyPoint(50, 50, 16, 0, 20000, 1, -1), new KeyPoint(42, 42, 16, 160, 10000, 1, -1));
Mat descriptors = new Mat();
Feature2D extractor = createClassInstance(XFEATURES2D+"SURF", DEFAULT_FACTORY, null, null);
extractor.compute(img, keypoints, descriptors);
return descriptors;
}
private Mat getTrainImg() {
Mat cross = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Imgproc.line(cross, new Point(20, matSize / 2), new Point(matSize - 21, matSize / 2), new Scalar(100), 2);
Imgproc.line(cross, new Point(matSize / 2, 20), new Point(matSize / 2, matSize - 21), new Scalar(100), 2);
return cross;
}
protected void setUp() throws Exception {
super.setUp();
matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE);
matSize = 100;
truth = new DMatch[] {
new DMatch(0, 0, 0, 0.6159003f),
new DMatch(1, 1, 0, 0.9177120f),
new DMatch(2, 1, 0, 0.3112163f),
new DMatch(3, 1, 0, 0.2925074f),
new DMatch(4, 1, 0, 0.26520672f)
};
}
// https://github.com/opencv/opencv/issues/11268
public void testConstructor()
{
BFMatcher self_created_matcher = new BFMatcher();
Mat train = new Mat(1, 1, CvType.CV_8U, new Scalar(123));
self_created_matcher.add(Arrays.asList(train));
assertTrue(!self_created_matcher.empty());
}
public void testAdd() {
matcher.add(Arrays.asList(new Mat()));
assertFalse(matcher.empty());
}
public void testClear() {
matcher.add(Arrays.asList(new Mat()));
matcher.clear();
assertTrue(matcher.empty());
}
public void testClone() {
Mat train = new Mat(1, 1, CvType.CV_8U, new Scalar(123));
Mat truth = train.clone();
matcher.add(Arrays.asList(train));
DescriptorMatcher cloned = matcher.clone();
assertNotNull(cloned);
List<Mat> descriptors = cloned.getTrainDescriptors();
assertEquals(1, descriptors.size());
assertMatEqual(truth, descriptors.get(0));
}
public void testCloneBoolean() {
matcher.add(Arrays.asList(new Mat()));
DescriptorMatcher cloned = matcher.clone(true);
assertNotNull(cloned);
assertTrue(cloned.empty());
}
public void testCreate() {
assertNotNull(matcher);
}
public void testEmpty() {
assertTrue(matcher.empty());
}
public void testGetTrainDescriptors() {
Mat train = new Mat(1, 1, CvType.CV_8U, new Scalar(123));
Mat truth = train.clone();
matcher.add(Arrays.asList(train));
List<Mat> descriptors = matcher.getTrainDescriptors();
assertEquals(1, descriptors.size());
assertMatEqual(truth, descriptors.get(0));
}
public void testIsMaskSupported() {
assertTrue(matcher.isMaskSupported());
}
public void testKnnMatchMatListOfListOfDMatchInt() {
fail("Not yet implemented");
}
public void testKnnMatchMatListOfListOfDMatchIntListOfMat() {
fail("Not yet implemented");
}
public void testKnnMatchMatListOfListOfDMatchIntListOfMatBoolean() {
fail("Not yet implemented");
}
public void testKnnMatchMatMatListOfListOfDMatchInt() {
final int k = 3;
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
List<MatOfDMatch> matches = new ArrayList<MatOfDMatch>();
matcher.knnMatch(query, train, matches, k);
/*
Log.d("knnMatch", "train = " + train);
Log.d("knnMatch", "query = " + query);
matcher.add(train);
matcher.knnMatch(query, matches, k);
*/
assertEquals(query.rows(), matches.size());
for(int i = 0; i<matches.size(); i++)
{
MatOfDMatch vdm = matches.get(i);
//Log.d("knn", "vdm["+i+"]="+vdm.dump());
assertTrue(Math.min(k, train.rows()) >= vdm.total());
for(DMatch dm : vdm.toArray())
{
assertEquals(dm.queryIdx, i);
}
}
}
public void testKnnMatchMatMatListOfListOfDMatchIntMat() {
fail("Not yet implemented");
}
public void testKnnMatchMatMatListOfListOfDMatchIntMatBoolean() {
fail("Not yet implemented");
}
public void testMatchMatListOfDMatch() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
MatOfDMatch matches = new MatOfDMatch();
matcher.add(Arrays.asList(train));
matcher.match(query, matches);
assertArrayDMatchEquals(truth, matches.toArray(), EPS);
}
public void testMatchMatListOfDMatchListOfMat() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
Mat mask = getMaskImg();
MatOfDMatch matches = new MatOfDMatch();
matcher.add(Arrays.asList(train));
matcher.match(query, matches, Arrays.asList(mask));
assertListDMatchEquals(Arrays.asList(truth[0], truth[1]), matches.toList(), EPS);
}
public void testMatchMatMatListOfDMatch() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
MatOfDMatch matches = new MatOfDMatch();
matcher.match(query, train, matches);
assertArrayDMatchEquals(truth, matches.toArray(), EPS);
// OpenCVTestRunner.Log("matches found: " + matches.size());
// for (DMatch m : matches)
// OpenCVTestRunner.Log(m.toString());
}
public void testMatchMatMatListOfDMatchMat() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
Mat mask = getMaskImg();
MatOfDMatch matches = new MatOfDMatch();
matcher.match(query, train, matches, mask);
assertListDMatchEquals(Arrays.asList(truth[0], truth[1]), matches.toList(), EPS);
}
public void testRadiusMatchMatListOfListOfDMatchFloat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatListOfListOfDMatchFloatListOfMat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatListOfListOfDMatchFloatListOfMatBoolean() {
fail("Not yet implemented");
}
public void testRadiusMatchMatMatListOfListOfDMatchFloat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatMatListOfListOfDMatchFloatMat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatMatListOfListOfDMatchFloatMatBoolean() {
fail("Not yet implemented");
}
public void testRead() {
String filename = OpenCVTestRunner.getTempFileName("yml");
writeFile(filename, "%YAML:1.0\n---\n");
matcher.read(filename);
assertTrue(true);// BruteforceMatcher has no settings
}
public void testTrain() {
matcher.train();// BruteforceMatcher does not need to train
}
public void testWrite() {
String filename = OpenCVTestRunner.getTempFileName("yml");
matcher.write(filename);
String truth = "%YAML:1.0\n---\n";
assertEquals(truth, readFile(filename));
}
}
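The test above exercises DescriptorMatcher.BRUTEFORCE, the L2 brute-force matcher reachable natively as cv::BFMatcher; the Hamming variants tested later in this commit correspond to cv::NORM_HAMMING. A minimal native sketch of the match call these tests go through (descriptor Mats assumed to come from any float-descriptor extractor):

#include <opencv2/features2d.hpp>
#include <vector>

void bruteForceMatch(const cv::Mat& query, const cv::Mat& train)
{
    cv::BFMatcher matcher(cv::NORM_L2);    // NORM_HAMMING for ORB/BRIEF descriptors
    std::vector<cv::DMatch> matches;
    matcher.match(query, train, matches);  // one best match per query row
}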


@@ -0,0 +1,262 @@
package org.opencv.test.features2d;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.DMatch;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FastFeatureDetector;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
import org.opencv.imgproc.Imgproc;
import org.opencv.features2d.Feature2D;
public class BruteForceHammingDescriptorMatcherTest extends OpenCVTestCase {
DescriptorMatcher matcher;
int matSize;
DMatch[] truth;
private Mat getMaskImg() {
return new Mat(4, 4, CvType.CV_8U, new Scalar(0)) {
{
put(0, 0, 1, 1, 1, 1, 1, 1, 1, 1);
}
};
}
private Mat getQueryDescriptors() {
return getTestDescriptors(getQueryImg());
}
private Mat getQueryImg() {
Mat img = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Imgproc.line(img, new Point(40, matSize - 40), new Point(matSize - 50, 50), new Scalar(0), 8);
return img;
}
private Mat getTestDescriptors(Mat img) {
MatOfKeyPoint keypoints = new MatOfKeyPoint();
Mat descriptors = new Mat();
Feature2D detector = FastFeatureDetector.create();
Feature2D extractor = createClassInstance(XFEATURES2D+"BriefDescriptorExtractor", DEFAULT_FACTORY, null, null);
detector.detect(img, keypoints);
extractor.compute(img, keypoints, descriptors);
return descriptors;
}
private Mat getTrainDescriptors() {
return getTestDescriptors(getTrainImg());
}
private Mat getTrainImg() {
Mat img = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Imgproc.line(img, new Point(40, 40), new Point(matSize - 40, matSize - 40), new Scalar(0), 8);
return img;
}
protected void setUp() throws Exception {
super.setUp();
matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
matSize = 100;
truth = new DMatch[] {
new DMatch(0, 0, 0, 51),
new DMatch(1, 2, 0, 42),
new DMatch(2, 1, 0, 40),
new DMatch(3, 3, 0, 53) };
}
public void testAdd() {
matcher.add(Arrays.asList(new Mat()));
assertFalse(matcher.empty());
}
public void testClear() {
matcher.add(Arrays.asList(new Mat()));
matcher.clear();
assertTrue(matcher.empty());
}
public void testClone() {
Mat train = new Mat(1, 1, CvType.CV_8U, new Scalar(123));
Mat truth = train.clone();
matcher.add(Arrays.asList(train));
DescriptorMatcher cloned = matcher.clone();
assertNotNull(cloned);
List<Mat> descriptors = cloned.getTrainDescriptors();
assertEquals(1, descriptors.size());
assertMatEqual(truth, descriptors.get(0));
}
public void testCloneBoolean() {
matcher.add(Arrays.asList(new Mat()));
DescriptorMatcher cloned = matcher.clone(true);
assertNotNull(cloned);
assertTrue(cloned.empty());
}
public void testCreate() {
assertNotNull(matcher);
}
public void testEmpty() {
assertTrue(matcher.empty());
}
public void testGetTrainDescriptors() {
Mat train = new Mat(1, 1, CvType.CV_8U, new Scalar(123));
Mat truth = train.clone();
matcher.add(Arrays.asList(train));
List<Mat> descriptors = matcher.getTrainDescriptors();
assertEquals(1, descriptors.size());
assertMatEqual(truth, descriptors.get(0));
}
public void testIsMaskSupported() {
assertTrue(matcher.isMaskSupported());
}
public void testKnnMatchMatListOfListOfDMatchInt() {
fail("Not yet implemented");
}
public void testKnnMatchMatListOfListOfDMatchIntListOfMat() {
fail("Not yet implemented");
}
public void testKnnMatchMatListOfListOfDMatchIntListOfMatBoolean() {
fail("Not yet implemented");
}
public void testKnnMatchMatMatListOfListOfDMatchInt() {
fail("Not yet implemented");
}
public void testKnnMatchMatMatListOfListOfDMatchIntMat() {
fail("Not yet implemented");
}
public void testKnnMatchMatMatListOfListOfDMatchIntMatBoolean() {
fail("Not yet implemented");
}
public void testMatchMatListOfDMatch() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
MatOfDMatch matches = new MatOfDMatch();
matcher.add(Arrays.asList(train));
matcher.match(query, matches);
assertListDMatchEquals(Arrays.asList(truth), matches.toList(), EPS);
}
public void testMatchMatListOfDMatchListOfMat() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
Mat mask = getMaskImg();
MatOfDMatch matches = new MatOfDMatch();
matcher.add(Arrays.asList(train));
matcher.match(query, matches, Arrays.asList(mask));
assertListDMatchEquals(Arrays.asList(truth[0], truth[1]), matches.toList(), EPS);
}
public void testMatchMatMatListOfDMatch() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
MatOfDMatch matches = new MatOfDMatch();
matcher.match(query, train, matches);
assertListDMatchEquals(Arrays.asList(truth), matches.toList(), EPS);
}
public void testMatchMatMatListOfDMatchMat() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
Mat mask = getMaskImg();
MatOfDMatch matches = new MatOfDMatch();
matcher.match(query, train, matches, mask);
assertListDMatchEquals(Arrays.asList(truth[0], truth[1]), matches.toList(), EPS);
}
public void testRadiusMatchMatListOfListOfDMatchFloat() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
ArrayList<MatOfDMatch> matches = new ArrayList<MatOfDMatch>();
matcher.radiusMatch(query, train, matches, 50.f);
assertEquals(4, matches.size());
assertTrue(matches.get(0).empty());
assertMatEqual(matches.get(1), new MatOfDMatch(truth[1]), EPS);
assertMatEqual(matches.get(2), new MatOfDMatch(truth[2]), EPS);
assertTrue(matches.get(3).empty());
}
public void testRadiusMatchMatListOfListOfDMatchFloatListOfMat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatListOfListOfDMatchFloatListOfMatBoolean() {
fail("Not yet implemented");
}
public void testRadiusMatchMatMatListOfListOfDMatchFloat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatMatListOfListOfDMatchFloatMat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatMatListOfListOfDMatchFloatMatBoolean() {
fail("Not yet implemented");
}
public void testRead() {
String filename = OpenCVTestRunner.getTempFileName("yml");
writeFile(filename, "%YAML:1.0\n---\n");
matcher.read(filename);
assertTrue(true);// BruteforceMatcher has no settings
}
public void testTrain() {
matcher.train();// BruteforceMatcher does not need to train
}
public void testWrite() {
String filename = OpenCVTestRunner.getTempFileName("yml");
matcher.write(filename);
String truth = "%YAML:1.0\n---\n";
assertEquals(truth, readFile(filename));
}
}


@@ -0,0 +1,257 @@
package org.opencv.test.features2d;
import java.util.Arrays;
import java.util.List;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.DMatch;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FastFeatureDetector;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
import org.opencv.imgproc.Imgproc;
import org.opencv.features2d.Feature2D;
public class BruteForceHammingLUTDescriptorMatcherTest extends OpenCVTestCase {
DescriptorMatcher matcher;
int matSize;
DMatch[] truth;
private Mat getMaskImg() {
return new Mat(4, 4, CvType.CV_8U, new Scalar(0)) {
{
put(0, 0, 1, 1, 1, 1, 1, 1, 1, 1);
}
};
}
private Mat getQueryDescriptors() {
return getTestDescriptors(getQueryImg());
}
private Mat getQueryImg() {
Mat img = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Imgproc.line(img, new Point(40, matSize - 40), new Point(matSize - 50, 50), new Scalar(0), 8);
return img;
}
private Mat getTestDescriptors(Mat img) {
MatOfKeyPoint keypoints = new MatOfKeyPoint();
Mat descriptors = new Mat();
Feature2D detector = FastFeatureDetector.create();
Feature2D extractor = createClassInstance(XFEATURES2D+"BriefDescriptorExtractor", DEFAULT_FACTORY, null, null);
detector.detect(img, keypoints);
extractor.compute(img, keypoints, descriptors);
return descriptors;
}
private Mat getTrainDescriptors() {
return getTestDescriptors(getTrainImg());
}
private Mat getTrainImg() {
Mat img = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Imgproc.line(img, new Point(40, 40), new Point(matSize - 40, matSize - 40), new Scalar(0), 8);
return img;
}
protected void setUp() throws Exception {
super.setUp();
matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMINGLUT);
matSize = 100;
truth = new DMatch[] {
new DMatch(0, 0, 0, 51),
new DMatch(1, 2, 0, 42),
new DMatch(2, 1, 0, 40),
new DMatch(3, 3, 0, 53) };
}
public void testAdd() {
matcher.add(Arrays.asList(new Mat()));
assertFalse(matcher.empty());
}
public void testClear() {
matcher.add(Arrays.asList(new Mat()));
matcher.clear();
assertTrue(matcher.empty());
}
public void testClone() {
Mat train = new Mat(1, 1, CvType.CV_8U, new Scalar(123));
Mat truth = train.clone();
matcher.add(Arrays.asList(train));
DescriptorMatcher cloned = matcher.clone();
assertNotNull(cloned);
List<Mat> descriptors = cloned.getTrainDescriptors();
assertEquals(1, descriptors.size());
assertMatEqual(truth, descriptors.get(0));
}
public void testCloneBoolean() {
matcher.add(Arrays.asList(new Mat()));
DescriptorMatcher cloned = matcher.clone(true);
assertNotNull(cloned);
assertTrue(cloned.empty());
}
public void testCreate() {
assertNotNull(matcher);
}
public void testEmpty() {
assertTrue(matcher.empty());
}
public void testGetTrainDescriptors() {
Mat train = new Mat(1, 1, CvType.CV_8U, new Scalar(123));
Mat truth = train.clone();
matcher.add(Arrays.asList(train));
List<Mat> descriptors = matcher.getTrainDescriptors();
assertEquals(1, descriptors.size());
assertMatEqual(truth, descriptors.get(0));
}
public void testIsMaskSupported() {
assertTrue(matcher.isMaskSupported());
}
public void testKnnMatchMatListOfListOfDMatchInt() {
fail("Not yet implemented");
}
public void testKnnMatchMatListOfListOfDMatchIntListOfMat() {
fail("Not yet implemented");
}
public void testKnnMatchMatListOfListOfDMatchIntListOfMatBoolean() {
fail("Not yet implemented");
}
public void testKnnMatchMatMatListOfListOfDMatchInt() {
fail("Not yet implemented");
}
public void testKnnMatchMatMatListOfListOfDMatchIntMat() {
fail("Not yet implemented");
}
public void testKnnMatchMatMatListOfListOfDMatchIntMatBoolean() {
fail("Not yet implemented");
}
public void testMatchMatListOfDMatch() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
MatOfDMatch matches = new MatOfDMatch();
matcher.add(Arrays.asList(train));
matcher.match(query, matches);
assertArrayDMatchEquals(truth, matches.toArray(), EPS);
}
public void testMatchMatListOfDMatchListOfMat() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
Mat mask = getMaskImg();
MatOfDMatch matches = new MatOfDMatch();
matcher.add(Arrays.asList(train));
matcher.match(query, matches, Arrays.asList(mask));
assertListDMatchEquals(Arrays.asList(truth[0], truth[1]), matches.toList(), EPS);
}
public void testMatchMatMatListOfDMatch() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
MatOfDMatch matches = new MatOfDMatch();
matcher.match(query, train, matches);
/*
OpenCVTestRunner.Log("matches found: " + matches.size());
for (DMatch m : matches.toArray())
OpenCVTestRunner.Log(m.toString());
*/
assertArrayDMatchEquals(truth, matches.toArray(), EPS);
}
public void testMatchMatMatListOfDMatchMat() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
Mat mask = getMaskImg();
MatOfDMatch matches = new MatOfDMatch();
matcher.match(query, train, matches, mask);
assertListDMatchEquals(Arrays.asList(truth[0], truth[1]), matches.toList(), EPS);
}
public void testRadiusMatchMatListOfListOfDMatchFloat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatListOfListOfDMatchFloatListOfMat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatListOfListOfDMatchFloatListOfMatBoolean() {
fail("Not yet implemented");
}
public void testRadiusMatchMatMatListOfListOfDMatchFloat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatMatListOfListOfDMatchFloatMat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatMatListOfListOfDMatchFloatMatBoolean() {
fail("Not yet implemented");
}
public void testRead() {
String filename = OpenCVTestRunner.getTempFileName("yml");
writeFile(filename, "%YAML:1.0\n---\n");
matcher.read(filename);
assertTrue(true);// BruteforceMatcher has no settings
}
public void testTrain() {
matcher.train();// BruteforceMatcher does not need to train
}
public void testWrite() {
String filename = OpenCVTestRunner.getTempFileName("yml");
matcher.write(filename);
String truth = "%YAML:1.0\n---\n";
assertEquals(truth, readFile(filename));
}
}


@@ -0,0 +1,268 @@
package org.opencv.test.features2d;
import java.util.Arrays;
import java.util.List;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.DMatch;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
import org.opencv.imgproc.Imgproc;
import org.opencv.features2d.Feature2D;
public class BruteForceL1DescriptorMatcherTest extends OpenCVTestCase {
DescriptorMatcher matcher;
int matSize;
DMatch[] truth;
private Mat getMaskImg() {
return new Mat(5, 2, CvType.CV_8U, new Scalar(0)) {
{
put(0, 0, 1, 1, 1, 1);
}
};
}
private Mat getQueryDescriptors() {
Mat img = getQueryImg();
MatOfKeyPoint keypoints = new MatOfKeyPoint();
Mat descriptors = new Mat();
Feature2D detector = createClassInstance(XFEATURES2D+"SURF", DEFAULT_FACTORY, null, null);
Feature2D extractor = createClassInstance(XFEATURES2D+"SURF", DEFAULT_FACTORY, null, null);
setProperty(detector, "extended", "boolean", true);
setProperty(detector, "hessianThreshold", "double", 8000);
setProperty(detector, "nOctaveLayers", "int", 2);
setProperty(detector, "nOctaves", "int", 3);
setProperty(detector, "upright", "boolean", false);
detector.detect(img, keypoints);
extractor.compute(img, keypoints, descriptors);
return descriptors;
}
private Mat getQueryImg() {
Mat cross = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Imgproc.line(cross, new Point(30, matSize / 2), new Point(matSize - 31, matSize / 2), new Scalar(100), 3);
Imgproc.line(cross, new Point(matSize / 2, 30), new Point(matSize / 2, matSize - 31), new Scalar(100), 3);
return cross;
}
private Mat getTrainDescriptors() {
Mat img = getTrainImg();
MatOfKeyPoint keypoints = new MatOfKeyPoint(new KeyPoint(50, 50, 16, 0, 20000, 1, -1), new KeyPoint(42, 42, 16, 160, 10000, 1, -1));
Mat descriptors = new Mat();
Feature2D extractor = createClassInstance(XFEATURES2D+"SURF", DEFAULT_FACTORY, null, null);
extractor.compute(img, keypoints, descriptors);
return descriptors;
}
private Mat getTrainImg() {
Mat cross = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Imgproc.line(cross, new Point(20, matSize / 2), new Point(matSize - 21, matSize / 2), new Scalar(100), 2);
Imgproc.line(cross, new Point(matSize / 2, 20), new Point(matSize / 2, matSize - 21), new Scalar(100), 2);
return cross;
}
protected void setUp() throws Exception {
super.setUp();
matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_L1);
matSize = 100;
truth = new DMatch[] {
new DMatch(0, 0, 0, 3.0710702f),
new DMatch(1, 1, 0, 3.562016f),
new DMatch(2, 1, 0, 1.3682679f),
new DMatch(3, 1, 0, 1.3012862f),
new DMatch(4, 1, 0, 1.1852086f)
};
}
public void testAdd() {
matcher.add(Arrays.asList(new Mat()));
assertFalse(matcher.empty());
}
public void testClear() {
matcher.add(Arrays.asList(new Mat()));
matcher.clear();
assertTrue(matcher.empty());
}
public void testClone() {
Mat train = new Mat(1, 1, CvType.CV_8U, new Scalar(123));
Mat truth = train.clone();
matcher.add(Arrays.asList(train));
DescriptorMatcher cloned = matcher.clone();
assertNotNull(cloned);
List<Mat> descriptors = cloned.getTrainDescriptors();
assertEquals(1, descriptors.size());
assertMatEqual(truth, descriptors.get(0));
}
public void testCloneBoolean() {
matcher.add(Arrays.asList(new Mat()));
DescriptorMatcher cloned = matcher.clone(true);
assertNotNull(cloned);
assertTrue(cloned.empty());
}
public void testCreate() {
assertNotNull(matcher);
}
public void testEmpty() {
assertTrue(matcher.empty());
}
public void testGetTrainDescriptors() {
Mat train = new Mat(1, 1, CvType.CV_8U, new Scalar(123));
Mat truth = train.clone();
matcher.add(Arrays.asList(train));
List<Mat> descriptors = matcher.getTrainDescriptors();
assertEquals(1, descriptors.size());
assertMatEqual(truth, descriptors.get(0));
}
public void testIsMaskSupported() {
assertTrue(matcher.isMaskSupported());
}
public void testKnnMatchMatListOfListOfDMatchInt() {
fail("Not yet implemented");
}
public void testKnnMatchMatListOfListOfDMatchIntListOfMat() {
fail("Not yet implemented");
}
public void testKnnMatchMatListOfListOfDMatchIntListOfMatBoolean() {
fail("Not yet implemented");
}
public void testKnnMatchMatMatListOfListOfDMatchInt() {
fail("Not yet implemented");
}
public void testKnnMatchMatMatListOfListOfDMatchIntMat() {
fail("Not yet implemented");
}
public void testKnnMatchMatMatListOfListOfDMatchIntMatBoolean() {
fail("Not yet implemented");
}
public void testMatchMatListOfDMatch() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
MatOfDMatch matches = new MatOfDMatch();
matcher.add(Arrays.asList(train));
matcher.match(query, matches);
assertArrayDMatchEquals(truth, matches.toArray(), EPS);
}
public void testMatchMatListOfDMatchListOfMat() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
Mat mask = getMaskImg();
MatOfDMatch matches = new MatOfDMatch();
matcher.add(Arrays.asList(train));
matcher.match(query, matches, Arrays.asList(mask));
assertListDMatchEquals(Arrays.asList(truth[0], truth[1]), matches.toList(), EPS);
}
public void testMatchMatMatListOfDMatch() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
MatOfDMatch matches = new MatOfDMatch();
matcher.match(query, train, matches);
assertArrayDMatchEquals(truth, matches.toArray(), EPS);
}
public void testMatchMatMatListOfDMatchMat() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
Mat mask = getMaskImg();
MatOfDMatch matches = new MatOfDMatch();
matcher.match(query, train, matches, mask);
assertListDMatchEquals(Arrays.asList(truth[0], truth[1]), matches.toList(), EPS);
}
public void testRadiusMatchMatListOfListOfDMatchFloat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatListOfListOfDMatchFloatListOfMat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatListOfListOfDMatchFloatListOfMatBoolean() {
fail("Not yet implemented");
}
public void testRadiusMatchMatMatListOfListOfDMatchFloat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatMatListOfListOfDMatchFloatMat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatMatListOfListOfDMatchFloatMatBoolean() {
fail("Not yet implemented");
}
public void testRead() {
String filename = OpenCVTestRunner.getTempFileName("yml");
writeFile(filename, "%YAML:1.0\n---\n");
matcher.read(filename);
assertTrue(true);// BruteforceMatcher has no settings
}
public void testTrain() {
matcher.train();// BruteforceMatcher does not need to train
}
public void testWrite() {
String filename = OpenCVTestRunner.getTempFileName("yml");
matcher.write(filename);
String truth = "%YAML:1.0\n---\n";
assertEquals(truth, readFile(filename));
}
}


@@ -0,0 +1,280 @@
package org.opencv.test.features2d;
import java.util.Arrays;
import java.util.List;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.DMatch;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
import org.opencv.imgproc.Imgproc;
import org.opencv.features2d.Feature2D;
public class BruteForceSL2DescriptorMatcherTest extends OpenCVTestCase {
DescriptorMatcher matcher;
int matSize;
DMatch[] truth;
private Mat getMaskImg() {
return new Mat(5, 2, CvType.CV_8U, new Scalar(0)) {
{
put(0, 0, 1, 1, 1, 1);
}
};
}
/*
private float sqr(float val){
return val * val;
}
*/
private Mat getQueryDescriptors() {
Mat img = getQueryImg();
MatOfKeyPoint keypoints = new MatOfKeyPoint();
Mat descriptors = new Mat();
Feature2D detector = createClassInstance(XFEATURES2D+"SURF", DEFAULT_FACTORY, null, null);
Feature2D extractor = createClassInstance(XFEATURES2D+"SURF", DEFAULT_FACTORY, null, null);
setProperty(detector, "hessianThreshold", "double", 8000);
setProperty(detector, "nOctaves", "int", 3);
setProperty(detector, "nOctaveLayers", "int", 4);
setProperty(detector, "upright", "boolean", false);
detector.detect(img, keypoints);
extractor.compute(img, keypoints, descriptors);
return descriptors;
}
private Mat getQueryImg() {
Mat cross = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Imgproc.line(cross, new Point(30, matSize / 2), new Point(matSize - 31, matSize / 2), new Scalar(100), 3);
Imgproc.line(cross, new Point(matSize / 2, 30), new Point(matSize / 2, matSize - 31), new Scalar(100), 3);
return cross;
}
private Mat getTrainDescriptors() {
Mat img = getTrainImg();
MatOfKeyPoint keypoints = new MatOfKeyPoint(new KeyPoint(50, 50, 16, 0, 20000, 1, -1), new KeyPoint(42, 42, 16, 160, 10000, 1, -1));
Mat descriptors = new Mat();
Feature2D extractor = createClassInstance(XFEATURES2D+"SURF", DEFAULT_FACTORY, null, null);
extractor.compute(img, keypoints, descriptors);
return descriptors;
}
private Mat getTrainImg() {
Mat cross = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Imgproc.line(cross, new Point(20, matSize / 2), new Point(matSize - 21, matSize / 2), new Scalar(100), 2);
Imgproc.line(cross, new Point(matSize / 2, 20), new Point(matSize / 2, matSize - 21), new Scalar(100), 2);
return cross;
}
protected void setUp() throws Exception {
super.setUp();
matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_SL2);
matSize = 100;
truth = new DMatch[] {
new DMatch(0, 0, 0, 0.37933317f),
new DMatch(1, 1, 0, 0.8421953f),
new DMatch(2, 1, 0, 0.0968556f),
new DMatch(3, 1, 0, 0.0855606f),
new DMatch(4, 1, 0, 0.07033461f)
};
}
public void testAdd() {
matcher.add(Arrays.asList(new Mat()));
assertFalse(matcher.empty());
}
public void testClear() {
matcher.add(Arrays.asList(new Mat()));
matcher.clear();
assertTrue(matcher.empty());
}
public void testClone() {
Mat train = new Mat(1, 1, CvType.CV_8U, new Scalar(123));
Mat truth = train.clone();
matcher.add(Arrays.asList(train));
DescriptorMatcher cloned = matcher.clone();
assertNotNull(cloned);
List<Mat> descriptors = cloned.getTrainDescriptors();
assertEquals(1, descriptors.size());
assertMatEqual(truth, descriptors.get(0));
}
public void testCloneBoolean() {
matcher.add(Arrays.asList(new Mat()));
DescriptorMatcher cloned = matcher.clone(true);
assertNotNull(cloned);
assertTrue(cloned.empty());
}
public void testCreate() {
assertNotNull(matcher);
}
public void testEmpty() {
assertTrue(matcher.empty());
}
public void testGetTrainDescriptors() {
Mat train = new Mat(1, 1, CvType.CV_8U, new Scalar(123));
Mat truth = train.clone();
matcher.add(Arrays.asList(train));
List<Mat> descriptors = matcher.getTrainDescriptors();
assertEquals(1, descriptors.size());
assertMatEqual(truth, descriptors.get(0));
}
public void testIsMaskSupported() {
assertTrue(matcher.isMaskSupported());
}
public void testKnnMatchMatListOfListOfDMatchInt() {
fail("Not yet implemented");
}
public void testKnnMatchMatListOfListOfDMatchIntListOfMat() {
fail("Not yet implemented");
}
public void testKnnMatchMatListOfListOfDMatchIntListOfMatBoolean() {
fail("Not yet implemented");
}
public void testKnnMatchMatMatListOfListOfDMatchInt() {
fail("Not yet implemented");
}
public void testKnnMatchMatMatListOfListOfDMatchIntMat() {
fail("Not yet implemented");
}
public void testKnnMatchMatMatListOfListOfDMatchIntMatBoolean() {
fail("Not yet implemented");
}
public void testMatchMatListOfDMatch() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
MatOfDMatch matches = new MatOfDMatch();
matcher.add(Arrays.asList(train));
matcher.match(query, matches);
OpenCVTestRunner.Log(matches);
assertArrayDMatchEquals(truth, matches.toArray(), EPS);
}
public void testMatchMatListOfDMatchListOfMat() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
Mat mask = getMaskImg();
MatOfDMatch matches = new MatOfDMatch();
matcher.add(Arrays.asList(train));
matcher.match(query, matches, Arrays.asList(mask));
assertListDMatchEquals(Arrays.asList(truth[0], truth[1]), matches.toList(), EPS);
}
public void testMatchMatMatListOfDMatch() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
MatOfDMatch matches = new MatOfDMatch();
matcher.match(query, train, matches);
assertArrayDMatchEquals(truth, matches.toArray(), EPS);
// OpenCVTestRunner.Log("matches found: " + matches.size());
// for (DMatch m : matches)
// OpenCVTestRunner.Log(m.toString());
}
public void testMatchMatMatListOfDMatchMat() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
Mat mask = getMaskImg();
MatOfDMatch matches = new MatOfDMatch();
matcher.match(query, train, matches, mask);
assertListDMatchEquals(Arrays.asList(truth[0], truth[1]), matches.toList(), EPS);
}
public void testRadiusMatchMatListOfListOfDMatchFloat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatListOfListOfDMatchFloatListOfMat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatListOfListOfDMatchFloatListOfMatBoolean() {
fail("Not yet implemented");
}
public void testRadiusMatchMatMatListOfListOfDMatchFloat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatMatListOfListOfDMatchFloatMat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatMatListOfListOfDMatchFloatMatBoolean() {
fail("Not yet implemented");
}
public void testRead() {
String filename = OpenCVTestRunner.getTempFileName("yml");
writeFile(filename, "%YAML:1.0\n---\n");
matcher.read(filename);
assertTrue(true);// BruteforceMatcher has no settings
}
public void testTrain() {
matcher.train();// BruteforceMatcher does not need to train
}
public void testWrite() {
String filename = OpenCVTestRunner.getTempFileName("yml");
matcher.write(filename);
String truth = "%YAML:1.0\n---\n";
assertEquals(truth, readFile(filename));
}
}


@@ -0,0 +1,39 @@
package org.opencv.test.features2d;
import org.opencv.test.OpenCVTestCase;
public class DENSEFeatureDetectorTest extends OpenCVTestCase {
public void testCreate() {
fail("Not yet implemented");
}
public void testDetectListOfMatListOfListOfKeyPoint() {
fail("Not yet implemented");
}
public void testDetectListOfMatListOfListOfKeyPointListOfMat() {
fail("Not yet implemented");
}
public void testDetectMatListOfKeyPoint() {
fail("Not yet implemented");
}
public void testDetectMatListOfKeyPointMat() {
fail("Not yet implemented");
}
public void testEmpty() {
fail("Not yet implemented");
}
public void testRead() {
fail("Not yet implemented");
}
public void testWrite() {
fail("Not yet implemented");
}
}


@@ -0,0 +1,150 @@
package org.opencv.test.features2d;
import java.util.Arrays;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.Feature2D;
import org.opencv.features2d.FastFeatureDetector;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
import org.opencv.imgproc.Imgproc;
public class FASTFeatureDetectorTest extends OpenCVTestCase {
Feature2D detector;
KeyPoint[] truth;
private Mat getMaskImg() {
Mat mask = new Mat(100, 100, CvType.CV_8U, new Scalar(255));
Mat right = mask.submat(0, 100, 50, 100);
right.setTo(new Scalar(0));
return mask;
}
private Mat getTestImg() {
Mat img = new Mat(100, 100, CvType.CV_8U, new Scalar(255));
Imgproc.line(img, new Point(30, 30), new Point(70, 70), new Scalar(0), 8);
return img;
}
@Override
protected void setUp() throws Exception {
super.setUp();
detector = FastFeatureDetector.create();
truth = new KeyPoint[] { new KeyPoint(32, 27, 7, -1, 254, 0, -1), new KeyPoint(27, 32, 7, -1, 254, 0, -1), new KeyPoint(73, 68, 7, -1, 254, 0, -1),
new KeyPoint(68, 73, 7, -1, 254, 0, -1) };
}
public void testCreate() {
assertNotNull(detector);
}
public void testDetectListOfMatListOfListOfKeyPoint() {
fail("Not yet implemented");
}
public void testDetectListOfMatListOfListOfKeyPointListOfMat() {
fail("Not yet implemented");
}
public void testDetectMatListOfKeyPoint() {
Mat img = getTestImg();
MatOfKeyPoint keypoints = new MatOfKeyPoint();
detector.detect(img, keypoints);
assertListKeyPointEquals(Arrays.asList(truth), keypoints.toList(), EPS);
// OpenCVTestRunner.Log("points found: " + keypoints.size());
// for (KeyPoint kp : keypoints)
// OpenCVTestRunner.Log(kp.toString());
}
public void testDetectMatListOfKeyPointMat() {
Mat img = getTestImg();
Mat mask = getMaskImg();
MatOfKeyPoint keypoints = new MatOfKeyPoint();
detector.detect(img, keypoints, mask);
assertListKeyPointEquals(Arrays.asList(truth[0], truth[1]), keypoints.toList(), EPS);
}
public void testEmpty() {
// assertFalse(detector.empty());
fail("Not yet implemented"); //FAST does not override empty() method
}
public void testRead() {
String filename = OpenCVTestRunner.getTempFileName("yml");
writeFile(filename, "%YAML:1.0\n---\nthreshold: 130\nnonmaxSuppression: 1\n");
detector.read(filename);
MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
detector.detect(grayChess, keypoints1);
writeFile(filename, "%YAML:1.0\n---\nthreshold: 150\nnonmaxSuppression: 1\n");
detector.read(filename);
MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
detector.detect(grayChess, keypoints2);
assertTrue(keypoints2.total() <= keypoints1.total());
}
public void testReadYml() {
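// note: XML content is written to a .yml-named file; FileStorage is assumed to detect the actual format from the file content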
String filename = OpenCVTestRunner.getTempFileName("yml");
writeFile(filename,
"<?xml version=\"1.0\"?>\n<opencv_storage>\n<threshold>130</threshold>\n<nonmaxSuppression>1</nonmaxSuppression>\n</opencv_storage>\n");
detector.read(filename);
MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
detector.detect(grayChess, keypoints1);
writeFile(filename,
"<?xml version=\"1.0\"?>\n<opencv_storage>\n<threshold>150</threshold>\n<nonmaxSuppression>1</nonmaxSuppression>\n</opencv_storage>\n");
detector.read(filename);
MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
detector.detect(grayChess, keypoints2);
assertTrue(keypoints2.total() <= keypoints1.total());
}
public void testWrite() {
String filename = OpenCVTestRunner.getTempFileName("xml");
detector.write(filename);
// String truth = "<?xml version=\"1.0\"?>\n<opencv_storage>\n<name>Feature2D.FAST</name>\n<nonmaxSuppression>1</nonmaxSuppression>\n<threshold>10</threshold>\n<type>2</type>\n</opencv_storage>\n";
String truth = "<?xml version=\"1.0\"?>\n<opencv_storage>\n</opencv_storage>\n";
String data = readFile(filename);
//Log.d("qqq", "\"" + data + "\"");
assertEquals(truth, data);
}
public void testWriteYml() {
String filename = OpenCVTestRunner.getTempFileName("yml");
detector.write(filename);
// String truth = "%YAML:1.0\n---\nname: \"Feature2D.FAST\"\nnonmaxSuppression: 1\nthreshold: 10\ntype: 2\n";
String truth = "%YAML:1.0\n---\n";
String data = readFile(filename);
//Log.d("qqq", "\"" + data + "\"");
assertEquals(truth, data);
}
}

View File

@@ -0,0 +1,172 @@
package org.opencv.test.features2d;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.opencv.calib3d.Calib3d;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfInt;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.Range;
import org.opencv.core.Scalar;
import org.opencv.core.DMatch;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.Features2d;
import org.opencv.core.KeyPoint;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
import org.opencv.features2d.Feature2D;
public class Features2dTest extends OpenCVTestCase {
public void testDrawKeypointsMatListOfKeyPointMat() {
fail("Not yet implemented");
}
public void testDrawKeypointsMatListOfKeyPointMatScalar() {
fail("Not yet implemented");
}
public void testDrawKeypointsMatListOfKeyPointMatScalarInt() {
fail("Not yet implemented");
}
public void testDrawMatches2MatListOfKeyPointMatListOfKeyPointListOfListOfDMatchMat() {
fail("Not yet implemented");
}
public void testDrawMatches2MatListOfKeyPointMatListOfKeyPointListOfListOfDMatchMatScalar() {
fail("Not yet implemented");
}
public void testDrawMatches2MatListOfKeyPointMatListOfKeyPointListOfListOfDMatchMatScalarScalar() {
fail("Not yet implemented");
}
public void testDrawMatches2MatListOfKeyPointMatListOfKeyPointListOfListOfDMatchMatScalarScalarListOfListOfByte() {
fail("Not yet implemented");
}
public void testDrawMatches2MatListOfKeyPointMatListOfKeyPointListOfListOfDMatchMatScalarScalarListOfListOfByteInt() {
fail("Not yet implemented");
}
public void testDrawMatchesMatListOfKeyPointMatListOfKeyPointListOfDMatchMat() {
fail("Not yet implemented");
}
public void testDrawMatchesMatListOfKeyPointMatListOfKeyPointListOfDMatchMatScalar() {
fail("Not yet implemented");
}
public void testDrawMatchesMatListOfKeyPointMatListOfKeyPointListOfDMatchMatScalarScalar() {
fail("Not yet implemented");
}
public void testDrawMatchesMatListOfKeyPointMatListOfKeyPointListOfDMatchMatScalarScalarListOfByte() {
fail("Not yet implemented");
}
public void testDrawMatchesMatListOfKeyPointMatListOfKeyPointListOfDMatchMatScalarScalarListOfByteInt() {
fail("Not yet implemented");
}
public void testPTOD()
{
String detectorCfg = "%YAML:1.0\n---\nhessianThreshold: 4000.\noctaves: 3\noctaveLayers: 4\nupright: 0\n";
String extractorCfg = "%YAML:1.0\n---\nnOctaves: 4\nnOctaveLayers: 2\nextended: 0\nupright: 0\n";
Feature2D detector = createClassInstance(XFEATURES2D+"SURF", DEFAULT_FACTORY, null, null);
Feature2D extractor = createClassInstance(XFEATURES2D+"SURF", DEFAULT_FACTORY, null, null);
DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE);
String detectorCfgFile = OpenCVTestRunner.getTempFileName("yml");
writeFile(detectorCfgFile, detectorCfg);
detector.read(detectorCfgFile);
String extractorCfgFile = OpenCVTestRunner.getTempFileName("yml");
writeFile(extractorCfgFile, extractorCfg);
extractor.read(extractorCfgFile);
Mat imgTrain = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH, Imgcodecs.IMREAD_GRAYSCALE);
Mat imgQuery = imgTrain.submat(new Range(0, imgTrain.rows() - 100), Range.all());
MatOfKeyPoint trainKeypoints = new MatOfKeyPoint();
MatOfKeyPoint queryKeypoints = new MatOfKeyPoint();
detector.detect(imgTrain, trainKeypoints);
detector.detect(imgQuery, queryKeypoints);
// OpenCVTestRunner.Log("Keypoints found: " + trainKeypoints.size() +
// ":" + queryKeypoints.size());
Mat trainDescriptors = new Mat();
Mat queryDescriptors = new Mat();
extractor.compute(imgTrain, trainKeypoints, trainDescriptors);
extractor.compute(imgQuery, queryKeypoints, queryDescriptors);
MatOfDMatch matches = new MatOfDMatch();
matcher.add(Arrays.asList(trainDescriptors));
matcher.match(queryDescriptors, matches);
// OpenCVTestRunner.Log("Matches found: " + matches.size());
DMatch adm[] = matches.toArray();
List<Point> lp1 = new ArrayList<Point>(adm.length);
List<Point> lp2 = new ArrayList<Point>(adm.length);
KeyPoint tkp[] = trainKeypoints.toArray();
KeyPoint qkp[] = queryKeypoints.toArray();
for (int i = 0; i < adm.length; i++) {
DMatch dm = adm[i];
lp1.add(tkp[dm.trainIdx].pt);
lp2.add(qkp[dm.queryIdx].pt);
}
MatOfPoint2f points1 = new MatOfPoint2f(lp1.toArray(new Point[0]));
MatOfPoint2f points2 = new MatOfPoint2f(lp2.toArray(new Point[0]));
Mat hmg = Calib3d.findHomography(points1, points2, Calib3d.RANSAC, 3);
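// imgQuery is a same-origin crop of imgTrain, so the recovered homography should be the identity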
assertMatEqual(Mat.eye(3, 3, CvType.CV_64F), hmg, EPS);
Mat outimg = new Mat();
Features2d.drawMatches(imgQuery, queryKeypoints, imgTrain, trainKeypoints, matches, outimg);
String outputPath = OpenCVTestRunner.getOutputFileName("PTODresult.png");
Imgcodecs.imwrite(outputPath, outimg);
// OpenCVTestRunner.Log("Output image is saved to: " + outputPath);
}
public void testDrawKeypoints()
{
Mat outImg = Mat.ones(11, 11, CvType.CV_8U);
MatOfKeyPoint kps = new MatOfKeyPoint(new KeyPoint(5, 5, 1)); // x, y, size
Features2d.drawKeypoints(new Mat(), kps, outImg, new Scalar(255),
Features2d.DrawMatchesFlags_DRAW_OVER_OUTIMG);
Mat ref = new MatOfInt(new int[] {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 15, 54, 15, 1, 1, 1, 1,
1, 1, 1, 76, 217, 217, 221, 81, 1, 1, 1,
1, 1, 100, 224, 111, 57, 115, 225, 101, 1, 1,
1, 44, 215, 100, 1, 1, 1, 101, 214, 44, 1,
1, 54, 212, 57, 1, 1, 1, 55, 212, 55, 1,
1, 40, 215, 104, 1, 1, 1, 105, 215, 40, 1,
1, 1, 102, 221, 111, 55, 115, 222, 103, 1, 1,
1, 1, 1, 76, 218, 217, 220, 81, 1, 1, 1,
1, 1, 1, 1, 15, 55, 15, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
}).reshape(1, 11);
ref.convertTo(ref, CvType.CV_8U);
assertMatEqual(ref, outImg);
}
}

View File

@@ -0,0 +1,389 @@
package org.opencv.test.features2d;
import java.util.Arrays;
import java.util.List;
import org.opencv.core.CvException;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.DMatch;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FlannBasedMatcher;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
import org.opencv.imgproc.Imgproc;
import org.opencv.features2d.Feature2D;
public class FlannBasedDescriptorMatcherTest extends OpenCVTestCase {
static final String xmlParamsDefault = "<?xml version=\"1.0\"?>\n"
+ "<opencv_storage>\n"
+ "<format>3</format>\n"
+ "<indexParams>\n"
+ " <_>\n"
+ " <name>algorithm</name>\n"
+ " <type>9</type>\n" // FLANN_INDEX_TYPE_ALGORITHM
+ " <value>1</value></_>\n"
+ " <_>\n"
+ " <name>trees</name>\n"
+ " <type>4</type>\n"
+ " <value>4</value></_></indexParams>\n"
+ "<searchParams>\n"
+ " <_>\n"
+ " <name>checks</name>\n"
+ " <type>4</type>\n"
+ " <value>32</value></_>\n"
+ " <_>\n"
+ " <name>eps</name>\n"
+ " <type>5</type>\n"
+ " <value>0.</value></_>\n"
+ " <_>\n"
+ " <name>explore_all_trees</name>\n"
+ " <type>8</type>\n"
+ " <value>0</value></_>\n"
+ " <_>\n"
+ " <name>sorted</name>\n"
+ " <type>8</type>\n" // FLANN_INDEX_TYPE_BOOL
+ " <value>1</value></_></searchParams>\n"
+ "</opencv_storage>\n";
static final String ymlParamsDefault = "%YAML:1.0\n---\n"
+ "format: 3\n"
+ "indexParams:\n"
+ " -\n"
+ " name: algorithm\n"
+ " type: 9\n" // FLANN_INDEX_TYPE_ALGORITHM
+ " value: 1\n"
+ " -\n"
+ " name: trees\n"
+ " type: 4\n"
+ " value: 4\n"
+ "searchParams:\n"
+ " -\n"
+ " name: checks\n"
+ " type: 4\n"
+ " value: 32\n"
+ " -\n"
+ " name: eps\n"
+ " type: 5\n"
+ " value: 0.\n"
+ " -\n"
+ " name: explore_all_trees\n"
+ " type: 8\n"
+ " value: 0\n"
+ " -\n"
+ " name: sorted\n"
+ " type: 8\n" // FLANN_INDEX_TYPE_BOOL
+ " value: 1\n";
static final String ymlParamsModified = "%YAML:1.0\n---\n"
+ "format: 3\n"
+ "indexParams:\n"
+ " -\n"
+ " name: algorithm\n"
+ " type: 9\n" // FLANN_INDEX_TYPE_ALGORITHM
+ " value: 6\n"// this line is changed!
+ " -\n"
+ " name: trees\n"
+ " type: 4\n"
+ " value: 4\n"
+ "searchParams:\n"
+ " -\n"
+ " name: checks\n"
+ " type: 4\n"
+ " value: 32\n"
+ " -\n"
+ " name: eps\n"
+ " type: 5\n"
+ " value: 4.\n"// this line is changed!
+ " -\n"
+ " name: explore_all_trees\n"
+ " type: 8\n"
+ " value: 1\n"// this line is changed!
+ " -\n"
+ " name: sorted\n"
+ " type: 8\n" // FLANN_INDEX_TYPE_BOOL
+ " value: 1\n";
DescriptorMatcher matcher;
int matSize;
DMatch[] truth;
private Mat getMaskImg() {
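// DescriptorMatcher masks are queryDescriptors.rows x trainDescriptors.rows; a non-zero entry permits matching query i to train j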
return new Mat(5, 2, CvType.CV_8U, new Scalar(0)) {
{
put(0, 0, 1, 1, 1, 1);
}
};
}
private Mat getQueryDescriptors() {
Mat img = getQueryImg();
MatOfKeyPoint keypoints = new MatOfKeyPoint();
Mat descriptors = new Mat();
Feature2D detector = createClassInstance(XFEATURES2D+"SURF", DEFAULT_FACTORY, null, null);
Feature2D extractor = createClassInstance(XFEATURES2D+"SURF", DEFAULT_FACTORY, null, null);
setProperty(detector, "hessianThreshold", "double", 8000);
setProperty(detector, "nOctaves", "int", 3);
setProperty(detector, "upright", "boolean", false);
detector.detect(img, keypoints);
extractor.compute(img, keypoints, descriptors);
return descriptors;
}
private Mat getQueryImg() {
Mat cross = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Imgproc.line(cross, new Point(30, matSize / 2), new Point(matSize - 31, matSize / 2), new Scalar(100), 3);
Imgproc.line(cross, new Point(matSize / 2, 30), new Point(matSize / 2, matSize - 31), new Scalar(100), 3);
return cross;
}
private Mat getTrainDescriptors() {
Mat img = getTrainImg();
MatOfKeyPoint keypoints = new MatOfKeyPoint(new KeyPoint(50, 50, 16, 0, 20000, 1, -1), new KeyPoint(42, 42, 16, 160, 10000, 1, -1));
Mat descriptors = new Mat();
Feature2D extractor = createClassInstance(XFEATURES2D+"SURF", DEFAULT_FACTORY, null, null);
extractor.compute(img, keypoints, descriptors);
return descriptors;
}
private Mat getTrainImg() {
Mat cross = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Imgproc.line(cross, new Point(20, matSize / 2), new Point(matSize - 21, matSize / 2), new Scalar(100), 2);
Imgproc.line(cross, new Point(matSize / 2, 20), new Point(matSize / 2, matSize - 21), new Scalar(100), 2);
return cross;
}
protected void setUp() throws Exception {
super.setUp();
matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
matSize = 100;
truth = new DMatch[] {
new DMatch(0, 0, 0, 0.6159003f),
new DMatch(1, 1, 0, 0.9177120f),
new DMatch(2, 1, 0, 0.3112163f),
new DMatch(3, 1, 0, 0.2925075f),
new DMatch(4, 1, 0, 0.26520672f)
};
}
// https://github.com/opencv/opencv/issues/11268
public void testConstructor()
{
FlannBasedMatcher self_created_matcher = new FlannBasedMatcher();
Mat train = new Mat(1, 1, CvType.CV_8U, new Scalar(123));
self_created_matcher.add(Arrays.asList(train));
assertTrue(!self_created_matcher.empty());
}
public void testAdd() {
matcher.add(Arrays.asList(new Mat()));
assertFalse(matcher.empty());
}
public void testClear() {
matcher.add(Arrays.asList(new Mat()));
matcher.clear();
assertTrue(matcher.empty());
}
public void testClone() {
Mat train = new Mat(1, 1, CvType.CV_8U, new Scalar(123));
matcher.add(Arrays.asList(train));
try {
matcher.clone();
fail("Expected CvException (CV_StsNotImplemented)");
} catch (CvException cverr) {
// expected
}
}
public void testCloneBoolean() {
matcher.add(Arrays.asList(new Mat()));
DescriptorMatcher cloned = matcher.clone(true);
assertNotNull(cloned);
assertTrue(cloned.empty());
}
public void testCreate() {
assertNotNull(matcher);
}
public void testEmpty() {
assertTrue(matcher.empty());
}
public void testGetTrainDescriptors() {
Mat train = new Mat(1, 1, CvType.CV_8U, new Scalar(123));
Mat truth = train.clone();
matcher.add(Arrays.asList(train));
List<Mat> descriptors = matcher.getTrainDescriptors();
assertEquals(1, descriptors.size());
assertMatEqual(truth, descriptors.get(0));
}
public void testIsMaskSupported() {
assertFalse(matcher.isMaskSupported());
}
public void testKnnMatchMatListOfListOfDMatchInt() {
fail("Not yet implemented");
}
public void testKnnMatchMatListOfListOfDMatchIntListOfMat() {
fail("Not yet implemented");
}
public void testKnnMatchMatListOfListOfDMatchIntListOfMatBoolean() {
fail("Not yet implemented");
}
public void testKnnMatchMatMatListOfListOfDMatchInt() {
fail("Not yet implemented");
}
public void testKnnMatchMatMatListOfListOfDMatchIntMat() {
fail("Not yet implemented");
}
public void testKnnMatchMatMatListOfListOfDMatchIntMatBoolean() {
fail("Not yet implemented");
}
public void testMatchMatListOfDMatch() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
MatOfDMatch matches = new MatOfDMatch();
matcher.add(Arrays.asList(train));
matcher.train();
matcher.match(query, matches);
assertArrayDMatchEquals(truth, matches.toArray(), EPS);
}
public void testMatchMatListOfDMatchListOfMat() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
Mat mask = getMaskImg();
MatOfDMatch matches = new MatOfDMatch();
matcher.add(Arrays.asList(train));
matcher.train();
matcher.match(query, matches, Arrays.asList(mask));
assertArrayDMatchEquals(truth, matches.toArray(), EPS);
}
public void testMatchMatMatListOfDMatch() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
MatOfDMatch matches = new MatOfDMatch();
matcher.match(query, train, matches);
assertArrayDMatchEquals(truth, matches.toArray(), EPS);
// OpenCVTestRunner.Log(matches.toString());
// OpenCVTestRunner.Log(matches);
}
public void testMatchMatMatListOfDMatchMat() {
Mat train = getTrainDescriptors();
Mat query = getQueryDescriptors();
Mat mask = getMaskImg();
MatOfDMatch matches = new MatOfDMatch();
matcher.match(query, train, matches, mask);
assertListDMatchEquals(Arrays.asList(truth), matches.toList(), EPS);
}
public void testRadiusMatchMatListOfListOfDMatchFloat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatListOfListOfDMatchFloatListOfMat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatListOfListOfDMatchFloatListOfMatBoolean() {
fail("Not yet implemented");
}
public void testRadiusMatchMatMatListOfListOfDMatchFloat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatMatListOfListOfDMatchFloatMat() {
fail("Not yet implemented");
}
public void testRadiusMatchMatMatListOfListOfDMatchFloatMatBoolean() {
fail("Not yet implemented");
}
public void testRead() {
String filenameR = OpenCVTestRunner.getTempFileName("yml");
String filenameW = OpenCVTestRunner.getTempFileName("yml");
writeFile(filenameR, ymlParamsModified);
matcher.read(filenameR);
matcher.write(filenameW);
assertEquals(ymlParamsModified, readFile(filenameW));
}
public void testTrain() {
Mat train = getTrainDescriptors();
matcher.add(Arrays.asList(train));
matcher.train();
}
public void testTrainNoData() {
try {
matcher.train();
fail("Expected CvException - FlannBasedMatcher::train should fail on empty train set");
} catch (CvException cverr) {
// expected
}
}
public void testWrite() {
String filename = OpenCVTestRunner.getTempFileName("xml");
matcher.write(filename);
assertEquals(xmlParamsDefault, readFile(filename));
}
public void testWriteYml() {
String filename = OpenCVTestRunner.getTempFileName("yml");
matcher.write(filename);
assertEquals(ymlParamsDefault, readFile(filename));
}
}

View File

@@ -0,0 +1,39 @@
package org.opencv.test.features2d;
import org.opencv.test.OpenCVTestCase;
public class GFTTFeatureDetectorTest extends OpenCVTestCase {
public void testCreate() {
fail("Not yet implemented");
}
public void testDetectListOfMatListOfListOfKeyPoint() {
fail("Not yet implemented");
}
public void testDetectListOfMatListOfListOfKeyPointListOfMat() {
fail("Not yet implemented");
}
public void testDetectMatListOfKeyPoint() {
fail("Not yet implemented");
}
public void testDetectMatListOfKeyPointMat() {
fail("Not yet implemented");
}
public void testEmpty() {
fail("Not yet implemented");
}
public void testRead() {
fail("Not yet implemented");
}
public void testWrite() {
fail("Not yet implemented");
}
}

View File

@@ -0,0 +1,39 @@
package org.opencv.test.features2d;
import org.opencv.test.OpenCVTestCase;
public class HARRISFeatureDetectorTest extends OpenCVTestCase {
public void testCreate() {
fail("Not yet implemented");
}
public void testDetectListOfMatListOfListOfKeyPoint() {
fail("Not yet implemented");
}
public void testDetectListOfMatListOfListOfKeyPointListOfMat() {
fail("Not yet implemented");
}
public void testDetectMatListOfKeyPoint() {
fail("Not yet implemented");
}
public void testDetectMatListOfKeyPointMat() {
fail("Not yet implemented");
}
public void testEmpty() {
fail("Not yet implemented");
}
public void testRead() {
fail("Not yet implemented");
}
public void testWrite() {
fail("Not yet implemented");
}
}

View File

@@ -0,0 +1,39 @@
package org.opencv.test.features2d;
import org.opencv.test.OpenCVTestCase;
public class MSERFeatureDetectorTest extends OpenCVTestCase {
public void testCreate() {
fail("Not yet implemented");
}
public void testDetectListOfMatListOfListOfKeyPoint() {
fail("Not yet implemented");
}
public void testDetectListOfMatListOfListOfKeyPointListOfMat() {
fail("Not yet implemented");
}
public void testDetectMatListOfKeyPoint() {
fail("Not yet implemented");
}
public void testDetectMatListOfKeyPointMat() {
fail("Not yet implemented");
}
public void testEmpty() {
fail("Not yet implemented");
}
public void testRead() {
fail("Not yet implemented");
}
public void testWrite() {
fail("Not yet implemented");
}
}

View File

@@ -0,0 +1,124 @@
package org.opencv.test.features2d;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.KeyPoint;
import org.opencv.features2d.ORB;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
import org.opencv.imgproc.Imgproc;
public class ORBDescriptorExtractorTest extends OpenCVTestCase {
ORB extractor;
int matSize;
public static void assertDescriptorsClose(Mat expected, Mat actual, int allowedDistance) {
double distance = Core.norm(expected, actual, Core.NORM_HAMMING);
assertTrue("expected:<" + allowedDistance + "> but was:<" + distance + ">", distance <= allowedDistance);
}
private Mat getTestImg() {
Mat cross = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Imgproc.line(cross, new Point(20, matSize / 2), new Point(matSize - 21, matSize / 2), new Scalar(100), 2);
Imgproc.line(cross, new Point(matSize / 2, 20), new Point(matSize / 2, matSize - 21), new Scalar(100), 2);
return cross;
}
@Override
protected void setUp() throws Exception {
super.setUp();
extractor = ORB.create();
matSize = 100;
}
public void testComputeListOfMatListOfListOfKeyPointListOfMat() {
fail("Not yet implemented");
}
public void testComputeMatListOfKeyPointMat() {
KeyPoint point = new KeyPoint(55.775577545166016f, 44.224422454833984f, 16, 9.754629f, 8617.863f, 1, -1);
MatOfKeyPoint keypoints = new MatOfKeyPoint(point);
Mat img = getTestImg();
Mat descriptors = new Mat();
extractor.compute(img, keypoints, descriptors);
Mat truth = new Mat(1, 32, CvType.CV_8UC1) {
{
put(0, 0,
6, 74, 6, 129, 2, 130, 56, 0, 44, 132, 66, 165, 172, 6, 3, 72, 102, 61, 171, 214, 0, 144, 65, 232, 4, 32, 138, 131, 4, 21, 37, 217);
}
};
assertDescriptorsClose(truth, descriptors, 1);
}
public void testCreate() {
assertNotNull(extractor);
}
public void testDescriptorSize() {
assertEquals(32, extractor.descriptorSize());
}
public void testDescriptorType() {
assertEquals(CvType.CV_8U, extractor.descriptorType());
}
public void testEmpty() {
// assertFalse(extractor.empty());
fail("Not yet implemented"); // ORB does not override empty() method
}
public void testRead() {
KeyPoint point = new KeyPoint(55.775577545166016f, 44.224422454833984f, 16, 9.754629f, 8617.863f, 1, -1);
MatOfKeyPoint keypoints = new MatOfKeyPoint(point);
Mat img = getTestImg();
Mat descriptors = new Mat();
// String filename = OpenCVTestRunner.getTempFileName("yml");
// writeFile(filename, "%YAML:1.0\n---\nscaleFactor: 1.1\nnLevels: 3\nfirstLevel: 0\nedgeThreshold: 31\npatchSize: 31\n");
// extractor.read(filename);
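// parameter reading is disabled above; instead, a detector with the same parameters is constructed directly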
extractor = ORB.create(500, 1.1f, 3, 31, 0, 2, ORB.HARRIS_SCORE, 31, 20);
extractor.compute(img, keypoints, descriptors);
Mat truth = new Mat(1, 32, CvType.CV_8UC1) {
{
put(0, 0,
6, 10, 22, 5, 2, 130, 56, 0, 44, 164, 66, 165, 140, 6, 1, 72, 38, 61, 163, 210, 0, 208, 1, 104, 4, 32, 74, 131, 0, 37, 37, 67);
}
};
assertDescriptorsClose(truth, descriptors, 1);
}
public void testWrite() {
String filename = OpenCVTestRunner.getTempFileName("xml");
extractor.write(filename);
// String truth = "<?xml version=\"1.0\"?>\n<opencv_storage>\n<name>Feature2D.ORB</name>\n<WTA_K>2</WTA_K>\n<edgeThreshold>31</edgeThreshold>\n<firstLevel>0</firstLevel>\n<nFeatures>500</nFeatures>\n<nLevels>8</nLevels>\n<patchSize>31</patchSize>\n<scaleFactor>1.2000000476837158e+00</scaleFactor>\n<scoreType>0</scoreType>\n</opencv_storage>\n";
String truth = "<?xml version=\"1.0\"?>\n<opencv_storage>\n</opencv_storage>\n";
String actual = readFile(filename);
actual = actual.replaceAll("e\\+000", "e+00"); // NOTE: workaround for different platforms double representation
assertEquals(truth, actual);
}
public void testWriteYml() {
String filename = OpenCVTestRunner.getTempFileName("yml");
extractor.write(filename);
// String truth = "%YAML:1.0\n---\nname: \"Feature2D.ORB\"\nWTA_K: 2\nedgeThreshold: 31\nfirstLevel: 0\nnFeatures: 500\nnLevels: 8\npatchSize: 31\nscaleFactor: 1.2000000476837158e+00\nscoreType: 0\n";
String truth = "%YAML:1.0\n---\n";
String actual = readFile(filename);
actual = actual.replaceAll("e\\+000", "e+00"); // NOTE: workaround for different platforms double representation
assertEquals(truth, actual);
}
}

View File

@@ -0,0 +1,39 @@
package org.opencv.test.features2d;
import org.opencv.test.OpenCVTestCase;
public class ORBFeatureDetectorTest extends OpenCVTestCase {
public void testCreate() {
fail("Not yet implemented");
}
public void testDetectListOfMatListOfListOfKeyPoint() {
fail("Not yet implemented");
}
public void testDetectListOfMatListOfListOfKeyPointListOfMat() {
fail("Not yet implemented");
}
public void testDetectMatListOfKeyPoint() {
fail("Not yet implemented");
}
public void testDetectMatListOfKeyPointMat() {
fail("Not yet implemented");
}
public void testEmpty() {
fail("Not yet implemented");
}
public void testRead() {
fail("Not yet implemented");
}
public void testWrite() {
fail("Not yet implemented");
}
}

View File

@@ -0,0 +1,110 @@
package org.opencv.test.features2d;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.KeyPoint;
import org.opencv.features2d.SIFT;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
import org.opencv.imgproc.Imgproc;
import org.opencv.features2d.Feature2D;
public class SIFTDescriptorExtractorTest extends OpenCVTestCase {
Feature2D extractor;
KeyPoint keypoint;
int matSize;
Mat truth;
private Mat getTestImg() {
Mat cross = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Imgproc.line(cross, new Point(20, matSize / 2), new Point(matSize - 21, matSize / 2), new Scalar(100), 2);
Imgproc.line(cross, new Point(matSize / 2, 20), new Point(matSize / 2, matSize - 21), new Scalar(100), 2);
return cross;
}
@Override
protected void setUp() throws Exception {
super.setUp();
extractor = SIFT.create();
keypoint = new KeyPoint(55.775577545166016f, 44.224422454833984f, 16, 9.754629f, 8617.863f, 1, -1);
matSize = 100;
truth = new Mat(1, 128, CvType.CV_32FC1) {
{
put(0, 0,
0, 0, 0, 1, 3, 0, 0, 0, 15, 23, 22, 20, 24, 2, 0, 0, 7, 8, 2, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 27, 16, 13, 2, 0, 0, 117,
86, 79, 68, 117, 42, 5, 5, 79, 60, 117, 25, 9, 2, 28, 19, 11, 13,
20, 2, 0, 0, 5, 8, 0, 0, 76, 58, 34, 31, 97, 16, 95, 49, 117, 92,
117, 112, 117, 76, 117, 54, 117, 25, 29, 22, 117, 117, 16, 11, 14,
1, 0, 0, 22, 26, 0, 0, 0, 0, 1, 4, 15, 2, 47, 8, 0, 0, 82, 56, 31,
17, 81, 12, 0, 0, 26, 23, 18, 23, 0, 0, 0, 0, 0, 0, 0, 0
);
}
};
}
public void testComputeListOfMatListOfListOfKeyPointListOfMat() {
fail("Not yet implemented");
}
public void testComputeMatListOfKeyPointMat() {
MatOfKeyPoint keypoints = new MatOfKeyPoint(keypoint);
Mat img = getTestImg();
Mat descriptors = new Mat();
extractor.compute(img, keypoints, descriptors);
assertMatEqual(truth, descriptors, EPS);
}
public void testCreate() {
assertNotNull(extractor);
}
public void testDescriptorSize() {
assertEquals(128, extractor.descriptorSize());
}
public void testDescriptorType() {
assertEquals(CvType.CV_32F, extractor.descriptorType());
}
public void testEmpty() {
// assertFalse(extractor.empty());
fail("Not yet implemented"); //SIFT does not override empty() method
}
public void testRead() {
fail("Not yet implemented");
}
public void testWrite() {
String filename = OpenCVTestRunner.getTempFileName("xml");
extractor.write(filename);
// String truth = "<?xml version=\"1.0\"?>\n<opencv_storage>\n<name>Feature2D.SIFT</name>\n<contrastThreshold>4.0000000000000001e-02</contrastThreshold>\n<edgeThreshold>10.</edgeThreshold>\n<nFeatures>0</nFeatures>\n<nOctaveLayers>3</nOctaveLayers>\n<sigma>1.6000000000000001e+00</sigma>\n</opencv_storage>\n";
String truth = "<?xml version=\"1.0\"?>\n<opencv_storage>\n</opencv_storage>\n";
String actual = readFile(filename);
actual = actual.replaceAll("e([+-])0(\\d\\d)", "e$1$2"); // NOTE: workaround for different platforms double representation
assertEquals(truth, actual);
}
public void testWriteYml() {
String filename = OpenCVTestRunner.getTempFileName("yml");
extractor.write(filename);
// String truth = "%YAML:1.0\n---\nname: \"Feature2D.SIFT\"\ncontrastThreshold: 4.0000000000000001e-02\nedgeThreshold: 10.\nnFeatures: 0\nnOctaveLayers: 3\nsigma: 1.6000000000000001e+00\n";
String truth = "%YAML:1.0\n---\n";
String actual = readFile(filename);
actual = actual.replaceAll("e([+-])0(\\d\\d)", "e$1$2"); // NOTE: workaround for different platforms double representation
assertEquals(truth, actual);
}
}

View File

@@ -0,0 +1,39 @@
package org.opencv.test.features2d;
import org.opencv.test.OpenCVTestCase;
public class SIFTFeatureDetectorTest extends OpenCVTestCase {
public void testCreate() {
fail("Not yet implemented");
}
public void testDetectListOfMatListOfListOfKeyPoint() {
fail("Not yet implemented");
}
public void testDetectListOfMatListOfListOfKeyPointListOfMat() {
fail("Not yet implemented");
}
public void testDetectMatListOfKeyPoint() {
fail("Not yet implemented");
}
public void testDetectMatListOfKeyPointMat() {
fail("Not yet implemented");
}
public void testEmpty() {
fail("Not yet implemented");
}
public void testRead() {
fail("Not yet implemented");
}
public void testWrite() {
fail("Not yet implemented");
}
}

View File

@@ -0,0 +1,118 @@
package org.opencv.test.features2d;
import java.util.Arrays;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
import org.opencv.imgproc.Imgproc;
import org.opencv.features2d.Feature2D;
import org.opencv.features2d.SimpleBlobDetector;
public class SIMPLEBLOBFeatureDetectorTest extends OpenCVTestCase {
Feature2D detector;
int matSize;
KeyPoint[] truth;
private Mat getMaskImg() {
Mat mask = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Mat right = mask.submat(0, matSize, matSize / 2, matSize);
right.setTo(new Scalar(0));
return mask;
}
private Mat getTestImg() {
int center = matSize / 2;
int offset = 40;
Mat img = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
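// five filled circles of decreasing radius; each is expected to be detected as one blob whose keypoint size approximates the circle diameter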
Imgproc.circle(img, new Point(center - offset, center), 24, new Scalar(0), -1);
Imgproc.circle(img, new Point(center + offset, center), 20, new Scalar(50), -1);
Imgproc.circle(img, new Point(center, center - offset), 18, new Scalar(100), -1);
Imgproc.circle(img, new Point(center, center + offset), 14, new Scalar(150), -1);
Imgproc.circle(img, new Point(center, center), 10, new Scalar(200), -1);
return img;
}
@Override
protected void setUp() throws Exception {
super.setUp();
detector = SimpleBlobDetector.create();
matSize = 200;
truth = new KeyPoint[] {
new KeyPoint( 140, 100, 41.036568f, -1, 0, 0, -1),
new KeyPoint( 60, 100, 48.538486f, -1, 0, 0, -1),
new KeyPoint(100, 60, 36.769554f, -1, 0, 0, -1),
new KeyPoint(100, 140, 28.635643f, -1, 0, 0, -1),
new KeyPoint(100, 100, 20.880613f, -1, 0, 0, -1)
};
}
public void testCreate() {
assertNotNull(detector);
}
public void testDetectListOfMatListOfListOfKeyPoint() {
fail("Not yet implemented");
}
public void testDetectListOfMatListOfListOfKeyPointListOfMat() {
fail("Not yet implemented");
}
public void testDetectMatListOfKeyPoint() {
Mat img = getTestImg();
MatOfKeyPoint keypoints = new MatOfKeyPoint();
detector.detect(img, keypoints);
assertListKeyPointEquals(Arrays.asList(truth), keypoints.toList(), EPS);
}
public void testDetectMatListOfKeyPointMat() {
Mat img = getTestImg();
Mat mask = getMaskImg();
MatOfKeyPoint keypoints = new MatOfKeyPoint();
detector.detect(img, keypoints, mask);
assertListKeyPointEquals(Arrays.asList(truth[1]), keypoints.toList(), EPS);
}
public void testEmpty() {
// assertFalse(detector.empty());
fail("Not yet implemented");
}
public void testRead() {
Mat img = getTestImg();
MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
detector.detect(img, keypoints1);
String filename = OpenCVTestRunner.getTempFileName("yml");
writeFile(filename, "%YAML:1.0\nthresholdStep: 10\nminThreshold: 50\nmaxThreshold: 220\nminRepeatability: 2\nfilterByArea: true\nminArea: 800\nmaxArea: 5000\n");
detector.read(filename);
MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
detector.detect(img, keypoints2);
assertTrue(keypoints2.total() <= keypoints1.total());
}
public void testWrite() {
String filename = OpenCVTestRunner.getTempFileName("xml");
detector.write(filename);
String truth = "<?xml version=\"1.0\"?>\n<opencv_storage>\n<format>3</format>\n<thresholdStep>10.</thresholdStep>\n<minThreshold>50.</minThreshold>\n<maxThreshold>220.</maxThreshold>\n<minRepeatability>2</minRepeatability>\n<minDistBetweenBlobs>10.</minDistBetweenBlobs>\n<filterByColor>1</filterByColor>\n<blobColor>0</blobColor>\n<filterByArea>1</filterByArea>\n<minArea>25.</minArea>\n<maxArea>5000.</maxArea>\n<filterByCircularity>0</filterByCircularity>\n<minCircularity>8.0000001192092896e-01</minCircularity>\n<maxCircularity>3.4028234663852886e+38</maxCircularity>\n<filterByInertia>1</filterByInertia>\n<minInertiaRatio>1.0000000149011612e-01</minInertiaRatio>\n<maxInertiaRatio>3.4028234663852886e+38</maxInertiaRatio>\n<filterByConvexity>1</filterByConvexity>\n<minConvexity>9.4999998807907104e-01</minConvexity>\n<maxConvexity>3.4028234663852886e+38</maxConvexity>\n</opencv_storage>\n";
assertEquals(truth, readFile(filename));
}
}

View File

@@ -0,0 +1,133 @@
package org.opencv.test.features2d;
import java.util.Arrays;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
import org.opencv.imgproc.Imgproc;
import org.opencv.features2d.Feature2D;
public class STARFeatureDetectorTest extends OpenCVTestCase {
Feature2D detector;
int matSize;
KeyPoint[] truth;
private Mat getMaskImg() {
Mat mask = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Mat right = mask.submat(0, matSize, matSize / 2, matSize);
right.setTo(new Scalar(0));
return mask;
}
private Mat getTestImg() {
Scalar color = new Scalar(0);
int center = matSize / 2;
int radius = 6;
int offset = 40;
Mat img = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Imgproc.circle(img, new Point(center - offset, center), radius, color, -1);
Imgproc.circle(img, new Point(center + offset, center), radius, color, -1);
Imgproc.circle(img, new Point(center, center - offset), radius, color, -1);
Imgproc.circle(img, new Point(center, center + offset), radius, color, -1);
Imgproc.circle(img, new Point(center, center), radius, color, -1);
return img;
}
protected void setUp() throws Exception {
super.setUp();
detector = createClassInstance(XFEATURES2D+"StarDetector", DEFAULT_FACTORY, null, null);
matSize = 200;
truth = new KeyPoint[] {
new KeyPoint( 95, 80, 22, -1, 31.5957f, 0, -1),
new KeyPoint(105, 80, 22, -1, 31.5957f, 0, -1),
new KeyPoint( 80, 95, 22, -1, 31.5957f, 0, -1),
new KeyPoint(120, 95, 22, -1, 31.5957f, 0, -1),
new KeyPoint(100, 100, 8, -1, 30.f, 0, -1),
new KeyPoint( 80, 105, 22, -1, 31.5957f, 0, -1),
new KeyPoint(120, 105, 22, -1, 31.5957f, 0, -1),
new KeyPoint( 95, 120, 22, -1, 31.5957f, 0, -1),
new KeyPoint(105, 120, 22, -1, 31.5957f, 0, -1)
};
}
public void testCreate() {
assertNotNull(detector);
}
public void testDetectListOfMatListOfListOfKeyPoint() {
fail("Not yet implemented");
}
public void testDetectListOfMatListOfListOfKeyPointListOfMat() {
fail("Not yet implemented");
}
public void testDetectMatListOfKeyPoint() {
Mat img = getTestImg();
MatOfKeyPoint keypoints = new MatOfKeyPoint();
detector.detect(img, keypoints);
assertListKeyPointEquals(Arrays.asList(truth), keypoints.toList(), EPS);
}
public void testDetectMatListOfKeyPointMat() {
Mat img = getTestImg();
Mat mask = getMaskImg();
MatOfKeyPoint keypoints = new MatOfKeyPoint();
detector.detect(img, keypoints, mask);
assertListKeyPointEquals(Arrays.asList(truth[0], truth[2], truth[5], truth[7]), keypoints.toList(), EPS);
}
public void testEmpty() {
// assertFalse(detector.empty());
fail("Not yet implemented");
}
public void testRead() {
Mat img = getTestImg();
MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
detector.detect(img, keypoints1);
String filename = OpenCVTestRunner.getTempFileName("yml");
writeFile(filename, "%YAML:1.0\n---\nmaxSize: 45\nresponseThreshold: 150\nlineThresholdProjected: 10\nlineThresholdBinarized: 8\nsuppressNonmaxSize: 5\n");
detector.read(filename);
MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
detector.detect(img, keypoints2);
assertTrue(keypoints2.total() <= keypoints1.total());
}
public void testWrite() {
String filename = OpenCVTestRunner.getTempFileName("xml");
detector.write(filename);
// String truth = "<?xml version=\"1.0\"?>\n<opencv_storage>\n<name>Feature2D.STAR</name>\n<lineThresholdBinarized>8</lineThresholdBinarized>\n<lineThresholdProjected>10</lineThresholdProjected>\n<maxSize>45</maxSize>\n<responseThreshold>30</responseThreshold>\n<suppressNonmaxSize>5</suppressNonmaxSize>\n</opencv_storage>\n";
String truth = "<?xml version=\"1.0\"?>\n<opencv_storage>\n</opencv_storage>\n";
assertEquals(truth, readFile(filename));
}
public void testWriteYml() {
String filename = OpenCVTestRunner.getTempFileName("yml");
detector.write(filename);
// String truth = "%YAML:1.0\n---\nname: \"Feature2D.STAR\"\nlineThresholdBinarized: 8\nlineThresholdProjected: 10\nmaxSize: 45\nresponseThreshold: 30\nsuppressNonmaxSize: 5\n";
String truth = "%YAML:1.0\n---\n";
assertEquals(truth, readFile(filename));
}
}

View File

@@ -0,0 +1,119 @@
package org.opencv.test.features2d;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
import org.opencv.imgproc.Imgproc;
import org.opencv.features2d.Feature2D;
public class SURFDescriptorExtractorTest extends OpenCVTestCase {
Feature2D extractor;
int matSize;
private Mat getTestImg() {
Mat cross = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Imgproc.line(cross, new Point(20, matSize / 2), new Point(matSize - 21, matSize / 2), new Scalar(100), 2);
Imgproc.line(cross, new Point(matSize / 2, 20), new Point(matSize / 2, matSize - 21), new Scalar(100), 2);
return cross;
}
@Override
protected void setUp() throws Exception {
super.setUp();
Class[] cParams = {double.class, int.class, int.class, boolean.class, boolean.class};
Object[] oValues = {100, 2, 4, true, false};
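// maps to SURF::create(hessianThreshold=100, nOctaves=2, nOctaveLayers=4, extended=true, upright=false); extended=true yields 128-dim descriptors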
extractor = createClassInstance(XFEATURES2D+"SURF", DEFAULT_FACTORY, cParams, oValues);
matSize = 100;
}
public void testComputeListOfMatListOfListOfKeyPointListOfMat() {
fail("Not yet implemented");
}
public void testComputeMatListOfKeyPointMat() {
KeyPoint point = new KeyPoint(55.775577545166016f, 44.224422454833984f, 16, 9.754629f, 8617.863f, 1, -1);
MatOfKeyPoint keypoints = new MatOfKeyPoint(point);
Mat img = getTestImg();
Mat descriptors = new Mat();
extractor.compute(img, keypoints, descriptors);
Mat truth = new Mat(1, 128, CvType.CV_32FC1) {
{
put(0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0.058821894, 0.058821894, -0.045962855, 0.046261817, 0.0085156476,
0.0085754395, -0.0064509804, 0.0064509804, 0.00044069235, 0.00044069235, 0, 0, 0.00025723741,
0.00025723741, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00025723741, 0.00025723741, -0.00044069235,
0.00044069235, 0, 0, 0.36278215, 0.36278215, -0.24688604, 0.26173124, 0.052068226, 0.052662034,
-0.032815345, 0.032815345, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.0064523756,
0.0064523756, 0.0082002236, 0.0088908644, -0.059001274, 0.059001274, 0.045789491, 0.04648013,
0.11961588, 0.22789426, -0.01322381, 0.18291828, -0.14042182, 0.23973691, 0.073782086, 0.23769434,
-0.027880307, 0.027880307, 0.049587864, 0.049587864, -0.33991757, 0.33991757, 0.21437603, 0.21437603,
-0.0020763327, 0.0020763327, 0.006245892, 0.006245892, -0.04067041, 0.04067041, 0.019361559,
0.019361559, 0, 0, -0.0035977389, 0.0035977389, 0, 0, -0.00099993451, 0.00099993451, 0.040670406,
0.040670406, -0.019361559, 0.019361559, 0.006245892, 0.006245892, -0.0020763327, 0.0020763327,
-0.00034532088, 0.00034532088, 0, 0, 0, 0, 0.00034532088, 0.00034532088, -0.00099993451,
0.00099993451, 0, 0, 0, 0, 0.0035977389, 0.0035977389
);
}
};
assertMatEqual(truth, descriptors, EPS);
}
public void testCreate() {
assertNotNull(extractor);
}
public void testDescriptorSize() {
assertEquals(128, extractor.descriptorSize());
}
public void testDescriptorType() {
assertEquals(CvType.CV_32F, extractor.descriptorType());
}
public void testEmpty() {
// assertFalse(extractor.empty());
fail("Not yet implemented");
}
public void testRead() {
String filename = OpenCVTestRunner.getTempFileName("yml");
writeFile(filename, "%YAML:1.0\n---\nnOctaves: 4\nnOctaveLayers: 2\nextended: 1\nupright: 0\n");
extractor.read(filename);
assertEquals(128, extractor.descriptorSize());
}
public void testWrite() {
String filename = OpenCVTestRunner.getTempFileName("xml");
extractor.write(filename);
// String truth = "<?xml version=\"1.0\"?>\n<opencv_storage>\n<name>Feature2D.SURF</name>\n<extended>1</extended>\n<hessianThreshold>100.</hessianThreshold>\n<nOctaveLayers>2</nOctaveLayers>\n<nOctaves>4</nOctaves>\n<upright>0</upright>\n</opencv_storage>\n";
String truth = "<?xml version=\"1.0\"?>\n<opencv_storage>\n</opencv_storage>\n";
assertEquals(truth, readFile(filename));
}
public void testWriteYml() {
String filename = OpenCVTestRunner.getTempFileName("yml");
extractor.write(filename);
// String truth = "%YAML:1.0\n---\nname: \"Feature2D.SURF\"\nextended: 1\nhessianThreshold: 100.\nnOctaveLayers: 2\nnOctaves: 4\nupright: 0\n";
String truth = "%YAML:1.0\n---\n";
assertEquals(truth, readFile(filename));
}
}

View File

@@ -0,0 +1,175 @@
package org.opencv.test.features2d;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
import org.opencv.imgproc.Imgproc;
import org.opencv.features2d.Feature2D;
public class SURFFeatureDetectorTest extends OpenCVTestCase {
Feature2D detector;
int matSize;
KeyPoint[] truth;
private Mat getMaskImg() {
Mat mask = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Mat right = mask.submat(0, matSize, matSize / 2, matSize);
right.setTo(new Scalar(0));
return mask;
}
private Mat getTestImg() {
Mat cross = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Imgproc.line(cross, new Point(20, matSize / 2), new Point(matSize - 21, matSize / 2), new Scalar(100), 2);
Imgproc.line(cross, new Point(matSize / 2, 20), new Point(matSize / 2, matSize - 21), new Scalar(100), 2);
return cross;
}
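// detection order is implementation-defined, so keypoints are sorted by angle before comparison with the fixed truth array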
private void order(List<KeyPoint> points) {
Collections.sort(points, new Comparator<KeyPoint>() {
public int compare(KeyPoint p1, KeyPoint p2) {
if (p1.angle < p2.angle)
return -1;
if (p1.angle > p2.angle)
return 1;
return 0;
}
});
}
@Override
protected void setUp() throws Exception {
super.setUp();
detector = createClassInstance(XFEATURES2D+"SURF", DEFAULT_FACTORY, null, null);
matSize = 100;
truth = new KeyPoint[] {
new KeyPoint(55.775578f, 55.775578f, 16, 80.245735f, 8617.8633f, 0, -1),
new KeyPoint(44.224422f, 55.775578f, 16, 170.24574f, 8617.8633f, 0, -1),
new KeyPoint(44.224422f, 44.224422f, 16, 260.24573f, 8617.8633f, 0, -1),
new KeyPoint(55.775578f, 44.224422f, 16, 350.24573f, 8617.8633f, 0, -1)
};
}
public void testCreate() {
assertNotNull(detector);
}
public void testDetectListOfMatListOfListOfKeyPoint() {
setProperty(detector, "hessianThreshold", "double", 8000);
setProperty(detector, "nOctaves", "int", 3);
setProperty(detector, "nOctaveLayers", "int", 4);
setProperty(detector, "upright", "boolean", false);
List<MatOfKeyPoint> keypoints = new ArrayList<MatOfKeyPoint>();
Mat cross = getTestImg();
List<Mat> crosses = new ArrayList<Mat>(3);
crosses.add(cross);
crosses.add(cross);
crosses.add(cross);
detector.detect(crosses, keypoints);
assertEquals(3, keypoints.size());
for (MatOfKeyPoint mkp : keypoints) {
List<KeyPoint> lkp = mkp.toList();
order(lkp);
assertListKeyPointEquals(Arrays.asList(truth), lkp, EPS);
}
}
public void testDetectListOfMatListOfListOfKeyPointListOfMat() {
fail("Not yet implemented");
}
public void testDetectMatListOfKeyPoint() {
setProperty(detector, "hessianThreshold", "double", 8000);
setProperty(detector, "nOctaves", "int", 3);
setProperty(detector, "nOctaveLayers", "int", 4);
setProperty(detector, "upright", "boolean", false);
MatOfKeyPoint keypoints = new MatOfKeyPoint();
Mat cross = getTestImg();
detector.detect(cross, keypoints);
List<KeyPoint> lkp = keypoints.toList();
order(lkp);
assertListKeyPointEquals(Arrays.asList(truth), lkp, EPS);
}
public void testDetectMatListOfKeyPointMat() {
setProperty(detector, "hessianThreshold", "double", 8000);
setProperty(detector, "nOctaves", "int", 3);
setProperty(detector, "nOctaveLayers", "int", 4);
setProperty(detector, "upright", "boolean", false);
Mat img = getTestImg();
Mat mask = getMaskImg();
MatOfKeyPoint keypoints = new MatOfKeyPoint();
detector.detect(img, keypoints, mask);
List<KeyPoint> lkp = keypoints.toList();
order(lkp);
assertListKeyPointEquals(Arrays.asList(truth[1], truth[2]), lkp, EPS);
}
public void testEmpty() {
// assertFalse(detector.empty());
fail("Not yet implemented");
}
public void testRead() {
Mat cross = getTestImg();
MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
detector.detect(cross, keypoints1);
String filename = OpenCVTestRunner.getTempFileName("yml");
writeFile(filename, "%YAML:1.0\n---\nhessianThreshold: 8000.\noctaves: 3\noctaveLayers: 4\nupright: 0\n");
detector.read(filename);
MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
detector.detect(cross, keypoints2);
assertTrue(keypoints2.total() <= keypoints1.total());
}
public void testWrite() {
String filename = OpenCVTestRunner.getTempFileName("xml");
detector.write(filename);
// String truth = "<?xml version=\"1.0\"?>\n<opencv_storage>\n<name>Feature2D.SURF</name>\n<extended>0</extended>\n<hessianThreshold>100.</hessianThreshold>\n<nOctaveLayers>3</nOctaveLayers>\n<nOctaves>4</nOctaves>\n<upright>0</upright>\n</opencv_storage>\n";
String truth = "<?xml version=\"1.0\"?>\n<opencv_storage>\n</opencv_storage>\n";
assertEquals(truth, readFile(filename));
}
public void testWriteYml() {
String filename = OpenCVTestRunner.getTempFileName("yml");
detector.write(filename);
// String truth = "%YAML:1.0\n---\nname: \"Feature2D.SURF\"\nextended: 0\nhessianThreshold: 100.\nnOctaveLayers: 3\nnOctaves: 4\nupright: 0\n";
String truth = "%YAML:1.0\n---\n";
assertEquals(truth, readFile(filename));
}
}

View File

@@ -0,0 +1,23 @@
{
"enum_fix" : {
"FastFeatureDetector" : { "DetectorType": "FastDetectorType" },
"AgastFeatureDetector" : { "DetectorType": "AgastDetectorType" }
},
"func_arg_fix" : {
"Feature2D": {
"(void)compute:(NSArray<Mat*>*)images keypoints:(NSMutableArray<NSMutableArray<KeyPoint*>*>*)keypoints descriptors:(NSMutableArray<Mat*>*)descriptors" : { "compute" : {"name" : "compute2"} },
"(void)detect:(NSArray<Mat*>*)images keypoints:(NSMutableArray<NSMutableArray<KeyPoint*>*>*)keypoints masks:(NSArray<Mat*>*)masks" : { "detect" : {"name" : "detect2"} }
},
"DescriptorMatcher": {
"(DescriptorMatcher*)create:(NSString*)descriptorMatcherType" : { "create" : {"name" : "create2"} }
},
"FlannBasedMatcher": {
"FlannBasedMatcher": { "indexParams" : {"defval" : "cv::makePtr<cv::flann::KDTreeIndexParams>()"}, "searchParams" : {"defval" : "cv::makePtr<cv::flann::SearchParams>()"} }
},
"BFMatcher": {
"BFMatcher" : { "normType" : {"ctype" : "NormTypes"} },
"(BFMatcher*)create:(int)normType crossCheck:(BOOL)crossCheck" : { "create" : {"name" : "createBFMatcher"},
"normType" : {"ctype" : "NormTypes"} }
}
}
}

View File

@@ -0,0 +1,9 @@
#ifdef HAVE_OPENCV_FEATURES2D
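// expose nested enum/param types under flat aliases so the binding generators can refer to them by a single name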
typedef SimpleBlobDetector::Params SimpleBlobDetector_Params;
typedef AKAZE::DescriptorType AKAZE_DescriptorType;
typedef AgastFeatureDetector::DetectorType AgastFeatureDetector_DetectorType;
typedef FastFeatureDetector::DetectorType FastFeatureDetector_DetectorType;
typedef DescriptorMatcher::MatcherType DescriptorMatcher_MatcherType;
typedef KAZE::DiffusivityType KAZE_DiffusivityType;
typedef ORB::ScoreType ORB_ScoreType;
#endif

View File

@@ -0,0 +1,164 @@
#!/usr/bin/env python
'''
Feature homography
==================
Example of using features2d framework for interactive video homography matching.
AKAZE features and a FLANN matcher are used. The actual tracking is implemented by
the PlaneTracker class (from plane_tracker.py, inlined below).
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
# local modules
from tst_scene_render import TestSceneRender
def intersectionRate(s1, s2):
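# Dice overlap of two quads: 2 * |intersection| / (|s1| + |s2|); equals 1.0 for identical regions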
x1, y1, x2, y2 = s1
s1 = np.array([[x1, y1], [x2,y1], [x2, y2], [x1, y2]])
area, _intersection = cv.intersectConvexConvex(s1, np.array(s2))
return 2 * area / (cv.contourArea(s1) + cv.contourArea(np.array(s2)))
from tests_common import NewOpenCVTests
class feature_homography_test(NewOpenCVTests):
render = None
tracker = None
framesCounter = 0
frame = None
def test_feature_homography(self):
self.render = TestSceneRender(self.get_sample('samples/data/graf1.png'),
self.get_sample('samples/data/box.png'), noise = 0.5, speed = 0.5)
self.frame = self.render.getNextFrame()
self.tracker = PlaneTracker()
self.tracker.clear()
self.tracker.add_target(self.frame, self.render.getCurrentRect())
while self.framesCounter < 100:
self.framesCounter += 1
tracked = self.tracker.track(self.frame)
if len(tracked) > 0:
tracked = tracked[0]
self.assertGreater(intersectionRate(self.render.getCurrentRect(), np.int32(tracked.quad)), 0.6)
else:
self.fail('Tracking error')
self.frame = self.render.getNextFrame()
# built-in modules
from collections import namedtuple
FLANN_INDEX_KDTREE = 1
FLANN_INDEX_LSH = 6
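# LSH index parameters for binary descriptors; the commented alternatives are slower but more accurate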
flann_params = dict(algorithm = FLANN_INDEX_LSH,
table_number = 6, # 12
key_size = 12, # 20
multi_probe_level = 1) #2
MIN_MATCH_COUNT = 10
'''
image - image to track
rect - tracked rectangle (x1, y1, x2, y2)
keypoints - keypoints detected inside rect
descrs - their descriptors
data - some user-provided data
'''
PlanarTarget = namedtuple('PlanarTarget', 'image, rect, keypoints, descrs, data')
'''
target - reference to PlanarTarget
p0 - matched points coords in target image
p1 - matched points coords in input frame
H - homography matrix from p0 to p1
quad - target boundary quad in input frame
'''
TrackedTarget = namedtuple('TrackedTarget', 'target, p0, p1, H, quad')
class PlaneTracker:
def __init__(self):
self.detector = cv.AKAZE_create(threshold = 0.003)
self.matcher = cv.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
self.targets = []
self.frame_points = []
def add_target(self, image, rect, data=None):
'''Add a new tracking target.'''
x0, y0, x1, y1 = rect
raw_points, raw_descrs = self.detect_features(image)
points, descs = [], []
for kp, desc in zip(raw_points, raw_descrs):
x, y = kp.pt
if x0 <= x <= x1 and y0 <= y <= y1:
points.append(kp)
descs.append(desc)
descs = np.uint8(descs)
self.matcher.add([descs])
target = PlanarTarget(image = image, rect=rect, keypoints = points, descrs=descs, data=data)
self.targets.append(target)
def clear(self):
'''Remove all targets'''
self.targets = []
self.matcher.clear()
def track(self, frame):
'''Returns a list of detected TrackedTarget objects'''
self.frame_points, frame_descrs = self.detect_features(frame)
if len(self.frame_points) < MIN_MATCH_COUNT:
return []
matches = self.matcher.knnMatch(frame_descrs, k = 2)
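# Lowe's ratio test: keep a match only when the best distance is clearly below the second best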
matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * 0.75]
if len(matches) < MIN_MATCH_COUNT:
return []
matches_by_id = [[] for _ in xrange(len(self.targets))]
for m in matches:
matches_by_id[m.imgIdx].append(m)
tracked = []
for imgIdx, matches in enumerate(matches_by_id):
if len(matches) < MIN_MATCH_COUNT:
continue
target = self.targets[imgIdx]
p0 = [target.keypoints[m.trainIdx].pt for m in matches]
p1 = [self.frame_points[m.queryIdx].pt for m in matches]
p0, p1 = np.float32((p0, p1))
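# robustly estimate the homography with RANSAC; status marks the inlier correspondences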
H, status = cv.findHomography(p0, p1, cv.RANSAC, 3.0)
status = status.ravel() != 0
if status.sum() < MIN_MATCH_COUNT:
continue
p0, p1 = p0[status], p1[status]
x0, y0, x1, y1 = target.rect
quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
quad = cv.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)
track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad)
tracked.append(track)
tracked.sort(key = lambda t: len(t.p0), reverse=True)
return tracked
def detect_features(self, frame):
'''detect_features(self, frame) -> keypoints, descrs'''
keypoints, descrs = self.detector.detectAndCompute(frame, None)
if descrs is None: # detectAndCompute returns descs=None if no keypoints found
descrs = []
return keypoints, descrs
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

View File

@@ -0,0 +1,129 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Fangfang Bai, fangfang@multicorewareinc.com
// Jin Ma, jin@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors as is and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../perf_precomp.hpp"
#include "opencv2/ts/ocl_perf.hpp"
#ifdef HAVE_OPENCL
namespace opencv_test {
namespace ocl {
//////////////////// BruteForceMatch /////////////////
typedef Size_MatType BruteForceMatcherFixture;
OCL_PERF_TEST_P(BruteForceMatcherFixture, Match, ::testing::Combine(OCL_PERF_ENUM(OCL_SIZE_1, OCL_SIZE_2, OCL_SIZE_3), OCL_PERF_ENUM((MatType)CV_32FC1) ) )
{
const Size_MatType_t params = GetParam();
const Size srcSize = get<0>(params);
const int type = get<1>(params);
checkDeviceMaxMemoryAllocSize(srcSize, type);
vector<DMatch> matches;
UMat uquery(srcSize, type), utrain(srcSize, type);
declare.in(uquery, utrain, WARMUP_RNG);
BFMatcher matcher(NORM_L2);
OCL_TEST_CYCLE()
matcher.match(uquery, utrain, matches);
SANITY_CHECK_MATCHES(matches, 1e-3);
}
OCL_PERF_TEST_P(BruteForceMatcherFixture, KnnMatch, ::testing::Combine(OCL_PERF_ENUM(OCL_SIZE_1, OCL_SIZE_2, OCL_SIZE_3), OCL_PERF_ENUM((MatType)CV_32FC1) ) )
{
const Size_MatType_t params = GetParam();
const Size srcSize = get<0>(params);
const int type = get<1>(params);
checkDeviceMaxMemoryAllocSize(srcSize, type);
vector< vector<DMatch> > matches;
UMat uquery(srcSize, type), utrain(srcSize, type);
declare.in(uquery, utrain, WARMUP_RNG);
BFMatcher matcher(NORM_L2);
OCL_TEST_CYCLE()
matcher.knnMatch(uquery, utrain, matches, 2);
vector<DMatch> & matches0 = matches[0], & matches1 = matches[1];
SANITY_CHECK_MATCHES(matches0, 1e-3);
SANITY_CHECK_MATCHES(matches1, 1e-3);
}
OCL_PERF_TEST_P(BruteForceMatcherFixture, RadiusMatch, ::testing::Combine(OCL_PERF_ENUM(OCL_SIZE_1, OCL_SIZE_2, OCL_SIZE_3), OCL_PERF_ENUM((MatType)CV_32FC1) ) )
{
const Size_MatType_t params = GetParam();
const Size srcSize = get<0>(params);
const int type = get<1>(params);
checkDeviceMaxMemoryAllocSize(srcSize, type);
vector< vector<DMatch> > matches;
UMat uquery(srcSize, type), utrain(srcSize, type);
declare.in(uquery, utrain, WARMUP_RNG);
BFMatcher matcher(NORM_L2);
OCL_TEST_CYCLE()
matcher.radiusMatch(uquery, utrain, matches, 2.0f);
vector<DMatch> & matches0 = matches[0], & matches1 = matches[1];
SANITY_CHECK_MATCHES(matches0, 1e-3);
SANITY_CHECK_MATCHES(matches1, 1e-3);
}
} // ocl
} // opencv_test
#endif // HAVE_OPENCL
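Outside the perf harness, the three matcher calls exercised above look like this; a minimal sketch assuming random CV_32F descriptors (sizes and values are illustrative):
#include <opencv2/features2d.hpp>
#include <vector>
using namespace cv;
int main()
{
    Mat query(500, 64, CV_32F), train(500, 64, CV_32F);
    randu(query, 0, 1);
    randu(train, 0, 1);
    BFMatcher matcher(NORM_L2);
    std::vector<DMatch> best;
    matcher.match(query, train, best);               // best train row per query row
    std::vector< std::vector<DMatch> > knn;
    matcher.knnMatch(query, train, knn, 2);          // two nearest neighbours each
    std::vector< std::vector<DMatch> > within;
    matcher.radiusMatch(query, train, within, 2.0f); // all matches within distance 2.0
    return 0;
}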

View File

@@ -0,0 +1,81 @@
#include "../perf_precomp.hpp"
#include "opencv2/ts/ocl_perf.hpp"
#include "../perf_feature2d.hpp"
#ifdef HAVE_OPENCL
namespace opencv_test {
namespace ocl {
OCL_PERF_TEST_P(feature2d, detect, testing::Combine(Feature2DType::all(), TEST_IMAGES))
{
Ptr<Feature2D> detector = getFeature2D(get<0>(GetParam()));
std::string filename = getDataPath(get<1>(GetParam()));
Mat mimg = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(mimg.empty());
ASSERT_TRUE(detector);
UMat img, mask;
mimg.copyTo(img);
declare.in(img);
vector<KeyPoint> points;
OCL_TEST_CYCLE() detector->detect(img, points, mask);
EXPECT_GT(points.size(), 20u);
SANITY_CHECK_NOTHING();
}
OCL_PERF_TEST_P(feature2d, extract, testing::Combine(testing::Values(DETECTORS_EXTRACTORS), TEST_IMAGES))
{
Ptr<Feature2D> detector = AKAZE::create();
Ptr<Feature2D> extractor = getFeature2D(get<0>(GetParam()));
std::string filename = getDataPath(get<1>(GetParam()));
Mat mimg = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(mimg.empty());
ASSERT_TRUE(extractor);
UMat img, mask;
mimg.copyTo(img);
declare.in(img);
vector<KeyPoint> points;
detector->detect(img, points, mask);
EXPECT_GT(points.size(), 20u);
UMat descriptors;
OCL_TEST_CYCLE() extractor->compute(img, points, descriptors);
EXPECT_EQ((size_t)descriptors.rows, points.size());
SANITY_CHECK_NOTHING();
}
OCL_PERF_TEST_P(feature2d, detectAndExtract, testing::Combine(testing::Values(DETECTORS_EXTRACTORS), TEST_IMAGES))
{
Ptr<Feature2D> detector = getFeature2D(get<0>(GetParam()));
std::string filename = getDataPath(get<1>(GetParam()));
Mat mimg = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(mimg.empty());
ASSERT_TRUE(detector);
UMat img, mask;
mimg.copyTo(img);
declare.in(img);
vector<KeyPoint> points;
UMat descriptors;
OCL_TEST_CYCLE() detector->detectAndCompute(img, mask, points, descriptors, false);
EXPECT_GT(points.size(), 20u);
EXPECT_EQ((size_t)descriptors.rows, points.size());
SANITY_CHECK_NOTHING();
}
} // ocl
} // opencv_test
#endif // HAVE_OPENCL
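The UMat inputs above are what route these tests through OpenCL. As a standalone sketch of the same transparent-API pattern (file name and detector choice are illustrative):
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>
using namespace cv;
int main()
{
    Mat m = imread("scene.png", IMREAD_GRAYSCALE); // hypothetical input image
    if (m.empty())
        return 1;
    UMat img;
    m.copyTo(img); // UMat arguments let OpenCV dispatch OpenCL kernels when available
    Ptr<Feature2D> orb = ORB::create();
    std::vector<KeyPoint> kps;
    UMat desc;
    orb->detectAndCompute(img, noArray(), kps, desc);
    return 0;
}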

View File

@@ -0,0 +1,167 @@
#include "perf_precomp.hpp"
namespace opencv_test
{
using namespace perf;
CV_ENUM(NormType, NORM_L1, NORM_L2, NORM_L2SQR, NORM_HAMMING, NORM_HAMMING2)
typedef tuple<NormType, MatType, bool> Norm_Destination_CrossCheck_t;
typedef perf::TestBaseWithParam<Norm_Destination_CrossCheck_t> Norm_Destination_CrossCheck;
typedef tuple<NormType, bool> Norm_CrossCheck_t;
typedef perf::TestBaseWithParam<Norm_CrossCheck_t> Norm_CrossCheck;
typedef tuple<MatType, bool> Source_CrossCheck_t;
typedef perf::TestBaseWithParam<Source_CrossCheck_t> Source_CrossCheck;
void generateData( Mat& query, Mat& train, const int sourceType );
PERF_TEST_P(Norm_Destination_CrossCheck, batchDistance_8U,
testing::Combine(testing::Values((int)NORM_L1, (int)NORM_L2SQR),
testing::Values(CV_32S, CV_32F),
testing::Bool()
)
)
{
NormType normType = get<0>(GetParam());
int destinationType = get<1>(GetParam());
bool isCrossCheck = get<2>(GetParam());
int knn = isCrossCheck ? 1 : 0;
Mat queryDescriptors;
Mat trainDescriptors;
Mat dist;
Mat ndix;
generateData(queryDescriptors, trainDescriptors, CV_8U);
TEST_CYCLE()
{
batchDistance(queryDescriptors, trainDescriptors, dist, destinationType, (isCrossCheck) ? ndix : noArray(),
normType, knn, Mat(), 0, isCrossCheck);
}
SANITY_CHECK(dist);
if (isCrossCheck) SANITY_CHECK(ndix);
}
PERF_TEST_P(Norm_CrossCheck, batchDistance_Dest_32S,
testing::Combine(testing::Values((int)NORM_HAMMING, (int)NORM_HAMMING2),
testing::Bool()
)
)
{
NormType normType = get<0>(GetParam());
bool isCrossCheck = get<1>(GetParam());
int knn = isCrossCheck ? 1 : 0;
Mat queryDescriptors;
Mat trainDescriptors;
Mat dist;
Mat ndix;
generateData(queryDescriptors, trainDescriptors, CV_8U);
TEST_CYCLE()
{
batchDistance(queryDescriptors, trainDescriptors, dist, CV_32S, (isCrossCheck) ? ndix : noArray(),
normType, knn, Mat(), 0, isCrossCheck);
}
SANITY_CHECK(dist);
if (isCrossCheck) SANITY_CHECK(ndix);
}
PERF_TEST_P(Source_CrossCheck, batchDistance_L2,
testing::Combine(testing::Values(CV_8U, CV_32F),
testing::Bool()
)
)
{
int sourceType = get<0>(GetParam());
bool isCrossCheck = get<1>(GetParam());
int knn = isCrossCheck ? 1 : 0;
Mat queryDescriptors;
Mat trainDescriptors;
Mat dist;
Mat ndix;
generateData(queryDescriptors, trainDescriptors, sourceType);
declare.time(50);
TEST_CYCLE()
{
batchDistance(queryDescriptors, trainDescriptors, dist, CV_32F, (isCrossCheck) ? ndix : noArray(),
NORM_L2, knn, Mat(), 0, isCrossCheck);
}
SANITY_CHECK(dist);
if (isCrossCheck) SANITY_CHECK(ndix);
}
PERF_TEST_P(Norm_CrossCheck, batchDistance_32F,
testing::Combine(testing::Values((int)NORM_L1, (int)NORM_L2SQR),
testing::Bool()
)
)
{
NormType normType = get<0>(GetParam());
bool isCrossCheck = get<1>(GetParam());
int knn = isCrossCheck ? 1 : 0;
Mat queryDescriptors;
Mat trainDescriptors;
Mat dist;
Mat ndix;
generateData(queryDescriptors, trainDescriptors, CV_32F);
declare.time(100);
TEST_CYCLE()
{
batchDistance(queryDescriptors, trainDescriptors, dist, CV_32F, (isCrossCheck) ? ndix : noArray(),
normType, knn, Mat(), 0, isCrossCheck);
}
SANITY_CHECK(dist, 1e-4);
if (isCrossCheck) SANITY_CHECK(ndix);
}
void generateData( Mat& query, Mat& train, const int sourceType )
{
const int dim = 500;
const int queryDescCount = 300; // must be an even number, because in some cases we split the train data in two
const int countFactor = 4; // do not change it
RNG& rng = theRNG();
// Generate query descriptors randomly.
// Descriptor vector elements are integer values.
Mat buf( queryDescCount, dim, CV_32SC1 );
rng.fill( buf, RNG::UNIFORM, Scalar::all(0), Scalar(3) );
buf.convertTo( query, sourceType );
// Generate train descriptors as follows:
// copy each query descriptor into the train set countFactor times,
// perturbing one element of each copy by an amount that grows in
// ascending order. General boundaries of the perturbation
// are (0.f, 1.f).
train.create( query.rows*countFactor, query.cols, sourceType );
float step = (sourceType == CV_8U ? 256.f : 1.f) / countFactor;
for( int qIdx = 0; qIdx < query.rows; qIdx++ )
{
Mat queryDescriptor = query.row(qIdx);
for( int c = 0; c < countFactor; c++ )
{
int tIdx = qIdx * countFactor + c;
Mat trainDescriptor = train.row(tIdx);
queryDescriptor.copyTo( trainDescriptor );
int elem = rng(dim);
float diff = rng.uniform( step*c, step*(c+1) );
trainDescriptor.col(elem) += diff;
}
}
}
} // namespace
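The batchDistance calls timed above can also be used directly; a sketch with illustrative sizes mirroring the CV_8U/NORM_L1 case (K=0 fills a full distance matrix, K=1 with crosscheck keeps mutual best matches plus their indices):
#include <opencv2/core.hpp>
using namespace cv;
int main()
{
    Mat query(300, 500, CV_8U), train(1200, 500, CV_8U);
    randu(query, 0, 3);
    randu(train, 0, 3);
    Mat dist, nidx;
    batchDistance(query, train, dist, CV_32S, noArray(), NORM_L1, 0, Mat(), 0, false);
    batchDistance(query, train, dist, CV_32S, nidx, NORM_L1, 1, Mat(), 0, true);
    return 0;
}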

View File

@@ -0,0 +1,72 @@
#include "perf_feature2d.hpp"
namespace opencv_test
{
using namespace perf;
PERF_TEST_P(feature2d, detect, testing::Combine(Feature2DType::all(), TEST_IMAGES))
{
Ptr<Feature2D> detector = getFeature2D(get<0>(GetParam()));
std::string filename = getDataPath(get<1>(GetParam()));
Mat img = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
ASSERT_TRUE(detector);
declare.in(img);
Mat mask;
vector<KeyPoint> points;
TEST_CYCLE() detector->detect(img, points, mask);
EXPECT_GT(points.size(), 20u);
SANITY_CHECK_NOTHING();
}
PERF_TEST_P(feature2d, extract, testing::Combine(testing::Values(DETECTORS_EXTRACTORS), TEST_IMAGES))
{
Ptr<Feature2D> detector = AKAZE::create();
Ptr<Feature2D> extractor = getFeature2D(get<0>(GetParam()));
std::string filename = getDataPath(get<1>(GetParam()));
Mat img = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
ASSERT_TRUE(extractor);
declare.in(img);
Mat mask;
vector<KeyPoint> points;
detector->detect(img, points, mask);
EXPECT_GT(points.size(), 20u);
Mat descriptors;
TEST_CYCLE() extractor->compute(img, points, descriptors);
EXPECT_EQ((size_t)descriptors.rows, points.size());
SANITY_CHECK_NOTHING();
}
PERF_TEST_P(feature2d, detectAndExtract, testing::Combine(testing::Values(DETECTORS_EXTRACTORS), TEST_IMAGES))
{
Ptr<Feature2D> detector = getFeature2D(get<0>(GetParam()));
std::string filename = getDataPath(get<1>(GetParam()));
Mat img = imread(filename, IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
ASSERT_TRUE(detector);
declare.in(img);
Mat mask;
vector<KeyPoint> points;
Mat descriptors;
TEST_CYCLE() detector->detectAndCompute(img, mask, points, descriptors, false);
EXPECT_GT(points.size(), 20u);
EXPECT_EQ((size_t)descriptors.rows, points.size());
SANITY_CHECK_NOTHING();
}
} // namespace

View File

@@ -0,0 +1,90 @@
#ifndef __OPENCV_PERF_FEATURE2D_HPP__
#define __OPENCV_PERF_FEATURE2D_HPP__
#include "perf_precomp.hpp"
namespace opencv_test
{
/* Configuration for tests of detectors/descriptors, shared between OCL and CPU tests. */
// detectors/descriptors configurations to test
#define DETECTORS_ONLY \
FAST_DEFAULT, FAST_20_TRUE_TYPE5_8, FAST_20_TRUE_TYPE7_12, FAST_20_TRUE_TYPE9_16, \
FAST_20_FALSE_TYPE5_8, FAST_20_FALSE_TYPE7_12, FAST_20_FALSE_TYPE9_16, \
\
AGAST_DEFAULT, AGAST_5_8, AGAST_7_12d, AGAST_7_12s, AGAST_OAST_9_16, \
\
MSER_DEFAULT
#define DETECTORS_EXTRACTORS \
ORB_DEFAULT, ORB_1500_13_1, \
AKAZE_DEFAULT, AKAZE_DESCRIPTOR_KAZE, \
BRISK_DEFAULT, \
KAZE_DEFAULT, \
SIFT_DEFAULT
#define CV_ENUM_EXPAND(name, ...) CV_ENUM(name, __VA_ARGS__)
enum Feature2DVals { DETECTORS_ONLY, DETECTORS_EXTRACTORS };
CV_ENUM_EXPAND(Feature2DType, DETECTORS_ONLY, DETECTORS_EXTRACTORS)
typedef tuple<Feature2DType, string> Feature2DType_String_t;
typedef perf::TestBaseWithParam<Feature2DType_String_t> feature2d;
#define TEST_IMAGES testing::Values(\
"cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\
"stitching/a3.png", \
"stitching/s2.jpg")
static inline Ptr<Feature2D> getFeature2D(Feature2DType type)
{
switch(type) {
case ORB_DEFAULT:
return ORB::create();
case ORB_1500_13_1:
return ORB::create(1500, 1.3f, 1);
case FAST_DEFAULT:
return FastFeatureDetector::create();
case FAST_20_TRUE_TYPE5_8:
return FastFeatureDetector::create(20, true, FastFeatureDetector::TYPE_5_8);
case FAST_20_TRUE_TYPE7_12:
return FastFeatureDetector::create(20, true, FastFeatureDetector::TYPE_7_12);
case FAST_20_TRUE_TYPE9_16:
return FastFeatureDetector::create(20, true, FastFeatureDetector::TYPE_9_16);
case FAST_20_FALSE_TYPE5_8:
return FastFeatureDetector::create(20, false, FastFeatureDetector::TYPE_5_8);
case FAST_20_FALSE_TYPE7_12:
return FastFeatureDetector::create(20, false, FastFeatureDetector::TYPE_7_12);
case FAST_20_FALSE_TYPE9_16:
return FastFeatureDetector::create(20, false, FastFeatureDetector::TYPE_9_16);
case AGAST_DEFAULT:
return AgastFeatureDetector::create();
case AGAST_5_8:
return AgastFeatureDetector::create(70, true, AgastFeatureDetector::AGAST_5_8);
case AGAST_7_12d:
return AgastFeatureDetector::create(70, true, AgastFeatureDetector::AGAST_7_12d);
case AGAST_7_12s:
return AgastFeatureDetector::create(70, true, AgastFeatureDetector::AGAST_7_12s);
case AGAST_OAST_9_16:
return AgastFeatureDetector::create(70, true, AgastFeatureDetector::OAST_9_16);
case AKAZE_DEFAULT:
return AKAZE::create();
case AKAZE_DESCRIPTOR_KAZE:
return AKAZE::create(AKAZE::DESCRIPTOR_KAZE);
case BRISK_DEFAULT:
return BRISK::create();
case KAZE_DEFAULT:
return KAZE::create();
case MSER_DEFAULT:
return MSER::create();
case SIFT_DEFAULT:
return SIFT::create();
default:
return Ptr<Feature2D>();
}
}
} // namespace
#endif // __OPENCV_PERF_FEATURE2D_HPP__
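A consumer of this header picks a configured algorithm by enum value; a short sketch (assuming the header is on the include path of a test target):
#include "perf_feature2d.hpp" // this header
static cv::Ptr<cv::Feature2D> makeDetector()
{
    cv::Ptr<cv::Feature2D> f = opencv_test::getFeature2D(opencv_test::FAST_DEFAULT);
    CV_Assert(f); // the factory returns an empty Ptr for unhandled enum values
    return f;
}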

View File

@@ -0,0 +1,7 @@
#include "perf_precomp.hpp"
#if defined(HAVE_HPX)
#include <hpx/hpx_main.hpp>
#endif
CV_PERF_TEST_MAIN(features2d)

View File

@@ -0,0 +1,7 @@
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/ts.hpp"
#include "opencv2/features2d.hpp"
#endif

View File

@@ -0,0 +1,358 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// This file is based on code issued with the following license.
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
* Copyright (C) 2008-2013, Willow Garage Inc., all rights reserved.
* Copyright (C) 2013, Evgeny Toropov, all rights reserved.
* Third party copyrights are property of their respective owners.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * The name of the copyright holders may not be used to endorse
* or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
/*
Guoshen Yu, Jean-Michel Morel, ASIFT: An Algorithm for Fully Affine
Invariant Comparison, Image Processing On Line, 1 (2011), pp. 11–38.
https://doi.org/10.5201/ipol.2011.my-asift
*/
#include "precomp.hpp"
#include <iostream>
namespace cv {
class AffineFeature_Impl CV_FINAL : public AffineFeature
{
public:
explicit AffineFeature_Impl(const Ptr<Feature2D>& backend,
int maxTilt, int minTilt, float tiltStep, float rotateStepBase);
int descriptorSize() const CV_OVERRIDE
{
return backend_->descriptorSize();
}
int descriptorType() const CV_OVERRIDE
{
return backend_->descriptorType();
}
int defaultNorm() const CV_OVERRIDE
{
return backend_->defaultNorm();
}
void detectAndCompute(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints,
OutputArray descriptors, bool useProvidedKeypoints=false) CV_OVERRIDE;
void setViewParams(const std::vector<float>& tilts, const std::vector<float>& rolls) CV_OVERRIDE;
void getViewParams(std::vector<float>& tilts, std::vector<float>& rolls) const CV_OVERRIDE;
protected:
void splitKeypointsByView(const std::vector<KeyPoint>& keypoints_,
std::vector< std::vector<KeyPoint> >& keypointsByView) const;
const Ptr<Feature2D> backend_;
int maxTilt_;
int minTilt_;
float tiltStep_;
float rotateStepBase_;
// Tilt factors.
std::vector<float> tilts_;
// Roll factors.
std::vector<float> rolls_;
private:
AffineFeature_Impl(const AffineFeature_Impl &); // copy disabled
AffineFeature_Impl& operator=(const AffineFeature_Impl &); // assign disabled
};
AffineFeature_Impl::AffineFeature_Impl(const Ptr<Feature2D>& backend,
int maxTilt, int minTilt, float tiltStep, float rotateStepBase)
: backend_(backend), maxTilt_(maxTilt), minTilt_(minTilt), tiltStep_(tiltStep), rotateStepBase_(rotateStepBase)
{
int i = minTilt_;
if( i == 0 )
{
tilts_.push_back(1);
rolls_.push_back(0);
i++;
}
float tilt = 1;
for( ; i <= maxTilt_; i++ )
{
tilt *= tiltStep_;
float rotateStep = rotateStepBase_ / tilt;
int rollN = cvFloor(180.0f / rotateStep);
if( rollN * rotateStep == 180.0f )
rollN--;
for( int j = 0; j <= rollN; j++ )
{
tilts_.push_back(tilt);
rolls_.push_back(rotateStep * j);
}
}
}
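// Worked example (assuming the documented defaults maxTilt=5, minTilt=0,
// tiltStep=sqrt(2), rotateStepBase=72): tilts_ becomes {1, 1.41, 2, 2.83, 4, 5.66},
// and each tilt t is paired with rolls 0, 72/t, 2*72/t, ... below 180 degrees,
// so more slanted views receive a proportionally finer rotation sampling.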
void AffineFeature_Impl::setViewParams(const std::vector<float>& tilts,
const std::vector<float>& rolls)
{
CV_Assert(tilts.size() == rolls.size());
tilts_ = tilts;
rolls_ = rolls;
}
void AffineFeature_Impl::getViewParams(std::vector<float>& tilts,
std::vector<float>& rolls) const
{
tilts = tilts_;
rolls = rolls_;
}
void AffineFeature_Impl::splitKeypointsByView(const std::vector<KeyPoint>& keypoints_,
std::vector< std::vector<KeyPoint> >& keypointsByView) const
{
for( size_t i = 0; i < keypoints_.size(); i++ )
{
const KeyPoint& kp = keypoints_[i];
CV_Assert( kp.class_id >= 0 && kp.class_id < (int)tilts_.size() );
keypointsByView[kp.class_id].push_back(kp);
}
}
class skewedDetectAndCompute : public ParallelLoopBody
{
public:
skewedDetectAndCompute(
const std::vector<float>& _tilts,
const std::vector<float>& _rolls,
std::vector< std::vector<KeyPoint> >& _keypointsCollection,
std::vector<Mat>& _descriptorCollection,
const Mat& _image,
const Mat& _mask,
const bool _do_keypoints,
const bool _do_descriptors,
const Ptr<Feature2D>& _backend)
: tilts(_tilts),
rolls(_rolls),
keypointsCollection(_keypointsCollection),
descriptorCollection(_descriptorCollection),
image(_image),
mask(_mask),
do_keypoints(_do_keypoints),
do_descriptors(_do_descriptors),
backend(_backend) {}
void operator()( const cv::Range& range ) const CV_OVERRIDE
{
CV_TRACE_FUNCTION();
const int begin = range.start;
const int end = range.end;
for( int a = begin; a < end; a++ )
{
Mat warpedImage, warpedMask;
Matx23f pose, invPose;
affineSkew(tilts[a], rolls[a], warpedImage, warpedMask, pose);
invertAffineTransform(pose, invPose);
std::vector<KeyPoint> wKeypoints;
Mat wDescriptors;
if( !do_keypoints )
{
const std::vector<KeyPoint>& keypointsInView = keypointsCollection[a];
if( keypointsInView.size() == 0 ) // when there are no keypoints in this affine view
continue;
std::vector<Point2f> pts_, pts;
KeyPoint::convert(keypointsInView, pts_);
transform(pts_, pts, pose);
wKeypoints.resize(keypointsInView.size());
for( size_t wi = 0; wi < wKeypoints.size(); wi++ )
{
wKeypoints[wi] = keypointsInView[wi];
wKeypoints[wi].pt = pts[wi];
}
}
backend->detectAndCompute(warpedImage, warpedMask, wKeypoints, wDescriptors, !do_keypoints);
if( do_keypoints )
{
// KeyPointsFilter::runByPixelsMask( wKeypoints, warpedMask );
if( wKeypoints.size() == 0 )
{
keypointsCollection[a].clear();
continue;
}
std::vector<Point2f> pts_, pts;
KeyPoint::convert(wKeypoints, pts_);
transform(pts_, pts, invPose);
keypointsCollection[a].resize(wKeypoints.size());
for( size_t wi = 0; wi < wKeypoints.size(); wi++ )
{
keypointsCollection[a][wi] = wKeypoints[wi];
keypointsCollection[a][wi].pt = pts[wi];
keypointsCollection[a][wi].class_id = a;
}
}
if( do_descriptors )
wDescriptors.copyTo(descriptorCollection[a]);
}
}
private:
void affineSkew(float tilt, float phi,
Mat& warpedImage, Mat& warpedMask, Matx23f& pose) const
{
int h = image.size().height;
int w = image.size().width;
Mat rotImage;
Mat mask0;
if( mask.empty() )
mask0 = Mat(h, w, CV_8UC1, 255);
else
mask0 = mask;
pose = Matx23f(1,0,0,
0,1,0);
if( phi == 0 )
image.copyTo(rotImage);
else
{
phi = phi * (float)CV_PI / 180;
float s = std::sin(phi);
float c = std::cos(phi);
Matx22f A(c, -s, s, c);
Matx<float, 4, 2> corners(0, 0, (float)w, 0, (float)w,(float)h, 0, (float)h);
Mat tf(corners * A.t());
Mat tcorners;
tf.convertTo(tcorners, CV_32S);
Rect rect = boundingRect(tcorners);
h = rect.height; w = rect.width;
pose = Matx23f(c, -s, -(float)rect.x,
s, c, -(float)rect.y);
warpAffine(image, rotImage, pose, Size(w, h), INTER_LINEAR, BORDER_REPLICATE);
}
if( tilt == 1 )
warpedImage = rotImage;
else
{
float s = 0.8f * sqrt(tilt * tilt - 1);
GaussianBlur(rotImage, rotImage, Size(0, 0), s, 0.01);
resize(rotImage, warpedImage, Size(0, 0), 1.0/tilt, 1.0, INTER_NEAREST);
pose(0, 0) /= tilt;
pose(0, 1) /= tilt;
pose(0, 2) /= tilt;
}
if( phi != 0 || tilt != 1 )
warpAffine(mask0, warpedMask, pose, warpedImage.size(), INTER_NEAREST);
}
const std::vector<float>& tilts;
const std::vector<float>& rolls;
std::vector< std::vector<KeyPoint> >& keypointsCollection;
std::vector<Mat>& descriptorCollection;
const Mat& image;
const Mat& mask;
const bool do_keypoints;
const bool do_descriptors;
const Ptr<Feature2D>& backend;
};
void AffineFeature_Impl::detectAndCompute(InputArray _image, InputArray _mask,
std::vector<KeyPoint>& keypoints,
OutputArray _descriptors,
bool useProvidedKeypoints)
{
CV_TRACE_FUNCTION();
bool do_keypoints = !useProvidedKeypoints;
bool do_descriptors = _descriptors.needed();
Mat image = _image.getMat(), mask = _mask.getMat();
Mat descriptors;
if( (!do_keypoints && !do_descriptors) || _image.empty() )
return;
std::vector< std::vector<KeyPoint> > keypointsCollection(tilts_.size());
std::vector< Mat > descriptorCollection(tilts_.size());
if( do_keypoints )
keypoints.clear();
else
splitKeypointsByView(keypoints, keypointsCollection);
parallel_for_(Range(0, (int)tilts_.size()), skewedDetectAndCompute(tilts_, rolls_, keypointsCollection, descriptorCollection,
image, mask, do_keypoints, do_descriptors, backend_));
if( do_keypoints )
for( size_t i = 0; i < keypointsCollection.size(); i++ )
{
const std::vector<KeyPoint>& keys = keypointsCollection[i];
keypoints.insert(keypoints.end(), keys.begin(), keys.end());
}
if( do_descriptors )
{
_descriptors.create((int)keypoints.size(), backend_->descriptorSize(), backend_->descriptorType());
descriptors = _descriptors.getMat();
int iter = 0;
for( size_t i = 0; i < descriptorCollection.size(); i++ )
{
const Mat& descs = descriptorCollection[i];
if( descs.empty() )
continue;
Mat roi(descriptors, Rect(0, iter, descriptors.cols, descs.rows));
descs.copyTo(roi);
iter += descs.rows;
}
}
}
Ptr<AffineFeature> AffineFeature::create(const Ptr<Feature2D>& backend,
int maxTilt, int minTilt, float tiltStep, float rotateStepBase)
{
CV_Assert(minTilt < maxTilt);
CV_Assert(tiltStep > 0);
CV_Assert(rotateStepBase > 0);
return makePtr<AffineFeature_Impl>(backend, maxTilt, minTilt, tiltStep, rotateStepBase);
}
String AffineFeature::getDefaultName() const
{
return (Feature2D::getDefaultName() + ".AffineFeature");
}
} // namespace
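End to end, the class wraps any Feature2D backend in the ASIFT view-simulation loop; a usage sketch assuming a SIFT backend and two hypothetical image files:
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>
using namespace cv;
int main()
{
    Mat img1 = imread("view1.png", IMREAD_GRAYSCALE);
    Mat img2 = imread("view2.png", IMREAD_GRAYSCALE);
    if (img1.empty() || img2.empty())
        return 1;
    Ptr<AffineFeature> asift = AffineFeature::create(SIFT::create());
    std::vector<KeyPoint> k1, k2;
    Mat d1, d2;
    asift->detectAndCompute(img1, noArray(), k1, d1);
    asift->detectAndCompute(img2, noArray(), k2, d2);
    BFMatcher matcher(NORM_L2);
    std::vector<DMatch> matches;
    matcher.match(d1, d2, matches); // keypoint class_id records the source view
    return 0;
}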

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,69 @@
/* This is AGAST and OAST, an optimal and accelerated corner detector
based on the accelerated segment test.
Below are the original copyright notice and references. */
/*
Copyright (C) 2010 Elmar Mair
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
*Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
*Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
*Neither the name of the University of Cambridge nor the names of
its contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The references are:
* Adaptive and Generic Corner Detection Based on the Accelerated Segment Test,
Elmar Mair and Gregory D. Hager and Darius Burschka
and Michael Suppa and Gerhard Hirzinger, ECCV 2010
URL: http://www6.in.tum.de/Main/ResearchAgast
*/
#ifndef __OPENCV_FEATURES_2D_AGAST_HPP__
#define __OPENCV_FEATURES_2D_AGAST_HPP__
#ifdef __cplusplus
#include "precomp.hpp"
namespace cv
{
#if !(defined __i386__ || defined(_M_IX86) || defined __x86_64__ || defined(_M_X64))
int agast_tree_search(const uint32_t table_struct32[], int pixel_[], const unsigned char* const ptr, int threshold);
int AGAST_ALL_SCORE(const uchar* ptr, const int pixel[], int threshold, AgastFeatureDetector::DetectorType agasttype);
#endif //!(defined __i386__ || defined(_M_IX86) || defined __x86_64__ || defined(_M_X64))
void makeAgastOffsets(int pixel[16], int row_stride, AgastFeatureDetector::DetectorType type);
template<AgastFeatureDetector::DetectorType type>
int agast_cornerScore(const uchar* ptr, const int pixel[], int threshold);
}
#endif // __cplusplus
#endif // __OPENCV_FEATURES_2D_AGAST_HPP__

View File

@@ -0,0 +1,253 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2008, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/*
OpenCV wrapper of reference implementation of
[1] Fast Explicit Diffusion for Accelerated Features in Nonlinear Scale Spaces.
Pablo F. Alcantarilla, J. Nuevo and Adrien Bartoli.
In British Machine Vision Conference (BMVC), Bristol, UK, September 2013
http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla13bmvc.pdf
@author Eugene Khvedchenya <ekhvedchenya@gmail.com>
*/
#include "precomp.hpp"
#include "kaze/AKAZEFeatures.h"
#include <iostream>
namespace cv
{
using namespace std;
class AKAZE_Impl : public AKAZE
{
public:
AKAZE_Impl(DescriptorType _descriptor_type, int _descriptor_size, int _descriptor_channels,
float _threshold, int _octaves, int _sublevels, KAZE::DiffusivityType _diffusivity)
: descriptor(_descriptor_type)
, descriptor_channels(_descriptor_channels)
, descriptor_size(_descriptor_size)
, threshold(_threshold)
, octaves(_octaves)
, sublevels(_sublevels)
, diffusivity(_diffusivity)
{
}
virtual ~AKAZE_Impl() CV_OVERRIDE
{
}
void setDescriptorType(DescriptorType dtype) CV_OVERRIDE{ descriptor = dtype; }
DescriptorType getDescriptorType() const CV_OVERRIDE{ return descriptor; }
void setDescriptorSize(int dsize) CV_OVERRIDE { descriptor_size = dsize; }
int getDescriptorSize() const CV_OVERRIDE { return descriptor_size; }
void setDescriptorChannels(int dch) CV_OVERRIDE { descriptor_channels = dch; }
int getDescriptorChannels() const CV_OVERRIDE { return descriptor_channels; }
void setThreshold(double threshold_) CV_OVERRIDE { threshold = (float)threshold_; }
double getThreshold() const CV_OVERRIDE { return threshold; }
void setNOctaves(int octaves_) CV_OVERRIDE { octaves = octaves_; }
int getNOctaves() const CV_OVERRIDE { return octaves; }
void setNOctaveLayers(int octaveLayers_) CV_OVERRIDE { sublevels = octaveLayers_; }
int getNOctaveLayers() const CV_OVERRIDE { return sublevels; }
void setDiffusivity(KAZE::DiffusivityType diff_) CV_OVERRIDE{ diffusivity = diff_; }
KAZE::DiffusivityType getDiffusivity() const CV_OVERRIDE{ return diffusivity; }
// returns the descriptor size in bytes
int descriptorSize() const CV_OVERRIDE
{
switch (descriptor)
{
case DESCRIPTOR_KAZE:
case DESCRIPTOR_KAZE_UPRIGHT:
return 64;
case DESCRIPTOR_MLDB:
case DESCRIPTOR_MLDB_UPRIGHT:
// We use the full-length binary descriptor -> 486 bits
if (descriptor_size == 0)
{
int t = (6 + 36 + 120) * descriptor_channels;
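// e.g. with the default 3 channels: (6 + 36 + 120) * 3 = 486 bits -> divUp(486, 8) = 61 bytes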
return divUp(t, 8);
}
else
{
// We use the shortened binary descriptor with randomly selected bits
return divUp(descriptor_size, 8);
}
default:
return -1;
}
}
// returns the descriptor type
int descriptorType() const CV_OVERRIDE
{
switch (descriptor)
{
case DESCRIPTOR_KAZE:
case DESCRIPTOR_KAZE_UPRIGHT:
return CV_32F;
case DESCRIPTOR_MLDB:
case DESCRIPTOR_MLDB_UPRIGHT:
return CV_8U;
default:
return -1;
}
}
// returns the default norm type
int defaultNorm() const CV_OVERRIDE
{
switch (descriptor)
{
case DESCRIPTOR_KAZE:
case DESCRIPTOR_KAZE_UPRIGHT:
return NORM_L2;
case DESCRIPTOR_MLDB:
case DESCRIPTOR_MLDB_UPRIGHT:
return NORM_HAMMING;
default:
return -1;
}
}
void detectAndCompute(InputArray image, InputArray mask,
std::vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints) CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
CV_Assert( ! image.empty() );
AKAZEOptions options;
options.descriptor = descriptor;
options.descriptor_channels = descriptor_channels;
options.descriptor_size = descriptor_size;
options.img_width = image.cols();
options.img_height = image.rows();
options.dthreshold = threshold;
options.omax = octaves;
options.nsublevels = sublevels;
options.diffusivity = diffusivity;
AKAZEFeatures impl(options);
impl.Create_Nonlinear_Scale_Space(image);
if (!useProvidedKeypoints)
{
impl.Feature_Detection(keypoints);
}
if (!mask.empty())
{
KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
}
if(descriptors.needed())
{
impl.Compute_Descriptors(keypoints, descriptors);
CV_Assert((descriptors.empty() || descriptors.cols() == descriptorSize()));
CV_Assert((descriptors.empty() || (descriptors.type() == descriptorType())));
}
}
void write(FileStorage& fs) const CV_OVERRIDE
{
writeFormat(fs);
fs << "descriptor" << descriptor;
fs << "descriptor_channels" << descriptor_channels;
fs << "descriptor_size" << descriptor_size;
fs << "threshold" << threshold;
fs << "octaves" << octaves;
fs << "sublevels" << sublevels;
fs << "diffusivity" << diffusivity;
}
void read(const FileNode& fn) CV_OVERRIDE
{
descriptor = static_cast<DescriptorType>((int)fn["descriptor"]);
descriptor_channels = (int)fn["descriptor_channels"];
descriptor_size = (int)fn["descriptor_size"];
threshold = (float)fn["threshold"];
octaves = (int)fn["octaves"];
sublevels = (int)fn["sublevels"];
diffusivity = static_cast<KAZE::DiffusivityType>((int)fn["diffusivity"]);
}
DescriptorType descriptor;
int descriptor_channels;
int descriptor_size;
float threshold;
int octaves;
int sublevels;
KAZE::DiffusivityType diffusivity;
};
Ptr<AKAZE> AKAZE::create(DescriptorType descriptor_type,
int descriptor_size, int descriptor_channels,
float threshold, int octaves,
int sublevels, KAZE::DiffusivityType diffusivity)
{
return makePtr<AKAZE_Impl>(descriptor_type, descriptor_size, descriptor_channels,
threshold, octaves, sublevels, diffusivity);
}
String AKAZE::getDefaultName() const
{
return (Feature2D::getDefaultName() + ".AKAZE");
}
}
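A short usage sketch for the factory above (hypothetical input file; the descriptor/norm pairing follows descriptorType() and defaultNorm()):
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>
using namespace cv;
int main()
{
    Mat img = imread("scene.png", IMREAD_GRAYSCALE); // hypothetical input image
    if (img.empty())
        return 1;
    Ptr<AKAZE> akaze = AKAZE::create(); // default: DESCRIPTOR_MLDB, binary
    std::vector<KeyPoint> kps;
    Mat desc;
    akaze->detectAndCompute(img, noArray(), kps, desc);
    BFMatcher matcher(akaze->defaultNorm()); // NORM_HAMMING for MLDB, NORM_L2 for KAZE
    return 0;
}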

View File

@@ -0,0 +1,216 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv
{
BOWTrainer::BOWTrainer() : size(0)
{}
BOWTrainer::~BOWTrainer()
{}
void BOWTrainer::add( const Mat& _descriptors )
{
CV_Assert( !_descriptors.empty() );
if( !descriptors.empty() )
{
CV_Assert( descriptors[0].cols == _descriptors.cols );
CV_Assert( descriptors[0].type() == _descriptors.type() );
size += _descriptors.rows;
}
else
{
size = _descriptors.rows;
}
descriptors.push_back(_descriptors);
}
const std::vector<Mat>& BOWTrainer::getDescriptors() const
{
return descriptors;
}
int BOWTrainer::descriptorsCount() const
{
return descriptors.empty() ? 0 : size;
}
void BOWTrainer::clear()
{
descriptors.clear();
}
BOWKMeansTrainer::BOWKMeansTrainer( int _clusterCount, const TermCriteria& _termcrit,
int _attempts, int _flags ) :
clusterCount(_clusterCount), termcrit(_termcrit), attempts(_attempts), flags(_flags)
{}
Mat BOWKMeansTrainer::cluster() const
{
CV_INSTRUMENT_REGION();
CV_Assert( !descriptors.empty() );
Mat mergedDescriptors( descriptorsCount(), descriptors[0].cols, descriptors[0].type() );
for( size_t i = 0, start = 0; i < descriptors.size(); i++ )
{
Mat submut = mergedDescriptors.rowRange((int)start, (int)(start + descriptors[i].rows));
descriptors[i].copyTo(submut);
start += descriptors[i].rows;
}
return cluster( mergedDescriptors );
}
BOWKMeansTrainer::~BOWKMeansTrainer()
{}
Mat BOWKMeansTrainer::cluster( const Mat& _descriptors ) const
{
CV_INSTRUMENT_REGION();
Mat labels, vocabulary;
kmeans( _descriptors, clusterCount, labels, termcrit, attempts, flags, vocabulary );
return vocabulary;
}
BOWImgDescriptorExtractor::BOWImgDescriptorExtractor( const Ptr<DescriptorExtractor>& _dextractor,
const Ptr<DescriptorMatcher>& _dmatcher ) :
dextractor(_dextractor), dmatcher(_dmatcher)
{}
BOWImgDescriptorExtractor::BOWImgDescriptorExtractor( const Ptr<DescriptorMatcher>& _dmatcher ) :
dmatcher(_dmatcher)
{}
BOWImgDescriptorExtractor::~BOWImgDescriptorExtractor()
{}
void BOWImgDescriptorExtractor::setVocabulary( const Mat& _vocabulary )
{
dmatcher->clear();
vocabulary = _vocabulary;
dmatcher->add( std::vector<Mat>(1, vocabulary) );
}
const Mat& BOWImgDescriptorExtractor::getVocabulary() const
{
return vocabulary;
}
void BOWImgDescriptorExtractor::compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray imgDescriptor,
std::vector<std::vector<int> >* pointIdxsOfClusters, Mat* descriptors )
{
CV_INSTRUMENT_REGION();
imgDescriptor.release();
if( keypoints.empty() )
return;
// Compute descriptors for the image.
Mat _descriptors;
dextractor->compute( image, keypoints, _descriptors );
compute( _descriptors, imgDescriptor, pointIdxsOfClusters );
// Add the descriptors of image keypoints
if (descriptors) {
*descriptors = _descriptors.clone();
}
}
int BOWImgDescriptorExtractor::descriptorSize() const
{
return vocabulary.empty() ? 0 : vocabulary.rows;
}
int BOWImgDescriptorExtractor::descriptorType() const
{
return CV_32FC1;
}
void BOWImgDescriptorExtractor::compute( InputArray keypointDescriptors, OutputArray _imgDescriptor, std::vector<std::vector<int> >* pointIdxsOfClusters )
{
CV_INSTRUMENT_REGION();
CV_Assert( !vocabulary.empty() );
CV_Assert(!keypointDescriptors.empty());
int clusterCount = descriptorSize(); // = vocabulary.rows
// Match keypoint descriptors to cluster center (to vocabulary)
std::vector<DMatch> matches;
dmatcher->match( keypointDescriptors, matches );
// Compute image descriptor
if( pointIdxsOfClusters )
{
pointIdxsOfClusters->clear();
pointIdxsOfClusters->resize(clusterCount);
}
_imgDescriptor.create(1, clusterCount, descriptorType());
_imgDescriptor.setTo(Scalar::all(0));
Mat imgDescriptor = _imgDescriptor.getMat();
float *dptr = imgDescriptor.ptr<float>();
for( size_t i = 0; i < matches.size(); i++ )
{
int queryIdx = matches[i].queryIdx;
int trainIdx = matches[i].trainIdx; // cluster index
CV_Assert( queryIdx == (int)i );
dptr[trainIdx] = dptr[trainIdx] + 1.f;
if( pointIdxsOfClusters )
(*pointIdxsOfClusters)[trainIdx].push_back( queryIdx );
}
// Normalize image descriptor.
imgDescriptor /= keypointDescriptors.size().height;
}
}
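Put together, the classes above form the usual bag-of-visual-words pipeline: pool descriptors, cluster a vocabulary, then histogram each image's descriptors against it. A sketch with hypothetical file names (SIFT plus a FLANN matcher, both compatible with CV_32F descriptors):
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <string>
#include <vector>
using namespace cv;
int main()
{
    std::vector<std::string> trainFiles = {"a.png", "b.png"}; // hypothetical images
    Ptr<Feature2D> sift = SIFT::create();
    BOWKMeansTrainer trainer(100); // vocabulary of 100 visual words
    for (size_t i = 0; i < trainFiles.size(); i++)
    {
        Mat img = imread(trainFiles[i], IMREAD_GRAYSCALE);
        std::vector<KeyPoint> kps;
        Mat desc;
        sift->detectAndCompute(img, noArray(), kps, desc);
        if (!desc.empty())
            trainer.add(desc);
    }
    Mat vocab = trainer.cluster(); // k-means over all pooled descriptors
    BOWImgDescriptorExtractor bow(sift, DescriptorMatcher::create("FlannBased"));
    bow.setVocabulary(vocab);
    Mat query = imread("q.png", IMREAD_GRAYSCALE);
    std::vector<KeyPoint> kps;
    sift->detect(query, kps);
    Mat hist; // 1 x 100 CV_32F histogram, normalized by the keypoint count
    bow.compute(query, kps, hist);
    return 0;
}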

View File

@@ -0,0 +1,404 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <iterator>
#include <limits>
#include <opencv2/core/utils/logger.hpp>
// Requires CMake flag: DEBUG_opencv_features2d=ON
//#define DEBUG_BLOB_DETECTOR
#ifdef DEBUG_BLOB_DETECTOR
#include "opencv2/highgui.hpp"
#endif
namespace cv
{
class CV_EXPORTS_W SimpleBlobDetectorImpl : public SimpleBlobDetector
{
public:
explicit SimpleBlobDetectorImpl(const SimpleBlobDetector::Params &parameters = SimpleBlobDetector::Params());
virtual void read( const FileNode& fn ) CV_OVERRIDE;
virtual void write( FileStorage& fs ) const CV_OVERRIDE;
protected:
struct CV_EXPORTS Center
{
Point2d location;
double radius;
double confidence;
};
virtual void detect( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) CV_OVERRIDE;
virtual void findBlobs(InputArray image, InputArray binaryImage, std::vector<Center> &centers) const;
Params params;
};
/*
* SimpleBlobDetector
*/
SimpleBlobDetector::Params::Params()
{
thresholdStep = 10;
minThreshold = 50;
maxThreshold = 220;
minRepeatability = 2;
minDistBetweenBlobs = 10;
filterByColor = true;
blobColor = 0;
filterByArea = true;
minArea = 25;
maxArea = 5000;
filterByCircularity = false;
minCircularity = 0.8f;
maxCircularity = std::numeric_limits<float>::max();
filterByInertia = true;
//minInertiaRatio = 0.6;
minInertiaRatio = 0.1f;
maxInertiaRatio = std::numeric_limits<float>::max();
filterByConvexity = true;
//minConvexity = 0.8;
minConvexity = 0.95f;
maxConvexity = std::numeric_limits<float>::max();
}
void SimpleBlobDetector::Params::read(const cv::FileNode& fn )
{
thresholdStep = fn["thresholdStep"];
minThreshold = fn["minThreshold"];
maxThreshold = fn["maxThreshold"];
minRepeatability = (size_t)(int)fn["minRepeatability"];
minDistBetweenBlobs = fn["minDistBetweenBlobs"];
filterByColor = (int)fn["filterByColor"] != 0 ? true : false;
blobColor = (uchar)(int)fn["blobColor"];
filterByArea = (int)fn["filterByArea"] != 0 ? true : false;
minArea = fn["minArea"];
maxArea = fn["maxArea"];
filterByCircularity = (int)fn["filterByCircularity"] != 0 ? true : false;
minCircularity = fn["minCircularity"];
maxCircularity = fn["maxCircularity"];
filterByInertia = (int)fn["filterByInertia"] != 0 ? true : false;
minInertiaRatio = fn["minInertiaRatio"];
maxInertiaRatio = fn["maxInertiaRatio"];
filterByConvexity = (int)fn["filterByConvexity"] != 0 ? true : false;
minConvexity = fn["minConvexity"];
maxConvexity = fn["maxConvexity"];
}
void SimpleBlobDetector::Params::write(cv::FileStorage& fs) const
{
fs << "thresholdStep" << thresholdStep;
fs << "minThreshold" << minThreshold;
fs << "maxThreshold" << maxThreshold;
fs << "minRepeatability" << (int)minRepeatability;
fs << "minDistBetweenBlobs" << minDistBetweenBlobs;
fs << "filterByColor" << (int)filterByColor;
fs << "blobColor" << (int)blobColor;
fs << "filterByArea" << (int)filterByArea;
fs << "minArea" << minArea;
fs << "maxArea" << maxArea;
fs << "filterByCircularity" << (int)filterByCircularity;
fs << "minCircularity" << minCircularity;
fs << "maxCircularity" << maxCircularity;
fs << "filterByInertia" << (int)filterByInertia;
fs << "minInertiaRatio" << minInertiaRatio;
fs << "maxInertiaRatio" << maxInertiaRatio;
fs << "filterByConvexity" << (int)filterByConvexity;
fs << "minConvexity" << minConvexity;
fs << "maxConvexity" << maxConvexity;
}
SimpleBlobDetectorImpl::SimpleBlobDetectorImpl(const SimpleBlobDetector::Params &parameters) :
params(parameters)
{
}
void SimpleBlobDetectorImpl::read( const cv::FileNode& fn )
{
params.read(fn);
}
void SimpleBlobDetectorImpl::write( cv::FileStorage& fs ) const
{
writeFormat(fs);
params.write(fs);
}
void SimpleBlobDetectorImpl::findBlobs(InputArray _image, InputArray _binaryImage, std::vector<Center> &centers) const
{
CV_INSTRUMENT_REGION();
Mat image = _image.getMat(), binaryImage = _binaryImage.getMat();
CV_UNUSED(image);
centers.clear();
std::vector < std::vector<Point> > contours;
findContours(binaryImage, contours, RETR_LIST, CHAIN_APPROX_NONE);
#ifdef DEBUG_BLOB_DETECTOR
Mat keypointsImage;
cvtColor(binaryImage, keypointsImage, COLOR_GRAY2RGB);
Mat contoursImage;
cvtColor(binaryImage, contoursImage, COLOR_GRAY2RGB);
drawContours( contoursImage, contours, -1, Scalar(0,255,0) );
imshow("contours", contoursImage );
#endif
for (size_t contourIdx = 0; contourIdx < contours.size(); contourIdx++)
{
Center center;
center.confidence = 1;
Moments moms = moments(contours[contourIdx]);
if (params.filterByArea)
{
double area = moms.m00;
if (area < params.minArea || area >= params.maxArea)
continue;
}
if (params.filterByCircularity)
{
double area = moms.m00;
double perimeter = arcLength(contours[contourIdx], true);
double ratio = 4 * CV_PI * area / (perimeter * perimeter);
if (ratio < params.minCircularity || ratio >= params.maxCircularity)
continue;
}
if (params.filterByInertia)
{
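// The ratio computed below is lambda_min / lambda_max of the contour's
// second-order central moments (its principal-axis inertias): 1 for a
// perfect circle, tending toward 0 for an elongated, line-like blob.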
double denominator = std::sqrt(std::pow(2 * moms.mu11, 2) + std::pow(moms.mu20 - moms.mu02, 2));
const double eps = 1e-2;
double ratio;
if (denominator > eps)
{
double cosmin = (moms.mu20 - moms.mu02) / denominator;
double sinmin = 2 * moms.mu11 / denominator;
double cosmax = -cosmin;
double sinmax = -sinmin;
double imin = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmin - moms.mu11 * sinmin;
double imax = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmax - moms.mu11 * sinmax;
ratio = imin / imax;
}
else
{
ratio = 1;
}
if (ratio < params.minInertiaRatio || ratio >= params.maxInertiaRatio)
continue;
center.confidence = ratio * ratio;
}
if (params.filterByConvexity)
{
std::vector < Point > hull;
convexHull(contours[contourIdx], hull);
double area = moms.m00;
double hullArea = contourArea(hull);
if (fabs(hullArea) < DBL_EPSILON)
continue;
double ratio = area / hullArea;
if (ratio < params.minConvexity || ratio >= params.maxConvexity)
continue;
}
if(moms.m00 == 0.0)
continue;
center.location = Point2d(moms.m10 / moms.m00, moms.m01 / moms.m00);
if (params.filterByColor)
{
if (binaryImage.at<uchar> (cvRound(center.location.y), cvRound(center.location.x)) != params.blobColor)
continue;
}
// compute blob radius
{
std::vector<double> dists;
for (size_t pointIdx = 0; pointIdx < contours[contourIdx].size(); pointIdx++)
{
Point2d pt = contours[contourIdx][pointIdx];
dists.push_back(norm(center.location - pt));
}
std::sort(dists.begin(), dists.end());
center.radius = (dists[(dists.size() - 1) / 2] + dists[dists.size() / 2]) / 2.;
}
centers.push_back(center);
#ifdef DEBUG_BLOB_DETECTOR
circle( keypointsImage, center.location, 1, Scalar(0,0,255), 1 );
#endif
}
#ifdef DEBUG_BLOB_DETECTOR
imshow("bk", keypointsImage );
waitKey();
#endif
}
void SimpleBlobDetectorImpl::detect(InputArray image, std::vector<cv::KeyPoint>& keypoints, InputArray mask)
{
CV_INSTRUMENT_REGION();
keypoints.clear();
CV_Assert(params.minRepeatability != 0);
Mat grayscaleImage;
if (image.channels() == 3 || image.channels() == 4)
cvtColor(image, grayscaleImage, COLOR_BGR2GRAY);
else
grayscaleImage = image.getMat();
if (grayscaleImage.type() != CV_8UC1) {
CV_Error(Error::StsUnsupportedFormat, "Blob detector only supports 8-bit images!");
}
CV_CheckGT(params.thresholdStep, 0.0f, "");
if (params.minThreshold + params.thresholdStep >= params.maxThreshold)
{
// https://github.com/opencv/opencv/issues/6667
CV_LOG_ONCE_INFO(NULL, "SimpleBlobDetector: params.minDistBetweenBlobs is ignored for case with single threshold");
#if 0 // OpenCV 5.0
CV_CheckEQ(params.minRepeatability, 1u, "Incompatible parameters for case with single threshold");
#else
if (params.minRepeatability != 1)
CV_LOG_WARNING(NULL, "SimpleBlobDetector: params.minRepeatability=" << params.minRepeatability << " is incompatible for case with single threshold. Empty result is expected.");
#endif
}
std::vector < std::vector<Center> > centers;
for (double thresh = params.minThreshold; thresh < params.maxThreshold; thresh += params.thresholdStep)
{
Mat binarizedImage;
threshold(grayscaleImage, binarizedImage, thresh, 255, THRESH_BINARY);
std::vector < Center > curCenters;
findBlobs(grayscaleImage, binarizedImage, curCenters);
std::vector < std::vector<Center> > newCenters;
for (size_t i = 0; i < curCenters.size(); i++)
{
bool isNew = true;
for (size_t j = 0; j < centers.size(); j++)
{
double dist = norm(centers[j][ centers[j].size() / 2 ].location - curCenters[i].location);
isNew = dist >= params.minDistBetweenBlobs && dist >= centers[j][ centers[j].size() / 2 ].radius && dist >= curCenters[i].radius;
if (!isNew)
{
centers[j].push_back(curCenters[i]);
size_t k = centers[j].size() - 1;
while( k > 0 && curCenters[i].radius < centers[j][k-1].radius )
{
centers[j][k] = centers[j][k-1];
k--;
}
centers[j][k] = curCenters[i];
break;
}
}
if (isNew)
newCenters.push_back(std::vector<Center> (1, curCenters[i]));
}
std::copy(newCenters.begin(), newCenters.end(), std::back_inserter(centers));
}
for (size_t i = 0; i < centers.size(); i++)
{
if (centers[i].size() < params.minRepeatability)
continue;
Point2d sumPoint(0, 0);
double normalizer = 0;
for (size_t j = 0; j < centers[i].size(); j++)
{
sumPoint += centers[i][j].confidence * centers[i][j].location;
normalizer += centers[i][j].confidence;
}
sumPoint *= (1. / normalizer);
KeyPoint kpt(sumPoint, (float)(centers[i][centers[i].size() / 2].radius) * 2.0f);
keypoints.push_back(kpt);
}
if (!mask.empty())
{
KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
}
}
Ptr<SimpleBlobDetector> SimpleBlobDetector::create(const SimpleBlobDetector::Params& params)
{
return makePtr<SimpleBlobDetectorImpl>(params);
}
String SimpleBlobDetector::getDefaultName() const
{
return (Feature2D::getDefaultName() + ".SimpleBlobDetector");
}
}
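A short usage sketch for the detector above (hypothetical input image; the defaults already select dark blobs, blobColor = 0):
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>
using namespace cv;
int main()
{
    Mat img = imread("blobs.png", IMREAD_GRAYSCALE); // hypothetical input image
    if (img.empty())
        return 1;
    SimpleBlobDetector::Params p;
    p.minArea = 50;  // filterByArea is on by default
    p.maxArea = 10000;
    p.filterByCircularity = true;
    p.minCircularity = 0.7f;
    Ptr<SimpleBlobDetector> det = SimpleBlobDetector::create(p);
    std::vector<KeyPoint> blobs;
    det->detect(img, blobs); // KeyPoint::size holds the estimated blob diameter
    return 0;
}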

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,279 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
const int draw_shift_bits = 4;
const int draw_multiplier = 1 << draw_shift_bits;
namespace cv
{
/*
* Functions to draw keypoints and matches.
*/
static inline void _drawKeypoint( InputOutputArray img, const KeyPoint& p, const Scalar& color, DrawMatchesFlags flags )
{
CV_Assert( !img.empty() );
Point center( cvRound(p.pt.x * draw_multiplier), cvRound(p.pt.y * draw_multiplier) );
if( !!(flags & DrawMatchesFlags::DRAW_RICH_KEYPOINTS) )
{
int radius = cvRound(p.size/2 * draw_multiplier); // KeyPoint::size is a diameter
// draw the circles around keypoints with the keypoints size
circle( img, center, radius, color, 1, LINE_AA, draw_shift_bits );
// draw orientation of the keypoint, if it is applicable
if( p.angle != -1 )
{
float srcAngleRad = p.angle*(float)CV_PI/180.f;
Point orient( cvRound(cos(srcAngleRad)*radius ),
cvRound(sin(srcAngleRad)*radius )
);
line( img, center, center+orient, color, 1, LINE_AA, draw_shift_bits );
}
#if 0
else
{
// draw center with R=1
int radius = 1 * draw_multiplier;
circle( img, center, radius, color, 1, LINE_AA, draw_shift_bits );
}
#endif
}
else
{
// draw center with R=3
int radius = 3 * draw_multiplier;
circle( img, center, radius, color, 1, LINE_AA, draw_shift_bits );
}
}
void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
const Scalar& _color, DrawMatchesFlags flags )
{
CV_INSTRUMENT_REGION();
if( !(flags & DrawMatchesFlags::DRAW_OVER_OUTIMG) )
{
if (image.type() == CV_8UC3 || image.type() == CV_8UC4)
{
image.copyTo(outImage);
}
else if( image.type() == CV_8UC1 )
{
cvtColor( image, outImage, COLOR_GRAY2BGR );
}
else
{
CV_Error( Error::StsBadArg, "Incorrect type of input image: " + typeToString(image.type()) );
}
}
RNG& rng=theRNG();
bool isRandColor = _color == Scalar::all(-1);
CV_Assert( !outImage.empty() );
std::vector<KeyPoint>::const_iterator it = keypoints.begin(),
end = keypoints.end();
for( ; it != end; ++it )
{
Scalar color = isRandColor ? Scalar( rng(256), rng(256), rng(256), 255 ) : _color;
_drawKeypoint( outImage, *it, color, flags );
}
}
static void _prepareImage(InputArray src, const Mat& dst)
{
CV_CheckType(src.type(), src.type() == CV_8UC1 || src.type() == CV_8UC3 || src.type() == CV_8UC4, "Unsupported source image");
CV_CheckType(dst.type(), dst.type() == CV_8UC3 || dst.type() == CV_8UC4, "Unsupported destination image");
const int src_cn = src.channels();
const int dst_cn = dst.channels();
if (src_cn == dst_cn)
src.copyTo(dst);
else if (src_cn == 1)
cvtColor(src, dst, dst_cn == 3 ? COLOR_GRAY2BGR : COLOR_GRAY2BGRA);
else if (src_cn == 3 && dst_cn == 4)
cvtColor(src, dst, COLOR_BGR2BGRA);
else if (src_cn == 4 && dst_cn == 3)
cvtColor(src, dst, COLOR_BGRA2BGR);
else
CV_Error(Error::StsInternal, "");
}
static void _prepareImgAndDrawKeypoints( InputArray img1, const std::vector<KeyPoint>& keypoints1,
InputArray img2, const std::vector<KeyPoint>& keypoints2,
InputOutputArray _outImg, Mat& outImg1, Mat& outImg2,
const Scalar& singlePointColor, DrawMatchesFlags flags )
{
Mat outImg;
Size img1size = img1.size(), img2size = img2.size();
Size size( img1size.width + img2size.width, MAX(img1size.height, img2size.height) );
if( !!(flags & DrawMatchesFlags::DRAW_OVER_OUTIMG) )
{
outImg = _outImg.getMat();
if( size.width > outImg.cols || size.height > outImg.rows )
CV_Error( Error::StsBadSize, "outImg has size less than need to draw img1 and img2 together" );
outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
}
else
{
const int cn1 = img1.channels(), cn2 = img2.channels();
const int out_cn = std::max(3, std::max(cn1, cn2));
_outImg.create(size, CV_MAKETYPE(img1.depth(), out_cn));
outImg = _outImg.getMat();
outImg = Scalar::all(0);
outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
_prepareImage(img1, outImg1);
_prepareImage(img2, outImg2);
}
// draw keypoints
if( !(flags & DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS) )
{
Mat _outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
drawKeypoints( _outImg1, keypoints1, _outImg1, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );
Mat _outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
drawKeypoints( _outImg2, keypoints2, _outImg2, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );
}
}
static inline void _drawMatch( InputOutputArray outImg, InputOutputArray outImg1, InputOutputArray outImg2 ,
const KeyPoint& kp1, const KeyPoint& kp2, const Scalar& matchColor, DrawMatchesFlags flags,
const int matchesThickness )
{
RNG& rng = theRNG();
bool isRandMatchColor = matchColor == Scalar::all(-1);
Scalar color = isRandMatchColor ? Scalar( rng(256), rng(256), rng(256), 255 ) : matchColor;
_drawKeypoint( outImg1, kp1, color, flags );
_drawKeypoint( outImg2, kp2, color, flags );
Point2f pt1 = kp1.pt,
pt2 = kp2.pt,
dpt2 = Point2f( std::min(pt2.x+outImg1.size().width, float(outImg.size().width-1)), pt2.y );
line( outImg,
Point(cvRound(pt1.x*draw_multiplier), cvRound(pt1.y*draw_multiplier)),
Point(cvRound(dpt2.x*draw_multiplier), cvRound(dpt2.y*draw_multiplier)),
color, matchesThickness, LINE_AA, draw_shift_bits );
}
void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
InputArray img2, const std::vector<KeyPoint>& keypoints2,
const std::vector<DMatch>& matches1to2, InputOutputArray outImg,
const Scalar& matchColor, const Scalar& singlePointColor,
const std::vector<char>& matchesMask, DrawMatchesFlags flags )
{
drawMatches( img1, keypoints1,
img2, keypoints2,
matches1to2, outImg,
1, matchColor,
singlePointColor, matchesMask,
flags);
}
void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
InputArray img2, const std::vector<KeyPoint>& keypoints2,
const std::vector<DMatch>& matches1to2, InputOutputArray outImg,
const int matchesThickness, const Scalar& matchColor,
const Scalar& singlePointColor, const std::vector<char>& matchesMask,
DrawMatchesFlags flags )
{
if( !matchesMask.empty() && matchesMask.size() != matches1to2.size() )
CV_Error( Error::StsBadSize, "matchesMask must have the same size as matches1to2" );
Mat outImg1, outImg2;
_prepareImgAndDrawKeypoints( img1, keypoints1, img2, keypoints2,
outImg, outImg1, outImg2, singlePointColor, flags );
// draw matches
for( size_t m = 0; m < matches1to2.size(); m++ )
{
if( matchesMask.empty() || matchesMask[m] )
{
int i1 = matches1to2[m].queryIdx;
int i2 = matches1to2[m].trainIdx;
CV_Assert(i1 >= 0 && i1 < static_cast<int>(keypoints1.size()));
CV_Assert(i2 >= 0 && i2 < static_cast<int>(keypoints2.size()));
const KeyPoint &kp1 = keypoints1[i1], &kp2 = keypoints2[i2];
_drawMatch( outImg, outImg1, outImg2, kp1, kp2, matchColor, flags, matchesThickness );
}
}
}
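// A minimal usage sketch (disabled): a full detect/describe/match/draw
// round trip. ORB and BFMatcher are illustrative choices; any Feature2D
// and DescriptorMatcher pair would work the same way.
#if 0
static void exampleDrawMatches( const Mat& img1, const Mat& img2 )
{
    Ptr<ORB> orb = ORB::create();
    std::vector<KeyPoint> k1, k2;
    Mat d1, d2;
    orb->detectAndCompute( img1, noArray(), k1, d1 );
    orb->detectAndCompute( img2, noArray(), k2, d2 );
    BFMatcher matcher( NORM_HAMMING, /*crossCheck=*/true );
    std::vector<DMatch> matches;
    matcher.match( d1, d2, matches );
    Mat vis;
    drawMatches( img1, k1, img2, k2, matches, vis );
}
#endif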
void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
InputArray img2, const std::vector<KeyPoint>& keypoints2,
const std::vector<std::vector<DMatch> >& matches1to2, InputOutputArray outImg,
const Scalar& matchColor, const Scalar& singlePointColor,
const std::vector<std::vector<char> >& matchesMask, DrawMatchesFlags flags )
{
if( !matchesMask.empty() && matchesMask.size() != matches1to2.size() )
CV_Error( Error::StsBadSize, "matchesMask must have the same size as matches1to2" );
Mat outImg1, outImg2;
_prepareImgAndDrawKeypoints( img1, keypoints1, img2, keypoints2,
outImg, outImg1, outImg2, singlePointColor, flags );
// draw matches
for( size_t i = 0; i < matches1to2.size(); i++ )
{
for( size_t j = 0; j < matches1to2[i].size(); j++ )
{
int i1 = matches1to2[i][j].queryIdx;
int i2 = matches1to2[i][j].trainIdx;
if( matchesMask.empty() || matchesMask[i][j] )
{
const KeyPoint &kp1 = keypoints1[i1], &kp2 = keypoints2[i2];
_drawMatch( outImg, outImg1, outImg2, kp1, kp2, matchColor, flags, 1 );
}
}
}
}
}

View File

@@ -0,0 +1,47 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2010, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv
{
}

View File

@@ -0,0 +1,570 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <limits>
using namespace cv;
template<typename _Tp> static int solveQuadratic(_Tp a, _Tp b, _Tp c, _Tp& x1, _Tp& x2)
{
if( a == 0 )
{
if( b == 0 )
{
x1 = x2 = 0;
return c == 0;
}
x1 = x2 = -c/b;
return 1;
}
_Tp d = b*b - 4*a*c;
if( d < 0 )
{
x1 = x2 = 0;
return 0;
}
if( d > 0 )
{
d = std::sqrt(d);
double s = 1/(2*a);
x1 = (-b - d)*s;
x2 = (-b + d)*s;
if( x1 > x2 )
std::swap(x1, x2);
return 2;
}
x1 = x2 = -b/(2*a);
return 1;
}
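// Worked example: solveQuadratic(1., -3., 2., x1, x2) solves x^2 - 3x + 2 = 0
// and returns 2 with x1 == 1, x2 == 2 (the roots come out sorted). The
// degenerate input a == 0, b == 0 reports a solution only when c == 0.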
//for android ndk
#undef _S
static inline Point2f applyHomography( const Mat_<double>& H, const Point2f& pt )
{
double z = H(2,0)*pt.x + H(2,1)*pt.y + H(2,2);
if( z )
{
double w = 1./z;
return Point2f( (float)((H(0,0)*pt.x + H(0,1)*pt.y + H(0,2))*w), (float)((H(1,0)*pt.x + H(1,1)*pt.y + H(1,2))*w) );
}
return Point2f( std::numeric_limits<float>::max(), std::numeric_limits<float>::max() );
}
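// The point is mapped as (x', y', z')^T = H * (x, y, 1)^T followed by the
// projective division by z'; z' == 0 sends the point to infinity, which is
// signalled here by FLT_MAX coordinates.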
static inline void linearizeHomographyAt( const Mat_<double>& H, const Point2f& pt, Mat_<double>& A )
{
A.create(2,2);
double p1 = H(0,0)*pt.x + H(0,1)*pt.y + H(0,2),
p2 = H(1,0)*pt.x + H(1,1)*pt.y + H(1,2),
p3 = H(2,0)*pt.x + H(2,1)*pt.y + H(2,2),
p3_2 = p3*p3;
if( p3 )
{
A(0,0) = H(0,0)/p3 - p1*H(2,0)/p3_2; // fxdx
A(0,1) = H(0,1)/p3 - p1*H(2,1)/p3_2; // fxdy
A(1,0) = H(1,0)/p3 - p2*H(2,0)/p3_2; // fydx
A(1,1) = H(1,1)/p3 - p2*H(2,1)/p3_2; // fydy
}
else
A.setTo(Scalar::all(std::numeric_limits<double>::max()));
}
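// A is the 2x2 Jacobian of the homography at pt: writing the map as
// f(x,y) = (p1/p3, p2/p3), the quotient rule gives, for example,
// d(p1/p3)/dx = H(0,0)/p3 - p1*H(2,0)/p3^2, which is exactly A(0,0).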
class EllipticKeyPoint
{
public:
EllipticKeyPoint();
EllipticKeyPoint( const Point2f& _center, const Scalar& _ellipse );
static void convert( const std::vector<KeyPoint>& src, std::vector<EllipticKeyPoint>& dst );
static void convert( const std::vector<EllipticKeyPoint>& src, std::vector<KeyPoint>& dst );
static Mat_<double> getSecondMomentsMatrix( const Scalar& _ellipse );
Mat_<double> getSecondMomentsMatrix() const;
void calcProjection( const Mat_<double>& H, EllipticKeyPoint& projection ) const;
static void calcProjection( const std::vector<EllipticKeyPoint>& src, const Mat_<double>& H, std::vector<EllipticKeyPoint>& dst );
Point2f center;
Scalar ellipse; // 3 elements a, b, c: ax^2+2bxy+cy^2=1
Size_<float> axes; // half length of ellipse axes
Size_<float> boundingBox; // half sizes of bounding box which sides are parallel to the coordinate axes
};
EllipticKeyPoint::EllipticKeyPoint()
{
*this = EllipticKeyPoint(Point2f(0,0), Scalar(1, 0, 1) );
}
EllipticKeyPoint::EllipticKeyPoint( const Point2f& _center, const Scalar& _ellipse )
{
center = _center;
ellipse = _ellipse;
double a = ellipse[0], b = ellipse[1], c = ellipse[2];
double ac_b2 = a*c - b*b;
double x1, x2;
solveQuadratic(1., -(a+c), ac_b2, x1, x2);
axes.width = (float)(1/sqrt(x1));
axes.height = (float)(1/sqrt(x2));
boundingBox.width = (float)sqrt(ellipse[2]/ac_b2);
boundingBox.height = (float)sqrt(ellipse[0]/ac_b2);
}
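// The quadratic solved above is the characteristic polynomial of the
// ellipse matrix [[a, b], [b, c]] (trace a+c, determinant ac-b^2); its
// roots are the eigenvalues, and each axis half-length is 1/sqrt(eigenvalue).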
Mat_<double> EllipticKeyPoint::getSecondMomentsMatrix( const Scalar& _ellipse )
{
Mat_<double> M(2, 2);
M(0,0) = _ellipse[0];
M(1,0) = M(0,1) = _ellipse[1];
M(1,1) = _ellipse[2];
return M;
}
Mat_<double> EllipticKeyPoint::getSecondMomentsMatrix() const
{
return getSecondMomentsMatrix(ellipse);
}
void EllipticKeyPoint::calcProjection( const Mat_<double>& H, EllipticKeyPoint& projection ) const
{
Point2f dstCenter = applyHomography(H, center);
Mat_<double> invM; invert(getSecondMomentsMatrix(), invM);
Mat_<double> Aff; linearizeHomographyAt(H, center, Aff);
Mat_<double> dstM; invert(Aff*invM*Aff.t(), dstM);
projection = EllipticKeyPoint( dstCenter, Scalar(dstM(0,0), dstM(0,1), dstM(1,1)) );
}
void EllipticKeyPoint::convert( const std::vector<KeyPoint>& src, std::vector<EllipticKeyPoint>& dst )
{
CV_INSTRUMENT_REGION();
if( !src.empty() )
{
dst.resize(src.size());
for( size_t i = 0; i < src.size(); i++ )
{
float rad = src[i].size/2;
CV_Assert( rad );
float fac = 1.f/(rad*rad);
dst[i] = EllipticKeyPoint( src[i].pt, Scalar(fac, 0, fac) );
}
}
}
void EllipticKeyPoint::convert( const std::vector<EllipticKeyPoint>& src, std::vector<KeyPoint>& dst )
{
CV_INSTRUMENT_REGION();
if( !src.empty() )
{
dst.resize(src.size());
for( size_t i = 0; i < src.size(); i++ )
{
Size_<float> axes = src[i].axes;
float rad = sqrt(axes.height*axes.width);
dst[i] = KeyPoint(src[i].center, 2*rad );
}
}
}
void EllipticKeyPoint::calcProjection( const std::vector<EllipticKeyPoint>& src, const Mat_<double>& H, std::vector<EllipticKeyPoint>& dst )
{
if( !src.empty() )
{
CV_Assert( !H.empty() && H.cols == 3 && H.rows == 3);
dst.resize(src.size());
std::vector<EllipticKeyPoint>::const_iterator srcIt = src.begin();
std::vector<EllipticKeyPoint>::iterator dstIt = dst.begin();
for( ; srcIt != src.end() && dstIt != dst.end(); ++srcIt, ++dstIt )
srcIt->calcProjection(H, *dstIt);
}
}
static void filterEllipticKeyPointsByImageSize( std::vector<EllipticKeyPoint>& keypoints, const Size& imgSize )
{
if( !keypoints.empty() )
{
std::vector<EllipticKeyPoint> filtered;
filtered.reserve(keypoints.size());
std::vector<EllipticKeyPoint>::const_iterator it = keypoints.begin();
for( int i = 0; it != keypoints.end(); ++it, i++ )
{
if( it->center.x + it->boundingBox.width < imgSize.width &&
it->center.x - it->boundingBox.width > 0 &&
it->center.y + it->boundingBox.height < imgSize.height &&
it->center.y - it->boundingBox.height > 0 )
filtered.push_back(*it);
}
keypoints.assign(filtered.begin(), filtered.end());
}
}
struct IntersectAreaCounter
{
IntersectAreaCounter( float _dr, int _minx,
int _miny, int _maxy,
const Point2f& _diff,
const Scalar& _ellipse1, const Scalar& _ellipse2 ) :
dr(_dr), bua(0), bna(0), minx(_minx), miny(_miny), maxy(_maxy),
diff(_diff), ellipse1(_ellipse1), ellipse2(_ellipse2) {}
IntersectAreaCounter( const IntersectAreaCounter& counter, Split )
{
*this = counter;
bua = 0;
bna = 0;
}
void operator()( const BlockedRange& range )
{
CV_Assert( miny < maxy );
CV_Assert( dr > FLT_EPSILON );
int temp_bua = bua, temp_bna = bna;
for( int i = range.begin(); i != range.end(); i++ )
{
float rx1 = minx + i*dr;
float rx2 = rx1 - diff.x;
for( float ry1 = (float)miny; ry1 <= (float)maxy; ry1 += dr )
{
float ry2 = ry1 - diff.y;
//evaluate the sample point against each ellipse equation (value < 1 means inside)
float e1 = (float)(ellipse1[0]*rx1*rx1 + 2*ellipse1[1]*rx1*ry1 + ellipse1[2]*ry1*ry1);
float e2 = (float)(ellipse2[0]*rx2*rx2 + 2*ellipse2[1]*rx2*ry2 + ellipse2[2]*ry2*ry2);
//count samples inside the intersection (bna) and the union (bua)
if( e1<1 && e2<1 ) temp_bna++;
if( e1<1 || e2<1 ) temp_bua++;
}
}
bua = temp_bua;
bna = temp_bna;
}
void join( IntersectAreaCounter& ac )
{
bua += ac.bua;
bna += ac.bna;
}
float dr;
int bua, bna;
int minx;
int miny, maxy;
Point2f diff;
Scalar ellipse1, ellipse2;
};
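// IntersectAreaCounter rasterizes both ellipses over a grid with spacing
// dr and counts samples falling inside their intersection (bna) and their
// union (bua); the overlap used below is the ratio bna / bua.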
struct SIdx
{
SIdx() : S(-1), i1(-1), i2(-1) {}
SIdx(float _S, int _i1, int _i2) : S(_S), i1(_i1), i2(_i2) {}
float S;
int i1;
int i2;
bool operator<(const SIdx& v) const { return S > v.S; }
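    // note: '<' is deliberately inverted so std::sort orders by decreasing S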
struct UsedFinder
{
UsedFinder(const SIdx& _used) : used(_used) {}
const SIdx& used;
bool operator()(const SIdx& v) const { return (v.i1 == used.i1 || v.i2 == used.i2); }
UsedFinder& operator=(const UsedFinder&) = delete;
};
};
static void computeOneToOneMatchedOverlaps( const std::vector<EllipticKeyPoint>& keypoints1, const std::vector<EllipticKeyPoint>& keypoints2t,
bool commonPart, std::vector<SIdx>& overlaps, float minOverlap )
{
CV_Assert( minOverlap >= 0.f );
overlaps.clear();
if( keypoints1.empty() || keypoints2t.empty() )
return;
overlaps.reserve(cvRound(keypoints1.size() * keypoints2t.size() * 0.01));
for( size_t i1 = 0; i1 < keypoints1.size(); i1++ )
{
EllipticKeyPoint kp1 = keypoints1[i1];
float maxDist = sqrt(kp1.axes.width*kp1.axes.height),
fac = 30.f/maxDist;
if( !commonPart )
fac=3;
maxDist = maxDist*4;
fac = 1.f/(fac*fac);
EllipticKeyPoint keypoint1a = EllipticKeyPoint( kp1.center, Scalar(fac*kp1.ellipse[0], fac*kp1.ellipse[1], fac*kp1.ellipse[2]) );
for( size_t i2 = 0; i2 < keypoints2t.size(); i2++ )
{
EllipticKeyPoint kp2 = keypoints2t[i2];
Point2f diff = kp2.center - kp1.center;
if( norm(diff) < maxDist )
{
EllipticKeyPoint keypoint2a = EllipticKeyPoint( kp2.center, Scalar(fac*kp2.ellipse[0], fac*kp2.ellipse[1], fac*kp2.ellipse[2]) );
//compute an axis-aligned bounding box of the union of the two ellipses
int maxx = (int)ceil(( keypoint1a.boundingBox.width > (diff.x+keypoint2a.boundingBox.width)) ?
keypoint1a.boundingBox.width : (diff.x+keypoint2a.boundingBox.width));
int minx = (int)floor((-keypoint1a.boundingBox.width < (diff.x-keypoint2a.boundingBox.width)) ?
-keypoint1a.boundingBox.width : (diff.x-keypoint2a.boundingBox.width));
int maxy = (int)ceil(( keypoint1a.boundingBox.height > (diff.y+keypoint2a.boundingBox.height)) ?
keypoint1a.boundingBox.height : (diff.y+keypoint2a.boundingBox.height));
int miny = (int)floor((-keypoint1a.boundingBox.height < (diff.y-keypoint2a.boundingBox.height)) ?
-keypoint1a.boundingBox.height : (diff.y-keypoint2a.boundingBox.height));
int mina = (maxx-minx) < (maxy-miny) ? (maxx-minx) : (maxy-miny);
//estimate the intersection and union areas by sampling the box on a grid
float dr = (float)mina/50.f;
int N = (int)floor((float)(maxx - minx) / dr);
IntersectAreaCounter ac( dr, minx, miny, maxy, diff, keypoint1a.ellipse, keypoint2a.ellipse );
parallel_reduce( BlockedRange(0, N+1), ac );
if( ac.bna > 0 )
{
float ov = (float)ac.bna / (float)ac.bua;
if( ov >= minOverlap )
overlaps.push_back(SIdx(ov, (int)i1, (int)i2));
}
}
}
}
std::sort( overlaps.begin(), overlaps.end() );
typedef std::vector<SIdx>::iterator It;
It pos = overlaps.begin();
It end = overlaps.end();
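    // greedy one-to-one assignment: walk the pairs in decreasing overlap
    // order and drop every later pair that reuses either keypoint of an
    // already accepted pair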
while(pos != end)
{
It prev = pos++;
end = std::remove_if(pos, end, SIdx::UsedFinder(*prev));
}
overlaps.erase(pos, overlaps.end());
}
static void calculateRepeatability( const Mat& img1, const Mat& img2, const Mat& H1to2,
const std::vector<KeyPoint>& _keypoints1, const std::vector<KeyPoint>& _keypoints2,
float& repeatability, int& correspondencesCount,
Mat* thresholdedOverlapMask=0 )
{
std::vector<EllipticKeyPoint> keypoints1, keypoints2, keypoints1t, keypoints2t;
EllipticKeyPoint::convert( _keypoints1, keypoints1 );
EllipticKeyPoint::convert( _keypoints2, keypoints2 );
// calculate projections of key points
EllipticKeyPoint::calcProjection( keypoints1, H1to2, keypoints1t );
Mat H2to1; invert(H1to2, H2to1);
EllipticKeyPoint::calcProjection( keypoints2, H2to1, keypoints2t );
float overlapThreshold;
bool ifEvaluateDetectors = thresholdedOverlapMask == 0;
if( ifEvaluateDetectors )
{
overlapThreshold = 1.f - 0.4f;
// remove key points from outside of the common image part
Size sz1 = img1.size(), sz2 = img2.size();
filterEllipticKeyPointsByImageSize( keypoints1, sz1 );
filterEllipticKeyPointsByImageSize( keypoints1t, sz2 );
filterEllipticKeyPointsByImageSize( keypoints2, sz2 );
filterEllipticKeyPointsByImageSize( keypoints2t, sz1 );
}
else
{
overlapThreshold = 1.f - 0.5f;
thresholdedOverlapMask->create( (int)keypoints1.size(), (int)keypoints2t.size(), CV_8UC1 );
thresholdedOverlapMask->setTo( Scalar::all(0) );
}
size_t size1 = keypoints1.size(), size2 = keypoints2t.size();
size_t minCount = MIN( size1, size2 );
// calculate overlap errors
std::vector<SIdx> overlaps;
computeOneToOneMatchedOverlaps( keypoints1, keypoints2t, ifEvaluateDetectors, overlaps, overlapThreshold/*min overlap*/ );
correspondencesCount = -1;
repeatability = -1.f;
if( overlaps.empty() )
return;
if( ifEvaluateDetectors )
{
// regions one-to-one matching
correspondencesCount = (int)overlaps.size();
repeatability = minCount ? (float)correspondencesCount / minCount : -1;
}
else
{
for( size_t i = 0; i < overlaps.size(); i++ )
{
int y = overlaps[i].i1;
int x = overlaps[i].i2;
thresholdedOverlapMask->at<uchar>(y,x) = 1;
}
}
}
void cv::evaluateFeatureDetector( const Mat& img1, const Mat& img2, const Mat& H1to2,
std::vector<KeyPoint>* _keypoints1, std::vector<KeyPoint>* _keypoints2,
float& repeatability, int& correspCount,
const Ptr<FeatureDetector>& _fdetector )
{
CV_INSTRUMENT_REGION();
Ptr<FeatureDetector> fdetector(_fdetector);
std::vector<KeyPoint> *keypoints1, *keypoints2, buf1, buf2;
keypoints1 = _keypoints1 != 0 ? _keypoints1 : &buf1;
keypoints2 = _keypoints2 != 0 ? _keypoints2 : &buf2;
if( (keypoints1->empty() || keypoints2->empty()) && !fdetector )
CV_Error( Error::StsBadArg, "fdetector must not be empty when keypoints1 or keypoints2 is empty" );
if( keypoints1->empty() )
fdetector->detect( img1, *keypoints1 );
if( keypoints2->empty() )
fdetector->detect( img2, *keypoints2 );
calculateRepeatability( img1, img2, H1to2, *keypoints1, *keypoints2, repeatability, correspCount );
}
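// A minimal usage sketch (disabled): H1to2 would come from a dataset with
// known ground-truth homographies (e.g. the Oxford sequences); that is an
// assumption about the caller, not something provided by this module.
#if 0
static void exampleEvaluateDetector( const Mat& img1, const Mat& img2, const Mat& H1to2 )
{
    float repeatability = 0.f;
    int correspCount = 0;
    Ptr<FeatureDetector> det = FastFeatureDetector::create();
    // passing null keypoint vectors makes the function run the detector itself
    evaluateFeatureDetector( img1, img2, H1to2, 0, 0,
                             repeatability, correspCount, det );
}
#endif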
struct DMatchForEvaluation : public DMatch
{
uchar isCorrect;
DMatchForEvaluation( const DMatch &dm ) : DMatch( dm ), isCorrect(0) {}
};
static inline float recall( int correctMatchCount, int correspondenceCount )
{
return correspondenceCount ? (float)correctMatchCount / (float)correspondenceCount : -1;
}
static inline float precision( int correctMatchCount, int falseMatchCount )
{
return correctMatchCount + falseMatchCount ? (float)correctMatchCount / (float)(correctMatchCount + falseMatchCount) : -1;
}
void cv::computeRecallPrecisionCurve( const std::vector<std::vector<DMatch> >& matches1to2,
const std::vector<std::vector<uchar> >& correctMatches1to2Mask,
std::vector<Point2f>& recallPrecisionCurve )
{
CV_INSTRUMENT_REGION();
CV_Assert( matches1to2.size() == correctMatches1to2Mask.size() );
std::vector<DMatchForEvaluation> allMatches;
int correspondenceCount = 0;
for( size_t i = 0; i < matches1to2.size(); i++ )
{
for( size_t j = 0; j < matches1to2[i].size(); j++ )
{
DMatchForEvaluation match = matches1to2[i][j];
match.isCorrect = correctMatches1to2Mask[i][j] ;
allMatches.push_back( match );
correspondenceCount += match.isCorrect != 0 ? 1 : 0;
}
}
std::sort( allMatches.begin(), allMatches.end() );
int correctMatchCount = 0, falseMatchCount = 0;
recallPrecisionCurve.resize( allMatches.size() );
for( size_t i = 0; i < allMatches.size(); i++ )
{
if( allMatches[i].isCorrect )
correctMatchCount++;
else
falseMatchCount++;
float r = recall( correctMatchCount, correspondenceCount );
float p = precision( correctMatchCount, falseMatchCount );
recallPrecisionCurve[i] = Point2f(1-p, r);
}
}
float cv::getRecall( const std::vector<Point2f>& recallPrecisionCurve, float l_precision )
{
CV_INSTRUMENT_REGION();
int nearestPointIndex = getNearestPoint( recallPrecisionCurve, l_precision );
float recall = -1.f;
if( nearestPointIndex >= 0 )
recall = recallPrecisionCurve[nearestPointIndex].y;
return recall;
}
int cv::getNearestPoint( const std::vector<Point2f>& recallPrecisionCurve, float l_precision )
{
CV_INSTRUMENT_REGION();
int nearestPointIndex = -1;
if( l_precision >= 0 && l_precision <= 1 )
{
float minDiff = FLT_MAX;
for( size_t i = 0; i < recallPrecisionCurve.size(); i++ )
{
float curDiff = std::fabs(l_precision - recallPrecisionCurve[i].x);
if( curDiff <= minDiff )
{
nearestPointIndex = (int)i;
minDiff = curDiff;
}
}
}
return nearestPointIndex;
}
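// A minimal usage sketch (disabled): correctMask would typically come from
// the thresholded overlap mask computed by calculateRepeatability (an
// assumption about the surrounding pipeline).
#if 0
static float exampleRecallAtLowFalseRate( const std::vector<std::vector<DMatch> >& matches,
                                          const std::vector<std::vector<uchar> >& correctMask )
{
    std::vector<Point2f> curve;
    computeRecallPrecisionCurve( matches, correctMask, curve );
    // the query value is matched against the curve's x coordinate,
    // which stores 1 - precision (see computeRecallPrecisionCurve)
    return getRecall( curve, 0.1f );
}
#endif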

View File

@@ -0,0 +1,184 @@
/* This is FAST corner detector, contributed to OpenCV by the author, Edward Rosten.
Below is the original copyright and the references */
/*
Copyright (c) 2006, 2008 Edward Rosten
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
*Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
*Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
*Neither the name of the University of Cambridge nor the names of
its contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The references are:
* Machine learning for high-speed corner detection,
E. Rosten and T. Drummond, ECCV 2006
* Faster and better: A machine learning approach to corner detection
E. Rosten, R. Porter and T. Drummond, PAMI, 2009
*/
#include "precomp.hpp"
#include "fast.hpp"
#include "opencv2/core/hal/intrin.hpp"
namespace cv
{
namespace opt_AVX2
{
class FAST_t_patternSize16_AVX2_Impl CV_FINAL: public FAST_t_patternSize16_AVX2
{
public:
FAST_t_patternSize16_AVX2_Impl(int _cols, int _threshold, bool _nonmax_suppression, const int* _pixel):
cols(_cols), nonmax_suppression(_nonmax_suppression), pixel(_pixel)
{
//patternSize = 16
t256c = (char)_threshold;
threshold = std::min(std::max(_threshold, 0), 255);
}
virtual void process(int &j, const uchar* &ptr, uchar* curr, int* cornerpos, int &ncorners) CV_OVERRIDE
{
static const __m256i delta256 = _mm256_broadcastsi128_si256(_mm_set1_epi8((char)(-128))), K16_256 = _mm256_broadcastsi128_si256(_mm_set1_epi8((char)8));
const __m256i t256 = _mm256_broadcastsi128_si256(_mm_set1_epi8(t256c));
for (; j < cols - 32 - 3; j += 32, ptr += 32)
{
__m256i m0, m1;
__m256i v0 = _mm256_loadu_si256((const __m256i*)ptr);
__m256i v1 = _mm256_xor_si256(_mm256_subs_epu8(v0, t256), delta256);
v0 = _mm256_xor_si256(_mm256_adds_epu8(v0, t256), delta256);
__m256i x0 = _mm256_sub_epi8(_mm256_loadu_si256((const __m256i*)(ptr + pixel[0])), delta256);
__m256i x1 = _mm256_sub_epi8(_mm256_loadu_si256((const __m256i*)(ptr + pixel[4])), delta256);
__m256i x2 = _mm256_sub_epi8(_mm256_loadu_si256((const __m256i*)(ptr + pixel[8])), delta256);
__m256i x3 = _mm256_sub_epi8(_mm256_loadu_si256((const __m256i*)(ptr + pixel[12])), delta256);
m0 = _mm256_and_si256(_mm256_cmpgt_epi8(x0, v0), _mm256_cmpgt_epi8(x1, v0));
m1 = _mm256_and_si256(_mm256_cmpgt_epi8(v1, x0), _mm256_cmpgt_epi8(v1, x1));
m0 = _mm256_or_si256(m0, _mm256_and_si256(_mm256_cmpgt_epi8(x1, v0), _mm256_cmpgt_epi8(x2, v0)));
m1 = _mm256_or_si256(m1, _mm256_and_si256(_mm256_cmpgt_epi8(v1, x1), _mm256_cmpgt_epi8(v1, x2)));
m0 = _mm256_or_si256(m0, _mm256_and_si256(_mm256_cmpgt_epi8(x2, v0), _mm256_cmpgt_epi8(x3, v0)));
m1 = _mm256_or_si256(m1, _mm256_and_si256(_mm256_cmpgt_epi8(v1, x2), _mm256_cmpgt_epi8(v1, x3)));
m0 = _mm256_or_si256(m0, _mm256_and_si256(_mm256_cmpgt_epi8(x3, v0), _mm256_cmpgt_epi8(x0, v0)));
m1 = _mm256_or_si256(m1, _mm256_and_si256(_mm256_cmpgt_epi8(v1, x3), _mm256_cmpgt_epi8(v1, x0)));
m0 = _mm256_or_si256(m0, m1);
unsigned int mask = _mm256_movemask_epi8(m0); //unsigned is important!
if (mask == 0){
continue;
}
if ((mask & 0xffff) == 0)
{
j -= 16;
ptr -= 16;
continue;
}
__m256i c0 = _mm256_setzero_si256(), c1 = c0, max0 = c0, max1 = c0;
for (int k = 0; k < 25; k++)
{
__m256i x = _mm256_xor_si256(_mm256_loadu_si256((const __m256i*)(ptr + pixel[k])), delta256);
m0 = _mm256_cmpgt_epi8(x, v0);
m1 = _mm256_cmpgt_epi8(v1, x);
c0 = _mm256_and_si256(_mm256_sub_epi8(c0, m0), m0);
c1 = _mm256_and_si256(_mm256_sub_epi8(c1, m1), m1);
max0 = _mm256_max_epu8(max0, c0);
max1 = _mm256_max_epu8(max1, c1);
}
max0 = _mm256_max_epu8(max0, max1);
unsigned int m = _mm256_movemask_epi8(_mm256_cmpgt_epi8(max0, K16_256));
for (int k = 0; m > 0 && k < 32; k++, m >>= 1)
if (m & 1)
{
cornerpos[ncorners++] = j + k;
if (nonmax_suppression)
{
short d[25];
for (int q = 0; q < 25; q++)
d[q] = (short)(ptr[k] - ptr[k + pixel[q]]);
v_int16x8 q0 = v_setall_s16(-1000), q1 = v_setall_s16(1000);
for (int q = 0; q < 16; q += 8)
{
v_int16x8 v0_ = v_load(d + q + 1);
v_int16x8 v1_ = v_load(d + q + 2);
v_int16x8 a = v_min(v0_, v1_);
v_int16x8 b = v_max(v0_, v1_);
v0_ = v_load(d + q + 3);
a = v_min(a, v0_);
b = v_max(b, v0_);
v0_ = v_load(d + q + 4);
a = v_min(a, v0_);
b = v_max(b, v0_);
v0_ = v_load(d + q + 5);
a = v_min(a, v0_);
b = v_max(b, v0_);
v0_ = v_load(d + q + 6);
a = v_min(a, v0_);
b = v_max(b, v0_);
v0_ = v_load(d + q + 7);
a = v_min(a, v0_);
b = v_max(b, v0_);
v0_ = v_load(d + q + 8);
a = v_min(a, v0_);
b = v_max(b, v0_);
v0_ = v_load(d + q);
q0 = v_max(q0, v_min(a, v0_));
q1 = v_min(q1, v_max(b, v0_));
v0_ = v_load(d + q + 9);
q0 = v_max(q0, v_min(a, v0_));
q1 = v_min(q1, v_max(b, v0_));
}
q0 = v_max(q0, v_setzero_s16() - q1);
curr[j + k] = (uchar)(v_reduce_max(q0) - 1);
}
}
}
_mm256_zeroupper();
}
virtual ~FAST_t_patternSize16_AVX2_Impl() CV_OVERRIDE {};
private:
int cols;
char t256c;
int threshold;
bool nonmax_suppression;
const int* pixel;
};
Ptr<FAST_t_patternSize16_AVX2> FAST_t_patternSize16_AVX2::getImpl(int _cols, int _threshold, bool _nonmax_suppression, const int* _pixel)
{
return Ptr<FAST_t_patternSize16_AVX2>(new FAST_t_patternSize16_AVX2_Impl(_cols, _threshold, _nonmax_suppression, _pixel));
}
}
}

View File

@@ -0,0 +1,613 @@
/* This is FAST corner detector, contributed to OpenCV by the author, Edward Rosten.
Below is the original copyright and the references */
/*
Copyright (c) 2006, 2008 Edward Rosten
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
*Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
*Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
*Neither the name of the University of Cambridge nor the names of
its contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The references are:
* Machine learning for high-speed corner detection,
E. Rosten and T. Drummond, ECCV 2006
* Faster and better: A machine learning approach to corner detection
E. Rosten, R. Porter and T. Drummond, PAMI, 2009
*/
#include "precomp.hpp"
#include "fast.hpp"
#include "fast_score.hpp"
#include "opencl_kernels_features2d.hpp"
#include "hal_replacement.hpp"
#include "opencv2/core/hal/intrin.hpp"
#include "opencv2/core/utils/buffer_area.private.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
namespace cv
{
template<int patternSize>
void FAST_t(InputArray _img, std::vector<KeyPoint>& keypoints, int threshold, bool nonmax_suppression)
{
Mat img = _img.getMat();
const int K = patternSize/2, N = patternSize + K + 1;
int i, j, k, pixel[25];
makeOffsets(pixel, (int)img.step, patternSize);
#if CV_SIMD128
const int quarterPatternSize = patternSize/4;
v_uint8x16 delta = v_setall_u8(0x80), t = v_setall_u8((char)threshold), K16 = v_setall_u8((char)K);
#if CV_TRY_AVX2
Ptr<opt_AVX2::FAST_t_patternSize16_AVX2> fast_t_impl_avx2;
if(CV_CPU_HAS_SUPPORT_AVX2)
fast_t_impl_avx2 = opt_AVX2::FAST_t_patternSize16_AVX2::getImpl(img.cols, threshold, nonmax_suppression, pixel);
#endif
#endif
keypoints.clear();
threshold = std::min(std::max(threshold, 0), 255);
uchar threshold_tab[512];
for( i = -255; i <= 255; i++ )
threshold_tab[i+255] = (uchar)(i < -threshold ? 1 : i > threshold ? 2 : 0);
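    // The table just built classifies a ring pixel x relative to the center
    // value v: 1 if x < v - threshold (darker), 2 if x > v + threshold
    // (brighter), 0 otherwise. Since a valid arc of K+1 contiguous pixels
    // covers more than half of the ring, ORing the codes of two
    // well-separated ring pixels and ANDing across several such pairs
    // (see the scalar loop below) rejects most non-corners early.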
uchar* buf[3] = { 0 };
int* cpbuf[3] = { 0 };
utils::BufferArea area;
for (unsigned idx = 0; idx < 3; ++idx)
{
area.allocate(buf[idx], img.cols);
area.allocate(cpbuf[idx], img.cols + 1);
}
area.commit();
for (unsigned idx = 0; idx < 3; ++idx)
{
memset(buf[idx], 0, img.cols);
}
for(i = 3; i < img.rows-2; i++)
{
const uchar* ptr = img.ptr<uchar>(i) + 3;
uchar* curr = buf[(i - 3)%3];
int* cornerpos = cpbuf[(i - 3)%3] + 1; // cornerpos[-1] is used to store a value
memset(curr, 0, img.cols);
int ncorners = 0;
if( i < img.rows - 3 )
{
j = 3;
#if CV_SIMD128
{
if( patternSize == 16 )
{
#if CV_TRY_AVX2
if (fast_t_impl_avx2)
fast_t_impl_avx2->process(j, ptr, curr, cornerpos, ncorners);
#endif
//vz if (j <= (img.cols - 27)) //it doesn't make sense using vectors for less than 8 elements
{
for (; j < img.cols - 16 - 3; j += 16, ptr += 16)
{
v_uint8x16 v = v_load(ptr);
v_int8x16 v0 = v_reinterpret_as_s8((v + t) ^ delta);
v_int8x16 v1 = v_reinterpret_as_s8((v - t) ^ delta);
v_int8x16 x0 = v_reinterpret_as_s8(v_sub_wrap(v_load(ptr + pixel[0]), delta));
v_int8x16 x1 = v_reinterpret_as_s8(v_sub_wrap(v_load(ptr + pixel[quarterPatternSize]), delta));
v_int8x16 x2 = v_reinterpret_as_s8(v_sub_wrap(v_load(ptr + pixel[2*quarterPatternSize]), delta));
v_int8x16 x3 = v_reinterpret_as_s8(v_sub_wrap(v_load(ptr + pixel[3*quarterPatternSize]), delta));
v_int8x16 m0, m1;
m0 = (v0 < x0) & (v0 < x1);
m1 = (x0 < v1) & (x1 < v1);
m0 = m0 | ((v0 < x1) & (v0 < x2));
m1 = m1 | ((x1 < v1) & (x2 < v1));
m0 = m0 | ((v0 < x2) & (v0 < x3));
m1 = m1 | ((x2 < v1) & (x3 < v1));
m0 = m0 | ((v0 < x3) & (v0 < x0));
m1 = m1 | ((x3 < v1) & (x0 < v1));
m0 = m0 | m1;
if( !v_check_any(m0) )
continue;
if( !v_check_any(v_combine_low(m0, m0)) )
{
j -= 8;
ptr -= 8;
continue;
}
v_int8x16 c0 = v_setzero_s8();
v_int8x16 c1 = v_setzero_s8();
v_uint8x16 max0 = v_setzero_u8();
v_uint8x16 max1 = v_setzero_u8();
for( k = 0; k < N; k++ )
{
v_int8x16 x = v_reinterpret_as_s8(v_load((ptr + pixel[k])) ^ delta);
m0 = v0 < x;
m1 = x < v1;
c0 = v_sub_wrap(c0, m0) & m0;
c1 = v_sub_wrap(c1, m1) & m1;
max0 = v_max(max0, v_reinterpret_as_u8(c0));
max1 = v_max(max1, v_reinterpret_as_u8(c1));
}
max0 = K16 < v_max(max0, max1);
unsigned int m = v_signmask(v_reinterpret_as_s8(max0));
for( k = 0; m > 0 && k < 16; k++, m >>= 1 )
{
if( m & 1 )
{
cornerpos[ncorners++] = j+k;
if(nonmax_suppression)
{
short d[25];
for (int _k = 0; _k < 25; _k++)
d[_k] = (short)(ptr[k] - ptr[k + pixel[_k]]);
v_int16x8 a0, b0, a1, b1;
a0 = b0 = a1 = b1 = v_load(d + 8);
for(int shift = 0; shift < 8; ++shift)
{
v_int16x8 v_nms = v_load(d + shift);
a0 = v_min(a0, v_nms);
b0 = v_max(b0, v_nms);
v_nms = v_load(d + 9 + shift);
a1 = v_min(a1, v_nms);
b1 = v_max(b1, v_nms);
}
curr[j + k] = (uchar)(v_reduce_max(v_max(v_max(a0, a1), v_setzero_s16() - v_min(b0, b1))) - 1);
}
}
}
}
}
}
}
#endif
for( ; j < img.cols - 3; j++, ptr++ )
{
int v = ptr[0];
const uchar* tab = &threshold_tab[0] - v + 255;
int d = tab[ptr[pixel[0]]] | tab[ptr[pixel[8]]];
if( d == 0 )
continue;
d &= tab[ptr[pixel[2]]] | tab[ptr[pixel[10]]];
d &= tab[ptr[pixel[4]]] | tab[ptr[pixel[12]]];
d &= tab[ptr[pixel[6]]] | tab[ptr[pixel[14]]];
if( d == 0 )
continue;
d &= tab[ptr[pixel[1]]] | tab[ptr[pixel[9]]];
d &= tab[ptr[pixel[3]]] | tab[ptr[pixel[11]]];
d &= tab[ptr[pixel[5]]] | tab[ptr[pixel[13]]];
d &= tab[ptr[pixel[7]]] | tab[ptr[pixel[15]]];
if( d & 1 )
{
int vt = v - threshold, count = 0;
for( k = 0; k < N; k++ )
{
int x = ptr[pixel[k]];
if(x < vt)
{
if( ++count > K )
{
cornerpos[ncorners++] = j;
if(nonmax_suppression)
curr[j] = (uchar)cornerScore<patternSize>(ptr, pixel, threshold);
break;
}
}
else
count = 0;
}
}
if( d & 2 )
{
int vt = v + threshold, count = 0;
for( k = 0; k < N; k++ )
{
int x = ptr[pixel[k]];
if(x > vt)
{
if( ++count > K )
{
cornerpos[ncorners++] = j;
if(nonmax_suppression)
curr[j] = (uchar)cornerScore<patternSize>(ptr, pixel, threshold);
break;
}
}
else
count = 0;
}
}
}
}
cornerpos[-1] = ncorners;
if( i == 3 )
continue;
const uchar* prev = buf[(i - 4 + 3)%3];
const uchar* pprev = buf[(i - 5 + 3)%3];
cornerpos = cpbuf[(i - 4 + 3)%3] + 1; // cornerpos[-1] is used to store a value
ncorners = cornerpos[-1];
for( k = 0; k < ncorners; k++ )
{
j = cornerpos[k];
int score = prev[j];
if( !nonmax_suppression ||
(score > prev[j+1] && score > prev[j-1] &&
score > pprev[j-1] && score > pprev[j] && score > pprev[j+1] &&
score > curr[j-1] && score > curr[j] && score > curr[j+1]) )
{
keypoints.push_back(KeyPoint((float)j, (float)(i-1), 7.f, -1, (float)score));
}
}
}
}
#ifdef HAVE_OPENCL
template<typename pt>
struct cmp_pt
{
bool operator ()(const pt& a, const pt& b) const { return a.y < b.y || (a.y == b.y && a.x < b.x); }
};
static bool ocl_FAST( InputArray _img, std::vector<KeyPoint>& keypoints,
int threshold, bool nonmax_suppression, int maxKeypoints )
{
UMat img = _img.getUMat();
if( img.cols < 7 || img.rows < 7 )
return false;
size_t globalsize[] = { (size_t)img.cols-6, (size_t)img.rows-6 };
ocl::Kernel fastKptKernel("FAST_findKeypoints", ocl::features2d::fast_oclsrc);
if (fastKptKernel.empty())
return false;
UMat kp1(1, maxKeypoints*2+1, CV_32S);
UMat ucounter1(kp1, Rect(0,0,1,1));
ucounter1.setTo(Scalar::all(0));
if( !fastKptKernel.args(ocl::KernelArg::ReadOnly(img),
ocl::KernelArg::PtrReadWrite(kp1),
maxKeypoints, threshold).run(2, globalsize, 0, true))
return false;
Mat mcounter;
ucounter1.copyTo(mcounter);
int i, counter = mcounter.at<int>(0);
counter = std::min(counter, maxKeypoints);
keypoints.clear();
if( counter == 0 )
return true;
if( !nonmax_suppression )
{
Mat m;
kp1(Rect(0, 0, counter*2+1, 1)).copyTo(m);
const Point* pt = (const Point*)(m.ptr<int>() + 1);
for( i = 0; i < counter; i++ )
keypoints.push_back(KeyPoint((float)pt[i].x, (float)pt[i].y, 7.f, -1, 1.f));
}
else
{
UMat kp2(1, maxKeypoints*3+1, CV_32S);
UMat ucounter2 = kp2(Rect(0,0,1,1));
ucounter2.setTo(Scalar::all(0));
ocl::Kernel fastNMSKernel("FAST_nonmaxSupression", ocl::features2d::fast_oclsrc);
if (fastNMSKernel.empty())
return false;
size_t globalsize_nms[] = { (size_t)counter };
if( !fastNMSKernel.args(ocl::KernelArg::PtrReadOnly(kp1),
ocl::KernelArg::PtrReadWrite(kp2),
ocl::KernelArg::ReadOnly(img),
counter, counter).run(1, globalsize_nms, 0, true))
return false;
Mat m2;
kp2(Rect(0, 0, counter*3+1, 1)).copyTo(m2);
Point3i* pt2 = (Point3i*)(m2.ptr<int>() + 1);
int newcounter = std::min(m2.at<int>(0), counter);
std::sort(pt2, pt2 + newcounter, cmp_pt<Point3i>());
for( i = 0; i < newcounter; i++ )
keypoints.push_back(KeyPoint((float)pt2[i].x, (float)pt2[i].y, 7.f, -1, (float)pt2[i].z));
}
return true;
}
#endif
#ifdef HAVE_OPENVX
namespace ovx {
template <> inline bool skipSmallImages<VX_KERNEL_FAST_CORNERS>(int w, int h) { return w*h < 800 * 600; }
}
static bool openvx_FAST(InputArray _img, std::vector<KeyPoint>& keypoints,
int _threshold, bool nonmaxSuppression, int type)
{
using namespace ivx;
// Nonmax suppression is done differently in OpenCV than in OpenVX
// 9/16 is the only supported mode in OpenVX
if(nonmaxSuppression || type != FastFeatureDetector::TYPE_9_16)
return false;
Mat imgMat = _img.getMat();
if(imgMat.empty() || imgMat.type() != CV_8UC1)
return false;
if (ovx::skipSmallImages<VX_KERNEL_FAST_CORNERS>(imgMat.cols, imgMat.rows))
return false;
try
{
Context context = ovx::getOpenVXContext();
Image img = Image::createFromHandle(context, Image::matTypeToFormat(imgMat.type()),
Image::createAddressing(imgMat), (void*)imgMat.data);
ivx::Scalar threshold = ivx::Scalar::create<VX_TYPE_FLOAT32>(context, _threshold);
vx_size capacity = imgMat.cols * imgMat.rows;
Array corners = Array::create(context, VX_TYPE_KEYPOINT, capacity);
ivx::Scalar numCorners = ivx::Scalar::create<VX_TYPE_SIZE>(context, 0);
IVX_CHECK_STATUS(vxuFastCorners(context, img, threshold, (vx_bool)nonmaxSuppression, corners, numCorners));
size_t nPoints = numCorners.getValue<vx_size>();
keypoints.clear(); keypoints.reserve(nPoints);
std::vector<vx_keypoint_t> vxCorners;
corners.copyTo(vxCorners);
for(size_t i = 0; i < nPoints; i++)
{
vx_keypoint_t kp = vxCorners[i];
//if nonmaxSuppression is false, kp.strength is undefined
keypoints.push_back(KeyPoint((float)kp.x, (float)kp.y, 7.f, -1, kp.strength));
}
#ifdef VX_VERSION_1_1
//we should take user memory back before release
//(it's not done automatically according to standard)
img.swapHandle();
#endif
}
catch (const RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
#endif
static inline int hal_FAST(cv::Mat& src, std::vector<KeyPoint>& keypoints, int threshold, bool nonmax_suppression, FastFeatureDetector::DetectorType type)
{
if (threshold > 20)
return CV_HAL_ERROR_NOT_IMPLEMENTED;
cv::Mat scores(src.size(), src.type());
int error = cv_hal_FAST_dense(src.data, src.step, scores.data, scores.step, src.cols, src.rows, type);
if (error != CV_HAL_ERROR_OK)
return error;
cv::Mat suppressedScores(src.size(), src.type());
if (nonmax_suppression)
{
error = cv_hal_FAST_NMS(scores.data, scores.step, suppressedScores.data, suppressedScores.step, scores.cols, scores.rows);
if (error != CV_HAL_ERROR_OK)
return error;
}
else
{
suppressedScores = scores;
}
if (!threshold && nonmax_suppression) threshold = 1;
cv::KeyPoint kpt(0, 0, 7.f, -1, 0);
unsigned uthreshold = (unsigned) threshold;
int ofs = 3;
int stride = (int)suppressedScores.step;
const unsigned char* pscore = suppressedScores.data;
keypoints.clear();
for (int y = ofs; y + ofs < suppressedScores.rows; ++y)
{
kpt.pt.y = (float)(y);
for (int x = ofs; x + ofs < suppressedScores.cols; ++x)
{
unsigned score = pscore[y * stride + x];
if (score > uthreshold)
{
kpt.pt.x = (float)(x);
kpt.response = (nonmax_suppression != 0) ? (float)((int)score - 1) : 0.f;
keypoints.push_back(kpt);
}
}
}
return CV_HAL_ERROR_OK;
}
void FAST(InputArray _img, std::vector<KeyPoint>& keypoints, int threshold, bool nonmax_suppression, FastFeatureDetector::DetectorType type)
{
CV_INSTRUMENT_REGION();
CV_OCL_RUN(_img.isUMat() && type == FastFeatureDetector::TYPE_9_16,
ocl_FAST(_img, keypoints, threshold, nonmax_suppression, 10000));
cv::Mat img = _img.getMat();
CALL_HAL(fast_dense, hal_FAST, img, keypoints, threshold, nonmax_suppression, type);
size_t keypoints_count;
CALL_HAL(fast, cv_hal_FAST, img.data, img.step, img.cols, img.rows,
(uchar*)(keypoints.data()), &keypoints_count, threshold, nonmax_suppression, type);
CV_OVX_RUN(true,
openvx_FAST(_img, keypoints, threshold, nonmax_suppression, type))
switch(type) {
case FastFeatureDetector::TYPE_5_8:
FAST_t<8>(_img, keypoints, threshold, nonmax_suppression);
break;
case FastFeatureDetector::TYPE_7_12:
FAST_t<12>(_img, keypoints, threshold, nonmax_suppression);
break;
case FastFeatureDetector::TYPE_9_16:
FAST_t<16>(_img, keypoints, threshold, nonmax_suppression);
break;
}
}
void FAST(InputArray _img, std::vector<KeyPoint>& keypoints, int threshold, bool nonmax_suppression)
{
CV_INSTRUMENT_REGION();
FAST(_img, keypoints, threshold, nonmax_suppression, FastFeatureDetector::TYPE_9_16);
}
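// A minimal usage sketch (disabled): run the free function directly on an
// 8-bit grayscale image (the input type this path expects).
#if 0
static void exampleFAST( const Mat& gray )
{
    std::vector<KeyPoint> kpts;
    FAST( gray, kpts, /*threshold=*/20, /*nonmaxSuppression=*/true,
          FastFeatureDetector::TYPE_9_16 );
}
#endif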
class FastFeatureDetector_Impl CV_FINAL : public FastFeatureDetector
{
public:
FastFeatureDetector_Impl( int _threshold, bool _nonmaxSuppression, FastFeatureDetector::DetectorType _type )
: threshold(_threshold), nonmaxSuppression(_nonmaxSuppression), type(_type)
{}
void detect( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
if(_image.empty())
{
keypoints.clear();
return;
}
Mat mask = _mask.getMat(), grayImage;
UMat ugrayImage;
_InputArray gray = _image;
if( _image.type() != CV_8U )
{
_OutputArray ogray = _image.isUMat() ? _OutputArray(ugrayImage) : _OutputArray(grayImage);
cvtColor( _image, ogray, COLOR_BGR2GRAY );
gray = ogray;
}
FAST( gray, keypoints, threshold, nonmaxSuppression, type );
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
void set(int prop, double value)
{
if(prop == THRESHOLD)
threshold = cvRound(value);
else if(prop == NONMAX_SUPPRESSION)
nonmaxSuppression = value != 0;
else if(prop == FAST_N)
type = static_cast<FastFeatureDetector::DetectorType>(cvRound(value));
else
CV_Error(Error::StsBadArg, "");
}
double get(int prop) const
{
if(prop == THRESHOLD)
return threshold;
if(prop == NONMAX_SUPPRESSION)
return nonmaxSuppression;
if(prop == FAST_N)
return static_cast<int>(type);
CV_Error(Error::StsBadArg, "");
return 0;
}
void setThreshold(int threshold_) CV_OVERRIDE { threshold = threshold_; }
int getThreshold() const CV_OVERRIDE { return threshold; }
void setNonmaxSuppression(bool f) CV_OVERRIDE { nonmaxSuppression = f; }
bool getNonmaxSuppression() const CV_OVERRIDE { return nonmaxSuppression; }
void setType(FastFeatureDetector::DetectorType type_) CV_OVERRIDE{ type = type_; }
FastFeatureDetector::DetectorType getType() const CV_OVERRIDE{ return type; }
int threshold;
bool nonmaxSuppression;
FastFeatureDetector::DetectorType type;
};
Ptr<FastFeatureDetector> FastFeatureDetector::create( int threshold, bool nonmaxSuppression, FastFeatureDetector::DetectorType type )
{
return makePtr<FastFeatureDetector_Impl>(threshold, nonmaxSuppression, type);
}
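// A minimal usage sketch (disabled): the same detector through the
// Feature2D interface, which also honors an optional detection mask.
#if 0
static void exampleDetector( const Mat& gray, const Mat& mask )
{
    Ptr<FastFeatureDetector> fast = FastFeatureDetector::create( 30, true );
    std::vector<KeyPoint> kpts;
    fast->detect( gray, kpts, mask );
}
#endif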
String FastFeatureDetector::getDefaultName() const
{
return (Feature2D::getDefaultName() + ".FastFeatureDetector");
}
}

View File

@@ -0,0 +1,62 @@
/* This is FAST corner detector, contributed to OpenCV by the author, Edward Rosten.
Below is the original copyright and the references */
/*
Copyright (c) 2006, 2008 Edward Rosten
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
*Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
*Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
*Neither the name of the University of Cambridge nor the names of
its contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The references are:
* Machine learning for high-speed corner detection,
E. Rosten and T. Drummond, ECCV 2006
* Faster and better: A machine learning approach to corner detection
E. Rosten, R. Porter and T. Drummond, PAMI, 2009
*/
#ifndef OPENCV_FEATURES2D_FAST_HPP
#define OPENCV_FEATURES2D_FAST_HPP
namespace cv
{
namespace opt_AVX2
{
#if CV_TRY_AVX2
class FAST_t_patternSize16_AVX2
{
public:
static Ptr<FAST_t_patternSize16_AVX2> getImpl(int _cols, int _threshold, bool _nonmax_suppression, const int* _pixel);
virtual void process(int &j, const uchar* &ptr, uchar* curr, int* cornerpos, int &ncorners) = 0;
virtual ~FAST_t_patternSize16_AVX2() {};
};
#endif
}
}
#endif

View File

@@ -0,0 +1,366 @@
/* This is FAST corner detector, contributed to OpenCV by the author, Edward Rosten.
Below is the original copyright and the references */
/*
Copyright (c) 2006, 2008 Edward Rosten
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
*Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
*Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
*Neither the name of the University of Cambridge nor the names of
its contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The references are:
* Machine learning for high-speed corner detection,
E. Rosten and T. Drummond, ECCV 2006
* Faster and better: A machine learning approach to corner detection
E. Rosten, R. Porter and T. Drummond, PAMI, 2009
*/
#include "fast_score.hpp"
#include "opencv2/core/hal/intrin.hpp"
#define VERIFY_CORNERS 0
namespace cv {
void makeOffsets(int pixel[25], int rowStride, int patternSize)
{
static const int offsets16[][2] =
{
{0, 3}, { 1, 3}, { 2, 2}, { 3, 1}, { 3, 0}, { 3, -1}, { 2, -2}, { 1, -3},
{0, -3}, {-1, -3}, {-2, -2}, {-3, -1}, {-3, 0}, {-3, 1}, {-2, 2}, {-1, 3}
};
static const int offsets12[][2] =
{
{0, 2}, { 1, 2}, { 2, 1}, { 2, 0}, { 2, -1}, { 1, -2},
{0, -2}, {-1, -2}, {-2, -1}, {-2, 0}, {-2, 1}, {-1, 2}
};
static const int offsets8[][2] =
{
{0, 1}, { 1, 1}, { 1, 0}, { 1, -1},
{0, -1}, {-1, -1}, {-1, 0}, {-1, 1}
};
const int (*offsets)[2] = patternSize == 16 ? offsets16 :
patternSize == 12 ? offsets12 :
patternSize == 8 ? offsets8 : 0;
CV_Assert(pixel && offsets);
int k = 0;
for( ; k < patternSize; k++ )
pixel[k] = offsets[k][0] + offsets[k][1] * rowStride;
for( ; k < 25; k++ )
pixel[k] = pixel[k - patternSize];
}
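// The tables above trace a discretized circle around the candidate pixel:
// radius 3 for patternSize 16, radius 2 for 12, and the 8-neighbour ring
// for 8. Entries patternSize..24 repeat the first ones so that an arc of
// up to K+1 consecutive pixels can be scanned without wrapping indices.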
#if VERIFY_CORNERS
static void testCorner(const uchar* ptr, const int pixel[], int K, int N, int threshold) {
// check that with the computed "threshold" the pixel is still a corner
// and that with the increased-by-1 "threshold" the pixel is not a corner anymore
for( int delta = 0; delta <= 1; delta++ )
{
int v0 = std::min(ptr[0] + threshold + delta, 255);
int v1 = std::max(ptr[0] - threshold - delta, 0);
int c0 = 0, c1 = 0;
for( int k = 0; k < N; k++ )
{
int x = ptr[pixel[k]];
if(x > v0)
{
if( ++c0 > K )
break;
c1 = 0;
}
else if( x < v1 )
{
if( ++c1 > K )
break;
c0 = 0;
}
else
{
c0 = c1 = 0;
}
}
CV_Assert( (delta == 0 && std::max(c0, c1) > K) ||
(delta == 1 && std::max(c0, c1) <= K) );
}
}
#endif
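// cornerScore computes, for a pixel already accepted as a corner, the
// largest threshold at which it would still be detected: the minimum
// absolute center-to-ring difference over a contiguous arc of K+1 pixels,
// maximized over all arcs, minus one. This value doubles as the response
// used for non-maximum suppression.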
template<>
int cornerScore<16>(const uchar* ptr, const int pixel[], int threshold)
{
const int K = 8, N = K*3 + 1;
int k, v = ptr[0];
short d[N];
for( k = 0; k < N; k++ )
d[k] = (short)(v - ptr[pixel[k]]);
#if CV_SIMD128
if (true)
{
v_int16x8 q0 = v_setall_s16(-1000), q1 = v_setall_s16(1000);
for (k = 0; k < 16; k += 8)
{
v_int16x8 v0 = v_load(d + k + 1);
v_int16x8 v1 = v_load(d + k + 2);
v_int16x8 a = v_min(v0, v1);
v_int16x8 b = v_max(v0, v1);
v0 = v_load(d + k + 3);
a = v_min(a, v0);
b = v_max(b, v0);
v0 = v_load(d + k + 4);
a = v_min(a, v0);
b = v_max(b, v0);
v0 = v_load(d + k + 5);
a = v_min(a, v0);
b = v_max(b, v0);
v0 = v_load(d + k + 6);
a = v_min(a, v0);
b = v_max(b, v0);
v0 = v_load(d + k + 7);
a = v_min(a, v0);
b = v_max(b, v0);
v0 = v_load(d + k + 8);
a = v_min(a, v0);
b = v_max(b, v0);
v0 = v_load(d + k);
q0 = v_max(q0, v_min(a, v0));
q1 = v_min(q1, v_max(b, v0));
v0 = v_load(d + k + 9);
q0 = v_max(q0, v_min(a, v0));
q1 = v_min(q1, v_max(b, v0));
}
q0 = v_max(q0, v_setzero_s16() - q1);
threshold = v_reduce_max(q0) - 1;
}
else
#endif
{
int a0 = threshold;
for( k = 0; k < 16; k += 2 )
{
int a = std::min((int)d[k+1], (int)d[k+2]);
a = std::min(a, (int)d[k+3]);
if( a <= a0 )
continue;
a = std::min(a, (int)d[k+4]);
a = std::min(a, (int)d[k+5]);
a = std::min(a, (int)d[k+6]);
a = std::min(a, (int)d[k+7]);
a = std::min(a, (int)d[k+8]);
a0 = std::max(a0, std::min(a, (int)d[k]));
a0 = std::max(a0, std::min(a, (int)d[k+9]));
}
int b0 = -a0;
for( k = 0; k < 16; k += 2 )
{
int b = std::max((int)d[k+1], (int)d[k+2]);
b = std::max(b, (int)d[k+3]);
b = std::max(b, (int)d[k+4]);
b = std::max(b, (int)d[k+5]);
if( b >= b0 )
continue;
b = std::max(b, (int)d[k+6]);
b = std::max(b, (int)d[k+7]);
b = std::max(b, (int)d[k+8]);
b0 = std::min(b0, std::max(b, (int)d[k]));
b0 = std::min(b0, std::max(b, (int)d[k+9]));
}
threshold = -b0 - 1;
}
#if VERIFY_CORNERS
testCorner(ptr, pixel, K, N, threshold);
#endif
return threshold;
}
template<>
int cornerScore<12>(const uchar* ptr, const int pixel[], int threshold)
{
const int K = 6, N = K*3 + 1;
int k, v = ptr[0];
short d[N + 4];
for( k = 0; k < N; k++ )
d[k] = (short)(v - ptr[pixel[k]]);
#if CV_SIMD128
for( k = 0; k < 4; k++ )
d[N+k] = d[k];
#endif
#if CV_SIMD128
if (true)
{
v_int16x8 q0 = v_setall_s16(-1000), q1 = v_setall_s16(1000);
for (k = 0; k < 16; k += 8)
{
v_int16x8 v0 = v_load(d + k + 1);
v_int16x8 v1 = v_load(d + k + 2);
v_int16x8 a = v_min(v0, v1);
v_int16x8 b = v_max(v0, v1);
v0 = v_load(d + k + 3);
a = v_min(a, v0);
b = v_max(b, v0);
v0 = v_load(d + k + 4);
a = v_min(a, v0);
b = v_max(b, v0);
v0 = v_load(d + k + 5);
a = v_min(a, v0);
b = v_max(b, v0);
v0 = v_load(d + k + 6);
a = v_min(a, v0);
b = v_max(b, v0);
v0 = v_load(d + k);
q0 = v_max(q0, v_min(a, v0));
q1 = v_min(q1, v_max(b, v0));
v0 = v_load(d + k + 7);
q0 = v_max(q0, v_min(a, v0));
q1 = v_min(q1, v_max(b, v0));
}
q0 = v_max(q0, v_setzero_s16() - q1);
threshold = v_reduce_max(q0) - 1;
}
else
#endif
{
int a0 = threshold;
for( k = 0; k < 12; k += 2 )
{
int a = std::min((int)d[k+1], (int)d[k+2]);
if( a <= a0 )
continue;
a = std::min(a, (int)d[k+3]);
a = std::min(a, (int)d[k+4]);
a = std::min(a, (int)d[k+5]);
a = std::min(a, (int)d[k+6]);
a0 = std::max(a0, std::min(a, (int)d[k]));
a0 = std::max(a0, std::min(a, (int)d[k+7]));
}
int b0 = -a0;
for( k = 0; k < 12; k += 2 )
{
int b = std::max((int)d[k+1], (int)d[k+2]);
b = std::max(b, (int)d[k+3]);
b = std::max(b, (int)d[k+4]);
if( b >= b0 )
continue;
b = std::max(b, (int)d[k+5]);
b = std::max(b, (int)d[k+6]);
b0 = std::min(b0, std::max(b, (int)d[k]));
b0 = std::min(b0, std::max(b, (int)d[k+7]));
}
threshold = -b0-1;
}
#if VERIFY_CORNERS
testCorner(ptr, pixel, K, N, threshold);
#endif
return threshold;
}
template<>
int cornerScore<8>(const uchar* ptr, const int pixel[], int threshold)
{
const int K = 4, N = K * 3 + 1;
int k, v = ptr[0];
short d[N];
for (k = 0; k < N; k++)
d[k] = (short)(v - ptr[pixel[k]]);
#if CV_SIMD128 \
&& (!defined(CV_SIMD128_CPP) || (!defined(__GNUC__) || __GNUC__ != 5)) // "movdqa" bug on "v_load(d + 1)" line (Ubuntu 16.04 + GCC 5.4)
if (true)
{
v_int16x8 v0 = v_load(d + 1);
v_int16x8 v1 = v_load(d + 2);
v_int16x8 a = v_min(v0, v1);
v_int16x8 b = v_max(v0, v1);
v0 = v_load(d + 3);
a = v_min(a, v0);
b = v_max(b, v0);
v0 = v_load(d + 4);
a = v_min(a, v0);
b = v_max(b, v0);
v0 = v_load(d);
v_int16x8 q0 = v_min(a, v0);
v_int16x8 q1 = v_max(b, v0);
v0 = v_load(d + 5);
q0 = v_max(q0, v_min(a, v0));
q1 = v_min(q1, v_max(b, v0));
q0 = v_max(q0, v_setzero_s16() - q1);
threshold = v_reduce_max(q0) - 1;
}
else
#endif
{
int a0 = threshold;
for( k = 0; k < 8; k += 2 )
{
int a = std::min((int)d[k+1], (int)d[k+2]);
if( a <= a0 )
continue;
a = std::min(a, (int)d[k+3]);
a = std::min(a, (int)d[k+4]);
a0 = std::max(a0, std::min(a, (int)d[k]));
a0 = std::max(a0, std::min(a, (int)d[k+5]));
}
int b0 = -a0;
for( k = 0; k < 8; k += 2 )
{
int b = std::max((int)d[k+1], (int)d[k+2]);
b = std::max(b, (int)d[k+3]);
if( b >= b0 )
continue;
b = std::max(b, (int)d[k+4]);
b0 = std::min(b0, std::max(b, (int)d[k]));
b0 = std::min(b0, std::max(b, (int)d[k+5]));
}
threshold = -b0-1;
}
#if VERIFY_CORNERS
testCorner(ptr, pixel, K, N, threshold);
#endif
return threshold;
}
} // namespace cv

View File

@@ -0,0 +1,62 @@
/* This is FAST corner detector, contributed to OpenCV by the author, Edward Rosten.
Below is the original copyright and the references */
/*
Copyright (c) 2006, 2008 Edward Rosten
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
*Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
*Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
*Neither the name of the University of Cambridge nor the names of
its contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The references are:
* Machine learning for high-speed corner detection,
E. Rosten and T. Drummond, ECCV 2006
* Faster and better: A machine learning approach to corner detection
E. Rosten, R. Porter and T. Drummond, PAMI, 2009
*/
#ifndef __OPENCV_FEATURES_2D_FAST_HPP__
#define __OPENCV_FEATURES_2D_FAST_HPP__
#ifdef __cplusplus
#include "precomp.hpp"
namespace cv
{
void makeOffsets(int pixel[25], int row_stride, int patternSize);
template<int patternSize>
int cornerScore(const uchar* ptr, const int pixel[], int threshold);
}
#endif
#endif
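/* Illustrative sketch (assumption, not part of the original header): how the
   FAST implementation in this module typically drives the helpers declared
   above for the 16-pixel Bresenham ring. img, x, y and threshold stand for
   the caller's image and candidate pixel.

   int pixel[25];
   cv::makeOffsets(pixel, (int)img.step, 16);   // ring offsets for this row stride
   int score = cv::cornerScore<16>(img.ptr(y) + x, pixel, threshold);
   // score is the largest threshold for which (x, y) is still detected as a corner
*/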

View File

@@ -0,0 +1,224 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv
{
using std::vector;
Feature2D::~Feature2D() {}
/*
* Detect keypoints in an image.
* image The image.
* keypoints The detected keypoints.
* mask Mask specifying where to look for keypoints (optional). Must be a char
* matrix with non-zero values in the region of interest.
*/
void Feature2D::detect( InputArray image,
std::vector<KeyPoint>& keypoints,
InputArray mask )
{
CV_INSTRUMENT_REGION();
if( image.empty() )
{
keypoints.clear();
return;
}
detectAndCompute(image, mask, keypoints, noArray(), false);
}
void Feature2D::detect( InputArrayOfArrays images,
std::vector<std::vector<KeyPoint> >& keypoints,
InputArrayOfArrays masks )
{
CV_INSTRUMENT_REGION();
int nimages = (int)images.total();
if (!masks.empty())
{
CV_Assert(masks.total() == (size_t)nimages);
}
keypoints.resize(nimages);
if (images.isMatVector())
{
for (int i = 0; i < nimages; i++)
{
detect(images.getMat(i), keypoints[i], masks.empty() ? noArray() : masks.getMat(i));
}
}
else
{
// assume UMats
for (int i = 0; i < nimages; i++)
{
detect(images.getUMat(i), keypoints[i], masks.empty() ? noArray() : masks.getUMat(i));
}
}
}
/*
* Compute the descriptors for a set of keypoints in an image.
* image The image.
* keypoints The input keypoints. Keypoints for which a descriptor cannot be computed are removed.
* descriptors Computed descriptors. Row i is the descriptor for keypoint i.
*/
void Feature2D::compute( InputArray image,
std::vector<KeyPoint>& keypoints,
OutputArray descriptors )
{
CV_INSTRUMENT_REGION();
if( image.empty() )
{
descriptors.release();
return;
}
detectAndCompute(image, noArray(), keypoints, descriptors, true);
}
void Feature2D::compute( InputArrayOfArrays images,
std::vector<std::vector<KeyPoint> >& keypoints,
OutputArrayOfArrays descriptors )
{
CV_INSTRUMENT_REGION();
if( !descriptors.needed() )
return;
int nimages = (int)images.total();
CV_Assert( keypoints.size() == (size_t)nimages );
// resize descriptors to appropriate size and compute
if (descriptors.isMatVector())
{
vector<Mat>& vec = *(vector<Mat>*)descriptors.getObj();
vec.resize(nimages);
for (int i = 0; i < nimages; i++)
{
compute(images.getMat(i), keypoints[i], vec[i]);
}
}
else if (descriptors.isUMatVector())
{
vector<UMat>& vec = *(vector<UMat>*)descriptors.getObj();
vec.resize(nimages);
for (int i = 0; i < nimages; i++)
{
compute(images.getUMat(i), keypoints[i], vec[i]);
}
}
else
{
CV_Error(Error::StsBadArg, "descriptors must be vector<Mat> or vector<UMat>");
}
}
/* Detects keypoints and computes the descriptors */
void Feature2D::detectAndCompute( InputArray, InputArray,
std::vector<KeyPoint>&,
OutputArray,
bool )
{
CV_INSTRUMENT_REGION();
CV_Error(Error::StsNotImplemented, "");
}
void Feature2D::write( const String& fileName ) const
{
FileStorage fs(fileName, FileStorage::WRITE);
write(fs);
}
void Feature2D::read( const String& fileName )
{
FileStorage fs(fileName, FileStorage::READ);
read(fs.root());
}
void Feature2D::write( FileStorage&) const
{
}
void Feature2D::read( const FileNode&)
{
}
int Feature2D::descriptorSize() const
{
return 0;
}
int Feature2D::descriptorType() const
{
return CV_32F;
}
int Feature2D::defaultNorm() const
{
int tp = descriptorType();
return tp == CV_8U ? NORM_HAMMING : NORM_L2;
}
// Return true if detector object is empty
bool Feature2D::empty() const
{
return true;
}
String Feature2D::getDefaultName() const
{
return "Feature2D";
}
}
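// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file): how the
// Feature2D facade above is typically driven. ORB is used here only as an
// example concrete implementation; any Feature2D subclass behaves the same.
static void feature2d_usage_example(const cv::Mat& img)
{
    cv::Ptr<cv::Feature2D> f2d = cv::ORB::create();
    std::vector<cv::KeyPoint> kpts;
    cv::Mat desc;
    f2d->detect(img, kpts);          // forwards to detectAndCompute(..., false)
    f2d->compute(img, kpts, desc);   // forwards to detectAndCompute(..., true)
    // Equivalent single pass over the image:
    f2d->detectAndCompute(img, cv::noArray(), kpts, desc);
}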

View File

@@ -0,0 +1,152 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv
{
class GFTTDetector_Impl CV_FINAL : public GFTTDetector
{
public:
GFTTDetector_Impl( int _nfeatures, double _qualityLevel,
double _minDistance, int _blockSize, int _gradientSize,
bool _useHarrisDetector, double _k )
: nfeatures(_nfeatures), qualityLevel(_qualityLevel), minDistance(_minDistance),
blockSize(_blockSize), gradSize(_gradientSize), useHarrisDetector(_useHarrisDetector), k(_k)
{
}
void setMaxFeatures(int maxFeatures) CV_OVERRIDE { nfeatures = maxFeatures; }
int getMaxFeatures() const CV_OVERRIDE { return nfeatures; }
void setQualityLevel(double qlevel) CV_OVERRIDE { qualityLevel = qlevel; }
double getQualityLevel() const CV_OVERRIDE { return qualityLevel; }
void setMinDistance(double minDistance_) CV_OVERRIDE { minDistance = minDistance_; }
double getMinDistance() const CV_OVERRIDE { return minDistance; }
void setBlockSize(int blockSize_) CV_OVERRIDE { blockSize = blockSize_; }
int getBlockSize() const CV_OVERRIDE { return blockSize; }
//void setGradientSize(int gradientSize_) { gradSize = gradientSize_; }
//int getGradientSize() { return gradSize; }
void setHarrisDetector(bool val) CV_OVERRIDE { useHarrisDetector = val; }
bool getHarrisDetector() const CV_OVERRIDE { return useHarrisDetector; }
void setK(double k_) CV_OVERRIDE { k = k_; }
double getK() const CV_OVERRIDE { return k; }
void detect( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
if(_image.empty())
{
keypoints.clear();
return;
}
std::vector<Point2f> corners;
std::vector<float> cornersQuality;
if (_image.isUMat())
{
UMat ugrayImage;
if( _image.type() != CV_8U )
cvtColor( _image, ugrayImage, COLOR_BGR2GRAY );
else
ugrayImage = _image.getUMat();
goodFeaturesToTrack( ugrayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
cornersQuality, blockSize, gradSize, useHarrisDetector, k );
}
else
{
Mat image = _image.getMat(), grayImage = image;
if( image.type() != CV_8U )
cvtColor( image, grayImage, COLOR_BGR2GRAY );
goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
cornersQuality, blockSize, gradSize, useHarrisDetector, k );
}
CV_Assert(corners.size() == cornersQuality.size());
keypoints.resize(corners.size());
for (size_t i = 0; i < corners.size(); i++)
keypoints[i] = KeyPoint(corners[i], (float)blockSize, -1, cornersQuality[i]);
}
int nfeatures;
double qualityLevel;
double minDistance;
int blockSize;
int gradSize;
bool useHarrisDetector;
double k;
};
Ptr<GFTTDetector> GFTTDetector::create( int _nfeatures, double _qualityLevel,
double _minDistance, int _blockSize, int _gradientSize,
bool _useHarrisDetector, double _k )
{
return makePtr<GFTTDetector_Impl>(_nfeatures, _qualityLevel,
_minDistance, _blockSize, _gradientSize, _useHarrisDetector, _k);
}
Ptr<GFTTDetector> GFTTDetector::create( int _nfeatures, double _qualityLevel,
double _minDistance, int _blockSize,
bool _useHarrisDetector, double _k )
{
return makePtr<GFTTDetector_Impl>(_nfeatures, _qualityLevel,
_minDistance, _blockSize, 3, _useHarrisDetector, _k);
}
String GFTTDetector::getDefaultName() const
{
return (Feature2D::getDefaultName() + ".GFTTDetector");
}
}
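// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file): creating the
// detector implemented above. Parameter values are examples only.
static void gftt_usage_example(const cv::Mat& gray)
{
    cv::Ptr<cv::GFTTDetector> gftt =
        cv::GFTTDetector::create(500,   // maximum number of corners
                                 0.01,  // quality level
                                 10.0,  // minimum distance between corners
                                 3,     // block size
                                 false, // use Harris instead of min-eigenvalue
                                 0.04); // Harris k
    std::vector<cv::KeyPoint> kpts;
    gftt->detect(gray, kpts);  // KeyPoint::response carries the corner quality
}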

View File

@@ -0,0 +1,135 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_FEATURES2D_HAL_REPLACEMENT_HPP
#define OPENCV_FEATURES2D_HAL_REPLACEMENT_HPP
#include "opencv2/core/hal/interface.h"
#if defined __GNUC__
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wunused-parameter"
#elif defined _MSC_VER
# pragma warning( push )
# pragma warning( disable: 4100 )
#endif
//! @addtogroup features2d_hal_interface
//! @note Define your functions to override default implementations:
//! @code
//! #undef hal_add8u
//! #define hal_add8u my_add8u
//! @endcode
//! @{
/**
@brief Detects corners using the FAST algorithm, returns mask.
@param src_data,src_step Source image
@param dst_data,dst_step Destination mask
@param width,height Source image dimensions
@param type FAST type
*/
inline int hal_ni_FAST_dense(const uchar* src_data, size_t src_step, uchar* dst_data, size_t dst_step, int width, int height, cv::FastFeatureDetector::DetectorType type) { return CV_HAL_ERROR_NOT_IMPLEMENTED; }
//! @cond IGNORED
#define cv_hal_FAST_dense hal_ni_FAST_dense
//! @endcond
/**
@brief Non-maximum suppression for FAST_9_16.
@param src_data,src_step Source mask
@param dst_data,dst_step Destination mask after NMS
@param width,height Source mask dimensions
*/
inline int hal_ni_FAST_NMS(const uchar* src_data, size_t src_step, uchar* dst_data, size_t dst_step, int width, int height) { return CV_HAL_ERROR_NOT_IMPLEMENTED; }
//! @cond IGNORED
#define cv_hal_FAST_NMS hal_ni_FAST_NMS
//! @endcond
/**
@brief Detects corners using the FAST algorithm.
@param src_data,src_step Source image
@param width,height Source image dimensions
@param keypoints_data Pointer to keypoints
@param keypoints_count Count of keypoints
@param threshold Detection threshold for keypoints
@param nonmax_suppression Indicates whether to apply non-maxima suppression.
@param type FAST type
*/
inline int hal_ni_FAST(const uchar* src_data, size_t src_step, int width, int height, uchar* keypoints_data, size_t* keypoints_count, int threshold, bool nonmax_suppression, int /*cv::FastFeatureDetector::DetectorType*/ type) { return CV_HAL_ERROR_NOT_IMPLEMENTED; }
//! @cond IGNORED
#define cv_hal_FAST hal_ni_FAST
//! @endcond
//! @}
#if defined __GNUC__
# pragma GCC diagnostic pop
#elif defined _MSC_VER
# pragma warning( pop )
#endif
#include "custom_hal.hpp"
//! @cond IGNORED
#define CALL_HAL_RET(name, fun, retval, ...) \
int res = __CV_EXPAND(fun(__VA_ARGS__, &retval)); \
if (res == CV_HAL_ERROR_OK) \
return retval; \
else if (res != CV_HAL_ERROR_NOT_IMPLEMENTED) \
CV_Error_(cv::Error::StsInternal, \
("HAL implementation " CVAUX_STR(name) " ==> " CVAUX_STR(fun) " returned %d (0x%08x)", res, res));
#define CALL_HAL(name, fun, ...) \
{ \
int res = __CV_EXPAND(fun(__VA_ARGS__)); \
if (res == CV_HAL_ERROR_OK) \
return; \
else if (res != CV_HAL_ERROR_NOT_IMPLEMENTED) \
CV_Error_(cv::Error::StsInternal, \
("HAL implementation " CVAUX_STR(name) " ==> " CVAUX_STR(fun) " returned %d (0x%08x)", res, res)); \
}
//! @endcond
#endif
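/* Illustrative sketch (assumption, not part of the original header): how a
   platform HAL would override one of the entries above from custom_hal.hpp,
   following the #undef/#define pattern documented in the @note. my_fast is a
   hypothetical vendor function.

   inline int my_fast(const uchar* src_data, size_t src_step, int width, int height,
                      uchar* keypoints_data, size_t* keypoints_count,
                      int threshold, bool nonmax_suppression, int type)
   {
       // accelerated implementation, or CV_HAL_ERROR_NOT_IMPLEMENTED to fall back
       return CV_HAL_ERROR_NOT_IMPLEMENTED;
   }
   #undef cv_hal_FAST
   #define cv_hal_FAST my_fast
*/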

View File

@@ -0,0 +1,205 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2008, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/*
OpenCV wrapper of reference implementation of
[1] KAZE Features. Pablo F. Alcantarilla, Adrien Bartoli and Andrew J. Davison.
In European Conference on Computer Vision (ECCV), Firenze, Italy, October 2012
http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla12eccv.pdf
@author Eugene Khvedchenya <ekhvedchenya@gmail.com>
*/
#include "precomp.hpp"
#include "kaze/KAZEFeatures.h"
namespace cv
{
class KAZE_Impl CV_FINAL : public KAZE
{
public:
KAZE_Impl(bool _extended, bool _upright, float _threshold, int _octaves,
int _sublevels, KAZE::DiffusivityType _diffusivity)
: extended(_extended)
, upright(_upright)
, threshold(_threshold)
, octaves(_octaves)
, sublevels(_sublevels)
, diffusivity(_diffusivity)
{
}
virtual ~KAZE_Impl() CV_OVERRIDE {}
void setExtended(bool extended_) CV_OVERRIDE { extended = extended_; }
bool getExtended() const CV_OVERRIDE { return extended; }
void setUpright(bool upright_) CV_OVERRIDE { upright = upright_; }
bool getUpright() const CV_OVERRIDE { return upright; }
void setThreshold(double threshold_) CV_OVERRIDE { threshold = (float)threshold_; }
double getThreshold() const CV_OVERRIDE { return threshold; }
void setNOctaves(int octaves_) CV_OVERRIDE { octaves = octaves_; }
int getNOctaves() const CV_OVERRIDE { return octaves; }
void setNOctaveLayers(int octaveLayers_) CV_OVERRIDE { sublevels = octaveLayers_; }
int getNOctaveLayers() const CV_OVERRIDE { return sublevels; }
void setDiffusivity(KAZE::DiffusivityType diff_) CV_OVERRIDE{ diffusivity = diff_; }
KAZE::DiffusivityType getDiffusivity() const CV_OVERRIDE{ return diffusivity; }
// returns the descriptor size (number of CV_32F elements)
int descriptorSize() const CV_OVERRIDE
{
return extended ? 128 : 64;
}
// returns the descriptor type
int descriptorType() const CV_OVERRIDE
{
return CV_32F;
}
// returns the default norm type
int defaultNorm() const CV_OVERRIDE
{
return NORM_L2;
}
void detectAndCompute(InputArray image, InputArray mask,
std::vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints) CV_OVERRIDE
{
CV_INSTRUMENT_REGION();
cv::Mat img = image.getMat();
if (img.channels() > 1)
cvtColor(image, img, COLOR_BGR2GRAY);
Mat img1_32;
if ( img.depth() == CV_32F )
img1_32 = img;
else if ( img.depth() == CV_8U )
img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
else if ( img.depth() == CV_16U )
img.convertTo(img1_32, CV_32F, 1.0 / 65535.0, 0);
CV_Assert( ! img1_32.empty() );
KAZEOptions options;
options.img_width = img.cols;
options.img_height = img.rows;
options.extended = extended;
options.upright = upright;
options.dthreshold = threshold;
options.omax = octaves;
options.nsublevels = sublevels;
options.diffusivity = diffusivity;
KAZEFeatures impl(options);
impl.Create_Nonlinear_Scale_Space(img1_32);
if (!useProvidedKeypoints)
{
impl.Feature_Detection(keypoints);
}
if (!mask.empty())
{
cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
}
if( descriptors.needed() )
{
Mat desc;
impl.Feature_Description(keypoints, desc);
desc.copyTo(descriptors);
CV_Assert((!desc.rows || desc.cols == descriptorSize()));
CV_Assert((!desc.rows || (desc.type() == descriptorType())));
}
}
void write(FileStorage& fs) const CV_OVERRIDE
{
writeFormat(fs);
fs << "extended" << (int)extended;
fs << "upright" << (int)upright;
fs << "threshold" << threshold;
fs << "octaves" << octaves;
fs << "sublevels" << sublevels;
fs << "diffusivity" << diffusivity;
}
void read(const FileNode& fn) CV_OVERRIDE
{
extended = (int)fn["extended"] != 0;
upright = (int)fn["upright"] != 0;
threshold = (float)fn["threshold"];
octaves = (int)fn["octaves"];
sublevels = (int)fn["sublevels"];
diffusivity = static_cast<KAZE::DiffusivityType>((int)fn["diffusivity"]);
}
bool extended;
bool upright;
float threshold;
int octaves;
int sublevels;
KAZE::DiffusivityType diffusivity;
};
Ptr<KAZE> KAZE::create(bool extended, bool upright,
float threshold,
int octaves, int sublevels,
KAZE::DiffusivityType diffusivity)
{
return makePtr<KAZE_Impl>(extended, upright, threshold, octaves, sublevels, diffusivity);
}
String KAZE::getDefaultName() const
{
return (Feature2D::getDefaultName() + ".KAZE");
}
}
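// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file): running the KAZE
// implementation above end-to-end. The arguments mirror the defaults.
static void kaze_usage_example(const cv::Mat& img)
{
    cv::Ptr<cv::KAZE> kaze = cv::KAZE::create(false,  // extended (128-float descriptor)
                                              false,  // upright (skip orientation)
                                              0.001f, // detector response threshold
                                              4, 4,   // octaves, sublevels
                                              cv::KAZE::DIFF_PM_G2);
    std::vector<cv::KeyPoint> kpts;
    cv::Mat desc;  // 64 CV_32F columns per row (128 when extended)
    kaze->detectAndCompute(img, cv::noArray(), kpts, desc, false);
}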

View File

@@ -0,0 +1,65 @@
/**
* @file AKAZEConfig.h
* @brief AKAZE configuration file
* @date Feb 23, 2014
* @author Pablo F. Alcantarilla, Jesus Nuevo
*/
#ifndef __OPENCV_FEATURES_2D_AKAZE_CONFIG_H__
#define __OPENCV_FEATURES_2D_AKAZE_CONFIG_H__
namespace cv
{
/* ************************************************************************* */
/// AKAZE configuration options structure
struct AKAZEOptions {
AKAZEOptions()
: omax(4)
, nsublevels(4)
, img_width(0)
, img_height(0)
, soffset(1.6f)
, derivative_factor(1.5f)
, sderivatives(1.0)
, diffusivity(KAZE::DIFF_PM_G2)
, dthreshold(0.001f)
, min_dthreshold(0.00001f)
, descriptor(AKAZE::DESCRIPTOR_MLDB)
, descriptor_size(0)
, descriptor_channels(3)
, descriptor_pattern_size(10)
, kcontrast(0.001f)
, kcontrast_percentile(0.7f)
, kcontrast_nbins(300)
{
}
int omax; ///< Maximum octave evolution of the image 2^sigma (coarsest scale sigma units)
int nsublevels; ///< Default number of sublevels per scale level
int img_width; ///< Width of the input image
int img_height; ///< Height of the input image
float soffset; ///< Base scale offset (sigma units)
float derivative_factor; ///< Factor for the multiscale derivatives
float sderivatives; ///< Smoothing factor for the derivatives
KAZE::DiffusivityType diffusivity; ///< Diffusivity type
float dthreshold; ///< Detector response threshold to accept point
float min_dthreshold; ///< Minimum detector threshold to accept a point
AKAZE::DescriptorType descriptor; ///< Type of descriptor
int descriptor_size; ///< Size of the descriptor in bits. 0->Full size
int descriptor_channels; ///< Number of channels in the descriptor (1, 2, 3)
int descriptor_pattern_size; ///< Actual patch size is 2*pattern_size*point.scale
float kcontrast; ///< The contrast factor parameter
float kcontrast_percentile; ///< Percentile level for the contrast factor
int kcontrast_nbins; ///< Number of bins for the contrast factor histogram
};
}
#endif
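/* Illustrative sketch (not part of the original header): overriding a few of
   the defaults above before handing the options to the AKAZE pipeline.

   cv::AKAZEOptions opts;
   opts.omax = 5;                                        // one extra octave
   opts.dthreshold = 0.0005f;                            // accept weaker responses
   opts.descriptor = cv::AKAZE::DESCRIPTOR_MLDB_UPRIGHT; // rotation-variant MLDB
*/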

File diff suppressed because it is too large

View File

@@ -0,0 +1,115 @@
/**
* @file AKAZE.h
* @brief Main class for detecting and computing binary descriptors in an
* accelerated nonlinear scale space
* @date Mar 27, 2013
* @author Pablo F. Alcantarilla, Jesus Nuevo
*/
#ifndef __OPENCV_FEATURES_2D_AKAZE_FEATURES_H__
#define __OPENCV_FEATURES_2D_AKAZE_FEATURES_H__
/* ************************************************************************* */
// Includes
#include "AKAZEConfig.h"
namespace cv
{
/// A-KAZE nonlinear diffusion filtering evolution
template <typename MatType>
struct Evolution
{
Evolution() {
etime = 0.0f;
esigma = 0.0f;
octave = 0;
sublevel = 0;
sigma_size = 0;
octave_ratio = 0.0f;
border = 0;
}
template <typename T>
explicit Evolution(const Evolution<T> &other) {
size = other.size;
etime = other.etime;
esigma = other.esigma;
octave = other.octave;
sublevel = other.sublevel;
sigma_size = other.sigma_size;
octave_ratio = other.octave_ratio;
border = other.border;
other.Lx.copyTo(Lx);
other.Ly.copyTo(Ly);
other.Lt.copyTo(Lt);
other.Lsmooth.copyTo(Lsmooth);
other.Ldet.copyTo(Ldet);
}
MatType Lx, Ly; ///< First order spatial derivatives
MatType Lt; ///< Evolution image
MatType Lsmooth; ///< Smoothed image, used only for computing determinant, released afterwards
MatType Ldet; ///< Detector response
Size size; ///< Size of the layer
float etime; ///< Evolution time
float esigma; ///< Evolution sigma. For linear diffusion t = sigma^2 / 2
int octave; ///< Image octave
int sublevel; ///< Image sublevel in each octave
int sigma_size; ///< Integer esigma. For computing the feature detector responses
float octave_ratio; ///< Scaling ratio of this octave. ratio = 2^octave
int border; ///< Width of border where descriptors cannot be computed
};
typedef Evolution<Mat> MEvolution;
typedef Evolution<UMat> UEvolution;
typedef std::vector<MEvolution> Pyramid;
typedef std::vector<UEvolution> UMatPyramid;
/* ************************************************************************* */
// AKAZE Class Declaration
class AKAZEFeatures {
private:
AKAZEOptions options_; ///< Configuration options for AKAZE
Pyramid evolution_; ///< Vector of nonlinear diffusion evolution
/// FED parameters
int ncycles_; ///< Number of cycles
bool reordering_; ///< Flag for reordering time steps
std::vector<std::vector<float > > tsteps_; ///< Vector of FED dynamic time steps
std::vector<int> nsteps_; ///< Vector of number of steps per cycle
/// Matrices for the M-LDB descriptor computation
cv::Mat descriptorSamples_; // List of positions in the grids to sample LDB bits from.
cv::Mat descriptorBits_;
cv::Mat bitMask_;
/// Scale Space methods
void Allocate_Memory_Evolution();
void Find_Scale_Space_Extrema(std::vector<Mat>& keypoints_by_layers);
void Do_Subpixel_Refinement(std::vector<Mat>& keypoints_by_layers,
std::vector<KeyPoint>& kpts);
/// Feature description methods
void Compute_Keypoints_Orientation(std::vector<cv::KeyPoint>& kpts) const;
public:
/// Constructor with input arguments
AKAZEFeatures(const AKAZEOptions& options);
void Create_Nonlinear_Scale_Space(InputArray img);
void Feature_Detection(std::vector<cv::KeyPoint>& kpts);
void Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, OutputArray desc);
};
/* ************************************************************************* */
/// Inline functions
void generateDescriptorSubsample(cv::Mat& sampleList, cv::Mat& comparisons,
int nbits, int pattern_size, int nchannels);
}
#endif
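/* Illustrative sketch (assumption, not part of the original header): the
   intended call order for AKAZEFeatures, mirroring how the detector
   implementations in this module drive it.

   cv::AKAZEOptions opts;                       // configured as needed
   cv::AKAZEFeatures impl(opts);
   impl.Create_Nonlinear_Scale_Space(img1_32);  // CV_32F input scaled to [0,1]
   std::vector<cv::KeyPoint> kpts;
   impl.Feature_Detection(kpts);
   cv::Mat desc;
   impl.Compute_Descriptors(kpts, desc);
*/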

View File

@@ -0,0 +1,56 @@
/**
* @file KAZEConfig.h
* @brief Configuration file
* @date Dec 27, 2011
* @author Pablo F. Alcantarilla
*/
#ifndef __OPENCV_FEATURES_2D_KAZE_CONFIG_H__
#define __OPENCV_FEATURES_2D_KAZE_CONFIG_H__
// OpenCV Includes
#include "../precomp.hpp"
#include <opencv2/features2d.hpp>
namespace cv
{
//*************************************************************************************
struct KAZEOptions {
KAZEOptions()
: diffusivity(KAZE::DIFF_PM_G2)
, soffset(1.60f)
, omax(4)
, nsublevels(4)
, img_width(0)
, img_height(0)
, sderivatives(1.0f)
, dthreshold(0.001f)
, kcontrast(0.01f)
, kcontrast_percentille(0.7f)
, kcontrast_bins(300)
, upright(false)
, extended(false)
{
}
KAZE::DiffusivityType diffusivity;
float soffset;
int omax;
int nsublevels;
int img_width;
int img_height;
float sderivatives;
float dthreshold;
float kcontrast;
float kcontrast_percentille;
int kcontrast_bins;
bool upright;
bool extended;
};
}
#endif

File diff suppressed because it is too large

View File

@@ -0,0 +1,64 @@
/**
* @file KAZE.h
* @brief Main program for detecting and computing descriptors in a nonlinear
* scale space
* @date Jan 21, 2012
* @author Pablo F. Alcantarilla
*/
#ifndef __OPENCV_FEATURES_2D_KAZE_FEATURES_H__
#define __OPENCV_FEATURES_2D_KAZE_FEATURES_H__
/* ************************************************************************* */
// Includes
#include "KAZEConfig.h"
#include "nldiffusion_functions.h"
#include "fed.h"
#include "TEvolution.h"
namespace cv
{
/* ************************************************************************* */
// KAZE Class Declaration
class KAZEFeatures
{
private:
/// Parameters of the Nonlinear diffusion class
KAZEOptions options_; ///< Configuration options for KAZE
std::vector<TEvolution> evolution_; ///< Vector of nonlinear diffusion evolution
/// Vector of keypoint vectors for finding extrema in multiple threads
std::vector<std::vector<cv::KeyPoint> > kpts_par_;
/// FED parameters
int ncycles_; ///< Number of cycles
bool reordering_; ///< Flag for reordering time steps
std::vector<std::vector<float > > tsteps_; ///< Vector of FED dynamic time steps
std::vector<int> nsteps_; ///< Vector of number of steps per cycle
public:
/// Constructor
KAZEFeatures(KAZEOptions& options);
/// Public methods for KAZE interface
void Allocate_Memory_Evolution(void);
int Create_Nonlinear_Scale_Space(const cv::Mat& img);
void Feature_Detection(std::vector<cv::KeyPoint>& kpts);
void Feature_Description(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc);
static void Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector<TEvolution>& evolution_, const KAZEOptions& options);
/// Feature Detection Methods
void Compute_KContrast(const cv::Mat& img, const float& kper);
void Compute_Multiscale_Derivatives(void);
void Compute_Detector_Response(void);
void Determinant_Hessian(std::vector<cv::KeyPoint>& kpts);
void Do_Subpixel_Refinement(std::vector<cv::KeyPoint>& kpts);
};
}
#endif

View File

@@ -0,0 +1,41 @@
/**
* @file TEvolution.h
* @brief Header file with the declaration of the TEvolution struct
* @date Jun 02, 2014
* @author Pablo F. Alcantarilla
*/
#ifndef __OPENCV_FEATURES_2D_TEVOLUTION_H__
#define __OPENCV_FEATURES_2D_TEVOLUTION_H__
namespace cv
{
/* ************************************************************************* */
/// KAZE/A-KAZE nonlinear diffusion filtering evolution
struct TEvolution
{
TEvolution() {
etime = 0.0f;
esigma = 0.0f;
octave = 0;
sublevel = 0;
sigma_size = 0;
}
Mat Lx, Ly; ///< First order spatial derivatives
Mat Lxx, Lxy, Lyy; ///< Second order spatial derivatives
Mat Lt; ///< Evolution image
Mat Lsmooth; ///< Smoothed image
Mat Ldet; ///< Detector response
float etime; ///< Evolution time
float esigma; ///< Evolution sigma. For linear diffusion t = sigma^2 / 2
int octave; ///< Image octave
int sublevel; ///< Image sublevel in each octave
int sigma_size; ///< Integer esigma. For computing the feature detector responses
};
}
#endif

View File

@@ -0,0 +1,192 @@
//=============================================================================
//
// fed.cpp
// Authors: Pablo F. Alcantarilla (1), Jesus Nuevo (2)
// Institutions: Georgia Institute of Technology (1)
// TrueVision Solutions (2)
// Date: 15/09/2013
// Email: pablofdezalc@gmail.com
//
// AKAZE Features Copyright 2013, Pablo F. Alcantarilla, Jesus Nuevo
// All Rights Reserved
// See LICENSE for the license information
//=============================================================================
/**
* @file fed.cpp
* @brief Functions for performing Fast Explicit Diffusion and building the
* nonlinear scale space
* @date Sep 15, 2013
* @author Pablo F. Alcantarilla, Jesus Nuevo
* @note This code is derived from the FED/FJ library by Grewenig et al.;
* the FED/FJ library allows solving more advanced problems.
* Please look at the following papers for more information about FED:
* [1] S. Grewenig, J. Weickert, C. Schroers, A. Bruhn. Cyclic Schemes for
* PDE-Based Image Analysis. Technical Report No. 327, Department of Mathematics,
* Saarland University, Saarbrücken, Germany, March 2013
* [2] S. Grewenig, J. Weickert, A. Bruhn. From box filtering to fast explicit diffusion.
* DAGM, 2010
*
*/
#include "../precomp.hpp"
#include "fed.h"
using namespace std;
//*************************************************************************************
//*************************************************************************************
/**
* @brief This function allocates an array of the least number of time steps such
* that a certain stopping time for the whole process can be obtained and fills
* it with the respective FED time step sizes for one cycle
* The function returns the number of time steps per cycle or 0 on failure
* @param T Desired process stopping time
* @param M Desired number of cycles
* @param tau_max Stability limit for the explicit scheme
* @param reordering Reordering flag
* @param tau The vector with the dynamic step sizes
*/
int fed_tau_by_process_time(const float& T, const int& M, const float& tau_max,
const bool& reordering, std::vector<float>& tau) {
// All cycles have the same fraction of the stopping time
return fed_tau_by_cycle_time(T/(float)M,tau_max,reordering,tau);
}
//*************************************************************************************
//*************************************************************************************
/**
* @brief This function allocates an array of the least number of time steps such
* that a certain stopping time for the whole process can be obtained and fills
* it with the respective FED time step sizes for one cycle
* The function returns the number of time steps per cycle or 0 on failure
* @param t Desired cycle stopping time
* @param tau_max Stability limit for the explicit scheme
* @param reordering Reordering flag
* @param tau The vector with the dynamic step sizes
*/
int fed_tau_by_cycle_time(const float& t, const float& tau_max,
const bool& reordering, std::vector<float> &tau) {
int n = 0; // Number of time steps
float scale = 0.0; // Ratio of t we search to maximal t
// Compute necessary number of time steps
n = cvCeil(sqrtf(3.0f*t/tau_max+0.25f)-0.5f-1.0e-8f);
scale = 3.0f*t/(tau_max*(float)(n*(n+1)));
// Call internal FED time step creation routine
return fed_tau_internal(n,scale,tau_max,reordering,tau);
}
//*************************************************************************************
//*************************************************************************************
/**
* @brief This function allocates an array of time steps and fills it with FED
* time step sizes
* The function returns the number of time steps per cycle or 0 on failure
* @param n Number of internal steps
* @param scale Ratio of t we search to maximal t
* @param tau_max Stability limit for the explicit scheme
* @param reordering Reordering flag
* @param tau The vector with the dynamic step sizes
*/
int fed_tau_internal(const int& n, const float& scale, const float& tau_max,
const bool& reordering, std::vector<float> &tau) {
float c = 0.0, d = 0.0; // Time savers
vector<float> tauh; // Helper vector for unsorted taus
if (n <= 0) {
return 0;
}
// Allocate memory for the time step size
tau = vector<float>(n);
if (reordering) {
tauh = vector<float>(n);
}
// Compute time saver
c = 1.0f / (4.0f * (float)n + 2.0f);
d = scale * tau_max / 2.0f;
// Set up originally ordered tau vector
for (int k = 0; k < n; ++k) {
float h = cosf((float)CV_PI * (2.0f * (float)k + 1.0f) * c);
if (reordering) {
tauh[k] = d / (h * h);
}
else {
tau[k] = d / (h * h);
}
}
// Permute list of time steps according to chosen reordering function
int kappa = 0, prime = 0;
if (reordering == true) {
// Choose kappa cycle with k = n/2
// This is a heuristic. We can use Leja ordering instead!!
kappa = n / 2;
// Get modulus for permutation
prime = n + 1;
while (!fed_is_prime_internal(prime)) {
prime++;
}
// Perform permutation
for (int k = 0, l = 0; l < n; ++k, ++l) {
int index = 0;
while ((index = ((k+1)*kappa) % prime - 1) >= n) {
k++;
}
tau[l] = tauh[index];
}
}
return n;
}
//*************************************************************************************
//*************************************************************************************
/**
* @brief This function checks if a number is prime or not
* @param number Number to check if it is prime or not
* @return true if the number is prime
*/
bool fed_is_prime_internal(const int& number) {
bool is_prime = false;
if (number <= 1) {
return false;
}
  else if (number == 2 || number == 3 || number == 5 || number == 7) {  // number == 1 already handled above
return true;
}
else if ((number % 2) == 0 || (number % 3) == 0 || (number % 5) == 0 || (number % 7) == 0) {
return false;
}
else {
is_prime = true;
int upperLimit = (int)sqrt(1.0f + number);
int divisor = 11;
while (divisor <= upperLimit ) {
if (number % divisor == 0)
{
is_prime = false;
}
divisor +=2;
}
return is_prime;
}
}
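//*************************************************************************************
//*************************************************************************************
/**
 * Illustrative sketch (not part of the original file): a concrete FED setup
 * with example values. From the formula above, the number of steps n is the
 * smallest integer with 3*t <= tau_max*n*(n+1), and the returned tau[] sum
 * (up to rounding) to the cycle time t = T/M.
 */
static void fed_usage_example()
{
    std::vector<float> tau;
    // Total stopping time T = 20 split over M = 2 cycles, with explicit-scheme
    // stability limit tau_max = 0.25 (example values only).
    int nsteps = fed_tau_by_process_time(20.0f, 2, 0.25f, true, tau);
    (void)nsteps;  // steps per cycle; sum(tau) ~= 10 = T/M
}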

View File

@@ -0,0 +1,25 @@
#ifndef __OPENCV_FEATURES_2D_FED_H__
#define __OPENCV_FEATURES_2D_FED_H__
//******************************************************************************
//******************************************************************************
// Includes
#include <vector>
//*************************************************************************************
//*************************************************************************************
// Declaration of functions
int fed_tau_by_process_time(const float& T, const int& M, const float& tau_max,
const bool& reordering, std::vector<float>& tau);
int fed_tau_by_cycle_time(const float& t, const float& tau_max,
const bool& reordering, std::vector<float> &tau) ;
int fed_tau_internal(const int& n, const float& scale, const float& tau_max,
const bool& reordering, std::vector<float> &tau);
bool fed_is_prime_internal(const int& number);
//*************************************************************************************
//*************************************************************************************
#endif // __OPENCV_FEATURES_2D_FED_H__

View File

@@ -0,0 +1,542 @@
//=============================================================================
//
// nldiffusion_functions.cpp
// Author: Pablo F. Alcantarilla
// Institution: Université d'Auvergne
// Address: Clermont Ferrand, France
// Date: 27/12/2011
// Email: pablofdezalc@gmail.com
//
// KAZE Features Copyright 2012, Pablo F. Alcantarilla
// All Rights Reserved
// See LICENSE for the license information
//=============================================================================
/**
* @file nldiffusion_functions.cpp
* @brief Functions for non-linear diffusion applications:
* 2D Gaussian Derivatives
* Perona and Malik conductivity equations
* Perona and Malik evolution
* @date Dec 27, 2011
* @author Pablo F. Alcantarilla
*/
#include "../precomp.hpp"
#include "nldiffusion_functions.h"
#include <iostream>
// Namespaces
/* ************************************************************************* */
namespace cv
{
using namespace std;
/* ************************************************************************* */
/**
* @brief This function smoothes an image with a Gaussian kernel
* @param src Input image
* @param dst Output image
* @param ksize_x Kernel size in X-direction (horizontal)
* @param ksize_y Kernel size in Y-direction (vertical)
* @param sigma Kernel standard deviation
*/
void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma) {
int ksize_x_ = 0, ksize_y_ = 0;
// Compute an appropriate kernel size according to the specified sigma
if (sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0) {
ksize_x_ = cvCeil(2.0f*(1.0f + (sigma - 0.8f) / (0.3f)));
ksize_y_ = ksize_x_;
}
// The kernel size must be an odd number
if ((ksize_x_ % 2) == 0) {
ksize_x_ += 1;
}
if ((ksize_y_ % 2) == 0) {
ksize_y_ += 1;
}
// Perform the Gaussian Smoothing with border replication
GaussianBlur(src, dst, Size(ksize_x_, ksize_y_), sigma, sigma, BORDER_REPLICATE);
}
/* ************************************************************************* */
/**
* @brief This function computes image derivatives with Scharr kernel
* @param src Input image
* @param dst Output image
* @param xorder Derivative order in X-direction (horizontal)
* @param yorder Derivative order in Y-direction (vertical)
* @note Scharr operator approximates better rotation invariance than
* other stencils such as Sobel. See Weickert and Scharr,
* A Scheme for Coherence-Enhancing Diffusion Filtering with Optimized Rotation Invariance,
* Journal of Visual Communication and Image Representation 2002
*/
void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder) {
Scharr(src, dst, CV_32F, xorder, yorder, 1.0, 0, BORDER_DEFAULT);
}
/* ************************************************************************* */
/**
* @brief This function computes the Perona and Malik conductivity coefficient g1
* g1 = exp(-|dL|^2/k^2)
* @param Lx First order image derivative in X-direction (horizontal)
* @param Ly First order image derivative in Y-direction (vertical)
* @param dst Output image
* @param k Contrast factor parameter
*/
void pm_g1(InputArray _Lx, InputArray _Ly, OutputArray _dst, float k) {
_dst.create(_Lx.size(), _Lx.type());
Mat Lx = _Lx.getMat();
Mat Ly = _Ly.getMat();
Mat dst = _dst.getMat();
Size sz = Lx.size();
float inv_k = 1.0f / (k*k);
for (int y = 0; y < sz.height; y++) {
const float* Lx_row = Lx.ptr<float>(y);
const float* Ly_row = Ly.ptr<float>(y);
float* dst_row = dst.ptr<float>(y);
for (int x = 0; x < sz.width; x++) {
dst_row[x] = (-inv_k*(Lx_row[x]*Lx_row[x] + Ly_row[x]*Ly_row[x]));
}
}
exp(dst, dst);
}
/* ************************************************************************* */
/**
* @brief This function computes the Perona and Malik conductivity coefficient g2
* g2 = 1 / (1 + dL^2 / k^2)
* @param Lx First order image derivative in X-direction (horizontal)
* @param Ly First order image derivative in Y-direction (vertical)
* @param dst Output image
* @param k Contrast factor parameter
*/
void pm_g2(InputArray _Lx, InputArray _Ly, OutputArray _dst, float k) {
CV_INSTRUMENT_REGION();
_dst.create(_Lx.size(), _Lx.type());
Mat Lx = _Lx.getMat();
Mat Ly = _Ly.getMat();
Mat dst = _dst.getMat();
Size sz = Lx.size();
dst.create(sz, Lx.type());
float k2inv = 1.0f / (k * k);
for(int y = 0; y < sz.height; y++) {
const float *Lx_row = Lx.ptr<float>(y);
const float *Ly_row = Ly.ptr<float>(y);
float* dst_row = dst.ptr<float>(y);
for(int x = 0; x < sz.width; x++) {
dst_row[x] = 1.0f / (1.0f + ((Lx_row[x] * Lx_row[x] + Ly_row[x] * Ly_row[x]) * k2inv));
}
}
}
/* ************************************************************************* */
/**
* @brief This function computes Weickert conductivity coefficient gw
* @param Lx First order image derivative in X-direction (horizontal)
* @param Ly First order image derivative in Y-direction (vertical)
* @param dst Output image
* @param k Contrast factor parameter
* @note For more information check the following paper: J. Weickert
* Applications of nonlinear diffusion in image processing and computer vision,
* Proceedings of Algorithmy 2000
*/
void weickert_diffusivity(InputArray _Lx, InputArray _Ly, OutputArray _dst, float k) {
_dst.create(_Lx.size(), _Lx.type());
Mat Lx = _Lx.getMat();
Mat Ly = _Ly.getMat();
Mat dst = _dst.getMat();
Size sz = Lx.size();
float inv_k = 1.0f / (k*k);
for (int y = 0; y < sz.height; y++) {
const float* Lx_row = Lx.ptr<float>(y);
const float* Ly_row = Ly.ptr<float>(y);
float* dst_row = dst.ptr<float>(y);
for (int x = 0; x < sz.width; x++) {
float dL = inv_k*(Lx_row[x]*Lx_row[x] + Ly_row[x]*Ly_row[x]);
dst_row[x] = -3.315f/(dL*dL*dL*dL);
}
}
exp(dst, dst);
dst = 1.0 - dst;
}
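/* Note (illustrative, added for clarity): with dL = |grad L|^2 / k^2 as computed
   above, dL^4 = (|grad L| / k)^8, so the loop plus exp() and the final inversion
   evaluate the Weickert diffusivity gw = 1 - exp(-3.315 / ((|grad L| / k)^8)). */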
/* ************************************************************************* */
/**
* @brief This function computes Charbonnier conductivity coefficient gc
* gc = 1 / sqrt(1 + dL^2 / k^2)
* @param Lx First order image derivative in X-direction (horizontal)
* @param Ly First order image derivative in Y-direction (vertical)
* @param dst Output image
* @param k Contrast factor parameter
* @note For more information check the following paper: J. Weickert
* Applications of nonlinear diffusion in image processing and computer vision,
* Proceedings of Algorithmy 2000
*/
void charbonnier_diffusivity(InputArray _Lx, InputArray _Ly, OutputArray _dst, float k) {
_dst.create(_Lx.size(), _Lx.type());
Mat Lx = _Lx.getMat();
Mat Ly = _Ly.getMat();
Mat dst = _dst.getMat();
Size sz = Lx.size();
float inv_k = 1.0f / (k*k);
for (int y = 0; y < sz.height; y++) {
const float* Lx_row = Lx.ptr<float>(y);
const float* Ly_row = Ly.ptr<float>(y);
float* dst_row = dst.ptr<float>(y);
for (int x = 0; x < sz.width; x++) {
float den = sqrt(1.0f+inv_k*(Lx_row[x]*Lx_row[x] + Ly_row[x]*Ly_row[x]));
dst_row[x] = 1.0f / den;
}
}
}
/* ************************************************************************* */
/**
* @brief This function computes a good empirical value for the k contrast factor
* given an input image, the percentile (0-1), the gradient scale and the number of
* bins in the histogram
* @param img Input image
* @param perc Percentile of the image gradient histogram (0-1)
* @param gscale Scale for computing the image gradient histogram
* @param nbins Number of histogram bins
* @param ksize_x Kernel size in X-direction (horizontal) for the Gaussian smoothing kernel
* @param ksize_y Kernel size in Y-direction (vertical) for the Gaussian smoothing kernel
* @return k contrast factor
*/
float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y) {
CV_INSTRUMENT_REGION();
int nbin = 0, nelements = 0, nthreshold = 0, k = 0;
float kperc = 0.0, modg = 0.0;
float npoints = 0.0;
float hmax = 0.0;
// Create the array for the histogram
std::vector<int> hist(nbins, 0);
// Create the matrices
Mat gaussian = Mat::zeros(img.rows, img.cols, CV_32F);
Mat Lx = Mat::zeros(img.rows, img.cols, CV_32F);
Mat Ly = Mat::zeros(img.rows, img.cols, CV_32F);
// Perform the Gaussian convolution
gaussian_2D_convolution(img, gaussian, ksize_x, ksize_y, gscale);
// Compute the Gaussian derivatives Lx and Ly
Scharr(gaussian, Lx, CV_32F, 1, 0, 1, 0, cv::BORDER_DEFAULT);
Scharr(gaussian, Ly, CV_32F, 0, 1, 1, 0, cv::BORDER_DEFAULT);
// Skip the borders when looking for the maximum gradient
for (int i = 1; i < gaussian.rows - 1; i++) {
const float *lx = Lx.ptr<float>(i);
const float *ly = Ly.ptr<float>(i);
for (int j = 1; j < gaussian.cols - 1; j++) {
modg = lx[j]*lx[j] + ly[j]*ly[j];
// Get the maximum
if (modg > hmax) {
hmax = modg;
}
}
}
hmax = sqrt(hmax);
// Skip the borders for computing the histogram
for (int i = 1; i < gaussian.rows - 1; i++) {
const float *lx = Lx.ptr<float>(i);
const float *ly = Ly.ptr<float>(i);
for (int j = 1; j < gaussian.cols - 1; j++) {
modg = lx[j]*lx[j] + ly[j]*ly[j];
// Find the correspondent bin
if (modg != 0.0) {
nbin = (int)floor(nbins*(sqrt(modg) / hmax));
if (nbin == nbins) {
nbin--;
}
hist[nbin]++;
npoints++;
}
}
}
// Now find the perc percentile of the histogram
nthreshold = (int)(npoints*perc);
for (k = 0; nelements < nthreshold && k < nbins; k++) {
nelements = nelements + hist[k];
}
if (nelements < nthreshold) {
kperc = 0.03f;
}
else {
kperc = hmax*((float)(k) / (float)nbins);
}
return kperc;
}
/* ************************************************************************* */
/**
* @brief This function computes Scharr image derivatives
* @param src Input image
* @param dst Output image
* @param xorder Derivative order in X-direction (horizontal)
* @param yorder Derivative order in Y-direction (vertical)
* @param scale Scale factor for the derivative size
*/
void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale) {
Mat kx, ky;
compute_derivative_kernels(kx, ky, xorder, yorder, scale);
sepFilter2D(src, dst, CV_32F, kx, ky);
}
/* ************************************************************************* */
/**
* @brief Compute derivative kernels for sizes other than 3
* @param _kx Horizontal kernel values
* @param _ky Vertical kernel values
* @param dx Derivative order in X-direction (horizontal)
* @param dy Derivative order in Y-direction (vertical)
* @param scale Scale factor for the derivative size
*/
void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale) {
CV_INSTRUMENT_REGION();
int ksize = 3 + 2 * (scale - 1);
// The standard Scharr kernel
if (scale == 1) {
getDerivKernels(_kx, _ky, dx, dy, 0, true, CV_32F);
return;
}
_kx.create(ksize, 1, CV_32F, -1, true);
_ky.create(ksize, 1, CV_32F, -1, true);
Mat kx = _kx.getMat();
Mat ky = _ky.getMat();
std::vector<float> kerI;
float w = 10.0f / 3.0f;
float norm = 1.0f / (2.0f*scale*(w + 2.0f));
for (int k = 0; k < 2; k++) {
Mat* kernel = k == 0 ? &kx : &ky;
int order = k == 0 ? dx : dy;
kerI.assign(ksize, 0.0f);
if (order == 0) {
kerI[0] = norm, kerI[ksize / 2] = w*norm, kerI[ksize - 1] = norm;
}
else if (order == 1) {
kerI[0] = -1, kerI[ksize / 2] = 0, kerI[ksize - 1] = 1;
}
Mat temp(kernel->rows, kernel->cols, CV_32F, &kerI[0]);
temp.copyTo(*kernel);
}
}
class Nld_Step_Scalar_Invoker : public cv::ParallelLoopBody
{
public:
Nld_Step_Scalar_Invoker(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float _stepsize)
: _Ld(&Ld)
, _c(&c)
, _Lstep(&Lstep)
, stepsize(_stepsize)
{
}
virtual ~Nld_Step_Scalar_Invoker()
{
}
void operator()(const cv::Range& range) const CV_OVERRIDE
{
cv::Mat& Ld = *_Ld;
const cv::Mat& c = *_c;
cv::Mat& Lstep = *_Lstep;
for (int i = range.start; i < range.end; i++)
{
const float *c_prev = c.ptr<float>(i - 1);
const float *c_curr = c.ptr<float>(i);
const float *c_next = c.ptr<float>(i + 1);
const float *ld_prev = Ld.ptr<float>(i - 1);
const float *ld_curr = Ld.ptr<float>(i);
const float *ld_next = Ld.ptr<float>(i + 1);
float *dst = Lstep.ptr<float>(i);
for (int j = 1; j < Lstep.cols - 1; j++)
{
float xpos = (c_curr[j] + c_curr[j+1])*(ld_curr[j+1] - ld_curr[j]);
float xneg = (c_curr[j-1] + c_curr[j]) *(ld_curr[j] - ld_curr[j-1]);
float ypos = (c_curr[j] + c_next[j]) *(ld_next[j] - ld_curr[j]);
float yneg = (c_prev[j] + c_curr[j]) *(ld_curr[j] - ld_prev[j]);
dst[j] = 0.5f*stepsize*(xpos - xneg + ypos - yneg);
}
}
}
private:
cv::Mat * _Ld;
const cv::Mat * _c;
cv::Mat * _Lstep;
float stepsize;
};
/* ************************************************************************* */
/**
* @brief This function performs a scalar non-linear diffusion step
* @param Ld Input/output image in the evolution (updated in place)
* @param c Conductivity image
* @param Lstep Buffer that receives the computed diffusion step
* @param stepsize The step size in time units
* @note Forward Euler Scheme 3x3 stencil
* The function c is a scalar value that depends on the gradient norm
* dL_by_ds = d(c dL_by_dx)_by_dx + d(c dL_by_dy)_by_dy
*/
void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize) {
CV_INSTRUMENT_REGION();
cv::parallel_for_(cv::Range(1, Lstep.rows - 1), Nld_Step_Scalar_Invoker(Ld, c, Lstep, stepsize), (double)Ld.total()/(1 << 16));
float xneg, xpos, yneg, ypos;
float* dst = Lstep.ptr<float>(0);
const float* cprv = NULL;
const float* ccur = c.ptr<float>(0);
const float* cnxt = c.ptr<float>(1);
const float* ldprv = NULL;
const float* ldcur = Ld.ptr<float>(0);
const float* ldnxt = Ld.ptr<float>(1);
for (int j = 1; j < Lstep.cols - 1; j++) {
xpos = (ccur[j] + ccur[j+1]) * (ldcur[j+1] - ldcur[j]);
xneg = (ccur[j-1] + ccur[j]) * (ldcur[j] - ldcur[j-1]);
ypos = (ccur[j] + cnxt[j]) * (ldnxt[j] - ldcur[j]);
dst[j] = 0.5f*stepsize*(xpos - xneg + ypos);
}
dst = Lstep.ptr<float>(Lstep.rows - 1);
ccur = c.ptr<float>(Lstep.rows - 1);
cprv = c.ptr<float>(Lstep.rows - 2);
ldcur = Ld.ptr<float>(Lstep.rows - 1);
ldprv = Ld.ptr<float>(Lstep.rows - 2);
for (int j = 1; j < Lstep.cols - 1; j++) {
xpos = (ccur[j] + ccur[j+1]) * (ldcur[j+1] - ldcur[j]);
xneg = (ccur[j-1] + ccur[j]) * (ldcur[j] - ldcur[j-1]);
yneg = (cprv[j] + ccur[j]) * (ldcur[j] - ldprv[j]);
dst[j] = 0.5f*stepsize*(xpos - xneg - yneg);
}
ccur = c.ptr<float>(1);
ldcur = Ld.ptr<float>(1);
cprv = c.ptr<float>(0);
ldprv = Ld.ptr<float>(0);
int r0 = Lstep.cols - 1;
int r1 = Lstep.cols - 2;
for (int i = 1; i < Lstep.rows - 1; i++) {
cnxt = c.ptr<float>(i + 1);
ldnxt = Ld.ptr<float>(i + 1);
dst = Lstep.ptr<float>(i);
xpos = (ccur[0] + ccur[1]) * (ldcur[1] - ldcur[0]);
ypos = (ccur[0] + cnxt[0]) * (ldnxt[0] - ldcur[0]);
yneg = (cprv[0] + ccur[0]) * (ldcur[0] - ldprv[0]);
dst[0] = 0.5f*stepsize*(xpos + ypos - yneg);
xneg = (ccur[r1] + ccur[r0]) * (ldcur[r0] - ldcur[r1]);
ypos = (ccur[r0] + cnxt[r0]) * (ldnxt[r0] - ldcur[r0]);
yneg = (cprv[r0] + ccur[r0]) * (ldcur[r0] - ldprv[r0]);
dst[r0] = 0.5f*stepsize*(-xneg + ypos - yneg);
cprv = ccur;
ccur = cnxt;
ldprv = ldcur;
ldcur = ldnxt;
}
Ld += Lstep;
}
/* ************************************************************************* */
/**
* @brief This function downsamples the input image using OpenCV resize
* @param src Input image to be downsampled
* @param dst Output image with half of the resolution of the input image
*/
void halfsample_image(const cv::Mat& src, cv::Mat& dst) {
// Make sure the destination image is of the right size
CV_Assert(src.cols / 2 == dst.cols);
CV_Assert(src.rows / 2 == dst.rows);
resize(src, dst, dst.size(), 0, 0, cv::INTER_AREA);
}
/* ************************************************************************* */
/**
* @brief This function checks if a given pixel is a maximum in a local neighbourhood
* @param img Input image where we will perform the maximum search
* @param dsize Half size of the neighbourhood
* @param value Response value at (x,y) position
* @param row Image row coordinate
* @param col Image column coordinate
* @param same_img Flag indicating whether the response value at (row,col) comes from the input image itself
* @return true if the pixel is a maximum, false otherwise
*/
bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img) {
bool response = true;
for (int i = row - dsize; i <= row + dsize; i++) {
for (int j = col - dsize; j <= col + dsize; j++) {
if (i >= 0 && i < img.rows && j >= 0 && j < img.cols) {
if (same_img == true) {
if (i != row || j != col) {
if ((*(img.ptr<float>(i)+j)) > value) {
response = false;
return response;
}
}
}
else {
if ((*(img.ptr<float>(i)+j)) > value) {
response = false;
return response;
}
}
}
}
}
return response;
}
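/* Example (sketch): typical non-maxima suppression over a response map Ldet
 * (a hypothetical CV_32F image), keeping (row, col) only if it dominates its
 * 3x3 neighbourhood:
 *   float value = Ldet.at<float>(row, col);
 *   if (check_maximum_neighbourhood(Ldet, 1, value, row, col, true)) {
 *       // (row, col) is a local maximum -> keypoint candidate
 *   }
 */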
}

View File

@@ -0,0 +1,47 @@
/**
* @file nldiffusion_functions.h
* @brief Functions for non-linear diffusion applications:
* 2D Gaussian Derivatives
* Perona and Malik conductivity equations
* Perona and Malik evolution
* @date Dec 27, 2011
* @author Pablo F. Alcantarilla
*/
#ifndef __OPENCV_FEATURES_2D_NLDIFFUSION_FUNCTIONS_H__
#define __OPENCV_FEATURES_2D_NLDIFFUSION_FUNCTIONS_H__
/* ************************************************************************* */
// Declaration of functions
namespace cv
{
// Gaussian 2D convolution
void gaussian_2D_convolution(const cv::Mat& src, cv::Mat& dst, int ksize_x, int ksize_y, float sigma);
// Diffusivity functions
void pm_g1(InputArray Lx, InputArray Ly, OutputArray dst, float k);
void pm_g2(InputArray Lx, InputArray Ly, OutputArray dst, float k);
void weickert_diffusivity(InputArray Lx, InputArray Ly, OutputArray dst, float k);
void charbonnier_diffusivity(InputArray Lx, InputArray Ly, OutputArray dst, float k);
float compute_k_percentile(const cv::Mat& img, float perc, float gscale, int nbins, int ksize_x, int ksize_y);
// Image derivatives
void compute_scharr_derivatives(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder, int scale);
void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale);
void image_derivatives_scharr(const cv::Mat& src, cv::Mat& dst, int xorder, int yorder);
// Nonlinear diffusion filtering scalar step
void nld_step_scalar(cv::Mat& Ld, const cv::Mat& c, cv::Mat& Lstep, float stepsize);
// For non-maxima suppression
bool check_maximum_neighbourhood(const cv::Mat& img, int dsize, float value, int row, int col, bool same_img);
// Image downsampling
void halfsample_image(const cv::Mat& src, cv::Mat& dst);
}
#endif

View File

@@ -0,0 +1,42 @@
#ifndef __OPENCV_FEATURES_2D_KAZE_UTILS_H__
#define __OPENCV_FEATURES_2D_KAZE_UTILS_H__
/* ************************************************************************* */
/**
* @brief This function computes the value of a 2D Gaussian function
* @param x X Position
* @param y Y Position
* @param sigma Standard deviation of the Gaussian
*/
inline float gaussian(float x, float y, float sigma) {
return expf(-(x*x + y*y) / (2.0f*sigma*sigma));
}
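/* Example (sketch): gaussian() serves as an isotropic weight; for a sample at
 * offset (dx, dy) from a keypoint of scale s (the factor 2.5f is illustrative):
 *   float w = gaussian((float)dx, (float)dy, 2.5f * s);
 */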
/* ************************************************************************* */
/**
* @brief This function clamps the (x,y) position to the descriptor/image limits
* @param x X Position
* @param y Y Position
* @param width Image width
* @param height Image height
*/
inline void checkDescriptorLimits(int &x, int &y, int width, int height) {
if (x < 0) {
x = 0;
}
if (y < 0) {
y = 0;
}
if (x > width - 1) {
x = width - 1;
}
if (y > height - 1) {
y = height - 1;
}
}
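/* Example (sketch): clamp rounded sampling coordinates before reading a pixel
 * (x0, y0 are hypothetical float sample positions):
 *   int xs = cvRound(x0), ys = cvRound(y0);
 *   checkDescriptorLimits(xs, ys, img.cols, img.rows);
 *   float v = img.at<float>(ys, xs);
 */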
#endif

View File

@@ -0,0 +1,266 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2008, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv
{
struct KeypointResponseGreaterThanOrEqualToThreshold
{
KeypointResponseGreaterThanOrEqualToThreshold(float _value) :
value(_value)
{
}
inline bool operator()(const KeyPoint& kpt) const
{
return kpt.response >= value;
}
float value;
};
struct KeypointResponseGreater
{
inline bool operator()(const KeyPoint& kp1, const KeyPoint& kp2) const
{
return kp1.response > kp2.response;
}
};
// takes keypoints and culls them by the response
void KeyPointsFilter::retainBest(std::vector<KeyPoint>& keypoints, int n_points)
{
//this is only necessary if the keypoints size is greater than the number of desired points.
if( n_points >= 0 && keypoints.size() > (size_t)n_points )
{
if (n_points==0)
{
keypoints.clear();
return;
}
//first use nth element to partition the keypoints into the best and worst.
std::nth_element(keypoints.begin(), keypoints.begin() + n_points - 1, keypoints.end(), KeypointResponseGreater());
//this is the boundary response, and in the case of FAST may be ambiguous
float ambiguous_response = keypoints[n_points - 1].response;
//use std::partition to grab all of the keypoints with the boundary response.
std::vector<KeyPoint>::const_iterator new_end =
std::partition(keypoints.begin() + n_points, keypoints.end(),
KeypointResponseGreaterThanOrEqualToThreshold(ambiguous_response));
//resize the keypoints, given this new end point. nth_element and partition reordered the points in place
keypoints.resize(new_end - keypoints.begin());
}
}
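// Example (sketch): keep roughly the 500 strongest keypoints; every keypoint
// tied with the boundary response is retained, so slightly more than 500 may
// survive:
//   KeyPointsFilter::retainBest(keypoints, 500);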
struct RoiPredicate
{
RoiPredicate( const Rect& _r ) : r(_r)
{}
bool operator()( const KeyPoint& keyPt ) const
{
return !r.contains( keyPt.pt );
}
Rect r;
};
void KeyPointsFilter::runByImageBorder( std::vector<KeyPoint>& keypoints, Size imageSize, int borderSize )
{
if( borderSize > 0)
{
if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
keypoints.clear();
else
keypoints.erase( std::remove_if(keypoints.begin(), keypoints.end(),
RoiPredicate(Rect(Point(borderSize, borderSize),
Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
keypoints.end() );
}
}
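// Example (sketch): drop keypoints closer than a (here illustrative) 31-pixel
// border, e.g. before extracting patch-based descriptors:
//   KeyPointsFilter::runByImageBorder(keypoints, image.size(), 31);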
struct SizePredicate
{
SizePredicate( float _minSize, float _maxSize ) : minSize(_minSize), maxSize(_maxSize)
{}
bool operator()( const KeyPoint& keyPt ) const
{
float size = keyPt.size;
return (size < minSize) || (size > maxSize);
}
float minSize, maxSize;
};
void KeyPointsFilter::runByKeypointSize( std::vector<KeyPoint>& keypoints, float minSize, float maxSize )
{
CV_Assert( minSize >= 0 );
CV_Assert( maxSize >= 0);
CV_Assert( minSize <= maxSize );
keypoints.erase( std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)),
keypoints.end() );
}
class MaskPredicate
{
public:
MaskPredicate( const Mat& _mask ) : mask(_mask) {}
bool operator() (const KeyPoint& key_pt) const
{
return mask.at<uchar>( (int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f) ) == 0;
}
private:
const Mat mask;
MaskPredicate& operator=(const MaskPredicate&) = delete;
};
void KeyPointsFilter::runByPixelsMask( std::vector<KeyPoint>& keypoints, const Mat& mask )
{
CV_INSTRUMENT_REGION();
if( mask.empty() )
return;
keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
}
struct KeyPoint_LessThan
{
KeyPoint_LessThan(const std::vector<KeyPoint>& _kp) : kp(&_kp) {}
bool operator()(int i, int j) const
{
const KeyPoint& kp1 = (*kp)[i];
const KeyPoint& kp2 = (*kp)[j];
if( kp1.pt.x != kp2.pt.x )
return kp1.pt.x < kp2.pt.x;
if( kp1.pt.y != kp2.pt.y )
return kp1.pt.y < kp2.pt.y;
if( kp1.size != kp2.size )
return kp1.size > kp2.size;
if( kp1.angle != kp2.angle )
return kp1.angle < kp2.angle;
if( kp1.response != kp2.response )
return kp1.response > kp2.response;
if( kp1.octave != kp2.octave )
return kp1.octave > kp2.octave;
if( kp1.class_id != kp2.class_id )
return kp1.class_id > kp2.class_id;
return i < j;
}
const std::vector<KeyPoint>* kp;
};
void KeyPointsFilter::removeDuplicated( std::vector<KeyPoint>& keypoints )
{
int i, j, n = (int)keypoints.size();
std::vector<int> kpidx(n);
std::vector<uchar> mask(n, (uchar)1);
for( i = 0; i < n; i++ )
kpidx[i] = i;
std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
for( i = 1, j = 0; i < n; i++ )
{
KeyPoint& kp1 = keypoints[kpidx[i]];
KeyPoint& kp2 = keypoints[kpidx[j]];
if( kp1.pt.x != kp2.pt.x || kp1.pt.y != kp2.pt.y ||
kp1.size != kp2.size || kp1.angle != kp2.angle )
j = i;
else
mask[kpidx[i]] = 0;
}
for( i = j = 0; i < n; i++ )
{
if( mask[i] )
{
if( i != j )
keypoints[j] = keypoints[i];
j++;
}
}
keypoints.resize(j);
}
struct KeyPoint12_LessThan
{
bool operator()(const KeyPoint &kp1, const KeyPoint &kp2) const
{
if( kp1.pt.x != kp2.pt.x )
return kp1.pt.x < kp2.pt.x;
if( kp1.pt.y != kp2.pt.y )
return kp1.pt.y < kp2.pt.y;
if( kp1.size != kp2.size )
return kp1.size > kp2.size;
if( kp1.angle != kp2.angle )
return kp1.angle < kp2.angle;
if( kp1.response != kp2.response )
return kp1.response > kp2.response;
if( kp1.octave != kp2.octave )
return kp1.octave > kp2.octave;
return kp1.class_id > kp2.class_id;
}
};
void KeyPointsFilter::removeDuplicatedSorted( std::vector<KeyPoint>& keypoints )
{
int i, j, n = (int)keypoints.size();
if (n < 2) return;
std::sort(keypoints.begin(), keypoints.end(), KeyPoint12_LessThan());
for( i = 0, j = 1; j < n; ++j )
{
const KeyPoint& kp1 = keypoints[i];
const KeyPoint& kp2 = keypoints[j];
if( kp1.pt.x != kp2.pt.x || kp1.pt.y != kp2.pt.y ||
kp1.size != kp2.size || kp1.angle != kp2.angle ) {
keypoints[++i] = keypoints[j];
}
}
keypoints.resize(i + 1);
}
}

View File

@@ -0,0 +1,52 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
//
// Library initialization file
//
#include "precomp.hpp"
IPP_INITIALIZER_AUTO
/* End of file. */

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,122 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
/**
* @brief This function computes the Perona and Malik conductivity coefficient g2
* g2 = 1 / (1 + dL^2 / k^2)
* @param lx First order image derivative in X-direction (horizontal)
* @param ly First order image derivative in Y-direction (vertical)
* @param dst Output image
* @param k Contrast factor parameter
*/
__kernel void
AKAZE_pm_g2(__global const float* lx, __global const float* ly, __global float* dst,
float k, int size)
{
int i = get_global_id(0);
// OpenCV plays with the work dimensions, so we need an explicit bounds check here
if (!(i < size))
{
return;
}
const float k2inv = 1.0f / (k * k);
dst[i] = 1.0f / (1.0f + ((lx[i] * lx[i] + ly[i] * ly[i]) * k2inv));
}
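/* Note: this is the elementwise form of g2 = 1 / (1 + (Lx^2 + Ly^2) / k^2);
 * multiplying by the precomputed k2inv avoids one divide per pixel. */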
__kernel void
AKAZE_nld_step_scalar(__global const float* lt, int lt_step, int lt_offset, int rows, int cols,
__global const float* lf, __global float* dst, float step_size)
{
/* The labeling scheme for this five-point stencil:
       [    a    ]
       [ -1 c +1 ]
       [    b    ]
*/
// column-first indexing
int i = get_global_id(1);
int j = get_global_id(0);
// OpenCV plays with the work dimensions, so we need an explicit bounds check here
if (!(i < rows && j < cols))
{
return;
}
// get row indexes
int a = (i - 1) * cols;
int c = (i ) * cols;
int b = (i + 1) * cols;
// compute stencil
float res = 0.0f;
if (i == 0) // first row
{
if (j == 0 || j == (cols - 1))
{
res = 0.0f;
} else
{
res = (lf[c + j] + lf[c + j + 1])*(lt[c + j + 1] - lt[c + j]) +
(lf[c + j] + lf[c + j - 1])*(lt[c + j - 1] - lt[c + j]) +
(lf[c + j] + lf[b + j ])*(lt[b + j ] - lt[c + j]);
}
} else if (i == (rows - 1)) // last row
{
if (j == 0 || j == (cols - 1))
{
res = 0.0f;
} else
{
res = (lf[c + j] + lf[c + j + 1])*(lt[c + j + 1] - lt[c + j]) +
(lf[c + j] + lf[c + j - 1])*(lt[c + j - 1] - lt[c + j]) +
(lf[c + j] + lf[a + j ])*(lt[a + j ] - lt[c + j]);
}
} else // inner rows
{
if (j == 0) // first column
{
res = (lf[c + 0] + lf[c + 1])*(lt[c + 1] - lt[c + 0]) +
(lf[c + 0] + lf[b + 0])*(lt[b + 0] - lt[c + 0]) +
(lf[c + 0] + lf[a + 0])*(lt[a + 0] - lt[c + 0]);
} else if (j == (cols - 1)) // last column
{
res = (lf[c + j] + lf[c + j - 1])*(lt[c + j - 1] - lt[c + j]) +
(lf[c + j] + lf[b + j ])*(lt[b + j ] - lt[c + j]) +
(lf[c + j] + lf[a + j ])*(lt[a + j ] - lt[c + j]);
} else // inner stencil
{
res = (lf[c + j] + lf[c + j + 1])*(lt[c + j + 1] - lt[c + j]) +
(lf[c + j] + lf[c + j - 1])*(lt[c + j - 1] - lt[c + j]) +
(lf[c + j] + lf[b + j ])*(lt[b + j ] - lt[c + j]) +
(lf[c + j] + lf[a + j ])*(lt[a + j ] - lt[c + j]);
}
}
dst[c + j] = res * step_size;
}
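/* Note: unlike the scalar CPU path, this kernel only writes the scaled step
 * into dst; adding it back onto the evolution image is presumably left to the
 * host side. */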
/**
* @brief Compute determinant from hessians
* @details Compute Ldet by (Lxx.mul(Lyy) - Lxy.mul(Lxy)) * sigma
*
 * @param lxx second-order spatial derivative (Lxx)
 * @param lxy second-order spatial derivative (Lxy)
 * @param lyy second-order spatial derivative (Lyy)
* @param dst output determinant
* @param sigma determinant will be scaled by this sigma
*/
__kernel void
AKAZE_compute_determinant(__global const float* lxx, __global const float* lxy, __global const float* lyy,
__global float* dst, float sigma, int size)
{
int i = get_global_id(0);
// OpenCV plays with the work dimensions, so we need an explicit bounds check here
if (!(i < size))
{
return;
}
dst[i] = (lxx[i] * lyy[i] - lxy[i] * lxy[i]) * sigma;
}

View File

@@ -0,0 +1,560 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Nathan, liujun@multicorewareinc.com
// Peng Xiao, pengxiao@outlook.com
// Baichuan Su, baichuan@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#pragma OPENCL EXTENSION cl_khr_global_int32_base_atomics:enable
#define MAX_FLOAT 3.40282e+038f
#ifndef T
#define T float
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16
#endif
#ifndef MAX_DESC_LEN
#define MAX_DESC_LEN 64
#endif
#define BLOCK_SIZE_ODD (BLOCK_SIZE + 1)
#ifndef SHARED_MEM_SZ
# if (BLOCK_SIZE < MAX_DESC_LEN)
# define SHARED_MEM_SZ (kercn * (BLOCK_SIZE * MAX_DESC_LEN + BLOCK_SIZE * BLOCK_SIZE))
# else
# define SHARED_MEM_SZ (kercn * 2 * BLOCK_SIZE_ODD * BLOCK_SIZE)
# endif
#endif
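// The shared buffer holds either a fully cached query block plus one train
// tile (when BLOCK_SIZE < MAX_DESC_LEN) or two padded BLOCK_SIZE x BLOCK_SIZE
// tiles; the +1 padding of BLOCK_SIZE_ODD presumably avoids local-memory
// bank conflicts.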
#ifndef DIST_TYPE
#define DIST_TYPE 2
#endif
// dirty fix for non-template support
#if (DIST_TYPE == 2) // L1Dist
# ifdef T_FLOAT
typedef float result_type;
# if (8 == kercn)
typedef float8 value_type;
# define DIST(x, y) {value_type d = fabs((x) - (y)); result += d.s0 + d.s1 + d.s2 + d.s3 + d.s4 + d.s5 + d.s6 + d.s7;}
# elif (4 == kercn)
typedef float4 value_type;
# define DIST(x, y) {value_type d = fabs((x) - (y)); result += d.s0 + d.s1 + d.s2 + d.s3;}
# else
typedef float value_type;
# define DIST(x, y) result += fabs((x) - (y))
# endif
# else
typedef int result_type;
# if (8 == kercn)
typedef int8 value_type;
# define DIST(x, y) {value_type d = abs((x) - (y)); result += d.s0 + d.s1 + d.s2 + d.s3 + d.s4 + d.s5 + d.s6 + d.s7;}
# elif (4 == kercn)
typedef int4 value_type;
# define DIST(x, y) {value_type d = abs((x) - (y)); result += d.s0 + d.s1 + d.s2 + d.s3;}
# else
typedef int value_type;
# define DIST(x, y) result += abs((x) - (y))
# endif
# endif
# define DIST_RES(x) (x)
#elif (DIST_TYPE == 4) // L2Dist
typedef float result_type;
# if (8 == kercn)
typedef float8 value_type;
# define DIST(x, y) {value_type d = ((x) - (y)); result += dot(d.s0123, d.s0123) + dot(d.s4567, d.s4567);}
# elif (4 == kercn)
typedef float4 value_type;
# define DIST(x, y) {value_type d = ((x) - (y)); result += dot(d, d);}
# else
typedef float value_type;
# define DIST(x, y) {value_type d = ((x) - (y)); result = mad(d, d, result);}
# endif
# define DIST_RES(x) sqrt(x)
#elif (DIST_TYPE == 6) // Hamming
# if (8 == kercn)
typedef int8 value_type;
# elif (4 == kercn)
typedef int4 value_type;
# else
typedef int value_type;
# endif
typedef int result_type;
# define DIST(x, y) result += popcount( (x) ^ (y) )
# define DIST_RES(x) (x)
#endif
inline result_type reduce_block(
__local value_type *s_query,
__local value_type *s_train,
int lidx,
int lidy
)
{
result_type result = 0;
#pragma unroll
for (int j = 0 ; j < BLOCK_SIZE ; j++)
{
DIST(s_query[lidy * BLOCK_SIZE_ODD + j], s_train[j * BLOCK_SIZE_ODD + lidx]);
}
return DIST_RES(result);
}
inline result_type reduce_block_match(
__local value_type *s_query,
__local value_type *s_train,
int lidx,
int lidy
)
{
result_type result = 0;
#pragma unroll
for (int j = 0 ; j < BLOCK_SIZE ; j++)
{
DIST(s_query[lidy * BLOCK_SIZE_ODD + j], s_train[j * BLOCK_SIZE_ODD + lidx]);
}
return result;
}
inline result_type reduce_multi_block(
__local value_type *s_query,
__local value_type *s_train,
int block_index,
int lidx,
int lidy
)
{
result_type result = 0;
#pragma unroll
for (int j = 0 ; j < BLOCK_SIZE ; j++)
{
DIST(s_query[lidy * MAX_DESC_LEN + block_index * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + lidx]);
}
return result;
}
__kernel void BruteForceMatch_Match(
__global T *query,
__global T *train,
__global int *bestTrainIdx,
__global float *bestDistance,
int query_rows,
int query_cols,
int train_rows,
int train_cols,
int step
)
{
const int lidx = get_local_id(0);
const int lidy = get_local_id(1);
const int groupidx = get_group_id(0);
const int queryIdx = mad24(BLOCK_SIZE, groupidx, lidy);
const int queryOffset = min(queryIdx, query_rows - 1) * step;
__global TN *query_vec = (__global TN *)(query + queryOffset);
query_cols /= kercn;
__local float sharebuffer[SHARED_MEM_SZ];
__local value_type *s_query = (__local value_type *)sharebuffer;
#if 0 < MAX_DESC_LEN
__local value_type *s_train = (__local value_type *)sharebuffer + BLOCK_SIZE * MAX_DESC_LEN;
// load the query into local memory.
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; i++)
{
const int loadx = mad24(BLOCK_SIZE, i, lidx);
s_query[mad24(MAX_DESC_LEN, lidy, loadx)] = loadx < query_cols ? query_vec[loadx] : 0;
}
#else
__local value_type *s_train = (__local value_type *)sharebuffer + BLOCK_SIZE_ODD * BLOCK_SIZE;
const int s_query_i = mad24(BLOCK_SIZE_ODD, lidy, lidx);
const int s_train_i = mad24(BLOCK_SIZE_ODD, lidx, lidy);
#endif
float myBestDistance = MAX_FLOAT;
int myBestTrainIdx = -1;
// loopUnrolledCached to find the best trainIdx and best distance.
for (int t = 0, endt = (train_rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; t++)
{
result_type result = 0;
const int trainOffset = min(mad24(BLOCK_SIZE, t, lidy), train_rows - 1) * step;
__global TN *train_vec = (__global TN *)(train + trainOffset);
#if 0 < MAX_DESC_LEN
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; i++)
{
//load a BLOCK_SIZE * BLOCK_SIZE block into local train.
const int loadx = mad24(BLOCK_SIZE, i, lidx);
s_train[mad24(BLOCK_SIZE, lidx, lidy)] = loadx < train_cols ? train_vec[loadx] : 0;
//synchronize to make sure every element for the reduce iteration has been written to shared memory.
barrier(CLK_LOCAL_MEM_FENCE);
result += reduce_multi_block(s_query, s_train, i, lidx, lidy);
barrier(CLK_LOCAL_MEM_FENCE);
}
#else
for (int i = 0, endq = (query_cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endq; i++)
{
const int loadx = mad24(i, BLOCK_SIZE, lidx);
//load query and train into local memory
if (loadx < query_cols)
{
s_query[s_query_i] = query_vec[loadx];
s_train[s_train_i] = train_vec[loadx];
}
else
{
s_query[s_query_i] = 0;
s_train[s_train_i] = 0;
}
barrier(CLK_LOCAL_MEM_FENCE);
result += reduce_block_match(s_query, s_train, lidx, lidy);
barrier(CLK_LOCAL_MEM_FENCE);
}
#endif
result = DIST_RES(result);
const int trainIdx = mad24(BLOCK_SIZE, t, lidx);
if (queryIdx < query_rows && trainIdx < train_rows && result < myBestDistance /*&& mask(queryIdx, trainIdx)*/)
{
myBestDistance = result;
myBestTrainIdx = trainIdx;
}
}
barrier(CLK_LOCAL_MEM_FENCE);
__local float *s_distance = (__local float *)sharebuffer;
__local int *s_trainIdx = (__local int *)(sharebuffer + BLOCK_SIZE_ODD * BLOCK_SIZE);
//findBestMatch
s_distance += lidy * BLOCK_SIZE_ODD;
s_trainIdx += lidy * BLOCK_SIZE_ODD;
s_distance[lidx] = myBestDistance;
s_trainIdx[lidx] = myBestTrainIdx;
barrier(CLK_LOCAL_MEM_FENCE);
//reduce -- every thread now performs the full reduction itself.
#pragma unroll
for (int k = 0 ; k < BLOCK_SIZE; k++)
{
if (myBestDistance > s_distance[k])
{
myBestDistance = s_distance[k];
myBestTrainIdx = s_trainIdx[k];
}
}
if (queryIdx < query_rows && lidx == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
//radius_match
__kernel void BruteForceMatch_RadiusMatch(
__global T *query,
__global T *train,
float maxDistance,
__global int *bestTrainIdx,
__global float *bestDistance,
__global int *nMatches,
int query_rows,
int query_cols,
int train_rows,
int train_cols,
int bestTrainIdx_cols,
int step,
int ostep
)
{
const int lidx = get_local_id(0);
const int lidy = get_local_id(1);
const int groupidx = get_group_id(0);
const int groupidy = get_group_id(1);
const int queryIdx = mad24(BLOCK_SIZE, groupidy, lidy);
const int queryOffset = min(queryIdx, query_rows - 1) * step;
__global TN *query_vec = (__global TN *)(query + queryOffset);
const int trainIdx = mad24(BLOCK_SIZE, groupidx, lidx);
const int trainOffset = min(mad24(BLOCK_SIZE, groupidx, lidy), train_rows - 1) * step;
__global TN *train_vec = (__global TN *)(train + trainOffset);
query_cols /= kercn;
__local float sharebuffer[SHARED_MEM_SZ];
__local value_type *s_query = (__local value_type *)sharebuffer;
__local value_type *s_train = (__local value_type *)sharebuffer + BLOCK_SIZE_ODD * BLOCK_SIZE;
result_type result = 0;
const int s_query_i = mad24(BLOCK_SIZE_ODD, lidy, lidx);
const int s_train_i = mad24(BLOCK_SIZE_ODD, lidx, lidy);
for (int i = 0 ; i < (query_cols + BLOCK_SIZE - 1) / BLOCK_SIZE ; ++i)
{
//load a BLOCK_SIZE * BLOCK_SIZE block into local train.
const int loadx = mad24(BLOCK_SIZE, i, lidx);
if (loadx < query_cols)
{
s_query[s_query_i] = query_vec[loadx];
s_train[s_train_i] = train_vec[loadx];
}
else
{
s_query[s_query_i] = 0;
s_train[s_train_i] = 0;
}
//synchronize to make sure every element for the reduce iteration has been written to shared memory.
barrier(CLK_LOCAL_MEM_FENCE);
result += reduce_block(s_query, s_train, lidx, lidy);
barrier(CLK_LOCAL_MEM_FENCE);
}
if (queryIdx < query_rows && trainIdx < train_rows && convert_float(result) < maxDistance)
{
int ind = atom_inc(nMatches + queryIdx);
if(ind < bestTrainIdx_cols)
{
bestTrainIdx[mad24(queryIdx, ostep, ind)] = trainIdx;
bestDistance[mad24(queryIdx, ostep, ind)] = result;
}
}
}
__kernel void BruteForceMatch_knnMatch(
__global T *query,
__global T *train,
__global int2 *bestTrainIdx,
__global float2 *bestDistance,
int query_rows,
int query_cols,
int train_rows,
int train_cols,
int step
)
{
const int lidx = get_local_id(0);
const int lidy = get_local_id(1);
const int groupidx = get_group_id(0);
const int queryIdx = mad24(BLOCK_SIZE, groupidx, lidy);
const int queryOffset = min(queryIdx, query_rows - 1) * step;
__global TN *query_vec = (__global TN *)(query + queryOffset);
query_cols /= kercn;
__local float sharebuffer[SHARED_MEM_SZ];
__local value_type *s_query = (__local value_type *)sharebuffer;
#if 0 < MAX_DESC_LEN
__local value_type *s_train = (__local value_type *)sharebuffer + BLOCK_SIZE * MAX_DESC_LEN;
// load the query into local memory.
#pragma unroll
for (int i = 0 ; i < MAX_DESC_LEN / BLOCK_SIZE; i ++)
{
int loadx = mad24(BLOCK_SIZE, i, lidx);
s_query[mad24(MAX_DESC_LEN, lidy, loadx)] = loadx < query_cols ? query_vec[loadx] : 0;
}
#else
__local value_type *s_train = (__local value_type *)sharebuffer + BLOCK_SIZE_ODD * BLOCK_SIZE;
const int s_query_i = mad24(BLOCK_SIZE_ODD, lidy, lidx);
const int s_train_i = mad24(BLOCK_SIZE_ODD, lidx, lidy);
#endif
float myBestDistance1 = MAX_FLOAT;
float myBestDistance2 = MAX_FLOAT;
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
for (int t = 0, endt = (train_rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt ; t++)
{
result_type result = 0;
int trainOffset = min(mad24(BLOCK_SIZE, t, lidy), train_rows - 1) * step;
__global TN *train_vec = (__global TN *)(train + trainOffset);
#if 0 < MAX_DESC_LEN
#pragma unroll
for (int i = 0 ; i < MAX_DESC_LEN / BLOCK_SIZE ; i++)
{
//load a BLOCK_SIZE * BLOCK_SIZE block into local train.
const int loadx = mad24(BLOCK_SIZE, i, lidx);
s_train[mad24(BLOCK_SIZE, lidx, lidy)] = loadx < train_cols ? train_vec[loadx] : 0;
//synchronize to make sure every element for the reduce iteration has been written to shared memory.
barrier(CLK_LOCAL_MEM_FENCE);
result += reduce_multi_block(s_query, s_train, i, lidx, lidy);
barrier(CLK_LOCAL_MEM_FENCE);
}
#else
for (int i = 0, endq = (query_cols + BLOCK_SIZE -1) / BLOCK_SIZE; i < endq ; i++)
{
const int loadx = mad24(BLOCK_SIZE, i, lidx);
//load query and train into local memory
if (loadx < query_cols)
{
s_query[s_query_i] = query_vec[loadx];
s_train[s_train_i] = train_vec[loadx];
}
else
{
s_query[s_query_i] = 0;
s_train[s_train_i] = 0;
}
barrier(CLK_LOCAL_MEM_FENCE);
result += reduce_block_match(s_query, s_train, lidx, lidy);
barrier(CLK_LOCAL_MEM_FENCE);
}
#endif
result = DIST_RES(result);
const int trainIdx = mad24(BLOCK_SIZE, t, lidx);
if (queryIdx < query_rows && trainIdx < train_rows)
{
if (result < myBestDistance1)
{
myBestDistance2 = myBestDistance1;
myBestTrainIdx2 = myBestTrainIdx1;
myBestDistance1 = result;
myBestTrainIdx1 = trainIdx;
}
else if (result < myBestDistance2)
{
myBestDistance2 = result;
myBestTrainIdx2 = trainIdx;
}
}
}
barrier(CLK_LOCAL_MEM_FENCE);
__local float *s_distance = (__local float *)sharebuffer;
__local int *s_trainIdx = (__local int *)(sharebuffer + BLOCK_SIZE_ODD * BLOCK_SIZE);
// find BestMatch
s_distance += lidy * BLOCK_SIZE_ODD;
s_trainIdx += lidy * BLOCK_SIZE_ODD;
s_distance[lidx] = myBestDistance1;
s_trainIdx[lidx] = myBestTrainIdx1;
float bestDistance1 = MAX_FLOAT;
float bestDistance2 = MAX_FLOAT;
int bestTrainIdx1 = -1;
int bestTrainIdx2 = -1;
barrier(CLK_LOCAL_MEM_FENCE);
if (lidx == 0)
{
for (int i = 0 ; i < BLOCK_SIZE ; i++)
{
float val = s_distance[i];
if (val < bestDistance1)
{
bestDistance2 = bestDistance1;
bestTrainIdx2 = bestTrainIdx1;
bestDistance1 = val;
bestTrainIdx1 = s_trainIdx[i];
}
else if (val < bestDistance2)
{
bestDistance2 = val;
bestTrainIdx2 = s_trainIdx[i];
}
}
}
barrier(CLK_LOCAL_MEM_FENCE);
s_distance[lidx] = myBestDistance2;
s_trainIdx[lidx] = myBestTrainIdx2;
barrier(CLK_LOCAL_MEM_FENCE);
if (lidx == 0)
{
for (int i = 0 ; i < BLOCK_SIZE ; i++)
{
float val = s_distance[i];
if (val < bestDistance2)
{
bestDistance2 = val;
bestTrainIdx2 = s_trainIdx[i];
}
}
}
myBestDistance1 = bestDistance1;
myBestDistance2 = bestDistance2;
myBestTrainIdx1 = bestTrainIdx1;
myBestTrainIdx2 = bestTrainIdx2;
if (queryIdx < query_rows && lidx == 0)
{
bestTrainIdx[queryIdx] = (int2)(myBestTrainIdx1, myBestTrainIdx2);
bestDistance[queryIdx] = (float2)(myBestDistance1, myBestDistance2);
}
}

View File

@@ -0,0 +1,162 @@
// OpenCL port of the FAST corner detector.
// Copyright (C) 2014, Itseez Inc. See the license at http://opencv.org
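// cornerScore returns the largest threshold for which the pixel would still
// pass the FAST-9 test: a0 and b0 track, over all 16 arc starting positions,
// the best min (darker arc) and max (brighter arc) of 9 contiguous
// center-minus-ring differences.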
inline int cornerScore(__global const uchar* img, int step)
{
int k, tofs, v = img[0], a0 = 0, b0;
int d[16];
#define LOAD2(idx, ofs) \
tofs = ofs; d[idx] = (short)(v - img[tofs]); d[idx+8] = (short)(v - img[-tofs])
LOAD2(0, 3);
LOAD2(1, -step+3);
LOAD2(2, -step*2+2);
LOAD2(3, -step*3+1);
LOAD2(4, -step*3);
LOAD2(5, -step*3-1);
LOAD2(6, -step*2-2);
LOAD2(7, -step-3);
#pragma unroll
for( k = 0; k < 16; k += 2 )
{
int a = min((int)d[(k+1)&15], (int)d[(k+2)&15]);
a = min(a, (int)d[(k+3)&15]);
a = min(a, (int)d[(k+4)&15]);
a = min(a, (int)d[(k+5)&15]);
a = min(a, (int)d[(k+6)&15]);
a = min(a, (int)d[(k+7)&15]);
a = min(a, (int)d[(k+8)&15]);
a0 = max(a0, min(a, (int)d[k&15]));
a0 = max(a0, min(a, (int)d[(k+9)&15]));
}
b0 = -a0;
#pragma unroll
for( k = 0; k < 16; k += 2 )
{
int b = max((int)d[(k+1)&15], (int)d[(k+2)&15]);
b = max(b, (int)d[(k+3)&15]);
b = max(b, (int)d[(k+4)&15]);
b = max(b, (int)d[(k+5)&15]);
b = max(b, (int)d[(k+6)&15]);
b = max(b, (int)d[(k+7)&15]);
b = max(b, (int)d[(k+8)&15]);
b0 = min(b0, max(b, (int)d[k]));
b0 = min(b0, max(b, (int)d[(k+9)&15]));
}
return -b0-1;
}
__kernel
void FAST_findKeypoints(
__global const uchar * _img, int step, int img_offset,
int img_rows, int img_cols,
volatile __global int* kp_loc,
int max_keypoints, int threshold )
{
int j = get_global_id(0) + 3;
int i = get_global_id(1) + 3;
if (i < img_rows - 3 && j < img_cols - 3)
{
__global const uchar* img = _img + mad24(i, step, j + img_offset);
int v = img[0], t0 = v - threshold, t1 = v + threshold;
int k, tofs, v0, v1;
int m0 = 0, m1 = 0;
#define UPDATE_MASK(idx, ofs) \
tofs = ofs; v0 = img[tofs]; v1 = img[-tofs]; \
m0 |= ((v0 < t0) << idx) | ((v1 < t0) << (8 + idx)); \
m1 |= ((v0 > t1) << idx) | ((v1 > t1) << (8 + idx))
UPDATE_MASK(0, 3);
if( (m0 | m1) == 0 )
return;
UPDATE_MASK(2, -step*2+2);
UPDATE_MASK(4, -step*3);
UPDATE_MASK(6, -step*2-2);
#define EVEN_MASK (1+4+16+64)
if( ((m0 | (m0 >> 8)) & EVEN_MASK) != EVEN_MASK &&
((m1 | (m1 >> 8)) & EVEN_MASK) != EVEN_MASK )
return;
UPDATE_MASK(1, -step+3);
UPDATE_MASK(3, -step*3+1);
UPDATE_MASK(5, -step*3-1);
UPDATE_MASK(7, -step-3);
if( ((m0 | (m0 >> 8)) & 255) != 255 &&
((m1 | (m1 >> 8)) & 255) != 255 )
return;
m0 |= m0 << 16;
m1 |= m1 << 16;
#define CHECK0(i) ((m0 & (511 << i)) == (511 << i))
#define CHECK1(i) ((m1 & (511 << i)) == (511 << i))
if( CHECK0(0) + CHECK0(1) + CHECK0(2) + CHECK0(3) +
CHECK0(4) + CHECK0(5) + CHECK0(6) + CHECK0(7) +
CHECK0(8) + CHECK0(9) + CHECK0(10) + CHECK0(11) +
CHECK0(12) + CHECK0(13) + CHECK0(14) + CHECK0(15) +
CHECK1(0) + CHECK1(1) + CHECK1(2) + CHECK1(3) +
CHECK1(4) + CHECK1(5) + CHECK1(6) + CHECK1(7) +
CHECK1(8) + CHECK1(9) + CHECK1(10) + CHECK1(11) +
CHECK1(12) + CHECK1(13) + CHECK1(14) + CHECK1(15) == 0 )
return;
{
int idx = atomic_inc(kp_loc);
if( idx < max_keypoints )
{
kp_loc[1 + 2*idx] = j;
kp_loc[2 + 2*idx] = i;
}
}
}
}
///////////////////////////////////////////////////////////////////////////
// nonmaxSupression
__kernel
void FAST_nonmaxSupression(
__global const int* kp_in, volatile __global int* kp_out,
__global const uchar * _img, int step, int img_offset,
int rows, int cols, int counter, int max_keypoints)
{
const int idx = get_global_id(0);
if (idx < counter)
{
int x = kp_in[1 + 2*idx];
int y = kp_in[2 + 2*idx];
__global const uchar* img = _img + mad24(y, step, x + img_offset);
int s = cornerScore(img, step);
if( (x < 4 || s > cornerScore(img-1, step)) +
(y < 4 || s > cornerScore(img-step, step)) != 2 )
return;
if( (x >= cols - 4 || s > cornerScore(img+1, step)) +
(y >= rows - 4 || s > cornerScore(img+step, step)) +
(x < 4 || y < 4 || s > cornerScore(img-step-1, step)) +
(x >= cols - 4 || y < 4 || s > cornerScore(img-step+1, step)) +
(x < 4 || y >= rows - 4 || s > cornerScore(img+step-1, step)) +
(x >= cols - 4 || y >= rows - 4 || s > cornerScore(img+step+1, step)) == 6)
{
int new_idx = atomic_inc(kp_out);
if( new_idx < max_keypoints )
{
kp_out[1 + 3*new_idx] = x;
kp_out[2 + 3*new_idx] = y;
kp_out[3 + 3*new_idx] = s;
}
}
}
}

View File

@@ -0,0 +1,254 @@
// OpenCL port of the ORB feature detector and descriptor extractor
// Copyright (C) 2014, Itseez Inc. See the license at http://opencv.org
//
// The original code has been contributed by Peter Andreas Entschev, peter@entschev.com
#define LAYERINFO_SIZE 1
#define LAYERINFO_OFS 0
#define KEYPOINT_SIZE 3
#define ORIENTED_KEYPOINT_SIZE 4
#define KEYPOINT_X 0
#define KEYPOINT_Y 1
#define KEYPOINT_Z 2
#define KEYPOINT_ANGLE 3
/////////////////////////////////////////////////////////////
#ifdef ORB_RESPONSES
__kernel void
ORB_HarrisResponses(__global const uchar* imgbuf, int imgstep, int imgoffset0,
__global const int* layerinfo, __global const int* keypoints,
__global float* responses, int nkeypoints )
{
int idx = get_global_id(0);
if( idx < nkeypoints )
{
__global const int* kpt = keypoints + idx*KEYPOINT_SIZE;
__global const int* layer = layerinfo + kpt[KEYPOINT_Z]*LAYERINFO_SIZE;
__global const uchar* img = imgbuf + imgoffset0 + layer[LAYERINFO_OFS] +
(kpt[KEYPOINT_Y] - blockSize/2)*imgstep + (kpt[KEYPOINT_X] - blockSize/2);
int i, j;
int a = 0, b = 0, c = 0;
for( i = 0; i < blockSize; i++, img += imgstep-blockSize )
{
for( j = 0; j < blockSize; j++, img++ )
{
int Ix = (img[1] - img[-1])*2 + img[-imgstep+1] - img[-imgstep-1] + img[imgstep+1] - img[imgstep-1];
int Iy = (img[imgstep] - img[-imgstep])*2 + img[imgstep-1] - img[-imgstep-1] + img[imgstep+1] - img[-imgstep+1];
a += Ix*Ix;
b += Iy*Iy;
c += Ix*Iy;
}
}
responses[idx] = ((float)a * b - (float)c * c - HARRIS_K * (float)(a + b) * (a + b))*scale_sq_sq;
}
}
#endif
/////////////////////////////////////////////////////////////
#ifdef ORB_ANGLES
#define _DBL_EPSILON 2.2204460492503131e-16f
#define atan2_p1 (0.9997878412794807f*57.29577951308232f)
#define atan2_p3 (-0.3258083974640975f*57.29577951308232f)
#define atan2_p5 (0.1555786518463281f*57.29577951308232f)
#define atan2_p7 (-0.04432655554792128f*57.29577951308232f)
inline float fastAtan2( float y, float x )
{
float ax = fabs(x), ay = fabs(y);
float a, c, c2;
if( ax >= ay )
{
c = ay/(ax + _DBL_EPSILON);
c2 = c*c;
a = (((atan2_p7*c2 + atan2_p5)*c2 + atan2_p3)*c2 + atan2_p1)*c;
}
else
{
c = ax/(ay + _DBL_EPSILON);
c2 = c*c;
a = 90.f - (((atan2_p7*c2 + atan2_p5)*c2 + atan2_p3)*c2 + atan2_p1)*c;
}
if( x < 0 )
a = 180.f - a;
if( y < 0 )
a = 360.f - a;
return a;
}
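// Note: returns the angle in degrees in [0, 360). The polynomial mirrors the
// CPU cv::fastAtan2 so that GPU and CPU orientations stay in agreement.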
__kernel void
ORB_ICAngle(__global const uchar* imgbuf, int imgstep, int imgoffset0,
__global const int* layerinfo, __global const int* keypoints,
__global float* responses, const __global int* u_max,
int nkeypoints, int half_k )
{
int idx = get_global_id(0);
if( idx < nkeypoints )
{
__global const int* kpt = keypoints + idx*KEYPOINT_SIZE;
__global const int* layer = layerinfo + kpt[KEYPOINT_Z]*LAYERINFO_SIZE;
__global const uchar* center = imgbuf + imgoffset0 + layer[LAYERINFO_OFS] +
kpt[KEYPOINT_Y]*imgstep + kpt[KEYPOINT_X];
int u, v, m_01 = 0, m_10 = 0;
// Treat the center line differently, v=0
for( u = -half_k; u <= half_k; u++ )
m_10 += u * center[u];
// Go line by line in the circular patch
for( v = 1; v <= half_k; v++ )
{
// Proceed over the two lines
int v_sum = 0;
int d = u_max[v];
for( u = -d; u <= d; u++ )
{
int val_plus = center[u + v*imgstep], val_minus = center[u - v*imgstep];
v_sum += (val_plus - val_minus);
m_10 += u * (val_plus + val_minus);
}
m_01 += v * v_sum;
}
// we do not use OpenCL's atan2 intrinsic,
// because we want to get _exactly_ the same results as the CPU version
responses[idx] = fastAtan2((float)m_01, (float)m_10);
}
}
#endif
/////////////////////////////////////////////////////////////
#ifdef ORB_DESCRIPTORS
__kernel void
ORB_computeDescriptor(__global const uchar* imgbuf, int imgstep, int imgoffset0,
__global const int* layerinfo, __global const int* keypoints,
__global uchar* _desc, const __global int* pattern,
int nkeypoints, int dsize )
{
int idx = get_global_id(0);
if( idx < nkeypoints )
{
int i;
__global const int* kpt = keypoints + idx*ORIENTED_KEYPOINT_SIZE;
__global const int* layer = layerinfo + kpt[KEYPOINT_Z]*LAYERINFO_SIZE;
__global const uchar* center = imgbuf + imgoffset0 + layer[LAYERINFO_OFS] +
kpt[KEYPOINT_Y]*imgstep + kpt[KEYPOINT_X];
float angle = as_float(kpt[KEYPOINT_ANGLE]);
angle *= 0.01745329251994329547f;
float cosa;
float sina = sincos(angle, &cosa);
__global uchar* desc = _desc + idx*dsize;
#define GET_VALUE(idx) \
center[mad24(convert_int_rte(pattern[(idx)*2] * sina + pattern[(idx)*2+1] * cosa), imgstep, \
convert_int_rte(pattern[(idx)*2] * cosa - pattern[(idx)*2+1] * sina))]
for( i = 0; i < dsize; i++ )
{
int val;
#if WTA_K == 2
int t0, t1;
t0 = GET_VALUE(0); t1 = GET_VALUE(1);
val = t0 < t1;
t0 = GET_VALUE(2); t1 = GET_VALUE(3);
val |= (t0 < t1) << 1;
t0 = GET_VALUE(4); t1 = GET_VALUE(5);
val |= (t0 < t1) << 2;
t0 = GET_VALUE(6); t1 = GET_VALUE(7);
val |= (t0 < t1) << 3;
t0 = GET_VALUE(8); t1 = GET_VALUE(9);
val |= (t0 < t1) << 4;
t0 = GET_VALUE(10); t1 = GET_VALUE(11);
val |= (t0 < t1) << 5;
t0 = GET_VALUE(12); t1 = GET_VALUE(13);
val |= (t0 < t1) << 6;
t0 = GET_VALUE(14); t1 = GET_VALUE(15);
val |= (t0 < t1) << 7;
pattern += 16*2;
#elif WTA_K == 3
int t0, t1, t2;
t0 = GET_VALUE(0); t1 = GET_VALUE(1); t2 = GET_VALUE(2);
val = t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0);
t0 = GET_VALUE(3); t1 = GET_VALUE(4); t2 = GET_VALUE(5);
val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 2;
t0 = GET_VALUE(6); t1 = GET_VALUE(7); t2 = GET_VALUE(8);
val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 4;
t0 = GET_VALUE(9); t1 = GET_VALUE(10); t2 = GET_VALUE(11);
val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 6;
pattern += 12*2;
#elif WTA_K == 4
int t0, t1, t2, t3, k;
int a, b;
t0 = GET_VALUE(0); t1 = GET_VALUE(1);
t2 = GET_VALUE(2); t3 = GET_VALUE(3);
a = 0, b = 2;
if( t1 > t0 ) t0 = t1, a = 1;
if( t3 > t2 ) t2 = t3, b = 3;
k = t0 > t2 ? a : b;
val = k;
t0 = GET_VALUE(4); t1 = GET_VALUE(5);
t2 = GET_VALUE(6); t3 = GET_VALUE(7);
a = 0, b = 2;
if( t1 > t0 ) t0 = t1, a = 1;
if( t3 > t2 ) t2 = t3, b = 3;
k = t0 > t2 ? a : b;
val |= k << 2;
t0 = GET_VALUE(8); t1 = GET_VALUE(9);
t2 = GET_VALUE(10); t3 = GET_VALUE(11);
a = 0, b = 2;
if( t1 > t0 ) t0 = t1, a = 1;
if( t3 > t2 ) t2 = t3, b = 3;
k = t0 > t2 ? a : b;
val |= k << 4;
t0 = GET_VALUE(12); t1 = GET_VALUE(13);
t2 = GET_VALUE(14); t3 = GET_VALUE(15);
a = 0, b = 2;
if( t1 > t0 ) t0 = t1, a = 1;
if( t3 > t2 ) t2 = t3, b = 3;
k = t0 > t2 ? a : b;
val |= k << 6;
pattern += 16*2;
#else
#error "unknown/undefined WTA_K value; should be 2, 3 or 4"
#endif
desc[i] = (uchar)val;
}
}
}
#endif

File diff suppressed because it is too large

View File

@@ -0,0 +1,56 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#include "opencv2/features2d.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/private.hpp"
#include "opencv2/core/ocl.hpp"
#include "opencv2/core/hal/hal.hpp"
#include <algorithm>
#endif

View File

@@ -0,0 +1,557 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (c) 2006-2010, Rob Hess <hess@eecs.oregonstate.edu>
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2020, Intel Corporation, all rights reserved.
/**********************************************************************************************\
Implementation of SIFT is based on the code from http://blogs.oregonstate.edu/hess/code/sift/
Below is the original copyright.
Patent US6711293 expired in March 2020.
// Copyright (c) 2006-2010, Rob Hess <hess@eecs.oregonstate.edu>
// All rights reserved.
// The following patent has been issued for methods embodied in this
// software: "Method and apparatus for identifying scale invariant features
// in an image and use of same for locating an object in an image," David
// G. Lowe, US Patent 6,711,293 (March 23, 2004). Provisional application
// filed March 8, 1999. Asignee: The University of British Columbia. For
// further details, contact David Lowe (lowe@cs.ubc.ca) or the
// University-Industry Liaison Office of the University of British
// Columbia.
// Note that restrictions imposed by this patent (and possibly others)
// exist independently of and may be in conflict with the freedoms granted
// in this license, which refers to copyright of the program, not patents
// for any methods that it implements. Both copyright and patent law must
// be obeyed to legally use and redistribute this program and it is not the
// purpose of this license to induce you to infringe any patents or other
// property right claims or to contest validity of any such claims. If you
// redistribute or use the program, then this license merely protects you
// from committing copyright infringement. It does not protect you from
// committing patent infringement. So, before you do anything with this
// program, make sure that you have permission to do so not merely in terms
// of copyright, but also in terms of patent law.
// Please note that this license is not to be understood as a guarantee
// either. If you use the program according to this license, but in
// conflict with patent law, it does not mean that the licensor will refund
// you for any losses that you incur if you are sued for your patent
// infringement.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright and
// patent notices, this list of conditions and the following
// disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Oregon State University nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\**********************************************************************************************/
#include "precomp.hpp"
#include <opencv2/core/hal/hal.hpp>
#include <opencv2/core/utils/tls.hpp>
#include "sift.simd.hpp"
#include "sift.simd_declarations.hpp" // defines CV_CPU_DISPATCH_MODES_ALL=AVX2,...,BASELINE based on CMakeLists.txt content
namespace cv {
/*!
SIFT implementation.
The class implements SIFT algorithm by D. Lowe.
*/
class SIFT_Impl : public SIFT
{
public:
explicit SIFT_Impl( int nfeatures = 0, int nOctaveLayers = 3,
double contrastThreshold = 0.04, double edgeThreshold = 10,
double sigma = 1.6, int descriptorType = CV_32F );
//! returns the descriptor size in floats (128)
int descriptorSize() const CV_OVERRIDE;
//! returns the descriptor type
int descriptorType() const CV_OVERRIDE;
//! returns the default norm type
int defaultNorm() const CV_OVERRIDE;
//! finds the keypoints and computes descriptors for them using SIFT algorithm.
//! Optionally it can compute descriptors for the user-provided keypoints
void detectAndCompute(InputArray img, InputArray mask,
std::vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints = false) CV_OVERRIDE;
void buildGaussianPyramid( const Mat& base, std::vector<Mat>& pyr, int nOctaves ) const;
void buildDoGPyramid( const std::vector<Mat>& pyr, std::vector<Mat>& dogpyr ) const;
void findScaleSpaceExtrema( const std::vector<Mat>& gauss_pyr, const std::vector<Mat>& dog_pyr,
std::vector<KeyPoint>& keypoints ) const;
protected:
CV_PROP_RW int nfeatures;
CV_PROP_RW int nOctaveLayers;
CV_PROP_RW double contrastThreshold;
CV_PROP_RW double edgeThreshold;
CV_PROP_RW double sigma;
CV_PROP_RW int descriptor_type;
};
Ptr<SIFT> SIFT::create( int _nfeatures, int _nOctaveLayers,
double _contrastThreshold, double _edgeThreshold, double _sigma )
{
CV_TRACE_FUNCTION();
return makePtr<SIFT_Impl>(_nfeatures, _nOctaveLayers, _contrastThreshold, _edgeThreshold, _sigma, CV_32F);
}
Ptr<SIFT> SIFT::create( int _nfeatures, int _nOctaveLayers,
double _contrastThreshold, double _edgeThreshold, double _sigma, int _descriptorType )
{
CV_TRACE_FUNCTION();
// SIFT descriptor supports 32-bit floating point and 8-bit unsigned int.
CV_Assert(_descriptorType == CV_32F || _descriptorType == CV_8U);
return makePtr<SIFT_Impl>(_nfeatures, _nOctaveLayers, _contrastThreshold, _edgeThreshold, _sigma, _descriptorType);
}
String SIFT::getDefaultName() const
{
return (Feature2D::getDefaultName() + ".SIFT");
}
static inline void
unpackOctave(const KeyPoint& kpt, int& octave, int& layer, float& scale)
{
octave = kpt.octave & 255;
layer = (kpt.octave >> 8) & 255;
octave = octave < 128 ? octave : (-128 | octave);
scale = octave >= 0 ? 1.f/(1 << octave) : (float)(1 << -octave);
}
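// Example (worked sketch): kpt.octave packs the octave in the low byte and the
// layer in the next byte. For octave -1, layer 2:
//   kpt.octave = (-1 & 255) | (2 << 8);
// unpackOctave() then recovers octave = -1, layer = 2 and scale = 2.f.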
static Mat createInitialImage( const Mat& img, bool doubleImageSize, float sigma )
{
CV_TRACE_FUNCTION();
Mat gray, gray_fpt;
if( img.channels() == 3 || img.channels() == 4 )
{
cvtColor(img, gray, COLOR_BGR2GRAY);
gray.convertTo(gray_fpt, DataType<sift_wt>::type, SIFT_FIXPT_SCALE, 0);
}
else
img.convertTo(gray_fpt, DataType<sift_wt>::type, SIFT_FIXPT_SCALE, 0);
float sig_diff;
if( doubleImageSize )
{
sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA * 4, 0.01f) );
Mat dbl;
#if DoG_TYPE_SHORT
resize(gray_fpt, dbl, Size(gray_fpt.cols*2, gray_fpt.rows*2), 0, 0, INTER_LINEAR_EXACT);
#else
resize(gray_fpt, dbl, Size(gray_fpt.cols*2, gray_fpt.rows*2), 0, 0, INTER_LINEAR);
#endif
Mat result;
GaussianBlur(dbl, result, Size(), sig_diff, sig_diff);
return result;
}
else
{
sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA, 0.01f) );
Mat result;
GaussianBlur(gray_fpt, result, Size(), sig_diff, sig_diff);
return result;
}
}
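// Note: sig_diff follows sigma^2 = sig_diff^2 + sigma_init^2, where the input
// is assumed to already carry SIFT_INIT_SIGMA of blur (doubled in effect when
// the image is upscaled 2x, hence the factor 4 above).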
void SIFT_Impl::buildGaussianPyramid( const Mat& base, std::vector<Mat>& pyr, int nOctaves ) const
{
CV_TRACE_FUNCTION();
std::vector<double> sig(nOctaveLayers + 3);
pyr.resize(nOctaves*(nOctaveLayers + 3));
// precompute Gaussian sigmas using the following formula:
// \sigma_{total}^2 = \sigma_{i}^2 + \sigma_{i-1}^2
sig[0] = sigma;
double k = std::pow( 2., 1. / nOctaveLayers );
for( int i = 1; i < nOctaveLayers + 3; i++ )
{
double sig_prev = std::pow(k, (double)(i-1))*sigma;
double sig_total = sig_prev*k;
sig[i] = std::sqrt(sig_total*sig_total - sig_prev*sig_prev);
}
for( int o = 0; o < nOctaves; o++ )
{
for( int i = 0; i < nOctaveLayers + 3; i++ )
{
Mat& dst = pyr[o*(nOctaveLayers + 3) + i];
if( o == 0 && i == 0 )
dst = base;
// base of new octave is halved image from end of previous octave
else if( i == 0 )
{
const Mat& src = pyr[(o-1)*(nOctaveLayers + 3) + nOctaveLayers];
resize(src, dst, Size(src.cols/2, src.rows/2),
0, 0, INTER_NEAREST);
}
else
{
const Mat& src = pyr[o*(nOctaveLayers + 3) + i-1];
GaussianBlur(src, dst, Size(), sig[i], sig[i]);
}
}
}
}
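// Example (sketch): the Gaussian image for octave o, layer i is found at
//   pyr[o * (nOctaveLayers + 3) + i];
// each octave stores nOctaveLayers + 3 images so that the DoG pyramid can
// provide nOctaveLayers + 2 differences per octave.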
class buildDoGPyramidComputer : public ParallelLoopBody
{
public:
buildDoGPyramidComputer(
int _nOctaveLayers,
const std::vector<Mat>& _gpyr,
std::vector<Mat>& _dogpyr)
: nOctaveLayers(_nOctaveLayers),
gpyr(_gpyr),
dogpyr(_dogpyr) { }
void operator()( const cv::Range& range ) const CV_OVERRIDE
{
CV_TRACE_FUNCTION();
const int begin = range.start;
const int end = range.end;
for( int a = begin; a < end; a++ )
{
const int o = a / (nOctaveLayers + 2);
const int i = a % (nOctaveLayers + 2);
const Mat& src1 = gpyr[o*(nOctaveLayers + 3) + i];
const Mat& src2 = gpyr[o*(nOctaveLayers + 3) + i + 1];
Mat& dst = dogpyr[o*(nOctaveLayers + 2) + i];
subtract(src2, src1, dst, noArray(), DataType<sift_wt>::type);
}
}
private:
int nOctaveLayers;
const std::vector<Mat>& gpyr;
std::vector<Mat>& dogpyr;
};
void SIFT_Impl::buildDoGPyramid( const std::vector<Mat>& gpyr, std::vector<Mat>& dogpyr ) const
{
CV_TRACE_FUNCTION();
int nOctaves = (int)gpyr.size()/(nOctaveLayers + 3);
dogpyr.resize( nOctaves*(nOctaveLayers + 2) );
parallel_for_(Range(0, nOctaves * (nOctaveLayers + 2)), buildDoGPyramidComputer(nOctaveLayers, gpyr, dogpyr));
}
class findScaleSpaceExtremaComputer : public ParallelLoopBody
{
public:
findScaleSpaceExtremaComputer(
int _o,
int _i,
int _threshold,
int _idx,
int _step,
int _cols,
int _nOctaveLayers,
double _contrastThreshold,
double _edgeThreshold,
double _sigma,
const std::vector<Mat>& _gauss_pyr,
const std::vector<Mat>& _dog_pyr,
TLSData<std::vector<KeyPoint> > &_tls_kpts_struct)
: o(_o),
i(_i),
threshold(_threshold),
idx(_idx),
step(_step),
cols(_cols),
nOctaveLayers(_nOctaveLayers),
contrastThreshold(_contrastThreshold),
edgeThreshold(_edgeThreshold),
sigma(_sigma),
gauss_pyr(_gauss_pyr),
dog_pyr(_dog_pyr),
tls_kpts_struct(_tls_kpts_struct) { }
void operator()( const cv::Range& range ) const CV_OVERRIDE
{
CV_TRACE_FUNCTION();
std::vector<KeyPoint>& kpts = tls_kpts_struct.getRef();
CV_CPU_DISPATCH(findScaleSpaceExtrema, (o, i, threshold, idx, step, cols, nOctaveLayers, contrastThreshold, edgeThreshold, sigma, gauss_pyr, dog_pyr, kpts, range),
CV_CPU_DISPATCH_MODES_ALL);
}
private:
int o, i;
int threshold;
int idx, step, cols;
int nOctaveLayers;
double contrastThreshold;
double edgeThreshold;
double sigma;
const std::vector<Mat>& gauss_pyr;
const std::vector<Mat>& dog_pyr;
TLSData<std::vector<KeyPoint> > &tls_kpts_struct;
};
//
// Detects features at extrema in DoG scale space. Bad features are discarded
// based on contrast and ratio of principal curvatures.
void SIFT_Impl::findScaleSpaceExtrema( const std::vector<Mat>& gauss_pyr, const std::vector<Mat>& dog_pyr,
std::vector<KeyPoint>& keypoints ) const
{
CV_TRACE_FUNCTION();
const int nOctaves = (int)gauss_pyr.size()/(nOctaveLayers + 3);
const int threshold = cvFloor(0.5 * contrastThreshold / nOctaveLayers * 255 * SIFT_FIXPT_SCALE);
keypoints.clear();
TLSDataAccumulator<std::vector<KeyPoint> > tls_kpts_struct;
for( int o = 0; o < nOctaves; o++ )
for( int i = 1; i <= nOctaveLayers; i++ )
{
const int idx = o*(nOctaveLayers+2)+i;
const Mat& img = dog_pyr[idx];
const int step = (int)img.step1();
const int rows = img.rows, cols = img.cols;
parallel_for_(Range(SIFT_IMG_BORDER, rows-SIFT_IMG_BORDER),
findScaleSpaceExtremaComputer(
o, i, threshold, idx, step, cols,
nOctaveLayers,
contrastThreshold,
edgeThreshold,
sigma,
gauss_pyr, dog_pyr, tls_kpts_struct));
}
std::vector<std::vector<KeyPoint>*> kpt_vecs;
tls_kpts_struct.gather(kpt_vecs);
for (size_t i = 0; i < kpt_vecs.size(); ++i) {
keypoints.insert(keypoints.end(), kpt_vecs[i]->begin(), kpt_vecs[i]->end());
}
}
static
void calcSIFTDescriptor(
const Mat& img, Point2f ptf, float ori, float scl,
int d, int n, Mat& dst, int row
)
{
CV_TRACE_FUNCTION();
CV_CPU_DISPATCH(calcSIFTDescriptor, (img, ptf, ori, scl, d, n, dst, row),
CV_CPU_DISPATCH_MODES_ALL);
}
class calcDescriptorsComputer : public ParallelLoopBody
{
public:
calcDescriptorsComputer(const std::vector<Mat>& _gpyr,
const std::vector<KeyPoint>& _keypoints,
Mat& _descriptors,
int _nOctaveLayers,
int _firstOctave)
: gpyr(_gpyr),
keypoints(_keypoints),
descriptors(_descriptors),
nOctaveLayers(_nOctaveLayers),
firstOctave(_firstOctave) { }
void operator()( const cv::Range& range ) const CV_OVERRIDE
{
CV_TRACE_FUNCTION();
const int begin = range.start;
const int end = range.end;
static const int d = SIFT_DESCR_WIDTH, n = SIFT_DESCR_HIST_BINS;
for ( int i = begin; i<end; i++ )
{
KeyPoint kpt = keypoints[i];
int octave, layer;
float scale;
unpackOctave(kpt, octave, layer, scale);
CV_Assert(octave >= firstOctave && layer <= nOctaveLayers+2);
float size=kpt.size*scale;
Point2f ptf(kpt.pt.x*scale, kpt.pt.y*scale);
const Mat& img = gpyr[(octave - firstOctave)*(nOctaveLayers + 3) + layer];
float angle = 360.f - kpt.angle;
if(std::abs(angle - 360.f) < FLT_EPSILON)
angle = 0.f;
calcSIFTDescriptor(img, ptf, angle, size*0.5f, d, n, descriptors, i);
}
}
private:
const std::vector<Mat>& gpyr;
const std::vector<KeyPoint>& keypoints;
Mat& descriptors;
int nOctaveLayers;
int firstOctave;
};
static void calcDescriptors(const std::vector<Mat>& gpyr, const std::vector<KeyPoint>& keypoints,
Mat& descriptors, int nOctaveLayers, int firstOctave )
{
CV_TRACE_FUNCTION();
parallel_for_(Range(0, static_cast<int>(keypoints.size())), calcDescriptorsComputer(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave));
}
//////////////////////////////////////////////////////////////////////////////////////////
SIFT_Impl::SIFT_Impl( int _nfeatures, int _nOctaveLayers,
double _contrastThreshold, double _edgeThreshold, double _sigma, int _descriptorType )
: nfeatures(_nfeatures), nOctaveLayers(_nOctaveLayers),
contrastThreshold(_contrastThreshold), edgeThreshold(_edgeThreshold), sigma(_sigma), descriptor_type(_descriptorType)
{
}
int SIFT_Impl::descriptorSize() const
{
return SIFT_DESCR_WIDTH*SIFT_DESCR_WIDTH*SIFT_DESCR_HIST_BINS;
}
int SIFT_Impl::descriptorType() const
{
return descriptor_type;
}
int SIFT_Impl::defaultNorm() const
{
return NORM_L2;
}
void SIFT_Impl::detectAndCompute(InputArray _image, InputArray _mask,
std::vector<KeyPoint>& keypoints,
OutputArray _descriptors,
bool useProvidedKeypoints)
{
CV_TRACE_FUNCTION();
int firstOctave = -1, actualNOctaves = 0, actualNLayers = 0;
Mat image = _image.getMat(), mask = _mask.getMat();
if( image.empty() || image.depth() != CV_8U )
CV_Error( Error::StsBadArg, "image is empty or has incorrect depth (!=CV_8U)" );
if( !mask.empty() && mask.type() != CV_8UC1 )
CV_Error( Error::StsBadArg, "mask has incorrect type (!=CV_8UC1)" );
if( useProvidedKeypoints )
{
firstOctave = 0;
int maxOctave = INT_MIN;
for( size_t i = 0; i < keypoints.size(); i++ )
{
int octave, layer;
float scale;
unpackOctave(keypoints[i], octave, layer, scale);
firstOctave = std::min(firstOctave, octave);
maxOctave = std::max(maxOctave, octave);
actualNLayers = std::max(actualNLayers, layer-2);
}
firstOctave = std::min(firstOctave, 0);
CV_Assert( firstOctave >= -1 && actualNLayers <= nOctaveLayers );
actualNOctaves = maxOctave - firstOctave + 1;
}
Mat base = createInitialImage(image, firstOctave < 0, (float)sigma);
std::vector<Mat> gpyr;
int nOctaves = actualNOctaves > 0 ? actualNOctaves : cvRound(std::log( (double)std::min( base.cols, base.rows ) ) / std::log(2.) - 2) - firstOctave;
//double t, tf = getTickFrequency();
//t = (double)getTickCount();
buildGaussianPyramid(base, gpyr, nOctaves);
//t = (double)getTickCount() - t;
//printf("pyramid construction time: %g\n", t*1000./tf);
if( !useProvidedKeypoints )
{
std::vector<Mat> dogpyr;
buildDoGPyramid(gpyr, dogpyr);
//t = (double)getTickCount();
findScaleSpaceExtrema(gpyr, dogpyr, keypoints);
KeyPointsFilter::removeDuplicatedSorted( keypoints );
if( nfeatures > 0 )
KeyPointsFilter::retainBest(keypoints, nfeatures);
//t = (double)getTickCount() - t;
//printf("keypoint detection time: %g\n", t*1000./tf);
if( firstOctave < 0 )
for( size_t i = 0; i < keypoints.size(); i++ )
{
KeyPoint& kpt = keypoints[i];
float scale = 1.f/(float)(1 << -firstOctave);
kpt.octave = (kpt.octave & ~255) | ((kpt.octave + firstOctave) & 255);
kpt.pt *= scale;
kpt.size *= scale;
}
if( !mask.empty() )
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
else
{
// filter keypoints by mask
//KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
if( _descriptors.needed() )
{
//t = (double)getTickCount();
int dsize = descriptorSize();
_descriptors.create((int)keypoints.size(), dsize, descriptor_type);
Mat descriptors = _descriptors.getMat();
calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave);
//t = (double)getTickCount() - t;
//printf("descriptor extraction time: %g\n", t*1000./tf);
}
}
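#if 0
// Minimal usage sketch (added for illustration, excluded from compilation):
// detect SIFT keypoints and compute descriptors on two images, then match
// them with the norm advertised by defaultNorm(). The image arguments are
// hypothetical placeholders supplied by the caller.
static void siftUsageSketch(const Mat& img1, const Mat& img2)
{
    Ptr<SIFT> sift = SIFT::create();
    std::vector<KeyPoint> kp1, kp2;
    Mat desc1, desc2;
    sift->detectAndCompute(img1, noArray(), kp1, desc1);
    sift->detectAndCompute(img2, noArray(), kp2, desc2);
    BFMatcher matcher(sift->defaultNorm()); // NORM_L2 for SIFT
    std::vector<DMatch> matches;
    matcher.match(desc1, desc2, matches);
}
#endif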
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,213 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Niko Li, newlife20080214@gmail.com
// Jia Haipeng, jiahaipeng95@gmail.com
// Zero Lin, Zero.Lin@amd.com
// Zhang Ying, zhangying913@gmail.com
// Yao Wang, bitwangyaoyao@gmail.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../test_precomp.hpp"
#include "cvconfig.h"
#include "opencv2/ts/ocl_test.hpp"
#ifdef HAVE_OPENCL
namespace opencv_test {
namespace ocl {
PARAM_TEST_CASE(BruteForceMatcher, int, int)
{
int distType;
int dim;
int queryDescCount;
int countFactor;
Mat query, train;
UMat uquery, utrain;
virtual void SetUp()
{
distType = GET_PARAM(0);
dim = GET_PARAM(1);
queryDescCount = 300; // must be an even number because in some cases we split the train data in two
countFactor = 4; // do not change it
cv::RNG& rng = cv::theRNG(); // rng was otherwise undeclared in this fixture
cv::Mat queryBuf, trainBuf;
// Generate query descriptors randomly.
// Descriptor vector elements are integer values.
queryBuf.create(queryDescCount, dim, CV_32SC1);
rng.fill(queryBuf, cv::RNG::UNIFORM, cv::Scalar::all(0), cv::Scalar::all(3));
queryBuf.convertTo(queryBuf, CV_32FC1);
// Generate train descriptors as follows:
// copy each query descriptor to the train set countFactor times, and
// perturb one element of each copy by an amount that grows with the
// copy index. The overall bounds of the perturbation are (0.f, 1.f).
trainBuf.create(queryDescCount * countFactor, dim, CV_32FC1);
float step = 1.f / countFactor;
for (int qIdx = 0; qIdx < queryDescCount; qIdx++)
{
cv::Mat queryDescriptor = queryBuf.row(qIdx);
for (int c = 0; c < countFactor; c++)
{
int tIdx = qIdx * countFactor + c;
cv::Mat trainDescriptor = trainBuf.row(tIdx);
queryDescriptor.copyTo(trainDescriptor);
int elem = rng(dim);
float diff = rng.uniform(step * c, step * (c + 1));
trainDescriptor.at<float>(0, elem) += diff;
}
}
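// Added note: the c-th copy of query q differs from q in exactly one element,
// by an amount drawn from (step*c, step*(c+1)), so under both L1 and L2 the
// single nearest train descriptor of query i is i*countFactor; the match
// tests below rely on this ordering.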
queryBuf.convertTo(query, CV_32F);
trainBuf.convertTo(train, CV_32F);
query.copyTo(uquery);
train.copyTo(utrain);
}
};
#ifdef __ANDROID__
OCL_TEST_P(BruteForceMatcher, DISABLED_Match_Single)
#else
OCL_TEST_P(BruteForceMatcher, Match_Single)
#endif
{
BFMatcher matcher(distType);
std::vector<cv::DMatch> matches;
matcher.match(uquery, utrain, matches);
ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());
int badCount = 0;
for (size_t i = 0; i < matches.size(); i++)
{
cv::DMatch match = matches[i];
if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor) || (match.imgIdx != 0))
badCount++;
}
ASSERT_EQ(0, badCount);
}
#ifdef __ANDROID__
OCL_TEST_P(BruteForceMatcher, DISABLED_KnnMatch_2_Single)
#else
OCL_TEST_P(BruteForceMatcher, KnnMatch_2_Single)
#endif
{
const int knn = 2;
BFMatcher matcher(distType);
std::vector< std::vector<cv::DMatch> > matches;
matcher.knnMatch(uquery, utrain, matches, knn);
ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());
int badCount = 0;
for (size_t i = 0; i < matches.size(); i++)
{
if ((int)matches[i].size() != knn)
badCount++;
else
{
int localBadCount = 0;
for (int k = 0; k < knn; k++)
{
cv::DMatch match = matches[i][k];
if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor + k) || (match.imgIdx != 0))
localBadCount++;
}
badCount += localBadCount > 0 ? 1 : 0;
}
}
ASSERT_EQ(0, badCount);
}
#ifdef __ANDROID__
OCL_TEST_P(BruteForceMatcher, DISABLED_RadiusMatch_Single)
#else
OCL_TEST_P(BruteForceMatcher, RadiusMatch_Single)
#endif
{
float radius = 1.f / countFactor;
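// Added note: radius equals step, so for query i only the c = 0 copy
// i*countFactor (whose perturbation lies in (0, step)) falls inside the
// search radius; every other copy is at least step away.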
BFMatcher matcher(distType);
std::vector< std::vector<cv::DMatch> > matches;
matcher.radiusMatch(uquery, utrain, matches, radius);
ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());
int badCount = 0;
for (size_t i = 0; i < matches.size(); i++)
{
if ((int)matches[i].size() != 1)
{
badCount++;
}
else
{
cv::DMatch match = matches[i][0];
if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor) || (match.imgIdx != 0))
badCount++;
}
}
ASSERT_EQ(0, badCount);
}
OCL_INSTANTIATE_TEST_CASE_P(Matcher, BruteForceMatcher, Combine( Values((int)NORM_L1, (int)NORM_L2),
Values(57, 64, 83, 128, 179, 256, 304) ) );
}//ocl
}//cvtest
#endif //HAVE_OPENCL

View File

@@ -0,0 +1,72 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "../test_precomp.hpp"
#include "cvconfig.h"
#include "opencv2/ts/ocl_test.hpp"
#ifdef HAVE_OPENCL
namespace opencv_test {
namespace ocl {
#define TEST_IMAGES testing::Values(\
"detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\
"../stitching/a3.png", \
"../stitching/s2.jpg")
PARAM_TEST_CASE(Feature2DFixture, Ptr<Feature2D>, std::string)
{
std::string filename;
Mat image, descriptors;
vector<KeyPoint> keypoints;
UMat uimage, udescriptors;
vector<KeyPoint> ukeypoints;
Ptr<Feature2D> feature;
virtual void SetUp()
{
feature = GET_PARAM(0);
filename = GET_PARAM(1);
image = readImage(filename);
ASSERT_FALSE(image.empty());
image.copyTo(uimage);
OCL_OFF(feature->detect(image, keypoints));
OCL_ON(feature->detect(uimage, ukeypoints));
// note: we use keypoints from CPU for GPU too, to test descriptors separately
OCL_OFF(feature->compute(image, keypoints, descriptors));
OCL_ON(feature->compute(uimage, keypoints, udescriptors));
}
};
OCL_TEST_P(Feature2DFixture, KeypointsSame)
{
EXPECT_EQ(keypoints.size(), ukeypoints.size());
for (size_t i = 0; i < keypoints.size(); ++i)
{
EXPECT_GE(KeyPoint::overlap(keypoints[i], ukeypoints[i]), 0.95);
EXPECT_NEAR(keypoints[i].angle, ukeypoints[i].angle, 0.05);
}
}
OCL_TEST_P(Feature2DFixture, DescriptorsSame)
{
EXPECT_MAT_NEAR(descriptors, udescriptors, 0.001);
}
OCL_INSTANTIATE_TEST_CASE_P(AKAZE, Feature2DFixture,
testing::Combine(testing::Values(AKAZE::create()), TEST_IMAGES));
OCL_INSTANTIATE_TEST_CASE_P(AKAZE_DESCRIPTOR_KAZE, Feature2DFixture,
testing::Combine(testing::Values(AKAZE::create(AKAZE::DESCRIPTOR_KAZE)), TEST_IMAGES));
}//ocl
}//cvtest
#endif //HAVE_OPENCL

View File

@@ -0,0 +1,185 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "test_precomp.hpp"
// #define GENERATE_DATA // generate data in debug mode
namespace opencv_test { namespace {
#ifndef GENERATE_DATA
static bool isSimilarKeypoints( const KeyPoint& p1, const KeyPoint& p2 )
{
const float maxPtDif = 1.f;
const float maxSizeDif = 1.f;
const float maxAngleDif = 2.f;
const float maxResponseDif = 0.1f;
float dist = (float)cv::norm( p1.pt - p2.pt );
return (dist < maxPtDif &&
fabs(p1.size - p2.size) < maxSizeDif &&
std::abs(p1.angle - p2.angle) < maxAngleDif &&
std::abs(p1.response - p2.response) < maxResponseDif &&
(p1.octave & 0xffff) == (p2.octave & 0xffff) // do not care about sublayers and class_id
);
}
#endif
TEST(Features2d_AFFINE_FEATURE, regression)
{
Mat image = imread(cvtest::findDataFile("features2d/tsukuba.png"));
string xml = cvtest::TS::ptr()->get_data_path() + "asift/regression_cpp.xml.gz";
ASSERT_FALSE(image.empty());
Mat gray;
cvtColor(image, gray, COLOR_BGR2GRAY);
// The default ASIFT settings generate a very large set of descriptors, so this test uses a small maxTilt to keep the test data small.
Ptr<AffineFeature> ext = AffineFeature::create(SIFT::create(), 2, 0, 1.4142135623730951f, 144.0f);
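// Added note: assuming the AffineFeature::create signature (backend, maxTilt,
// minTilt, tiltStep, rotateStepBase), this simulates tilts only up to 2 with
// tiltStep sqrt(2) and a rotation step base of 144 degrees, i.e. far fewer
// synthesized views than the defaults.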
Mat mpt, msize, mangle, mresponse, moctave, mclass_id;
#ifdef GENERATE_DATA
// calculate
vector<KeyPoint> calcKeypoints;
Mat calcDescriptors;
ext->detectAndCompute(gray, Mat(), calcKeypoints, calcDescriptors, false);
// create keypoints XML
FileStorage fs(xml, FileStorage::WRITE);
ASSERT_TRUE(fs.isOpened()) << xml;
std::cout << "Creating keypoints XML..." << std::endl;
mpt = Mat((int)calcKeypoints.size(), 2, CV_32F);
msize = Mat((int)calcKeypoints.size(), 1, CV_32F);
mangle = Mat((int)calcKeypoints.size(), 1, CV_32F);
mresponse = Mat((int)calcKeypoints.size(), 1, CV_32F);
moctave = Mat((int)calcKeypoints.size(), 1, CV_32S);
mclass_id = Mat((int)calcKeypoints.size(), 1, CV_32S);
for( size_t i = 0; i < calcKeypoints.size(); i++ )
{
const KeyPoint& key = calcKeypoints[i];
mpt.at<float>(i, 0) = key.pt.x;
mpt.at<float>(i, 1) = key.pt.y;
msize.at<float>(i, 0) = key.size;
mangle.at<float>(i, 0) = key.angle;
mresponse.at<float>(i, 0) = key.response;
moctave.at<int>(i, 0) = key.octave;
mclass_id.at<int>(i, 0) = key.class_id;
}
fs << "keypoints_pt" << mpt;
fs << "keypoints_size" << msize;
fs << "keypoints_angle" << mangle;
fs << "keypoints_response" << mresponse;
fs << "keypoints_octave" << moctave;
fs << "keypoints_class_id" << mclass_id;
// create descriptor XML
fs << "descriptors" << calcDescriptors;
fs.release();
#else
const float badCountsRatio = 0.01f;
const float badDescriptorDist = 1.0f;
const float maxBadKeypointsRatio = 0.15f;
const float maxBadDescriptorRatio = 0.15f;
// read keypoints
vector<KeyPoint> validKeypoints;
Mat validDescriptors;
FileStorage fs(xml, FileStorage::READ);
ASSERT_TRUE(fs.isOpened()) << xml;
fs["keypoints_pt"] >> mpt;
ASSERT_EQ(mpt.type(), CV_32F);
fs["keypoints_size"] >> msize;
ASSERT_EQ(msize.type(), CV_32F);
fs["keypoints_angle"] >> mangle;
ASSERT_EQ(mangle.type(), CV_32F);
fs["keypoints_response"] >> mresponse;
ASSERT_EQ(mresponse.type(), CV_32F);
fs["keypoints_octave"] >> moctave;
ASSERT_EQ(moctave.type(), CV_32S);
fs["keypoints_class_id"] >> mclass_id;
ASSERT_EQ(mclass_id.type(), CV_32S);
validKeypoints.resize(mpt.rows);
for( int i = 0; i < (int)validKeypoints.size(); i++ )
{
validKeypoints[i].pt.x = mpt.at<float>(i, 0);
validKeypoints[i].pt.y = mpt.at<float>(i, 1);
validKeypoints[i].size = msize.at<float>(i, 0);
validKeypoints[i].angle = mangle.at<float>(i, 0);
validKeypoints[i].response = mresponse.at<float>(i, 0);
validKeypoints[i].octave = moctave.at<int>(i, 0);
validKeypoints[i].class_id = mclass_id.at<int>(i, 0);
}
// read descriptors
fs["descriptors"] >> validDescriptors;
fs.release();
// calc and compare keypoints
vector<KeyPoint> calcKeypoints;
ext->detectAndCompute(gray, Mat(), calcKeypoints, noArray(), false);
float countRatio = (float)validKeypoints.size() / (float)calcKeypoints.size();
ASSERT_LT(countRatio, 1 + badCountsRatio) << "Bad keypoints count ratio.";
ASSERT_GT(countRatio, 1 - badCountsRatio) << "Bad keypoints count ratio.";
int badPointCount = 0, commonPointCount = max((int)validKeypoints.size(), (int)calcKeypoints.size());
for( size_t v = 0; v < validKeypoints.size(); v++ )
{
int nearestIdx = -1;
float minDist = std::numeric_limits<float>::max();
float angleDistOfNearest = std::numeric_limits<float>::max();
for( size_t c = 0; c < calcKeypoints.size(); c++ )
{
if( validKeypoints[v].class_id != calcKeypoints[c].class_id )
continue;
float curDist = (float)cv::norm( calcKeypoints[c].pt - validKeypoints[v].pt );
if( curDist < minDist )
{
minDist = curDist;
nearestIdx = (int)c;
angleDistOfNearest = std::abs( calcKeypoints[c].angle - validKeypoints[v].angle );
}
else if( curDist == minDist ) // keypoints whose positions coincide but whose angles differ
{
float angleDist = std::abs( calcKeypoints[c].angle - validKeypoints[v].angle );
if( angleDist < angleDistOfNearest )
{
nearestIdx = (int)c;
angleDistOfNearest = angleDist;
}
}
}
if( nearestIdx == -1 || !isSimilarKeypoints( validKeypoints[v], calcKeypoints[nearestIdx] ) )
badPointCount++;
}
float badKeypointsRatio = (float)badPointCount / (float)commonPointCount;
std::cout << "badKeypointsRatio: " << badKeypointsRatio << std::endl;
ASSERT_LT( badKeypointsRatio , maxBadKeypointsRatio ) << "Bad accuracy!";
// Calc and compare descriptors. This uses validKeypoints for extraction.
Mat calcDescriptors;
ext->detectAndCompute(gray, Mat(), validKeypoints, calcDescriptors, true);
int dim = validDescriptors.cols;
int badDescriptorCount = 0;
L1<float> distance;
for( int i = 0; i < (int)validKeypoints.size(); i++ )
{
float dist = distance( validDescriptors.ptr<float>(i), calcDescriptors.ptr<float>(i), dim );
if( dist > badDescriptorDist )
badDescriptorCount++;
}
float badDescriptorRatio = (float)badDescriptorCount / (float)validKeypoints.size();
std::cout << "badDescriptorRatio: " << badDescriptorRatio << std::endl;
ASSERT_LT( badDescriptorRatio, maxBadDescriptorRatio ) << "Too many descriptors mismatched.";
#endif
}
}} // namespace

View File

@@ -0,0 +1,138 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
namespace opencv_test { namespace {
class CV_AgastTest : public cvtest::BaseTest
{
public:
CV_AgastTest();
~CV_AgastTest();
protected:
void run(int);
};
CV_AgastTest::CV_AgastTest() {}
CV_AgastTest::~CV_AgastTest() {}
void CV_AgastTest::run( int )
{
for(int type=0; type <= 2; ++type) {
Mat image1 = imread(string(ts->get_data_path()) + "inpaint/orig.png");
Mat image2 = imread(string(ts->get_data_path()) + "cameracalibration/chess9.png");
string xml = string(ts->get_data_path()) + format("agast/result%d.xml", type);
if (image1.empty() || image2.empty())
{
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
return;
}
Mat gray1, gray2;
cvtColor(image1, gray1, COLOR_BGR2GRAY);
cvtColor(image2, gray2, COLOR_BGR2GRAY);
vector<KeyPoint> keypoints1;
vector<KeyPoint> keypoints2;
AGAST(gray1, keypoints1, 30, true, static_cast<AgastFeatureDetector::DetectorType>(type));
AGAST(gray2, keypoints2, (type > 0 ? 30 : 20), true, static_cast<AgastFeatureDetector::DetectorType>(type));
for(size_t i = 0; i < keypoints1.size(); ++i)
{
const KeyPoint& kp = keypoints1[i];
cv::circle(image1, kp.pt, cvRound(kp.size/2), Scalar(255, 0, 0));
}
for(size_t i = 0; i < keypoints2.size(); ++i)
{
const KeyPoint& kp = keypoints2[i];
cv::circle(image2, kp.pt, cvRound(kp.size/2), Scalar(255, 0, 0));
}
Mat kps1(1, (int)(keypoints1.size() * sizeof(KeyPoint)), CV_8U, &keypoints1[0]);
Mat kps2(1, (int)(keypoints2.size() * sizeof(KeyPoint)), CV_8U, &keypoints2[0]);
FileStorage fs(xml, FileStorage::READ);
if (!fs.isOpened())
{
fs.open(xml, FileStorage::WRITE);
if (!fs.isOpened())
{
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return;
}
fs << "exp_kps1" << kps1;
fs << "exp_kps2" << kps2;
fs.release();
fs.open(xml, FileStorage::READ);
if (!fs.isOpened())
{
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return;
}
}
Mat exp_kps1, exp_kps2;
read( fs["exp_kps1"], exp_kps1, Mat() );
read( fs["exp_kps2"], exp_kps2, Mat() );
fs.release();
if ( exp_kps1.size != kps1.size || 0 != cvtest::norm(exp_kps1, kps1, NORM_L2) ||
exp_kps2.size != kps2.size || 0 != cvtest::norm(exp_kps2, kps2, NORM_L2))
{
ts->set_failed_test_info(cvtest::TS::FAIL_MISMATCH);
return;
}
/*cv::namedWindow("Img1"); cv::imshow("Img1", image1);
cv::namedWindow("Img2"); cv::imshow("Img2", image2);
cv::waitKey(0);*/
}
ts->set_failed_test_info(cvtest::TS::OK);
}
TEST(Features2d_AGAST, regression) { CV_AgastTest test; test.safe_run(); }
}} // namespace

View File

@@ -0,0 +1,48 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "test_precomp.hpp"
namespace opencv_test { namespace {
TEST(Features2d_AKAZE, detect_and_compute_split)
{
Mat testImg(100, 100, CV_8U);
RNG rng(101);
rng.fill(testImg, RNG::UNIFORM, Scalar(0), Scalar(255), true);
Ptr<Feature2D> ext = AKAZE::create(AKAZE::DESCRIPTOR_MLDB, 0, 3, 0.001f, 1, 1, KAZE::DIFF_PM_G2);
vector<KeyPoint> detAndCompKps;
Mat desc;
ext->detectAndCompute(testImg, noArray(), detAndCompKps, desc);
vector<KeyPoint> detKps;
ext->detect(testImg, detKps);
ASSERT_EQ(detKps.size(), detAndCompKps.size());
for(size_t i = 0; i < detKps.size(); i++)
ASSERT_EQ(detKps[i].hash(), detAndCompKps[i].hash());
}
/**
* This test is here to guard against the propagation of NaNs that occurs on this image. NaNs are
* guarded by debug asserts in AKAZE, which should fire for you if you are lucky.
*
* This test also reveals problems with uninitialized memory that happen only on this image.
* These are very hard to hit and depend a lot on the particular allocator. Run this test in valgrind
* and check for uninitialized values if you think you are hitting this problem again.
*/
TEST(Features2d_AKAZE, uninitialized_and_nans)
{
Mat b1 = imread(cvtest::TS::ptr()->get_data_path() + "../stitching/b1.png");
ASSERT_FALSE(b1.empty());
vector<KeyPoint> keypoints;
Mat desc;
Ptr<Feature2D> akaze = AKAZE::create();
akaze->detectAndCompute(b1, noArray(), keypoints, desc);
}
}} // namespace

View File

@@ -0,0 +1,22 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test { namespace {
TEST(Features2d_BlobDetector, bug_6667)
{
cv::Mat image = cv::Mat(cv::Size(100, 100), CV_8UC1, cv::Scalar(255, 255, 255));
cv::circle(image, Point(50, 50), 20, cv::Scalar(0), -1);
SimpleBlobDetector::Params params;
params.minThreshold = 250;
params.maxThreshold = 260;
params.minRepeatability = 1; // https://github.com/opencv/opencv/issues/6667
std::vector<KeyPoint> keypoints;
Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create(params);
detector->detect(image, keypoints);
ASSERT_NE((int) keypoints.size(), 0);
}
}} // namespace

View File

@@ -0,0 +1,108 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
namespace opencv_test { namespace {
class CV_BRISKTest : public cvtest::BaseTest
{
public:
CV_BRISKTest();
~CV_BRISKTest();
protected:
void run(int);
};
CV_BRISKTest::CV_BRISKTest() {}
CV_BRISKTest::~CV_BRISKTest() {}
void CV_BRISKTest::run( int )
{
Mat image1 = imread(string(ts->get_data_path()) + "inpaint/orig.png");
Mat image2 = imread(string(ts->get_data_path()) + "cameracalibration/chess9.png");
if (image1.empty() || image2.empty())
{
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
return;
}
Mat gray1, gray2;
cvtColor(image1, gray1, COLOR_BGR2GRAY);
cvtColor(image2, gray2, COLOR_BGR2GRAY);
Ptr<FeatureDetector> detector = BRISK::create();
// Check parameter get/set functions.
BRISK* detectorTyped = dynamic_cast<BRISK*>(detector.get());
ASSERT_NE(nullptr, detectorTyped);
detectorTyped->setOctaves(3);
detectorTyped->setThreshold(30);
ASSERT_EQ(detectorTyped->getOctaves(), 3);
ASSERT_EQ(detectorTyped->getThreshold(), 30);
detectorTyped->setOctaves(4);
detectorTyped->setThreshold(29);
ASSERT_EQ(detectorTyped->getOctaves(), 4);
ASSERT_EQ(detectorTyped->getThreshold(), 29);
vector<KeyPoint> keypoints1;
vector<KeyPoint> keypoints2;
detector->detect(image1, keypoints1);
detector->detect(image2, keypoints2);
for(size_t i = 0; i < keypoints1.size(); ++i)
{
const KeyPoint& kp = keypoints1[i];
ASSERT_NE(kp.angle, -1);
}
for(size_t i = 0; i < keypoints2.size(); ++i)
{
const KeyPoint& kp = keypoints2[i];
ASSERT_NE(kp.angle, -1);
}
}
TEST(Features2d_BRISK, regression) { CV_BRISKTest test; test.safe_run(); }
}} // namespace

View File

@@ -0,0 +1,48 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "test_precomp.hpp"
#include "test_invariance_utils.hpp"
#include "test_descriptors_invariance.impl.hpp"
namespace opencv_test { namespace {
const static std::string IMAGE_TSUKUBA = "features2d/tsukuba.png";
const static std::string IMAGE_BIKES = "detectors_descriptors_evaluation/images_datasets/bikes/img1.png";
#define Value(...) Values(make_tuple(__VA_ARGS__))
/*
* Descriptors' rotation invariance check
*/
INSTANTIATE_TEST_CASE_P(SIFT, DescriptorRotationInvariance,
Value(IMAGE_TSUKUBA, SIFT::create(), SIFT::create(), 0.98f));
INSTANTIATE_TEST_CASE_P(BRISK, DescriptorRotationInvariance,
Value(IMAGE_TSUKUBA, BRISK::create(), BRISK::create(), 0.99f));
INSTANTIATE_TEST_CASE_P(ORB, DescriptorRotationInvariance,
Value(IMAGE_TSUKUBA, ORB::create(), ORB::create(), 0.99f));
INSTANTIATE_TEST_CASE_P(AKAZE, DescriptorRotationInvariance,
Value(IMAGE_TSUKUBA, AKAZE::create(), AKAZE::create(), 0.99f));
INSTANTIATE_TEST_CASE_P(AKAZE_DESCRIPTOR_KAZE, DescriptorRotationInvariance,
Value(IMAGE_TSUKUBA, AKAZE::create(AKAZE::DESCRIPTOR_KAZE), AKAZE::create(AKAZE::DESCRIPTOR_KAZE), 0.99f));
/*
* Descriptors' scale invariance check
*/
INSTANTIATE_TEST_CASE_P(SIFT, DescriptorScaleInvariance,
Value(IMAGE_BIKES, SIFT::create(0, 3, 0.09), SIFT::create(0, 3, 0.09), 0.78f));
INSTANTIATE_TEST_CASE_P(AKAZE, DescriptorScaleInvariance,
Value(IMAGE_BIKES, AKAZE::create(), AKAZE::create(), 0.6f));
INSTANTIATE_TEST_CASE_P(AKAZE_DESCRIPTOR_KAZE, DescriptorScaleInvariance,
Value(IMAGE_BIKES, AKAZE::create(AKAZE::DESCRIPTOR_KAZE), AKAZE::create(AKAZE::DESCRIPTOR_KAZE), 0.55f));
}} // namespace

View File

@@ -0,0 +1,198 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "test_invariance_utils.hpp"
namespace opencv_test { namespace {
#define SHOW_DEBUG_LOG 1
typedef tuple<std::string, Ptr<FeatureDetector>, Ptr<DescriptorExtractor>, float>
String_FeatureDetector_DescriptorExtractor_Float_t;
static
void SetSuitableSIFTOctave(vector<KeyPoint>& keypoints,
int firstOctave = -1, int nOctaveLayers = 3, double sigma = 1.6)
{
for (size_t i = 0; i < keypoints.size(); i++ )
{
int octv, layer;
KeyPoint& kpt = keypoints[i];
double octv_layer = std::log(kpt.size / sigma) / std::log(2.) - 1;
octv = cvFloor(octv_layer);
layer = cvRound( (octv_layer - octv) * nOctaveLayers );
if (octv < firstOctave)
{
octv = firstOctave;
layer = 0;
}
kpt.octave = (layer << 8) | (octv & 255);
}
}
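// Added note: this inverts SIFT's size convention, size = sigma *
// 2^(octv + layer/nOctaveLayers + 1), so octv_layer = log2(size/sigma) - 1
// recovers octv + layer/nOctaveLayers, which is split back into an integer
// octave and a rounded layer above.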
static
void rotateKeyPoints(const vector<KeyPoint>& src, const Mat& H, float angle, vector<KeyPoint>& dst)
{
// assume that H is the rotation returned by rotateImage() and that angle is the value passed to rotateImage()
vector<Point2f> srcCenters, dstCenters;
KeyPoint::convert(src, srcCenters);
perspectiveTransform(srcCenters, dstCenters, H);
dst = src;
for(size_t i = 0; i < dst.size(); i++)
{
dst[i].pt = dstCenters[i];
float dstAngle = src[i].angle + angle;
if(dstAngle >= 360.f)
dstAngle -= 360.f;
dst[i].angle = dstAngle;
}
}
class DescriptorInvariance : public TestWithParam<String_FeatureDetector_DescriptorExtractor_Float_t>
{
protected:
virtual void SetUp() {
// Read test data
const std::string filename = cvtest::TS::ptr()->get_data_path() + get<0>(GetParam());
image0 = imread(filename);
ASSERT_FALSE(image0.empty()) << "couldn't read input image";
featureDetector = get<1>(GetParam());
descriptorExtractor = get<2>(GetParam());
minInliersRatio = get<3>(GetParam());
}
Ptr<FeatureDetector> featureDetector;
Ptr<DescriptorExtractor> descriptorExtractor;
float minInliersRatio;
Mat image0;
};
typedef DescriptorInvariance DescriptorScaleInvariance;
typedef DescriptorInvariance DescriptorRotationInvariance;
TEST_P(DescriptorRotationInvariance, rotation)
{
Mat image1, mask1;
const int borderSize = 16;
Mat mask0(image0.size(), CV_8UC1, Scalar(0));
mask0(Rect(borderSize, borderSize, mask0.cols - 2*borderSize, mask0.rows - 2*borderSize)).setTo(Scalar(255));
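// Added note: the 16-pixel border mask keeps detections away from the image
// edge, so the rotated counterparts of the keypoints still fall inside the
// rotated image and retain full measurement support.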
vector<KeyPoint> keypoints0;
Mat descriptors0;
featureDetector->detect(image0, keypoints0, mask0);
std::cout << "Keypoints: " << keypoints0.size() << std::endl;
EXPECT_GE(keypoints0.size(), 15u);
descriptorExtractor->compute(image0, keypoints0, descriptors0);
BFMatcher bfmatcher(descriptorExtractor->defaultNorm());
const float minIntersectRatio = 0.5f;
const int maxAngle = 360, angleStep = 15;
for(int angle = 0; angle < maxAngle; angle += angleStep)
{
Mat H = rotateImage(image0, mask0, static_cast<float>(angle), image1, mask1);
vector<KeyPoint> keypoints1;
rotateKeyPoints(keypoints0, H, static_cast<float>(angle), keypoints1);
Mat descriptors1;
descriptorExtractor->compute(image1, keypoints1, descriptors1);
vector<DMatch> descMatches;
bfmatcher.match(descriptors0, descriptors1, descMatches);
int descInliersCount = 0;
for(size_t m = 0; m < descMatches.size(); m++)
{
const KeyPoint& transformed_p0 = keypoints1[descMatches[m].queryIdx];
const KeyPoint& p1 = keypoints1[descMatches[m].trainIdx];
if(calcIntersectRatio(transformed_p0.pt, 0.5f * transformed_p0.size,
p1.pt, 0.5f * p1.size) >= minIntersectRatio)
{
descInliersCount++;
}
}
float descInliersRatio = static_cast<float>(descInliersCount) / keypoints0.size();
EXPECT_GE(descInliersRatio, minInliersRatio);
#if SHOW_DEBUG_LOG
std::cout
<< "angle = " << angle
<< ", inliers = " << descInliersCount
<< ", descInliersRatio = " << static_cast<float>(descInliersCount) / keypoints0.size()
<< std::endl;
#endif
}
}
TEST_P(DescriptorScaleInvariance, scale)
{
vector<KeyPoint> keypoints0;
featureDetector->detect(image0, keypoints0);
std::cout << "Keypoints: " << keypoints0.size() << std::endl;
EXPECT_GE(keypoints0.size(), 15u);
Mat descriptors0;
descriptorExtractor->compute(image0, keypoints0, descriptors0);
BFMatcher bfmatcher(descriptorExtractor->defaultNorm());
for(int scaleIdx = 1; scaleIdx <= 3; scaleIdx++)
{
float scale = 1.f + scaleIdx * 0.5f;
Mat image1;
resize(image0, image1, Size(), 1./scale, 1./scale, INTER_LINEAR_EXACT);
vector<KeyPoint> keypoints1;
scaleKeyPoints(keypoints0, keypoints1, 1.0f/scale);
if (featureDetector->getDefaultName() == "Feature2D.SIFT")
{
SetSuitableSIFTOctave(keypoints1);
}
Mat descriptors1;
descriptorExtractor->compute(image1, keypoints1, descriptors1);
vector<DMatch> descMatches;
bfmatcher.match(descriptors0, descriptors1, descMatches);
const float minIntersectRatio = 0.5f;
int descInliersCount = 0;
for(size_t m = 0; m < descMatches.size(); m++)
{
const KeyPoint& transformed_p0 = keypoints0[descMatches[m].queryIdx];
const KeyPoint& p1 = keypoints0[descMatches[m].trainIdx];
if(calcIntersectRatio(transformed_p0.pt, 0.5f * transformed_p0.size,
p1.pt, 0.5f * p1.size) >= minIntersectRatio)
{
descInliersCount++;
}
}
float descInliersRatio = static_cast<float>(descInliersCount) / keypoints0.size();
EXPECT_GE(descInliersRatio, minInliersRatio);
#if SHOW_DEBUG_LOG
std::cout
<< "scale = " << scale
<< ", inliers = " << descInliersCount
<< ", descInliersRatio = " << static_cast<float>(descInliersCount) / keypoints0.size()
<< std::endl;
#endif
}
}
#undef SHOW_DEBUG_LOG
}} // namespace
namespace std {
using namespace opencv_test;
static inline void PrintTo(const String_FeatureDetector_DescriptorExtractor_Float_t& v, std::ostream* os)
{
*os << "(\"" << get<0>(v)
<< "\", " << get<3>(v)
<< ")";
}
} // namespace

View File

@@ -0,0 +1,211 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "test_precomp.hpp"
namespace opencv_test { namespace {
const string FEATURES2D_DIR = "features2d";
const string IMAGE_FILENAME = "tsukuba.png";
const string DESCRIPTOR_DIR = FEATURES2D_DIR + "/descriptor_extractors";
}} // namespace
#include "test_descriptors_regression.impl.hpp"
namespace opencv_test { namespace {
/****************************************************************************************\
* Tests registrations *
\****************************************************************************************/
TEST( Features2d_DescriptorExtractor_SIFT, regression )
{
CV_DescriptorExtractorTest<L1<float> > test( "descriptor-sift", 1.0f,
SIFT::create() );
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_BRISK, regression )
{
CV_DescriptorExtractorTest<Hamming> test( "descriptor-brisk",
(CV_DescriptorExtractorTest<Hamming>::DistanceType)2.f,
BRISK::create() );
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_ORB, regression )
{
// TODO adjust the parameters below
CV_DescriptorExtractorTest<Hamming> test( "descriptor-orb",
#if CV_NEON
(CV_DescriptorExtractorTest<Hamming>::DistanceType)25.f,
#else
(CV_DescriptorExtractorTest<Hamming>::DistanceType)12.f,
#endif
ORB::create() );
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_KAZE, regression )
{
CV_DescriptorExtractorTest< L2<float> > test( "descriptor-kaze", 0.03f,
KAZE::create(),
L2<float>(), KAZE::create() );
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_AKAZE, regression )
{
CV_DescriptorExtractorTest<Hamming> test( "descriptor-akaze",
(CV_DescriptorExtractorTest<Hamming>::DistanceType)(486*0.05f),
AKAZE::create(),
Hamming(), AKAZE::create());
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_AKAZE_DESCRIPTOR_KAZE, regression )
{
CV_DescriptorExtractorTest< L2<float> > test( "descriptor-akaze-with-kaze-desc", 0.03f,
AKAZE::create(AKAZE::DESCRIPTOR_KAZE),
L2<float>(), AKAZE::create(AKAZE::DESCRIPTOR_KAZE));
test.safe_run();
}
TEST( Features2d_DescriptorExtractor, batch_ORB )
{
string path = string(cvtest::TS::ptr()->get_data_path() + "detectors_descriptors_evaluation/images_datasets/graf");
vector<Mat> imgs, descriptors;
vector<vector<KeyPoint> > keypoints;
int i, n = 6;
Ptr<ORB> orb = ORB::create();
for( i = 0; i < n; i++ )
{
string imgname = format("%s/img%d.png", path.c_str(), i+1);
Mat img = imread(imgname, 0);
imgs.push_back(img);
}
orb->detect(imgs, keypoints);
orb->compute(imgs, keypoints, descriptors);
ASSERT_EQ((int)keypoints.size(), n);
ASSERT_EQ((int)descriptors.size(), n);
for( i = 0; i < n; i++ )
{
EXPECT_GT((int)keypoints[i].size(), 100);
EXPECT_GT(descriptors[i].rows, 100);
}
}
TEST( Features2d_DescriptorExtractor, batch_SIFT )
{
string path = string(cvtest::TS::ptr()->get_data_path() + "detectors_descriptors_evaluation/images_datasets/graf");
vector<Mat> imgs, descriptors;
vector<vector<KeyPoint> > keypoints;
int i, n = 6;
Ptr<SIFT> sift = SIFT::create();
for( i = 0; i < n; i++ )
{
string imgname = format("%s/img%d.png", path.c_str(), i+1);
Mat img = imread(imgname, 0);
imgs.push_back(img);
}
sift->detect(imgs, keypoints);
sift->compute(imgs, keypoints, descriptors);
ASSERT_EQ((int)keypoints.size(), n);
ASSERT_EQ((int)descriptors.size(), n);
for( i = 0; i < n; i++ )
{
EXPECT_GT((int)keypoints[i].size(), 100);
EXPECT_GT(descriptors[i].rows, 100);
}
}
class DescriptorImage : public TestWithParam<std::string>
{
protected:
virtual void SetUp() {
pattern = GetParam();
}
std::string pattern;
};
TEST_P(DescriptorImage, no_crash)
{
vector<String> fnames;
glob(cvtest::TS::ptr()->get_data_path() + pattern, fnames, false);
sort(fnames.begin(), fnames.end());
Ptr<AKAZE> akaze_mldb = AKAZE::create(AKAZE::DESCRIPTOR_MLDB);
Ptr<AKAZE> akaze_mldb_upright = AKAZE::create(AKAZE::DESCRIPTOR_MLDB_UPRIGHT);
Ptr<AKAZE> akaze_mldb_256 = AKAZE::create(AKAZE::DESCRIPTOR_MLDB, 256);
Ptr<AKAZE> akaze_mldb_upright_256 = AKAZE::create(AKAZE::DESCRIPTOR_MLDB_UPRIGHT, 256);
Ptr<AKAZE> akaze_kaze = AKAZE::create(AKAZE::DESCRIPTOR_KAZE);
Ptr<AKAZE> akaze_kaze_upright = AKAZE::create(AKAZE::DESCRIPTOR_KAZE_UPRIGHT);
Ptr<ORB> orb = ORB::create();
Ptr<KAZE> kaze = KAZE::create();
Ptr<BRISK> brisk = BRISK::create();
size_t n = fnames.size();
vector<KeyPoint> keypoints;
Mat descriptors;
orb->setMaxFeatures(5000);
for(size_t i = 0; i < n; i++ )
{
printf("%d. image: %s:\n", (int)i, fnames[i].c_str());
if( strstr(fnames[i].c_str(), "MP.png") != 0 )
{
printf("\tskip\n");
continue;
}
bool checkCount = strstr(fnames[i].c_str(), "templ.png") == 0;
Mat img = imread(fnames[i], -1);
printf("\t%dx%d\n", img.cols, img.rows);
#define TEST_DETECTOR(name, descriptor) \
keypoints.clear(); descriptors.release(); \
printf("\t" name "\n"); fflush(stdout); \
descriptor->detectAndCompute(img, noArray(), keypoints, descriptors); \
printf("\t\t\t(%d keypoints, descriptor size = %d)\n", (int)keypoints.size(), descriptors.cols); fflush(stdout); \
if (checkCount) \
{ \
EXPECT_GT((int)keypoints.size(), 0); \
} \
ASSERT_EQ(descriptors.rows, (int)keypoints.size());
TEST_DETECTOR("AKAZE:MLDB", akaze_mldb);
TEST_DETECTOR("AKAZE:MLDB_UPRIGHT", akaze_mldb_upright);
TEST_DETECTOR("AKAZE:MLDB_256", akaze_mldb_256);
TEST_DETECTOR("AKAZE:MLDB_UPRIGHT_256", akaze_mldb_upright_256);
TEST_DETECTOR("AKAZE:KAZE", akaze_kaze);
TEST_DETECTOR("AKAZE:KAZE_UPRIGHT", akaze_kaze_upright);
TEST_DETECTOR("KAZE", kaze);
TEST_DETECTOR("ORB", orb);
TEST_DETECTOR("BRISK", brisk);
}
}
INSTANTIATE_TEST_CASE_P(Features2d, DescriptorImage,
testing::Values(
"shared/lena.png",
"shared/box*.png",
"shared/fruits*.png",
"shared/airplane.png",
"shared/graffiti.png",
"shared/1_itseez-0001*.png",
"shared/pic*.png",
"shared/templ.png"
)
);
}} // namespace

View File

@@ -0,0 +1,298 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
namespace opencv_test { namespace {
/****************************************************************************************\
* Regression tests for descriptor extractors. *
\****************************************************************************************/
static void writeMatInBin( const Mat& mat, const string& filename )
{
FILE* f = fopen( filename.c_str(), "wb");
if( f )
{
CV_Assert(4 == sizeof(int));
int type = mat.type();
fwrite( (void*)&mat.rows, sizeof(int), 1, f );
fwrite( (void*)&mat.cols, sizeof(int), 1, f );
fwrite( (void*)&type, sizeof(int), 1, f );
int dataSize = (int)(mat.step * mat.rows);
fwrite( (void*)&dataSize, sizeof(int), 1, f );
fwrite( (void*)mat.ptr(), 1, dataSize, f );
fclose(f);
}
}
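// On-disk layout (added note): four 32-bit ints -- rows, cols, type,
// dataSize -- followed by dataSize raw bytes of row-major matrix data;
// readMatFromBin() below parses exactly this layout.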
static Mat readMatFromBin( const string& filename )
{
FILE* f = fopen( filename.c_str(), "rb" );
if( f )
{
CV_Assert(4 == sizeof(int));
int rows, cols, type, dataSize;
size_t elements_read1 = fread( (void*)&rows, sizeof(int), 1, f );
size_t elements_read2 = fread( (void*)&cols, sizeof(int), 1, f );
size_t elements_read3 = fread( (void*)&type, sizeof(int), 1, f );
size_t elements_read4 = fread( (void*)&dataSize, sizeof(int), 1, f );
CV_Assert(elements_read1 == 1 && elements_read2 == 1 && elements_read3 == 1 && elements_read4 == 1);
int step = dataSize / rows / CV_ELEM_SIZE(type);
CV_Assert(step >= cols);
Mat returnMat = Mat(rows, step, type).colRange(0, cols);
size_t elements_read = fread( returnMat.ptr(), 1, dataSize, f );
CV_Assert(elements_read == (size_t)(dataSize));
fclose(f);
return returnMat;
}
return Mat();
}
template<class Distance>
class CV_DescriptorExtractorTest : public cvtest::BaseTest
{
public:
typedef typename Distance::ValueType ValueType;
typedef typename Distance::ResultType DistanceType;
CV_DescriptorExtractorTest( const string _name, DistanceType _maxDist, const Ptr<DescriptorExtractor>& _dextractor,
Distance d = Distance(), Ptr<FeatureDetector> _detector = Ptr<FeatureDetector>()):
name(_name), maxDist(_maxDist), dextractor(_dextractor), distance(d) , detector(_detector) {}
~CV_DescriptorExtractorTest()
{
}
protected:
virtual void createDescriptorExtractor() {}
void compareDescriptors( const Mat& validDescriptors, const Mat& calcDescriptors )
{
if( validDescriptors.size != calcDescriptors.size || validDescriptors.type() != calcDescriptors.type() )
{
ts->printf(cvtest::TS::LOG, "Valid and computed descriptors matrices must have the same size and type.\n");
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
return;
}
CV_Assert( DataType<ValueType>::type == validDescriptors.type() );
int dimension = validDescriptors.cols;
DistanceType curMaxDist = 0;
size_t exact_count = 0, failed_count = 0;
for( int y = 0; y < validDescriptors.rows; y++ )
{
DistanceType dist = distance( validDescriptors.ptr<ValueType>(y), calcDescriptors.ptr<ValueType>(y), dimension );
if (dist == 0)
exact_count++;
if( dist > curMaxDist )
{
if (dist > maxDist)
failed_count++;
curMaxDist = dist;
}
#if 0
if (dist > 0)
{
std::cout << "i=" << y << " fail_count=" << failed_count << " dist=" << dist << std::endl;
std::cout << "valid: " << validDescriptors.row(y) << std::endl;
std::cout << " calc: " << calcDescriptors.row(y) << std::endl;
}
#endif
}
float exact_percents = (100 * (float)exact_count / validDescriptors.rows);
float failed_percents = (100 * (float)failed_count / validDescriptors.rows);
std::stringstream ss;
ss << "Exact count (dist == 0): " << exact_count << " (" << (int)exact_percents << "%)" << std::endl
<< "Failed count (dist > " << maxDist << "): " << failed_count << " (" << (int)failed_percents << "%)" << std::endl
<< "Max distance between valid and computed descriptors (" << validDescriptors.size() << "): " << curMaxDist;
EXPECT_LE(failed_percents, 20.0f);
std::cout << ss.str() << std::endl;
}
void emptyDataTest()
{
assert( dextractor );
// One image.
Mat image;
vector<KeyPoint> keypoints;
Mat descriptors;
try
{
dextractor->compute( image, keypoints, descriptors );
}
catch(...)
{
ts->printf( cvtest::TS::LOG, "compute() on empty image and empty keypoints must not generate exception (1).\n");
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
}
RNG rng;
image = cvtest::randomMat(rng, Size(50, 50), CV_8UC3, 0, 255, false);
try
{
dextractor->compute( image, keypoints, descriptors );
}
catch(...)
{
ts->printf( cvtest::TS::LOG, "compute() on nonempty image and empty keypoints must not generate exception (1).\n");
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
}
// Several images.
vector<Mat> images;
vector<vector<KeyPoint> > keypointsCollection;
vector<Mat> descriptorsCollection;
try
{
dextractor->compute( images, keypointsCollection, descriptorsCollection );
}
catch(...)
{
ts->printf( cvtest::TS::LOG, "compute() on empty images and empty keypoints collection must not generate exception (2).\n");
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
}
}
void regressionTest()
{
assert( dextractor );
// Read the test image.
string imgFilename = string(ts->get_data_path()) + FEATURES2D_DIR + "/" + IMAGE_FILENAME;
Mat img = imread( imgFilename );
if( img.empty() )
{
ts->printf( cvtest::TS::LOG, "Image %s can not be read.\n", imgFilename.c_str() );
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
return;
}
const std::string keypoints_filename = string(ts->get_data_path()) +
(detector.empty()
? (FEATURES2D_DIR + "/" + std::string("keypoints.xml.gz"))
: (DESCRIPTOR_DIR + "/" + name + "_keypoints.xml.gz"));
FileStorage fs(keypoints_filename, FileStorage::READ);
vector<KeyPoint> keypoints;
EXPECT_TRUE(fs.isOpened()) << "Keypoint testdata is missing. Re-computing and re-writing keypoints testdata...";
if (!fs.isOpened())
{
fs.open(keypoints_filename, FileStorage::WRITE);
ASSERT_TRUE(fs.isOpened()) << "File for writing keypoints can not be opened.";
if (detector.empty())
{
Ptr<ORB> fd = ORB::create();
fd->detect(img, keypoints);
}
else
{
detector->detect(img, keypoints);
}
write(fs, "keypoints", keypoints);
fs.release();
}
else
{
read(fs.getFirstTopLevelNode(), keypoints);
fs.release();
}
if(!detector.empty())
{
vector<KeyPoint> calcKeypoints;
detector->detect(img, calcKeypoints);
// TODO validate received keypoints
int diff = abs((int)calcKeypoints.size() - (int)keypoints.size());
if (diff > 0)
{
std::cout << "Keypoints difference: " << diff << std::endl;
EXPECT_LE(diff, (int)(keypoints.size() * 0.03f));
}
}
ASSERT_FALSE(keypoints.empty());
{
Mat calcDescriptors;
double t = (double)getTickCount();
dextractor->compute(img, keypoints, calcDescriptors);
t = getTickCount() - t;
ts->printf(cvtest::TS::LOG, "\nAverage time of computing one descriptor = %g ms.\n", t*1000./(double)getTickFrequency()/calcDescriptors.rows);
if (calcDescriptors.rows != (int)keypoints.size())
{
ts->printf( cvtest::TS::LOG, "Count of computed descriptors and keypoints count must be equal.\n" );
ts->printf( cvtest::TS::LOG, "Count of keypoints is %d.\n", (int)keypoints.size() );
ts->printf( cvtest::TS::LOG, "Count of computed descriptors is %d.\n", calcDescriptors.rows );
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
return;
}
if (calcDescriptors.cols != dextractor->descriptorSize() || calcDescriptors.type() != dextractor->descriptorType())
{
ts->printf( cvtest::TS::LOG, "Incorrect descriptor size or descriptor type.\n" );
ts->printf( cvtest::TS::LOG, "Expected size is %d.\n", dextractor->descriptorSize() );
ts->printf( cvtest::TS::LOG, "Calculated size is %d.\n", calcDescriptors.cols );
ts->printf( cvtest::TS::LOG, "Expected type is %d.\n", dextractor->descriptorType() );
ts->printf( cvtest::TS::LOG, "Calculated type is %d.\n", calcDescriptors.type() );
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
return;
}
// TODO read and write descriptor extractor parameters and check them
Mat validDescriptors = readDescriptors();
EXPECT_FALSE(validDescriptors.empty()) << "Descriptors testdata is missing. Re-writing descriptors testdata...";
if (!validDescriptors.empty())
{
compareDescriptors(validDescriptors, calcDescriptors);
}
else
{
ASSERT_TRUE(writeDescriptors(calcDescriptors)) << "Descriptors can not be written.";
}
}
}
void run(int)
{
createDescriptorExtractor();
if( !dextractor )
{
ts->printf(cvtest::TS::LOG, "Descriptor extractor is empty.\n");
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
return;
}
emptyDataTest();
regressionTest();
ts->set_failed_test_info( cvtest::TS::OK );
}
virtual Mat readDescriptors()
{
Mat res = readMatFromBin( string(ts->get_data_path()) + DESCRIPTOR_DIR + "/" + string(name) );
return res;
}
virtual bool writeDescriptors( Mat& descs )
{
writeMatInBin( descs, string(ts->get_data_path()) + DESCRIPTOR_DIR + "/" + string(name) );
return true;
}
string name;
const DistanceType maxDist;
Ptr<DescriptorExtractor> dextractor;
Distance distance;
Ptr<FeatureDetector> detector;
private:
CV_DescriptorExtractorTest& operator=(const CV_DescriptorExtractorTest&) { return *this; }
};
}} // namespace

Some files were not shown because too many files have changed in this diff