feat: 切换后端至PaddleOCR-NCNN,切换工程为CMake
1.项目后端整体迁移至PaddleOCR-NCNN算法,已通过基本的兼容性测试 2.工程改为使用CMake组织,后续为了更好地兼容第三方库,不再提供QMake工程 3.重整权利声明文件,重整代码工程,确保最小化侵权风险 Log: 切换后端至PaddleOCR-NCNN,切换工程为CMake Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
This commit is contained in:
961
3rdparty/opencv-4.5.4/modules/ts/include/opencv2/ts.hpp
vendored
Normal file
961
3rdparty/opencv-4.5.4/modules/ts/include/opencv2/ts.hpp
vendored
Normal file
@ -0,0 +1,961 @@
|
||||
#ifndef OPENCV_TS_HPP
|
||||
#define OPENCV_TS_HPP
|
||||
|
||||
#ifndef __OPENCV_TESTS
|
||||
#define __OPENCV_TESTS 1
|
||||
#endif
|
||||
|
||||
#include "opencv2/opencv_modules.hpp"
|
||||
|
||||
#include "opencv2/core.hpp"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/imgcodecs.hpp"
|
||||
#include "opencv2/videoio.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
|
||||
#include "opencv2/core/utility.hpp"
|
||||
|
||||
#include "opencv2/core/utils/trace.hpp"
|
||||
|
||||
#include "opencv2/core/hal/hal.hpp"
|
||||
|
||||
#include <stdarg.h> // for va_list
|
||||
|
||||
#include "cvconfig.h"
|
||||
|
||||
#include <cmath>
|
||||
#include <vector>
|
||||
#include <list>
|
||||
#include <map>
|
||||
#include <queue>
|
||||
#include <string>
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
#include <iomanip>
|
||||
#include <sstream>
|
||||
#include <cstdio>
|
||||
#include <iterator>
|
||||
#include <limits>
|
||||
#include <algorithm>
|
||||
#include <set>
|
||||
|
||||
|
||||
// Detect 32-bit build configurations: pointer width equal to 32-bit int width,
// or a 32-bit (non-_WIN64) Windows target. Defining OPENCV_32BIT_CONFIGURATION=0
// externally force-disables the flag.
#ifndef OPENCV_32BIT_CONFIGURATION
# if defined(INTPTR_MAX) && defined(INT32_MAX) && INTPTR_MAX == INT32_MAX
#  define OPENCV_32BIT_CONFIGURATION 1
# elif defined(_WIN32) && !defined(_WIN64)
#  define OPENCV_32BIT_CONFIGURATION 1
# endif
#else
# if OPENCV_32BIT_CONFIGURATION == 0
#  undef OPENCV_32BIT_CONFIGURATION
# endif
#endif


// Test tags: string labels passed to applyTestTag() so whole categories of
// tests can be enabled/disabled from the command line.

// most part of OpenCV tests are fit into 200Mb limit, but some tests are not:
// Note: due memory fragmentation real limits are usually lower on 20-25% (400Mb memory usage goes into mem_1Gb class)
#define CV_TEST_TAG_MEMORY_512MB "mem_512mb" // used memory: 200..512Mb - enabled by default
#define CV_TEST_TAG_MEMORY_1GB "mem_1gb" // used memory: 512Mb..1Gb - enabled by default
#define CV_TEST_TAG_MEMORY_2GB "mem_2gb" // used memory: 1..2Gb - enabled by default on 64-bit configuration (32-bit - disabled)
#define CV_TEST_TAG_MEMORY_6GB "mem_6gb" // used memory: 2..6Gb - disabled by default
#define CV_TEST_TAG_MEMORY_14GB "mem_14gb" // used memory: 6..14Gb - disabled by default

// Large / huge video streams or complex workloads
#define CV_TEST_TAG_LONG "long" // 5+ seconds on modern desktop machine (single thread)
#define CV_TEST_TAG_VERYLONG "verylong" // 20+ seconds on modern desktop machine (single thread)

// Large / huge video streams or complex workloads for debug builds
#define CV_TEST_TAG_DEBUG_LONG "debug_long" // 10+ seconds on modern desktop machine (single thread)
#define CV_TEST_TAG_DEBUG_VERYLONG "debug_verylong" // 40+ seconds on modern desktop machine (single thread)

// Lets skip processing of high resolution images via instrumentation tools (valgrind/coverage/sanitizers).
// It is enough to run lower resolution (VGA: 640x480) tests.
#define CV_TEST_TAG_SIZE_HD "size_hd" // 720p+, enabled
#define CV_TEST_TAG_SIZE_FULLHD "size_fullhd" // 1080p+, enabled (disable these tests for valgrind/coverage run)
#define CV_TEST_TAG_SIZE_4K "size_4k" // 2160p+, enabled (disable these tests for valgrind/coverage run)

// Other misc test tags
#define CV_TEST_TAG_TYPE_64F "type_64f" // CV_64F, enabled (disable these tests on low power embedded devices)

// Kernel-based image processing
#define CV_TEST_TAG_FILTER_SMALL "filter_small" // Filtering with kernels <= 3x3
#define CV_TEST_TAG_FILTER_MEDIUM "filter_medium" // Filtering with kernels: 3x3 < kernel <= 5x5
#define CV_TEST_TAG_FILTER_LARGE "filter_large" // Filtering with kernels: 5x5 < kernel <= 9x9
#define CV_TEST_TAG_FILTER_HUGE "filter_huge" // Filtering with kernels: > 9x9

// Other tests categories
#define CV_TEST_TAG_OPENCL "opencl" // Tests with OpenCL


// Compiler warning suppression for the test framework itself.
#ifdef WINRT
#pragma warning(disable:4447) // Disable warning 'main' signature found without threading model
#endif

#ifdef _MSC_VER
#pragma warning( disable: 4503 ) // decorated name length exceeded, name was truncated
#endif

// Keep the short gtest macro names (FAIL, SUCCEED, TEST, ASSERT_*) available.
#define GTEST_DONT_DEFINE_FAIL 0
#define GTEST_DONT_DEFINE_SUCCEED 0
#define GTEST_DONT_DEFINE_ASSERT_EQ 0
#define GTEST_DONT_DEFINE_ASSERT_NE 0
#define GTEST_DONT_DEFINE_ASSERT_LE 0
#define GTEST_DONT_DEFINE_ASSERT_LT 0
#define GTEST_DONT_DEFINE_ASSERT_GE 0
#define GTEST_DONT_DEFINE_ASSERT_GT 0
#define GTEST_DONT_DEFINE_TEST 0

// Tell gtest we are on C++11 or later (checks both GCC/Clang and MSVC macros).
#ifndef GTEST_LANG_CXX11
#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && !(_MSVC_LANG < 201103))
#  define GTEST_LANG_CXX11 1
#  define GTEST_HAS_TR1_TUPLE 0
#  define GTEST_HAS_COMBINE 1
# endif
#endif

// Silence override-related warnings triggered by gtest headers when building OpenCV.
#if defined(__OPENCV_BUILD) && defined(__clang__)
#pragma clang diagnostic ignored "-Winconsistent-missing-override"
#endif
#if defined(__OPENCV_BUILD) && defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
#include "opencv2/ts/ts_gtest.h"
#if defined(__OPENCV_BUILD) && defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic pop
#endif
#include "opencv2/ts/ts_ext.hpp"

#ifndef GTEST_USES_SIMPLE_RE
#  define GTEST_USES_SIMPLE_RE 0
#endif
#ifndef GTEST_USES_POSIX_RE
#  define GTEST_USES_POSIX_RE 0
#endif

// Declares a parameterized test fixture over a tuple of the given types;
// GET_PARAM(k) extracts the k-th tuple element inside the test body.
#define PARAM_TEST_CASE(name, ...) struct name : testing::TestWithParam< testing::tuple< __VA_ARGS__ > >
#define GET_PARAM(k) testing::get< k >(GetParam())
|
||||
|
||||
namespace cvtest
{

// Aliases pulled into cvtest so test code stays terse.

// C++ standard library
using std::vector;
using std::map;
using std::string;
using std::stringstream;
using std::cout;
using std::cerr;
using std::endl;
using std::min;
using std::max;
using std::numeric_limits;
using std::pair;
using std::make_pair;

// Google Test
using testing::TestWithParam;
using testing::Values;
using testing::ValuesIn;
using testing::Combine;

// Core OpenCV types
using cv::Mat;
using cv::Mat_;
using cv::UMat;
using cv::InputArray;
using cv::OutputArray;
using cv::noArray;

using cv::Range;
using cv::Point;
using cv::Rect;
using cv::Size;
using cv::Scalar;
using cv::RNG;

// Tuple stuff from Google Tests
using testing::get;
using testing::make_tuple;
using testing::tuple;
using testing::tuple_size;
using testing::tuple_element;
||||
|
||||
namespace details {
// Common base for "skip this test" exceptions. Derives from cv::Exception so
// it propagates cleanly through OpenCV error handling. handlingTags indicates
// whether the skip originated from test-tag processing.
class SkipTestExceptionBase: public cv::Exception
{
public:
    SkipTestExceptionBase(bool handlingTags);
    SkipTestExceptionBase(const cv::String& message, bool handlingTags);
};
}
|
||||
|
||||
// Thrown by a test body to mark the current test as skipped (not failed).
class SkipTestException: public details::SkipTestExceptionBase
{
public:
    int dummy; // workaround for MacOSX Xcode 7.3 bug (don't make class "empty")
    SkipTestException() : details::SkipTestExceptionBase(false), dummy(0) {}
    SkipTestException(const cv::String& message) : details::SkipTestExceptionBase(message, false), dummy(0) { }
};
|
||||
|
||||
/** Apply tag to the current test

Automatically apply corresponding additional tags (for example, 4K => FHD => HD => VGA).

If tag is in skip list, then SkipTestException is thrown
*/
void applyTestTag(const std::string& tag);

/** Run postponed checks of applied test tags

If tag is in skip list, then SkipTestException is thrown
*/
void checkTestTags();

void applyTestTag_(const std::string& tag);

// Multi-tag convenience overloads: record every tag first, then run the
// postponed skip-list checks once.
static inline void applyTestTag(const std::string& tag1, const std::string& tag2)
{
    for (const std::string* tag : { &tag1, &tag2 })
        applyTestTag_(*tag);
    checkTestTags();
}
static inline void applyTestTag(const std::string& tag1, const std::string& tag2, const std::string& tag3)
{
    for (const std::string* tag : { &tag1, &tag2, &tag3 })
        applyTestTag_(*tag);
    checkTestTags();
}
static inline void applyTestTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4)
{
    for (const std::string* tag : { &tag1, &tag2, &tag3, &tag4 })
        applyTestTag_(*tag);
    checkTestTags();
}
static inline void applyTestTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4, const std::string& tag5)
{
    for (const std::string* tag : { &tag1, &tag2, &tag3, &tag4, &tag5 })
        applyTestTag_(*tag);
    checkTestTags();
}
|
||||
|
||||
|
||||
/** Append global skip test tags
*/
void registerGlobalSkipTag(const std::string& skipTag);

// Convenience overloads: each simply forwards every tag to the
// single-tag registrar above.
static inline void registerGlobalSkipTag(const std::string& tag1, const std::string& tag2)
{
    for (const std::string* tag : { &tag1, &tag2 })
        registerGlobalSkipTag(*tag);
}
static inline void registerGlobalSkipTag(const std::string& tag1, const std::string& tag2, const std::string& tag3)
{
    for (const std::string* tag : { &tag1, &tag2, &tag3 })
        registerGlobalSkipTag(*tag);
}
static inline void registerGlobalSkipTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4)
{
    for (const std::string* tag : { &tag1, &tag2, &tag3, &tag4 })
        registerGlobalSkipTag(*tag);
}
static inline void registerGlobalSkipTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4,
    const std::string& tag5)
{
    for (const std::string* tag : { &tag1, &tag2, &tag3, &tag4, &tag5 })
        registerGlobalSkipTag(*tag);
}
static inline void registerGlobalSkipTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4,
    const std::string& tag5, const std::string& tag6)
{
    for (const std::string* tag : { &tag1, &tag2, &tag3, &tag4, &tag5, &tag6 })
        registerGlobalSkipTag(*tag);
}
static inline void registerGlobalSkipTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4,
    const std::string& tag5, const std::string& tag6, const std::string& tag7)
{
    for (const std::string* tag : { &tag1, &tag2, &tag3, &tag4, &tag5, &tag6, &tag7 })
        registerGlobalSkipTag(*tag);
}
|
||||
|
||||
|
||||
|
||||
class TS;

// Parse an RNG seed from its textual (hex) representation.
int64 readSeed(const char* str);

// Fill matrix a with uniformly distributed random values in [param1, param2).
void randUni( RNG& rng, Mat& a, const Scalar& param1, const Scalar& param2 );

// Next random unsigned integer from the given RNG (thin cast wrapper).
inline unsigned randInt( RNG& rng )
{
    return (unsigned)rng;
}

// Next random double in [0,1) from the given RNG (thin cast wrapper).
inline double randReal( RNG& rng )
{
    return (double)rng;
}

// Conversion between OpenCV type ids (e.g. CV_8UC3) and their textual names.
const char* getTypeName( int type );
int typeByName( const char* type_name );

// Join nelems integers from v into a single string using sep as separator.
string vec2str(const string& sep, const int* v, size_t nelems);
|
||||
|
||||
// Clamp val to the inclusive range [min_val, max_val].
// Note: when min_val > max_val the result is max_val, matching the original
// sequential (clamp-low-then-clamp-high) behavior.
inline int clipInt( int val, int min_val, int max_val )
{
    return std::min(std::max(val, min_val), max_val);
}
|
||||
|
||||
// Min/max representable value for the given matrix depth (CV_8U..CV_64F).
double getMinVal(int depth);
double getMaxVal(int depth);

// Random test-data generators (sizes, types, matrices).
Size randomSize(RNG& rng, double maxSizeLog);
void randomSize(RNG& rng, int minDims, int maxDims, double maxSizeLog, vector<int>& sz);
int randomType(RNG& rng, cv::_OutputArray::DepthMask typeMask, int minChannels, int maxChannels);
Mat randomMat(RNG& rng, Size size, int type, double minVal, double maxVal, bool useRoi);
Mat randomMat(RNG& rng, const vector<int>& size, int type, double minVal, double maxVal, bool useRoi);

// Reference (naive) implementations of arithmetic operations used to validate
// the optimized OpenCV code paths.
void add(const Mat& a, double alpha, const Mat& b, double beta,
        Scalar gamma, Mat& c, int ctype, bool calcAbs=false);
void multiply(const Mat& a, const Mat& b, Mat& c, double alpha=1);
void divide(const Mat& a, const Mat& b, Mat& c, double alpha=1);

void convert(const Mat& src, cv::OutputArray dst, int dtype, double alpha=1, double beta=0);
void copy(const Mat& src, Mat& dst, const Mat& mask=Mat(), bool invertMask=false);
void set(Mat& dst, const Scalar& gamma, const Mat& mask=Mat());

// working with multi-channel arrays
void extract( const Mat& a, Mat& plane, int coi );
void insert( const Mat& plane, Mat& a, int coi );

// checks that the array does not have NaNs and/or Infs and all the elements are
// within [min_val,max_val). idx is the index of the first "bad" element.
int check( const Mat& data, double min_val, double max_val, vector<int>* idx );

// modifies values that are close to zero
void patchZeros( Mat& mat, double level );

// Reference implementations of image-processing primitives.
void transpose(const Mat& src, Mat& dst);
void erode(const Mat& src, Mat& dst, const Mat& _kernel, Point anchor=Point(-1,-1),
           int borderType=0, const Scalar& borderValue=Scalar());
void dilate(const Mat& src, Mat& dst, const Mat& _kernel, Point anchor=Point(-1,-1),
            int borderType=0, const Scalar& borderValue=Scalar());
void filter2D(const Mat& src, Mat& dst, int ddepth, const Mat& kernel,
              Point anchor, double delta, int borderType,
              const Scalar& borderValue=Scalar());
void copyMakeBorder(const Mat& src, Mat& dst, int top, int bottom, int left, int right,
                    int borderType, const Scalar& borderValue=Scalar());
Mat calcSobelKernel2D( int dx, int dy, int apertureSize, int origin=0 );
Mat calcLaplaceKernel2D( int aperture_size );

void initUndistortMap( const Mat& a, const Mat& k, const Mat& R, const Mat& new_a, Size sz, Mat& mapx, Mat& mapy, int map_type );
void initInverseRectificationMap( const Mat& a, const Mat& k, const Mat& R, const Mat& new_a, Size sz, Mat& mapx, Mat& mapy, int map_type );

// Reference statistics / norms.
void minMaxLoc(const Mat& src, double* minval, double* maxval,
               vector<int>* minloc, vector<int>* maxloc, const Mat& mask=Mat());
double norm(InputArray src, int normType, InputArray mask=noArray());
double norm(InputArray src1, InputArray src2, int normType, InputArray mask=noArray());
Scalar mean(const Mat& src, const Mat& mask=Mat());
double PSNR(InputArray src1, InputArray src2);

// Compare arrays elementwise with a tolerance expressed in ULPs.
bool cmpUlps(const Mat& data, const Mat& refdata, int expMaxDiff, double* realMaxDiff, vector<int>* idx);

// compares two arrays. max_diff is the maximum actual difference,
// success_err_level is maximum allowed difference, idx is the index of the first
// element for which difference is >success_err_level
// (or index of element with the maximum difference)
int cmpEps( const Mat& data, const Mat& refdata, double* max_diff,
            double success_err_level, vector<int>* idx,
            bool element_wise_relative_error );

// a wrapper for the previous function. in case of error prints the message to log file.
int cmpEps2( TS* ts, const Mat& data, const Mat& refdata, double success_err_level,
             bool element_wise_relative_error, const char* desc );

int cmpEps2_64f( TS* ts, const double* val, const double* refval, int len,
                 double eps, const char* param_name );

// Reference implementations of logical / min / max / compare / linear algebra ops.
void logicOp(const Mat& src1, const Mat& src2, Mat& dst, char c);
void logicOp(const Mat& src, const Scalar& s, Mat& dst, char c);
void min(const Mat& src1, const Mat& src2, Mat& dst);
void min(const Mat& src, double s, Mat& dst);
void max(const Mat& src1, const Mat& src2, Mat& dst);
void max(const Mat& src, double s, Mat& dst);

void compare(const Mat& src1, const Mat& src2, Mat& dst, int cmpop);
void compare(const Mat& src, double s, Mat& dst, int cmpop);
void gemm(const Mat& src1, const Mat& src2, double alpha,
          const Mat& src3, double beta, Mat& dst, int flags);
void transform( const Mat& src, Mat& dst, const Mat& transmat, const Mat& shift );
double crossCorr(const Mat& src1, const Mat& src2);
void threshold( const Mat& src, Mat& dst, double thresh, double maxval, int thresh_type );
void minMaxIdx( InputArray _img, double* minVal, double* maxVal,
                Point* minLoc, Point* maxLoc, InputArray _mask );
||||
// Non-owning wrapper so a Mat can be streamed to std::ostream for diagnostics
// (see operator<< below).
struct MatInfo
{
    MatInfo(const Mat& _m) : m(&_m) {}
    const Mat* m;
};

std::ostream& operator << (std::ostream& out, const MatInfo& m);

// gtest predicate-formatter comparing two Mats with a tolerance; on failure the
// AssertionResult describes the worst mismatch found.
struct MatComparator
{
public:
    MatComparator(double maxdiff, int context);

    ::testing::AssertionResult operator()(const char* expr1, const char* expr2,
                                          const Mat& m1, const Mat& m2);

    double maxdiff;      // maximum allowed difference
    double realmaxdiff;  // maximum difference actually observed
    vector<int> loc0;    // location of the worst mismatch
    int context;         // number of surrounding elements to print on failure
};
|
||||
|
||||
|
||||
|
||||
class BaseTest;
class TS;

// Abstract base class of all legacy (pre-gtest) OpenCV tests; concrete tests
// override the protected hooks and the framework drives them via run()/safe_run().
class BaseTest
{
public:
    // constructor(s) and destructor
    BaseTest();
    virtual ~BaseTest();

    // the main procedure of the test
    virtual void run( int start_from );

    // the wrapper for run that cares of exceptions
    virtual void safe_run( int start_from=0 );

    const string& get_name() const { return name; }

    // returns true if and only if the different test cases do not depend on each other
    // (so that test system could get right to a problematic test case)
    virtual bool can_do_fast_forward();

    // deallocates all the memory.
    // called by init() (before initialization) and by the destructor
    virtual void clear();

protected:
    int test_case_count; // the total number of test cases

    // read test params
    virtual int read_params( const cv::FileStorage& fs );

    // returns the number of tests or -1 if it is unknown a-priori
    virtual int get_test_case_count();

    // prepares data for the next test case. rng seed is updated by the function
    virtual int prepare_test_case( int test_case_idx );

    // checks if the test output is valid and accurate
    virtual int validate_test_results( int test_case_idx );

    // calls the tested function. the method is called from run_test_case()
    virtual void run_func(); // runs tested func(s)

    // updates progress bar
    virtual int update_progress( int progress, int test_case_idx, int count, double dt );

    // dump test case input parameters
    virtual void dump_test_case(int test_case_idx, std::ostream* out);

    // finds test parameter
    cv::FileNode find_param( const cv::FileStorage& fs, const char* param_name );

    // name of the test (it is possible to locate a test by its name)
    string name;

    // pointer to the system that includes the test
    TS* ts;
};
|
||||
|
||||
|
||||
/*****************************************************************************************\
*                              Information about a failed test                            *
\*****************************************************************************************/

struct TestInfo
{
    TestInfo();

    // pointer to the test
    BaseTest* test;

    // failure code (TS::FAIL_*)
    int code;

    // seed value right before the data for the failed test case is prepared.
    uint64 rng_seed;

    // seed value right before running the test
    uint64 rng_seed0;

    // index of test case, can be then passed to BaseTest::proceed_to_test_case()
    int test_case_idx;
};
|
||||
|
||||
/*****************************************************************************************\
*                                 Base Class for test system                              *
\*****************************************************************************************/

// common parameters:
struct TSParams
{
    TSParams();

    // RNG seed, passed to and updated by every test executed.
    uint64 rng_seed;

    // whether to use IPP, MKL etc. or not
    bool use_optimized;

    // extensivity of the tests, scale factor for test_case_count
    double test_case_count_scale;
};
|
||||
|
||||
|
||||
class TS
|
||||
{
|
||||
TS();
|
||||
virtual ~TS();
|
||||
public:
|
||||
|
||||
enum
|
||||
{
|
||||
NUL=0,
|
||||
SUMMARY_IDX=0,
|
||||
SUMMARY=1 << SUMMARY_IDX,
|
||||
LOG_IDX=1,
|
||||
LOG=1 << LOG_IDX,
|
||||
CSV_IDX=2,
|
||||
CSV=1 << CSV_IDX,
|
||||
CONSOLE_IDX=3,
|
||||
CONSOLE=1 << CONSOLE_IDX,
|
||||
MAX_IDX=4
|
||||
};
|
||||
|
||||
static TS* ptr();
|
||||
|
||||
// initialize test system before running the first test
|
||||
virtual void init( const string& modulename );
|
||||
|
||||
// low-level printing functions that are used by individual tests and by the system itself
|
||||
virtual void printf( int streams, const char* fmt, ... );
|
||||
virtual void vprintf( int streams, const char* fmt, va_list arglist );
|
||||
|
||||
// updates the context: current test, test case, rng state
|
||||
virtual void update_context( BaseTest* test, int test_case_idx, bool update_ts_context );
|
||||
|
||||
const TestInfo* get_current_test_info() { return ¤t_test_info; }
|
||||
|
||||
// sets information about a failed test
|
||||
virtual void set_failed_test_info( int fail_code );
|
||||
|
||||
virtual void set_gtest_status();
|
||||
|
||||
// test error codes
|
||||
enum FailureCode
|
||||
{
|
||||
// everything is Ok
|
||||
OK=0,
|
||||
|
||||
// generic error: stub value to be used
|
||||
// temporarily if the error's cause is unknown
|
||||
FAIL_GENERIC=-1,
|
||||
|
||||
// the test is missing some essential data to proceed further
|
||||
FAIL_MISSING_TEST_DATA=-2,
|
||||
|
||||
// the tested function raised an error via cxcore error handler
|
||||
FAIL_ERROR_IN_CALLED_FUNC=-3,
|
||||
|
||||
// an exception has been raised;
|
||||
// for memory and arithmetic exception
|
||||
// there are two specialized codes (see below...)
|
||||
FAIL_EXCEPTION=-4,
|
||||
|
||||
// a memory exception
|
||||
// (access violation, access to missed page, stack overflow etc.)
|
||||
FAIL_MEMORY_EXCEPTION=-5,
|
||||
|
||||
// arithmetic exception (overflow, division by zero etc.)
|
||||
FAIL_ARITHM_EXCEPTION=-6,
|
||||
|
||||
// the tested function corrupted memory (no exception have been raised)
|
||||
FAIL_MEMORY_CORRUPTION_BEGIN=-7,
|
||||
FAIL_MEMORY_CORRUPTION_END=-8,
|
||||
|
||||
// the tested function (or test itself) do not deallocate some memory
|
||||
FAIL_MEMORY_LEAK=-9,
|
||||
|
||||
// the tested function returned invalid object, e.g. matrix, containing NaNs,
|
||||
// structure with NULL or out-of-range fields (while it should not)
|
||||
FAIL_INVALID_OUTPUT=-10,
|
||||
|
||||
// the tested function returned valid object, but it does not match
|
||||
// the original (or produced by the test) object
|
||||
FAIL_MISMATCH=-11,
|
||||
|
||||
// the tested function returned valid object (a single number or numerical array),
|
||||
// but it differs too much from the original (or produced by the test) object
|
||||
FAIL_BAD_ACCURACY=-12,
|
||||
|
||||
// the tested function hung. Sometimes, it can be determined by unexpectedly long
|
||||
// processing time (in this case there should be possibility to interrupt such a function
|
||||
FAIL_HANG=-13,
|
||||
|
||||
// unexpected response on passing bad arguments to the tested function
|
||||
// (the function crashed, proceed successfully (while it should not), or returned
|
||||
// error code that is different from what is expected)
|
||||
FAIL_BAD_ARG_CHECK=-14,
|
||||
|
||||
// the test data (in whole or for the particular test case) is invalid
|
||||
FAIL_INVALID_TEST_DATA=-15,
|
||||
|
||||
// the test has been skipped because it is not in the selected subset of the tests to run,
|
||||
// because it has been run already within the same run with the same parameters, or because
|
||||
// of some other reason and this is not considered as an error.
|
||||
// Normally TS::run() (or overridden method in the derived class) takes care of what
|
||||
// needs to be run, so this code should not occur.
|
||||
SKIPPED=1
|
||||
};
|
||||
|
||||
// get RNG to generate random input data for a test
|
||||
RNG& get_rng() { return rng; }
|
||||
|
||||
// returns the current error code
|
||||
TS::FailureCode get_err_code() { return TS::FailureCode(current_test_info.code); }
|
||||
|
||||
// returns the test extensivity scale
|
||||
double get_test_case_count_scale() { return params.test_case_count_scale; }
|
||||
|
||||
const string& get_data_path() const { return data_path; }
|
||||
|
||||
// returns textual description of failure code
|
||||
static string str_from_code( const TS::FailureCode code );
|
||||
|
||||
std::vector<std::string> data_search_path;
|
||||
std::vector<std::string> data_search_subdir;
|
||||
protected:
|
||||
|
||||
// these are allocated within a test to try to keep them valid in case of stack corruption
|
||||
RNG rng;
|
||||
|
||||
// information about the current test
|
||||
TestInfo current_test_info;
|
||||
|
||||
// the path to data files used by tests
|
||||
string data_path;
|
||||
|
||||
TSParams params;
|
||||
std::string output_buf[MAX_IDX];
|
||||
};
|
||||
|
||||
|
||||
/*****************************************************************************************\
*            Subclass of BaseTest for testing functions that process dense arrays         *
\*****************************************************************************************/

class ArrayTest : public BaseTest
{
public:
    // constructor(s) and destructor
    ArrayTest();
    virtual ~ArrayTest();

    virtual void clear() CV_OVERRIDE;

protected:

    virtual int read_params( const cv::FileStorage& fs ) CV_OVERRIDE;
    virtual int prepare_test_case( int test_case_idx ) CV_OVERRIDE;
    virtual int validate_test_results( int test_case_idx ) CV_OVERRIDE;

    // hooks for subclasses: describe array layouts, fill inputs, and define
    // the allowed error for validation
    virtual void prepare_to_validation( int test_case_idx );
    virtual void get_test_array_types_and_sizes( int test_case_idx, vector<vector<Size> >& sizes, vector<vector<int> >& types );
    virtual void fill_array( int test_case_idx, int i, int j, Mat& arr );
    virtual void get_minmax_bounds( int i, int j, int type, Scalar& low, Scalar& high );
    virtual double get_success_error_level( int test_case_idx, int i, int j );

    bool cvmat_allowed;
    bool iplimage_allowed;
    bool optional_mask;
    bool element_wise_relative_error;

    int min_log_array_size;
    int max_log_array_size;

    // roles of the arrays stored in test_array/test_mat
    enum { INPUT, INPUT_OUTPUT, OUTPUT, REF_INPUT_OUTPUT, REF_OUTPUT, TEMP, MASK, MAX_ARR };

    vector<vector<void*> > test_array;
    vector<vector<Mat> > test_mat;
    float buf[4];
};
|
||||
|
||||
|
||||
class BadArgTest : public BaseTest
|
||||
{
|
||||
public:
|
||||
// constructor(s) and destructor
|
||||
BadArgTest();
|
||||
virtual ~BadArgTest();
|
||||
|
||||
protected:
|
||||
virtual int run_test_case( int expected_code, const string& descr );
|
||||
virtual void run_func(void) CV_OVERRIDE = 0;
|
||||
int test_case_idx;
|
||||
|
||||
template<class F>
|
||||
int run_test_case( int expected_code, const string& _descr, F f)
|
||||
{
|
||||
int errcount = 0;
|
||||
bool thrown = false;
|
||||
const char* descr = _descr.c_str() ? _descr.c_str() : "";
|
||||
|
||||
try
|
||||
{
|
||||
f();
|
||||
}
|
||||
catch(const cv::Exception& e)
|
||||
{
|
||||
thrown = true;
|
||||
if( e.code != expected_code && e.code != cv::Error::StsAssert && e.code != cv::Error::StsError )
|
||||
{
|
||||
ts->printf(TS::LOG, "%s (test case #%d): the error code %d is different from the expected %d\n",
|
||||
descr, test_case_idx, e.code, expected_code);
|
||||
errcount = 1;
|
||||
}
|
||||
}
|
||||
catch(...)
|
||||
{
|
||||
thrown = true;
|
||||
ts->printf(TS::LOG, "%s (test case #%d): unknown exception was thrown (the function has likely crashed)\n",
|
||||
descr, test_case_idx);
|
||||
errcount = 1;
|
||||
}
|
||||
if(!thrown)
|
||||
{
|
||||
ts->printf(TS::LOG, "%s (test case #%d): no expected exception was thrown\n",
|
||||
descr, test_case_idx);
|
||||
errcount = 1;
|
||||
}
|
||||
test_case_idx++;
|
||||
|
||||
return errcount;
|
||||
}
|
||||
};
|
||||
|
||||
// Seed used to reset cv::theRNG() in DefaultRngAuto; configured by the test system.
extern uint64 param_seed;

// RAII guard: on construction resets cv::theRNG() to the canonical test seed,
// on destruction restores the RNG state that was active before.
struct DefaultRngAuto
{
    const uint64 old_state;

    DefaultRngAuto() : old_state(cv::theRNG().state) { cv::theRNG().state = cvtest::param_seed; }
    ~DefaultRngAuto() { cv::theRNG().state = old_state; }

    // not assignable (declared, never defined)
    DefaultRngAuto& operator=(const DefaultRngAuto&);
};
|
||||
|
||||
|
||||
// test images generation functions
void fillGradient(Mat& img, int delta = 5);
void smoothBorder(Mat& img, const Scalar& color, int delta = 3);

// Utility functions

// Extend the search locations used by findDataFile()/findDataDirectory().
void addDataSearchPath(const std::string& path);
void addDataSearchSubDirectory(const std::string& subdir);

/*! @brief Try to find requested data file

Search directories:

0. TS::data_search_path (search sub-directories are not used)
1. OPENCV_TEST_DATA_PATH environment variable
2. One of these:
   a. OpenCV testdata based on build location: "./" + "share/OpenCV/testdata"
   b. OpenCV testdata at install location: CMAKE_INSTALL_PREFIX + "share/OpenCV/testdata"

Search sub-directories:

- addDataSearchSubDirectory()
- modulename from TS::init()

*/
std::string findDataFile(const std::string& relative_path, bool required = true);

/*! @brief Try to find requested data directory
@sa findDataFile
*/
std::string findDataDirectory(const std::string& relative_path, bool required = true);

// Test definitions

// gtest event listener that records system/build information when the test
// program starts.
class SystemInfoCollector : public testing::EmptyTestEventListener
{
private:
    virtual void OnTestProgramStart(const testing::UnitTest&);
};
|
||||
|
||||
// Helper used by CV_TEST_MAIN_EX to splice user statements into main();
// the VS2005 variant works around a preprocessor limitation of that compiler.
#ifndef __CV_TEST_EXEC_ARGS
#if defined(_MSC_VER) && (_MSC_VER <= 1400)
#define __CV_TEST_EXEC_ARGS(...) \
while (++argc >= (--argc,-1)) {__VA_ARGS__; break;} /*this ugly construction is needed for VS 2005*/
#else
#define __CV_TEST_EXEC_ARGS(...) \
__VA_ARGS__;
#endif
#endif

// Parse test-framework-specific command-line options (tags, data paths, ...).
void parseCustomOptions(int argc, char **argv);

#define CV_TEST_INIT0_NOOP (void)0

// Generates main() for a test executable; resourcesubdir is the module's
// testdata sub-directory, extra arguments are statements run before the tests.
#define CV_TEST_MAIN(resourcesubdir, ...) CV_TEST_MAIN_EX(resourcesubdir, NOOP, __VA_ARGS__)

#define CV_TEST_MAIN_EX(resourcesubdir, INIT0, ...) \
int main(int argc, char **argv) \
{ \
CV_TRACE_FUNCTION(); \
{ CV_TRACE_REGION("INIT"); \
using namespace cvtest; using namespace opencv_test; \
TS* ts = TS::ptr(); \
ts->init(resourcesubdir); \
__CV_TEST_EXEC_ARGS(CV_TEST_INIT0_ ## INIT0) \
::testing::InitGoogleTest(&argc, argv); \
::testing::UnitTest::GetInstance()->listeners().Append(new SystemInfoCollector); \
__CV_TEST_EXEC_ARGS(__VA_ARGS__) \
parseCustomOptions(argc, argv); \
} \
return RUN_ALL_TESTS(); \
}

// This usually only makes sense in perf tests with several implementations,
// some of which are not available.
#define CV_TEST_FAIL_NO_IMPL() do { \
::testing::Test::RecordProperty("custom_status", "noimpl"); \
FAIL() << "No equivalent implementation."; \
} while (0)

} //namespace cvtest
|
||||
|
||||
#include "opencv2/ts/ts_perf.hpp"
|
||||
|
||||
namespace cvtest {
|
||||
using perf::MatDepth;
|
||||
using perf::MatType;
|
||||
}
|
||||
|
||||
#ifdef WINRT
|
||||
#ifndef __FSTREAM_EMULATED__
|
||||
#define __FSTREAM_EMULATED__
|
||||
#include <stdlib.h>
|
||||
#include <fstream>
|
||||
#include <sstream>
|
||||
|
||||
#undef ifstream
|
||||
#undef ofstream
|
||||
#define ifstream ifstream_emulated
|
||||
#define ofstream ofstream_emulated
|
||||
|
||||
namespace std {
|
||||
|
||||
class ifstream : public stringstream
|
||||
{
|
||||
FILE* f;
|
||||
public:
|
||||
ifstream(const char* filename, ios_base::openmode mode = ios_base::in)
|
||||
: f(NULL)
|
||||
{
|
||||
string modeStr("r");
|
||||
printf("Open file (read): %s\n", filename);
|
||||
if (mode & ios_base::binary)
|
||||
modeStr += "b";
|
||||
f = fopen(filename, modeStr.c_str());
|
||||
|
||||
if (f == NULL)
|
||||
{
|
||||
printf("Can't open file: %s\n", filename);
|
||||
return;
|
||||
}
|
||||
fseek(f, 0, SEEK_END);
|
||||
size_t sz = ftell(f);
|
||||
if (sz > 0)
|
||||
{
|
||||
char* buf = (char*) malloc(sz);
|
||||
fseek(f, 0, SEEK_SET);
|
||||
if (fread(buf, 1, sz, f) == sz)
|
||||
{
|
||||
this->str(std::string(buf, sz));
|
||||
}
|
||||
free(buf);
|
||||
}
|
||||
}
|
||||
|
||||
~ifstream() { close(); }
|
||||
bool is_open() const { return f != NULL; }
|
||||
void close()
|
||||
{
|
||||
if (f)
|
||||
fclose(f);
|
||||
f = NULL;
|
||||
this->str("");
|
||||
}
|
||||
};
|
||||
|
||||
class ofstream : public stringstream
|
||||
{
|
||||
FILE* f;
|
||||
public:
|
||||
ofstream(const char* filename, ios_base::openmode mode = ios_base::out)
|
||||
: f(NULL)
|
||||
{
|
||||
open(filename, mode);
|
||||
}
|
||||
~ofstream() { close(); }
|
||||
void open(const char* filename, ios_base::openmode mode = ios_base::out)
|
||||
{
|
||||
string modeStr("w+");
|
||||
if (mode & ios_base::trunc)
|
||||
modeStr = "w";
|
||||
if (mode & ios_base::binary)
|
||||
modeStr += "b";
|
||||
f = fopen(filename, modeStr.c_str());
|
||||
printf("Open file (write): %s\n", filename);
|
||||
if (f == NULL)
|
||||
{
|
||||
printf("Can't open file (write): %s\n", filename);
|
||||
return;
|
||||
}
|
||||
}
|
||||
bool is_open() const { return f != NULL; }
|
||||
void close()
|
||||
{
|
||||
if (f)
|
||||
{
|
||||
fwrite(reinterpret_cast<const char *>(this->str().c_str()), this->str().size(), 1, f);
|
||||
fclose(f);
|
||||
}
|
||||
f = NULL;
|
||||
this->str("");
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace std
|
||||
#endif // __FSTREAM_EMULATED__
|
||||
#endif // WINRT
|
||||
|
||||
|
||||
namespace opencv_test {
|
||||
using namespace cvtest;
|
||||
using namespace cv;
|
||||
|
||||
#ifdef CV_CXX11
|
||||
#define CVTEST_GUARD_SYMBOL(name) \
|
||||
class required_namespace_specificatin_here_for_symbol_ ## name {}; \
|
||||
using name = required_namespace_specificatin_here_for_symbol_ ## name;
|
||||
#else
|
||||
#define CVTEST_GUARD_SYMBOL(name) /* nothing */
|
||||
#endif
|
||||
|
||||
CVTEST_GUARD_SYMBOL(norm)
|
||||
CVTEST_GUARD_SYMBOL(add)
|
||||
CVTEST_GUARD_SYMBOL(multiply)
|
||||
CVTEST_GUARD_SYMBOL(divide)
|
||||
CVTEST_GUARD_SYMBOL(transpose)
|
||||
CVTEST_GUARD_SYMBOL(copyMakeBorder)
|
||||
CVTEST_GUARD_SYMBOL(filter2D)
|
||||
CVTEST_GUARD_SYMBOL(compare)
|
||||
CVTEST_GUARD_SYMBOL(minMaxIdx)
|
||||
CVTEST_GUARD_SYMBOL(threshold)
|
||||
|
||||
extern bool required_opencv_test_namespace; // compilation check for non-refactored tests
|
||||
}
|
||||
|
||||
#endif // OPENCV_TS_HPP
|
125
3rdparty/opencv-4.5.4/modules/ts/include/opencv2/ts/cuda_perf.hpp
vendored
Normal file
125
3rdparty/opencv-4.5.4/modules/ts/include/opencv2/ts/cuda_perf.hpp
vendored
Normal file
@ -0,0 +1,125 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifndef OPENCV_CUDA_PERF_UTILITY_HPP
|
||||
#define OPENCV_CUDA_PERF_UTILITY_HPP
|
||||
|
||||
#include "opencv2/ts.hpp"
|
||||
|
||||
#include "opencv2/ts/ts_perf.hpp"
|
||||
|
||||
namespace perf
|
||||
{
|
||||
#define ALL_BORDER_MODES BorderMode::all()
|
||||
#define ALL_INTERPOLATIONS Interpolation::all()
|
||||
|
||||
CV_ENUM(BorderMode, BORDER_REFLECT101, BORDER_REPLICATE, BORDER_CONSTANT, BORDER_REFLECT, BORDER_WRAP)
|
||||
CV_ENUM(Interpolation, INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, INTER_AREA)
|
||||
CV_ENUM(NormType, NORM_INF, NORM_L1, NORM_L2, NORM_HAMMING, NORM_MINMAX)
|
||||
|
||||
enum { Gray = 1, TwoChannel = 2, BGR = 3, BGRA = 4 };
|
||||
CV_ENUM(MatCn, Gray, TwoChannel, BGR, BGRA)
|
||||
|
||||
#define CUDA_CHANNELS_1_3_4 testing::Values(MatCn(Gray), MatCn(BGR), MatCn(BGRA))
|
||||
#define CUDA_CHANNELS_1_3 testing::Values(MatCn(Gray), MatCn(BGR))
|
||||
|
||||
#define GET_PARAM(k) testing::get< k >(GetParam())
|
||||
|
||||
#define DEF_PARAM_TEST(name, ...) typedef ::perf::TestBaseWithParam< testing::tuple< __VA_ARGS__ > > name
|
||||
#define DEF_PARAM_TEST_1(name, param_type) typedef ::perf::TestBaseWithParam< param_type > name
|
||||
|
||||
DEF_PARAM_TEST_1(Sz, cv::Size);
|
||||
typedef perf::Size_MatType Sz_Type;
|
||||
DEF_PARAM_TEST(Sz_Depth, cv::Size, perf::MatDepth);
|
||||
DEF_PARAM_TEST(Sz_Depth_Cn, cv::Size, perf::MatDepth, MatCn);
|
||||
|
||||
#define CUDA_TYPICAL_MAT_SIZES testing::Values(perf::sz720p, perf::szSXGA, perf::sz1080p)
|
||||
|
||||
#define FAIL_NO_CPU() FAIL() << "No such CPU implementation analogy"
|
||||
|
||||
#define CUDA_SANITY_CHECK(mat, ...) \
|
||||
do{ \
|
||||
cv::Mat gpu_##mat(mat); \
|
||||
SANITY_CHECK(gpu_##mat, ## __VA_ARGS__); \
|
||||
} while(0)
|
||||
|
||||
#define CPU_SANITY_CHECK(mat, ...) \
|
||||
do{ \
|
||||
cv::Mat cpu_##mat(mat); \
|
||||
SANITY_CHECK(cpu_##mat, ## __VA_ARGS__); \
|
||||
} while(0)
|
||||
|
||||
cv::Mat readImage(const std::string& fileName, int flags = cv::IMREAD_COLOR);
|
||||
|
||||
/// Parameter bundle for color-conversion perf tests: source/destination
/// channel counts plus the cv::ColorConversionCodes value to apply.
struct CvtColorInfo
{
    int scn;   //!< number of source channels
    int dcn;   //!< number of destination channels
    int code;  //!< color conversion code (cv::COLOR_*)

    // FIX: the original default constructor left all members uninitialized
    // (indeterminate values; UB to read). Zero-initialize instead -- callers
    // that immediately assign are unaffected.
    CvtColorInfo() : scn(0), dcn(0), code(0) {}
    explicit CvtColorInfo(int scn_, int dcn_, int code_) : scn(scn_), dcn(dcn_), code(code_) {}
};
|
||||
void PrintTo(const CvtColorInfo& info, std::ostream* os);
|
||||
|
||||
void printCudaInfo();
|
||||
|
||||
void sortKeyPoints(std::vector<cv::KeyPoint>& keypoints, cv::InputOutputArray _descriptors = cv::noArray());
|
||||
|
||||
#ifdef HAVE_CUDA
|
||||
#define CV_PERF_TEST_CUDA_MAIN(modulename) \
|
||||
int main(int argc, char **argv)\
|
||||
{\
|
||||
const char * impls[] = { "cuda", "plain" };\
|
||||
CV_PERF_TEST_MAIN_INTERNALS(modulename, impls, perf::printCudaInfo())\
|
||||
}
|
||||
#else
|
||||
#define CV_PERF_TEST_CUDA_MAIN(modulename) \
|
||||
int main(int argc, char **argv)\
|
||||
{\
|
||||
const char * plain_only[] = { "plain" };\
|
||||
CV_PERF_TEST_MAIN_INTERNALS(modulename, plain_only)\
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif // OPENCV_CUDA_PERF_UTILITY_HPP
|
369
3rdparty/opencv-4.5.4/modules/ts/include/opencv2/ts/cuda_test.hpp
vendored
Normal file
369
3rdparty/opencv-4.5.4/modules/ts/include/opencv2/ts/cuda_test.hpp
vendored
Normal file
@ -0,0 +1,369 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifndef OPENCV_CUDA_TEST_UTILITY_HPP
|
||||
#define OPENCV_CUDA_TEST_UTILITY_HPP
|
||||
|
||||
#include "opencv2/ts.hpp"
|
||||
|
||||
#include <stdexcept>
|
||||
#include "opencv2/core/cuda.hpp"
|
||||
|
||||
namespace cvtest
|
||||
{
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// random generators
|
||||
|
||||
int randomInt(int minVal, int maxVal);
|
||||
double randomDouble(double minVal, double maxVal);
|
||||
cv::Size randomSize(int minVal, int maxVal);
|
||||
cv::Scalar randomScalar(double minVal, double maxVal);
|
||||
cv::Mat randomMat(cv::Size size, int type, double minVal = 0.0, double maxVal = 255.0);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// GpuMat create
|
||||
|
||||
cv::cuda::GpuMat createMat(cv::Size size, int type, bool useRoi = false);
|
||||
cv::cuda::GpuMat loadMat(const cv::Mat& m, bool useRoi = false);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Image load
|
||||
|
||||
//! read image from testdata folder
|
||||
cv::Mat readImage(const std::string& fileName, int flags = cv::IMREAD_COLOR);
|
||||
|
||||
//! read image from testdata folder and convert it to specified type
|
||||
cv::Mat readImageType(const std::string& fname, int type);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Gpu devices
|
||||
|
||||
//! return true if device supports specified feature and gpu module was built with support the feature.
|
||||
bool supportFeature(const cv::cuda::DeviceInfo& info, cv::cuda::FeatureSet feature);
|
||||
|
||||
// Singleton registry of the CUDA devices the test run is targeting.
// Populated via load()/loadAll() (implementations not visible here) and
// consumed by the ALL_DEVICES parameter generator below.
class DeviceManager
{
public:
    // Process-wide instance accessor.
    static DeviceManager& instance();

    // Select a single device by index i; presumably validates/initializes
    // it -- confirm in the .cpp.
    void load(int i);
    // Register every available CUDA device.
    void loadAll();

    // Devices selected so far, in registration order.
    const std::vector<cv::cuda::DeviceInfo>& values() const { return devices_; }

private:
    std::vector<cv::cuda::DeviceInfo> devices_;
};
|
||||
|
||||
#define ALL_DEVICES testing::ValuesIn(cvtest::DeviceManager::instance().values())
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Additional assertion
|
||||
|
||||
void minMaxLocGold(const cv::Mat& src, double* minVal_, double* maxVal_ = 0, cv::Point* minLoc_ = 0, cv::Point* maxLoc_ = 0, const cv::Mat& mask = cv::Mat());
|
||||
|
||||
cv::Mat getMat(cv::InputArray arr);
|
||||
|
||||
testing::AssertionResult assertMatNear(const char* expr1, const char* expr2, const char* eps_expr, cv::InputArray m1, cv::InputArray m2, double eps);
|
||||
|
||||
#undef EXPECT_MAT_NEAR
|
||||
#define EXPECT_MAT_NEAR(m1, m2, eps) EXPECT_PRED_FORMAT3(cvtest::assertMatNear, m1, m2, eps)
|
||||
#define ASSERT_MAT_NEAR(m1, m2, eps) ASSERT_PRED_FORMAT3(cvtest::assertMatNear, m1, m2, eps)
|
||||
|
||||
#define EXPECT_SCALAR_NEAR(s1, s2, eps) \
|
||||
{ \
|
||||
EXPECT_NEAR(s1[0], s2[0], eps); \
|
||||
EXPECT_NEAR(s1[1], s2[1], eps); \
|
||||
EXPECT_NEAR(s1[2], s2[2], eps); \
|
||||
EXPECT_NEAR(s1[3], s2[3], eps); \
|
||||
}
|
||||
#define ASSERT_SCALAR_NEAR(s1, s2, eps) \
|
||||
{ \
|
||||
ASSERT_NEAR(s1[0], s2[0], eps); \
|
||||
ASSERT_NEAR(s1[1], s2[1], eps); \
|
||||
ASSERT_NEAR(s1[2], s2[2], eps); \
|
||||
ASSERT_NEAR(s1[3], s2[3], eps); \
|
||||
}
|
||||
|
||||
#define EXPECT_POINT2_NEAR(p1, p2, eps) \
|
||||
{ \
|
||||
EXPECT_NEAR(p1.x, p2.x, eps); \
|
||||
EXPECT_NEAR(p1.y, p2.y, eps); \
|
||||
}
|
||||
#define ASSERT_POINT2_NEAR(p1, p2, eps) \
|
||||
{ \
|
||||
ASSERT_NEAR(p1.x, p2.x, eps); \
|
||||
ASSERT_NEAR(p1.y, p2.y, eps); \
|
||||
}
|
||||
|
||||
#define EXPECT_POINT3_NEAR(p1, p2, eps) \
|
||||
{ \
|
||||
EXPECT_NEAR(p1.x, p2.x, eps); \
|
||||
EXPECT_NEAR(p1.y, p2.y, eps); \
|
||||
EXPECT_NEAR(p1.z, p2.z, eps); \
|
||||
}
|
||||
#define ASSERT_POINT3_NEAR(p1, p2, eps) \
|
||||
{ \
|
||||
ASSERT_NEAR(p1.x, p2.x, eps); \
|
||||
ASSERT_NEAR(p1.y, p2.y, eps); \
|
||||
ASSERT_NEAR(p1.z, p2.z, eps); \
|
||||
}
|
||||
|
||||
double checkSimilarity(cv::InputArray m1, cv::InputArray m2);
|
||||
|
||||
#undef EXPECT_MAT_SIMILAR
|
||||
#define EXPECT_MAT_SIMILAR(mat1, mat2, eps) \
|
||||
{ \
|
||||
ASSERT_EQ(mat1.type(), mat2.type()); \
|
||||
ASSERT_EQ(mat1.size(), mat2.size()); \
|
||||
EXPECT_LE(checkSimilarity(mat1, mat2), eps); \
|
||||
}
|
||||
#define ASSERT_MAT_SIMILAR(mat1, mat2, eps) \
|
||||
{ \
|
||||
ASSERT_EQ(mat1.type(), mat2.type()); \
|
||||
ASSERT_EQ(mat1.size(), mat2.size()); \
|
||||
ASSERT_LE(checkSimilarity(mat1, mat2), eps); \
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Helper structs for value-parameterized tests
|
||||
|
||||
#define CUDA_TEST_P(test_case_name, test_name) \
|
||||
class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
|
||||
: public test_case_name { \
|
||||
public: \
|
||||
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \
|
||||
virtual void TestBody(); \
|
||||
private: \
|
||||
void UnsafeTestBody(); \
|
||||
static int AddToRegistry() { \
|
||||
::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
|
||||
GetTestCasePatternHolder<test_case_name>(\
|
||||
#test_case_name, \
|
||||
::testing::internal::CodeLocation(\
|
||||
__FILE__, __LINE__))->AddTestPattern(\
|
||||
#test_case_name, \
|
||||
#test_name, \
|
||||
new ::testing::internal::TestMetaFactory< \
|
||||
GTEST_TEST_CLASS_NAME_(\
|
||||
test_case_name, test_name)>()); \
|
||||
return 0; \
|
||||
} \
|
||||
static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \
|
||||
GTEST_DISALLOW_COPY_AND_ASSIGN_(\
|
||||
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
|
||||
}; \
|
||||
int GTEST_TEST_CLASS_NAME_(test_case_name, \
|
||||
test_name)::gtest_registering_dummy_ = \
|
||||
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \
|
||||
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody() \
|
||||
{ \
|
||||
try \
|
||||
{ \
|
||||
UnsafeTestBody(); \
|
||||
} \
|
||||
catch (...) \
|
||||
{ \
|
||||
cv::cuda::resetDevice(); \
|
||||
throw; \
|
||||
} \
|
||||
} \
|
||||
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::UnsafeTestBody()
|
||||
|
||||
#define DIFFERENT_SIZES testing::Values(cv::Size(128, 128), cv::Size(113, 113))
|
||||
|
||||
// Depth
|
||||
|
||||
using perf::MatDepth;
|
||||
|
||||
#define ALL_DEPTH testing::Values(MatDepth(CV_8U), MatDepth(CV_8S), MatDepth(CV_16U), MatDepth(CV_16S), MatDepth(CV_32S), MatDepth(CV_32F), MatDepth(CV_64F))
|
||||
|
||||
#define DEPTH_PAIRS testing::Values(std::make_pair(MatDepth(CV_8U), MatDepth(CV_8U)), \
|
||||
std::make_pair(MatDepth(CV_8U), MatDepth(CV_16U)), \
|
||||
std::make_pair(MatDepth(CV_8U), MatDepth(CV_16S)), \
|
||||
std::make_pair(MatDepth(CV_8U), MatDepth(CV_32S)), \
|
||||
std::make_pair(MatDepth(CV_8U), MatDepth(CV_32F)), \
|
||||
std::make_pair(MatDepth(CV_8U), MatDepth(CV_64F)), \
|
||||
\
|
||||
std::make_pair(MatDepth(CV_16U), MatDepth(CV_16U)), \
|
||||
std::make_pair(MatDepth(CV_16U), MatDepth(CV_32S)), \
|
||||
std::make_pair(MatDepth(CV_16U), MatDepth(CV_32F)), \
|
||||
std::make_pair(MatDepth(CV_16U), MatDepth(CV_64F)), \
|
||||
\
|
||||
std::make_pair(MatDepth(CV_16S), MatDepth(CV_16S)), \
|
||||
std::make_pair(MatDepth(CV_16S), MatDepth(CV_32S)), \
|
||||
std::make_pair(MatDepth(CV_16S), MatDepth(CV_32F)), \
|
||||
std::make_pair(MatDepth(CV_16S), MatDepth(CV_64F)), \
|
||||
\
|
||||
std::make_pair(MatDepth(CV_32S), MatDepth(CV_32S)), \
|
||||
std::make_pair(MatDepth(CV_32S), MatDepth(CV_32F)), \
|
||||
std::make_pair(MatDepth(CV_32S), MatDepth(CV_64F)), \
|
||||
\
|
||||
std::make_pair(MatDepth(CV_32F), MatDepth(CV_32F)), \
|
||||
std::make_pair(MatDepth(CV_32F), MatDepth(CV_64F)), \
|
||||
\
|
||||
std::make_pair(MatDepth(CV_64F), MatDepth(CV_64F)))
|
||||
|
||||
// Type
|
||||
|
||||
using perf::MatType;
|
||||
|
||||
//! return vector with types from specified range.
|
||||
std::vector<MatType> types(int depth_start, int depth_end, int cn_start, int cn_end);
|
||||
|
||||
//! return vector with all types (depth: CV_8U-CV_64F, channels: 1-4).
|
||||
const std::vector<MatType>& all_types();
|
||||
|
||||
#define ALL_TYPES testing::ValuesIn(all_types())
|
||||
#define TYPES(depth_start, depth_end, cn_start, cn_end) testing::ValuesIn(types(depth_start, depth_end, cn_start, cn_end))
|
||||
|
||||
// ROI
|
||||
|
||||
/// Strongly-named bool test parameter: whether GpuMats should be created
/// as sub-views (ROIs) of a larger allocation. Converts implicitly to bool.
class UseRoi
{
    bool flag_;

public:
    inline UseRoi(bool val = false) : flag_(val) {}

    inline operator bool() const { return flag_; }
};
|
||||
|
||||
void PrintTo(const UseRoi& useRoi, std::ostream* os);
|
||||
|
||||
#define WHOLE_SUBMAT testing::Values(UseRoi(false), UseRoi(true))
|
||||
|
||||
// Direct/Inverse
|
||||
|
||||
/// Strongly-named bool test parameter: selects the inverse variant of a
/// direct/inverse transform pair. Converts implicitly to bool.
class Inverse
{
    bool flag_;

public:
    inline Inverse(bool val = false) : flag_(val) {}

    inline operator bool() const { return flag_; }
};
|
||||
|
||||
void PrintTo(const Inverse& useRoi, std::ostream* os);
|
||||
|
||||
#define DIRECT_INVERSE testing::Values(Inverse(false), Inverse(true))
|
||||
|
||||
// Param class
|
||||
|
||||
#define IMPLEMENT_PARAM_CLASS(name, type) \
|
||||
class name \
|
||||
{ \
|
||||
public: \
|
||||
name ( type arg = type ()) : val_(arg) {} \
|
||||
operator type () const {return val_;} \
|
||||
private: \
|
||||
type val_; \
|
||||
}; \
|
||||
inline void PrintTo( name param, std::ostream* os) \
|
||||
{ \
|
||||
*os << #name << "(" << testing::PrintToString(static_cast< type >(param)) << ")"; \
|
||||
}
|
||||
|
||||
IMPLEMENT_PARAM_CLASS(Channels, int)
|
||||
|
||||
#define ALL_CHANNELS testing::Values(Channels(1), Channels(2), Channels(3), Channels(4))
|
||||
#define IMAGE_CHANNELS testing::Values(Channels(1), Channels(3), Channels(4))
|
||||
|
||||
// Flags and enums
|
||||
|
||||
CV_ENUM(NormCode, NORM_INF, NORM_L1, NORM_L2, NORM_TYPE_MASK, NORM_RELATIVE, NORM_MINMAX)
|
||||
|
||||
CV_ENUM(Interpolation, INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, INTER_AREA)
|
||||
|
||||
CV_ENUM(BorderType, BORDER_REFLECT101, BORDER_REPLICATE, BORDER_CONSTANT, BORDER_REFLECT, BORDER_WRAP)
|
||||
#define ALL_BORDER_TYPES testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_CONSTANT), BorderType(cv::BORDER_REFLECT), BorderType(cv::BORDER_WRAP))
|
||||
|
||||
CV_FLAGS(WarpFlags, INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, WARP_INVERSE_MAP)
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Features2D
|
||||
|
||||
testing::AssertionResult assertKeyPointsEquals(const char* gold_expr, const char* actual_expr, std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual);
|
||||
|
||||
#define ASSERT_KEYPOINTS_EQ(gold, actual) EXPECT_PRED_FORMAT2(assertKeyPointsEquals, gold, actual)
|
||||
|
||||
int getMatchedPointsCount(std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual);
|
||||
int getMatchedPointsCount(const std::vector<cv::KeyPoint>& keypoints1, const std::vector<cv::KeyPoint>& keypoints2, const std::vector<cv::DMatch>& matches);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Other
|
||||
|
||||
void dumpImage(const std::string& fileName, const cv::Mat& image);
|
||||
void showDiff(cv::InputArray gold, cv::InputArray actual, double eps);
|
||||
|
||||
void parseCudaDeviceOptions(int argc, char **argv);
|
||||
void printCudaInfo();
|
||||
}
|
||||
|
||||
namespace cv { namespace cuda
|
||||
{
|
||||
void PrintTo(const DeviceInfo& info, std::ostream* os);
|
||||
}}
|
||||
|
||||
#ifdef HAVE_CUDA
|
||||
|
||||
#define CV_TEST_INIT0_CUDA cvtest::parseCudaDeviceOptions(argc, argv), cvtest::printCudaInfo(), cv::setUseOptimized(false)
|
||||
|
||||
#define CV_CUDA_TEST_MAIN(resourcesubdir, ...) \
|
||||
CV_TEST_MAIN_EX(resourcesubdir, CUDA, __VA_ARGS__)
|
||||
|
||||
#else // HAVE_CUDA
|
||||
|
||||
#define CV_CUDA_TEST_MAIN(resourcesubdir) \
|
||||
int main() \
|
||||
{ \
|
||||
printf("OpenCV was built without CUDA support\n"); \
|
||||
return 0; \
|
||||
}
|
||||
|
||||
#endif // HAVE_CUDA
|
||||
|
||||
|
||||
#endif // OPENCV_CUDA_TEST_UTILITY_HPP
|
140
3rdparty/opencv-4.5.4/modules/ts/include/opencv2/ts/ocl_perf.hpp
vendored
Normal file
140
3rdparty/opencv-4.5.4/modules/ts/include/opencv2/ts/ocl_perf.hpp
vendored
Normal file
@ -0,0 +1,140 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the OpenCV Foundation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifndef OPENCV_TS_OCL_PERF_HPP
|
||||
#define OPENCV_TS_OCL_PERF_HPP
|
||||
|
||||
#include "opencv2/ts.hpp"
|
||||
|
||||
#include "ocl_test.hpp"
|
||||
#include "ts_perf.hpp"
|
||||
|
||||
namespace cvtest {
|
||||
namespace ocl {
|
||||
|
||||
using namespace perf;
|
||||
|
||||
#define OCL_PERF_STRATEGY PERF_STRATEGY_SIMPLE
|
||||
|
||||
#define OCL_PERF_TEST(fixture, name) SIMPLE_PERF_TEST(fixture, name)
|
||||
#define OCL_PERF_TEST_P(fixture, name, params) SIMPLE_PERF_TEST_P(fixture, name, params)
|
||||
|
||||
#define SIMPLE_PERF_TEST(fixture, name) \
|
||||
class OCL##_##fixture##_##name : \
|
||||
public ::perf::TestBase \
|
||||
{ \
|
||||
public: \
|
||||
OCL##_##fixture##_##name() { } \
|
||||
protected: \
|
||||
virtual void PerfTestBody(); \
|
||||
}; \
|
||||
TEST_F(OCL##_##fixture##_##name, name) { CV_TRACE_REGION("PERF_TEST: " #fixture "_" #name); declare.strategy(OCL_PERF_STRATEGY); RunPerfTestBody(); } \
|
||||
void OCL##_##fixture##_##name::PerfTestBody()
|
||||
|
||||
#define SIMPLE_PERF_TEST_P(fixture, name, params) \
|
||||
class OCL##_##fixture##_##name : \
|
||||
public fixture \
|
||||
{ \
|
||||
public: \
|
||||
OCL##_##fixture##_##name() { } \
|
||||
protected: \
|
||||
virtual void PerfTestBody(); \
|
||||
}; \
|
||||
TEST_P(OCL##_##fixture##_##name, name) { CV_TRACE_REGION("PERF_TEST_P: " #fixture "_" #name); declare.strategy(OCL_PERF_STRATEGY); RunPerfTestBody(); } \
|
||||
INSTANTIATE_TEST_CASE_P(/*none*/, OCL##_##fixture##_##name, params); \
|
||||
void OCL##_##fixture##_##name::PerfTestBody()
|
||||
|
||||
#define OCL_SIZE_1 szVGA
|
||||
#define OCL_SIZE_2 sz720p
|
||||
#define OCL_SIZE_3 sz1080p
|
||||
#define OCL_SIZE_4 sz2160p
|
||||
|
||||
#define OCL_TEST_SIZES ::testing::Values(OCL_SIZE_1, OCL_SIZE_2, OCL_SIZE_3, OCL_SIZE_4)
|
||||
#define OCL_TEST_TYPES ::testing::Values(CV_8UC1, CV_32FC1, CV_8UC4, CV_32FC4)
|
||||
#define OCL_TEST_TYPES_14 OCL_TEST_TYPES
|
||||
#define OCL_TEST_TYPES_134 ::testing::Values(CV_8UC1, CV_32FC1, CV_8UC3, CV_32FC3, CV_8UC4, CV_32FC4)
|
||||
|
||||
#define OCL_PERF_ENUM ::testing::Values
|
||||
|
||||
//! deprecated
|
||||
#define OCL_TEST_CYCLE() \
|
||||
for (cvtest::ocl::perf::safeFinish(); next() && startTimer(); cvtest::ocl::perf::safeFinish(), stopTimer())
|
||||
//! deprecated
|
||||
#define OCL_TEST_CYCLE_N(n) \
|
||||
for (declare.iterations(n), cvtest::ocl::perf::safeFinish(); next() && startTimer(); cvtest::ocl::perf::safeFinish(), stopTimer())
|
||||
//! deprecated
|
||||
#define OCL_TEST_CYCLE_MULTIRUN(runsNum) \
|
||||
for (declare.runs(runsNum), cvtest::ocl::perf::safeFinish(); next() && startTimer(); cvtest::ocl::perf::safeFinish(), stopTimer()) \
|
||||
for (int r = 0; r < runsNum; cvtest::ocl::perf::safeFinish(), ++r)
|
||||
|
||||
#undef PERF_SAMPLE_BEGIN
|
||||
#undef PERF_SAMPLE_END
|
||||
#define PERF_SAMPLE_BEGIN() \
|
||||
cvtest::ocl::perf::safeFinish(); \
|
||||
for(; next() && startTimer(); cvtest::ocl::perf::safeFinish(), stopTimer()) \
|
||||
{ \
|
||||
CV_TRACE_REGION("iteration");
|
||||
#define PERF_SAMPLE_END() \
|
||||
}
|
||||
|
||||
|
||||
namespace perf {
|
||||
|
||||
// Check for current device limitation
|
||||
void checkDeviceMaxMemoryAllocSize(const Size& size, int type, int factor = 1);
|
||||
|
||||
// Initialize Mat with random numbers. Range is depends on the data type.
|
||||
// TODO Parameter type is actually OutputArray
|
||||
void randu(InputOutputArray dst);
|
||||
|
||||
inline void safeFinish()
|
||||
{
|
||||
if (cv::ocl::useOpenCL())
|
||||
cv::ocl::finish();
|
||||
}
|
||||
|
||||
} // namespace perf
|
||||
using namespace perf;
|
||||
|
||||
} // namespace cvtest::ocl
|
||||
} // namespace cvtest
|
||||
|
||||
#endif // OPENCV_TS_OCL_PERF_HPP
|
392
3rdparty/opencv-4.5.4/modules/ts/include/opencv2/ts/ocl_test.hpp
vendored
Normal file
392
3rdparty/opencv-4.5.4/modules/ts/include/opencv2/ts/ocl_test.hpp
vendored
Normal file
@ -0,0 +1,392 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the OpenCV Foundation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifndef OPENCV_TS_OCL_TEST_HPP
|
||||
#define OPENCV_TS_OCL_TEST_HPP
|
||||
|
||||
#include "opencv2/ts.hpp"
|
||||
|
||||
#include "opencv2/imgcodecs.hpp"
|
||||
#include "opencv2/videoio.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/imgproc/types_c.h"
|
||||
#include "opencv2/core/ocl.hpp"
|
||||
|
||||
namespace cvtest {
|
||||
namespace ocl {
|
||||
|
||||
using namespace cv;
|
||||
using namespace testing;
|
||||
|
||||
inline std::vector<UMat> ToUMat(const std::vector<Mat>& src)
|
||||
{
|
||||
std::vector<UMat> dst;
|
||||
dst.resize(src.size());
|
||||
for (size_t i = 0; i < src.size(); ++i)
|
||||
{
|
||||
src[i].copyTo(dst[i]);
|
||||
}
|
||||
return dst;
|
||||
}
|
||||
|
||||
inline UMat ToUMat(const Mat& src)
|
||||
{
|
||||
UMat dst;
|
||||
src.copyTo(dst);
|
||||
return dst;
|
||||
}
|
||||
|
||||
inline UMat ToUMat(InputArray src)
|
||||
{
|
||||
UMat dst;
|
||||
src.getMat().copyTo(dst);
|
||||
return dst;
|
||||
}
|
||||
|
||||
extern int test_loop_times;
|
||||
|
||||
#define MAX_VALUE 357
|
||||
|
||||
#define EXPECT_MAT_NORM(mat, eps) \
|
||||
do \
|
||||
{ \
|
||||
EXPECT_LE(TestUtils::checkNorm1(mat), eps) \
|
||||
} while ((void)0, 0)
|
||||
|
||||
#undef EXPECT_MAT_NEAR
|
||||
#define EXPECT_MAT_NEAR(mat1, mat2, eps) \
|
||||
do \
|
||||
{ \
|
||||
ASSERT_EQ(mat1.type(), mat2.type()); \
|
||||
ASSERT_EQ(mat1.size(), mat2.size()); \
|
||||
EXPECT_LE(TestUtils::checkNorm2(mat1, mat2), eps) \
|
||||
<< "Size: " << mat1.size() << std::endl; \
|
||||
} while ((void)0, 0)
|
||||
|
||||
#define EXPECT_MAT_NEAR_RELATIVE(mat1, mat2, eps) \
|
||||
do \
|
||||
{ \
|
||||
ASSERT_EQ((mat1).type(), (mat2).type()); \
|
||||
ASSERT_EQ((mat1).size(), (mat2).size()); \
|
||||
EXPECT_LE(TestUtils::checkNormRelative((mat1), (mat2)), eps) \
|
||||
<< "Size: " << (mat1).size() << std::endl; \
|
||||
} while ((void)0, 0)
|
||||
|
||||
#define EXPECT_MAT_N_DIFF(mat1, mat2, num) \
|
||||
do \
|
||||
{ \
|
||||
ASSERT_EQ(mat1.type(), mat2.type()); \
|
||||
ASSERT_EQ(mat1.size(), mat2.size()); \
|
||||
Mat diff; \
|
||||
absdiff(mat1, mat2, diff); \
|
||||
EXPECT_LE(countNonZero(diff.reshape(1)), num) \
|
||||
<< "Size: " << mat1.size() << std::endl; \
|
||||
} while ((void)0, 0)
|
||||
|
||||
#define OCL_EXPECT_MAT_N_DIFF(name, eps) \
|
||||
do \
|
||||
{ \
|
||||
ASSERT_EQ(name ## _roi.type(), u ## name ## _roi.type()); \
|
||||
ASSERT_EQ(name ## _roi.size(), u ## name ## _roi.size()); \
|
||||
Mat diff, binary, binary_8; \
|
||||
absdiff(name ## _roi, u ## name ## _roi, diff); \
|
||||
Mat mask(diff.size(), CV_8UC(dst.channels()), cv::Scalar::all(255)); \
|
||||
if (mask.cols > 2 && mask.rows > 2) \
|
||||
mask(cv::Rect(1, 1, mask.cols - 2, mask.rows - 2)).setTo(0); \
|
||||
cv::threshold(diff, binary, (double)eps, 255, cv::THRESH_BINARY); \
|
||||
EXPECT_LE(countNonZero(binary.reshape(1)), (int)(binary.cols*binary.rows*5/1000)) \
|
||||
<< "Size: " << name ## _roi.size() << std::endl; \
|
||||
binary.convertTo(binary_8, mask.type()); \
|
||||
binary_8 = binary_8 & mask; \
|
||||
EXPECT_LE(countNonZero(binary_8.reshape(1)), (int)((binary_8.cols+binary_8.rows)/100)) \
|
||||
<< "Size: " << name ## _roi.size() << std::endl; \
|
||||
} while ((void)0, 0)
|
||||
|
||||
#define OCL_EXPECT_MATS_NEAR(name, eps) \
|
||||
do \
|
||||
{ \
|
||||
ASSERT_EQ(name ## _roi.type(), u ## name ## _roi.type()); \
|
||||
ASSERT_EQ(name ## _roi.size(), u ## name ## _roi.size()); \
|
||||
EXPECT_LE(TestUtils::checkNorm2(name ## _roi, u ## name ## _roi), eps) \
|
||||
<< "Size: " << name ## _roi.size() << std::endl; \
|
||||
Point _offset; \
|
||||
Size _wholeSize; \
|
||||
u ## name ## _roi.locateROI(_wholeSize, _offset); \
|
||||
Mat _mask(name.size(), CV_8UC1, Scalar::all(255)); \
|
||||
_mask(Rect(_offset, name ## _roi.size())).setTo(Scalar::all(0)); \
|
||||
ASSERT_EQ(name.type(), u ## name.type()); \
|
||||
ASSERT_EQ(name.size(), u ## name.size()); \
|
||||
EXPECT_LE(TestUtils::checkNorm2(name, u ## name, _mask), eps) \
|
||||
<< "Size: " << name ## _roi.size() << std::endl; \
|
||||
} while ((void)0, 0)
|
||||
|
||||
#define OCL_EXPECT_MATS_NEAR_RELATIVE(name, eps) \
|
||||
do \
|
||||
{ \
|
||||
ASSERT_EQ(name ## _roi.type(), u ## name ## _roi.type()); \
|
||||
ASSERT_EQ(name ## _roi.size(), u ## name ## _roi.size()); \
|
||||
EXPECT_LE(TestUtils::checkNormRelative(name ## _roi, u ## name ## _roi), eps) \
|
||||
<< "Size: " << name ## _roi.size() << std::endl; \
|
||||
Point _offset; \
|
||||
Size _wholeSize; \
|
||||
name ## _roi.locateROI(_wholeSize, _offset); \
|
||||
Mat _mask(name.size(), CV_8UC1, Scalar::all(255)); \
|
||||
_mask(Rect(_offset, name ## _roi.size())).setTo(Scalar::all(0)); \
|
||||
ASSERT_EQ(name.type(), u ## name.type()); \
|
||||
ASSERT_EQ(name.size(), u ## name.size()); \
|
||||
EXPECT_LE(TestUtils::checkNormRelative(name, u ## name, _mask), eps) \
|
||||
<< "Size: " << name ## _roi.size() << std::endl; \
|
||||
} while ((void)0, 0)
|
||||
|
||||
//for sparse matrix
|
||||
#define OCL_EXPECT_MATS_NEAR_RELATIVE_SPARSE(name, eps) \
|
||||
do \
|
||||
{ \
|
||||
ASSERT_EQ(name ## _roi.type(), u ## name ## _roi.type()); \
|
||||
ASSERT_EQ(name ## _roi.size(), u ## name ## _roi.size()); \
|
||||
EXPECT_LE(TestUtils::checkNormRelativeSparse(name ## _roi, u ## name ## _roi), eps) \
|
||||
<< "Size: " << name ## _roi.size() << std::endl; \
|
||||
Point _offset; \
|
||||
Size _wholeSize; \
|
||||
name ## _roi.locateROI(_wholeSize, _offset); \
|
||||
Mat _mask(name.size(), CV_8UC1, Scalar::all(255)); \
|
||||
_mask(Rect(_offset, name ## _roi.size())).setTo(Scalar::all(0)); \
|
||||
ASSERT_EQ(name.type(), u ## name.type()); \
|
||||
ASSERT_EQ(name.size(), u ## name.size()); \
|
||||
EXPECT_LE(TestUtils::checkNormRelativeSparse(name, u ## name, _mask), eps) \
|
||||
<< "Size: " << name ## _roi.size() << std::endl; \
|
||||
} while ((void)0, 0)
|
||||
|
||||
#undef EXPECT_MAT_SIMILAR
|
||||
#define EXPECT_MAT_SIMILAR(mat1, mat2, eps) \
|
||||
do \
|
||||
{ \
|
||||
ASSERT_EQ(mat1.type(), mat2.type()); \
|
||||
ASSERT_EQ(mat1.size(), mat2.size()); \
|
||||
EXPECT_LE(checkSimilarity(mat1, mat2), eps) \
|
||||
<< "Size: " << mat1.size() << std::endl; \
|
||||
} while ((void)0, 0)
|
||||
|
||||
using perf::MatDepth;
|
||||
using perf::MatType;
|
||||
|
||||
#define OCL_RNG_SEED 123456
|
||||
|
||||
struct TestUtils
|
||||
{
|
||||
cv::RNG rng;
|
||||
|
||||
TestUtils()
|
||||
{
|
||||
rng = cv::RNG(OCL_RNG_SEED);
|
||||
}
|
||||
|
||||
int randomInt(int minVal, int maxVal)
|
||||
{
|
||||
return rng.uniform(minVal, maxVal);
|
||||
}
|
||||
|
||||
double randomDouble(double minVal, double maxVal)
|
||||
{
|
||||
return rng.uniform(minVal, maxVal);
|
||||
}
|
||||
|
||||
double randomDoubleLog(double minVal, double maxVal)
|
||||
{
|
||||
double logMin = log((double)minVal + 1);
|
||||
double logMax = log((double)maxVal + 1);
|
||||
double pow = rng.uniform(logMin, logMax);
|
||||
double v = exp(pow) - 1;
|
||||
CV_Assert(v >= minVal && (v < maxVal || (v == minVal && v == maxVal)));
|
||||
return v;
|
||||
}
|
||||
|
||||
Size randomSize(int minVal, int maxVal)
|
||||
{
|
||||
#if 1
|
||||
return cv::Size((int)randomDoubleLog(minVal, maxVal), (int)randomDoubleLog(minVal, maxVal));
|
||||
#else
|
||||
return cv::Size(randomInt(minVal, maxVal), randomInt(minVal, maxVal));
|
||||
#endif
|
||||
}
|
||||
|
||||
Size randomSize(int minValX, int maxValX, int minValY, int maxValY)
|
||||
{
|
||||
#if 1
|
||||
return cv::Size((int)randomDoubleLog(minValX, maxValX), (int)randomDoubleLog(minValY, maxValY));
|
||||
#else
|
||||
return cv::Size(randomInt(minVal, maxVal), randomInt(minVal, maxVal));
|
||||
#endif
|
||||
}
|
||||
|
||||
Scalar randomScalar(double minVal, double maxVal)
|
||||
{
|
||||
return Scalar(randomDouble(minVal, maxVal), randomDouble(minVal, maxVal), randomDouble(minVal, maxVal), randomDouble(minVal, maxVal));
|
||||
}
|
||||
|
||||
Mat randomMat(Size size, int type, double minVal, double maxVal, bool useRoi = false)
|
||||
{
|
||||
RNG dataRng(rng.next());
|
||||
return cvtest::randomMat(dataRng, size, type, minVal, maxVal, useRoi);
|
||||
}
|
||||
|
||||
struct Border
|
||||
{
|
||||
int top, bot, lef, rig;
|
||||
};
|
||||
|
||||
Border randomBorder(int minValue = 0, int maxValue = MAX_VALUE)
|
||||
{
|
||||
Border border = {
|
||||
(int)randomDoubleLog(minValue, maxValue),
|
||||
(int)randomDoubleLog(minValue, maxValue),
|
||||
(int)randomDoubleLog(minValue, maxValue),
|
||||
(int)randomDoubleLog(minValue, maxValue)
|
||||
};
|
||||
return border;
|
||||
}
|
||||
|
||||
void randomSubMat(Mat& whole, Mat& subMat, const Size& roiSize, const Border& border, int type, double minVal, double maxVal)
|
||||
{
|
||||
Size wholeSize = Size(roiSize.width + border.lef + border.rig, roiSize.height + border.top + border.bot);
|
||||
whole = randomMat(wholeSize, type, minVal, maxVal, false);
|
||||
subMat = whole(Rect(border.lef, border.top, roiSize.width, roiSize.height));
|
||||
}
|
||||
|
||||
// If the two vectors are not equal, it will return the difference in vector size
|
||||
// Else it will return (total diff of each 1 and 2 rects covered pixels)/(total 1 rects covered pixels)
|
||||
// The smaller, the better matched
|
||||
static double checkRectSimilarity(const cv::Size & sz, std::vector<cv::Rect>& ob1, std::vector<cv::Rect>& ob2);
|
||||
|
||||
//! read image from testdata folder.
|
||||
static cv::Mat readImage(const String &fileName, int flags = cv::IMREAD_COLOR);
|
||||
static cv::Mat readImageType(const String &fname, int type);
|
||||
|
||||
static double checkNorm1(InputArray m, InputArray mask = noArray());
|
||||
static double checkNorm2(InputArray m1, InputArray m2, InputArray mask = noArray());
|
||||
static double checkSimilarity(InputArray m1, InputArray m2);
|
||||
static void showDiff(InputArray _src, InputArray _gold, InputArray _actual, double eps, bool alwaysShow);
|
||||
|
||||
static inline double checkNormRelative(InputArray m1, InputArray m2, InputArray mask = noArray())
|
||||
{
|
||||
return cvtest::norm(m1.getMat(), m2.getMat(), cv::NORM_INF, mask) /
|
||||
std::max((double)std::numeric_limits<float>::epsilon(),
|
||||
(double)std::max(cvtest::norm(m1.getMat(), cv::NORM_INF), cvtest::norm(m2.getMat(), cv::NORM_INF)));
|
||||
}
|
||||
|
||||
static inline double checkNormRelativeSparse(InputArray m1, InputArray m2, InputArray mask = noArray())
|
||||
{
|
||||
double norm_inf = cvtest::norm(m1.getMat(), m2.getMat(), cv::NORM_INF, mask);
|
||||
double norm_rel = norm_inf /
|
||||
std::max((double)std::numeric_limits<float>::epsilon(),
|
||||
(double)std::max(cvtest::norm(m1.getMat(), cv::NORM_INF), cvtest::norm(m2.getMat(), cv::NORM_INF)));
|
||||
return std::min(norm_inf, norm_rel);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
#define TEST_DECLARE_INPUT_PARAMETER(name) Mat name, name ## _roi; UMat u ## name, u ## name ## _roi
|
||||
#define TEST_DECLARE_OUTPUT_PARAMETER(name) TEST_DECLARE_INPUT_PARAMETER(name)
|
||||
|
||||
#define UMAT_UPLOAD_INPUT_PARAMETER(name) \
|
||||
do \
|
||||
{ \
|
||||
name.copyTo(u ## name); \
|
||||
Size _wholeSize; Point ofs; name ## _roi.locateROI(_wholeSize, ofs); \
|
||||
u ## name ## _roi = u ## name(Rect(ofs.x, ofs.y, name ## _roi.size().width, name ## _roi.size().height)); \
|
||||
} while ((void)0, 0)
|
||||
|
||||
#define UMAT_UPLOAD_OUTPUT_PARAMETER(name) UMAT_UPLOAD_INPUT_PARAMETER(name)
|
||||
|
||||
template <typename T>
|
||||
struct TSTestWithParam : public TestUtils, public ::testing::TestWithParam<T>
|
||||
{
|
||||
|
||||
};
|
||||
|
||||
#undef PARAM_TEST_CASE
|
||||
#define PARAM_TEST_CASE(name, ...) struct name : public ::cvtest::ocl::TSTestWithParam< testing::tuple< __VA_ARGS__ > >
|
||||
|
||||
#ifndef IMPLEMENT_PARAM_CLASS
|
||||
#define IMPLEMENT_PARAM_CLASS(name, type) \
|
||||
class name \
|
||||
{ \
|
||||
public: \
|
||||
name ( type arg = type ()) : val_(arg) {} \
|
||||
operator type () const {return val_;} \
|
||||
private: \
|
||||
type val_; \
|
||||
}; \
|
||||
inline void PrintTo( name param, std::ostream* os) \
|
||||
{ \
|
||||
*os << #name << "(" << testing::PrintToString(static_cast< type >(param)) << ")"; \
|
||||
}
|
||||
|
||||
IMPLEMENT_PARAM_CLASS(Channels, int)
|
||||
#endif // IMPLEMENT_PARAM_CLASS
|
||||
|
||||
#define OCL_TEST_P TEST_P
|
||||
#define OCL_TEST_F(name, ...) typedef name OCL_##name; TEST_F(OCL_##name, __VA_ARGS__)
|
||||
#define OCL_TEST(name, ...) TEST(OCL_##name, __VA_ARGS__)
|
||||
|
||||
#define OCL_OFF(...) cv::ocl::setUseOpenCL(false); __VA_ARGS__ ;
|
||||
#define OCL_ON(...) cv::ocl::setUseOpenCL(true); __VA_ARGS__ ;
|
||||
|
||||
#define OCL_ALL_DEPTHS Values(CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F)
|
||||
#define OCL_ALL_DEPTHS_16F Values(CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F, CV_16F)
|
||||
#define OCL_ALL_CHANNELS Values(1, 2, 3, 4)
|
||||
|
||||
CV_ENUM(Interpolation, INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, INTER_AREA, INTER_LINEAR_EXACT)
|
||||
CV_ENUM(ThreshOp, THRESH_BINARY, THRESH_BINARY_INV, THRESH_TRUNC, THRESH_TOZERO, THRESH_TOZERO_INV)
|
||||
CV_ENUM(BorderType, BORDER_CONSTANT, BORDER_REPLICATE, BORDER_REFLECT, BORDER_WRAP, BORDER_REFLECT_101)
|
||||
|
||||
#define OCL_INSTANTIATE_TEST_CASE_P(prefix, test_case_name, generator) \
|
||||
INSTANTIATE_TEST_CASE_P(OCL_ ## prefix, test_case_name, generator)
|
||||
|
||||
} } // namespace cvtest::ocl
|
||||
|
||||
namespace opencv_test {
|
||||
namespace ocl {
|
||||
using namespace cvtest::ocl;
|
||||
}} // namespace
|
||||
|
||||
#endif // OPENCV_TS_OCL_TEST_HPP
|
196
3rdparty/opencv-4.5.4/modules/ts/include/opencv2/ts/ts_ext.hpp
vendored
Normal file
196
3rdparty/opencv-4.5.4/modules/ts/include/opencv2/ts/ts_ext.hpp
vendored
Normal file
@ -0,0 +1,196 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
|
||||
// Copyright (C) 2014, Intel, Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
|
||||
#ifndef OPENCV_TS_EXT_HPP
|
||||
#define OPENCV_TS_EXT_HPP
|
||||
|
||||
namespace cvtest {
|
||||
void checkIppStatus();
|
||||
extern bool skipUnstableTests;
|
||||
extern bool runBigDataTests;
|
||||
extern int testThreads;
|
||||
extern int debugLevel; //< 0 - no debug, 1 - basic test debug information, >1 - extra debug information
|
||||
|
||||
void testSetUp();
|
||||
void testTearDown();
|
||||
|
||||
bool checkBigDataTests();
|
||||
|
||||
}
|
||||
|
||||
// check for required "opencv_test" namespace
|
||||
#if !defined(CV_TEST_SKIP_NAMESPACE_CHECK) && defined(__OPENCV_BUILD)
|
||||
#define CV__TEST_NAMESPACE_CHECK required_opencv_test_namespace = true;
|
||||
#else
|
||||
#define CV__TEST_NAMESPACE_CHECK // nothing
|
||||
#endif
|
||||
|
||||
#define CV__TEST_INIT \
|
||||
CV__TEST_NAMESPACE_CHECK \
|
||||
::cvtest::testSetUp();
|
||||
#define CV__TEST_CLEANUP ::cvtest::testTearDown();
|
||||
#define CV__TEST_BODY_IMPL(name) \
|
||||
{ \
|
||||
CV__TRACE_APP_FUNCTION_NAME(name); \
|
||||
try { \
|
||||
CV__TEST_INIT \
|
||||
Body(); \
|
||||
CV__TEST_CLEANUP \
|
||||
} \
|
||||
catch (const cvtest::details::SkipTestExceptionBase& e) \
|
||||
{ \
|
||||
printf("[ SKIP ] %s\n", e.what()); \
|
||||
} \
|
||||
} \
|
||||
|
||||
|
||||
#undef TEST
|
||||
#define TEST_(test_case_name, test_name, parent_class, bodyMethodName, BODY_IMPL) \
|
||||
class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) : public parent_class {\
|
||||
public:\
|
||||
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {}\
|
||||
private:\
|
||||
virtual void TestBody() CV_OVERRIDE;\
|
||||
virtual void bodyMethodName();\
|
||||
static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_;\
|
||||
GTEST_DISALLOW_COPY_AND_ASSIGN_(\
|
||||
GTEST_TEST_CLASS_NAME_(test_case_name, test_name));\
|
||||
};\
|
||||
\
|
||||
::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_case_name, test_name)\
|
||||
::test_info_ =\
|
||||
::testing::internal::MakeAndRegisterTestInfo(\
|
||||
#test_case_name, #test_name, NULL, NULL, \
|
||||
::testing::internal::CodeLocation(__FILE__, __LINE__), \
|
||||
(::testing::internal::GetTestTypeId()), \
|
||||
parent_class::SetUpTestCase, \
|
||||
parent_class::TearDownTestCase, \
|
||||
new ::testing::internal::TestFactoryImpl<\
|
||||
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>);\
|
||||
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody() BODY_IMPL( #test_case_name "_" #test_name ) \
|
||||
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::bodyMethodName()
|
||||
|
||||
#define TEST(test_case_name, test_name) TEST_(test_case_name, test_name, ::testing::Test, Body, CV__TEST_BODY_IMPL)
|
||||
|
||||
#define CV__TEST_BIGDATA_BODY_IMPL(name) \
|
||||
{ \
|
||||
if (!cvtest::checkBigDataTests()) \
|
||||
{ \
|
||||
return; \
|
||||
} \
|
||||
CV__TRACE_APP_FUNCTION_NAME(name); \
|
||||
try { \
|
||||
CV__TEST_INIT \
|
||||
Body(); \
|
||||
CV__TEST_CLEANUP \
|
||||
} \
|
||||
catch (const cvtest::details::SkipTestExceptionBase& e) \
|
||||
{ \
|
||||
printf("[ SKIP ] %s\n", e.what()); \
|
||||
} \
|
||||
} \
|
||||
|
||||
// Special type of tests which require / use or validate processing of huge amount of data (>= 2Gb)
|
||||
#if defined(_M_X64) || defined(_M_ARM64) || defined(__x86_64__) || defined(__aarch64__)
|
||||
#define BIGDATA_TEST(test_case_name, test_name) TEST_(BigData_ ## test_case_name, test_name, ::testing::Test, Body, CV__TEST_BIGDATA_BODY_IMPL)
|
||||
#else
|
||||
#define BIGDATA_TEST(test_case_name, test_name) TEST_(BigData_ ## test_case_name, DISABLED_ ## test_name, ::testing::Test, Body, CV__TEST_BIGDATA_BODY_IMPL)
|
||||
#endif
|
||||
|
||||
#undef TEST_F
|
||||
#define TEST_F(test_fixture, test_name)\
|
||||
class GTEST_TEST_CLASS_NAME_(test_fixture, test_name) : public test_fixture {\
|
||||
public:\
|
||||
GTEST_TEST_CLASS_NAME_(test_fixture, test_name)() {}\
|
||||
private:\
|
||||
virtual void TestBody() CV_OVERRIDE;\
|
||||
virtual void Body(); \
|
||||
static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_;\
|
||||
GTEST_DISALLOW_COPY_AND_ASSIGN_(\
|
||||
GTEST_TEST_CLASS_NAME_(test_fixture, test_name));\
|
||||
};\
|
||||
\
|
||||
::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_fixture, test_name)\
|
||||
::test_info_ =\
|
||||
::testing::internal::MakeAndRegisterTestInfo(\
|
||||
#test_fixture, #test_name, NULL, NULL, \
|
||||
::testing::internal::CodeLocation(__FILE__, __LINE__), \
|
||||
(::testing::internal::GetTypeId<test_fixture>()), \
|
||||
test_fixture::SetUpTestCase, \
|
||||
test_fixture::TearDownTestCase, \
|
||||
new ::testing::internal::TestFactoryImpl<\
|
||||
GTEST_TEST_CLASS_NAME_(test_fixture, test_name)>);\
|
||||
void GTEST_TEST_CLASS_NAME_(test_fixture, test_name)::TestBody() CV__TEST_BODY_IMPL( #test_fixture "_" #test_name ) \
|
||||
void GTEST_TEST_CLASS_NAME_(test_fixture, test_name)::Body()
|
||||
|
||||
// Don't use directly
|
||||
#define CV__TEST_P(test_case_name, test_name, bodyMethodName, BODY_IMPL/*(name_str)*/) \
|
||||
class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
|
||||
: public test_case_name { \
|
||||
public: \
|
||||
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \
|
||||
private: \
|
||||
virtual void bodyMethodName(); \
|
||||
virtual void TestBody() CV_OVERRIDE; \
|
||||
static int AddToRegistry() { \
|
||||
::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
|
||||
GetTestCasePatternHolder<test_case_name>(\
|
||||
#test_case_name, \
|
||||
::testing::internal::CodeLocation(\
|
||||
__FILE__, __LINE__))->AddTestPattern(\
|
||||
#test_case_name, \
|
||||
#test_name, \
|
||||
new ::testing::internal::TestMetaFactory< \
|
||||
GTEST_TEST_CLASS_NAME_(\
|
||||
test_case_name, test_name)>()); \
|
||||
return 0; \
|
||||
} \
|
||||
static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \
|
||||
GTEST_DISALLOW_COPY_AND_ASSIGN_(\
|
||||
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
|
||||
}; \
|
||||
int GTEST_TEST_CLASS_NAME_(test_case_name, \
|
||||
test_name)::gtest_registering_dummy_ = \
|
||||
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \
|
||||
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody() BODY_IMPL( #test_case_name "_" #test_name ) \
|
||||
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::bodyMethodName()
|
||||
|
||||
#undef TEST_P
|
||||
#define TEST_P(test_case_name, test_name) CV__TEST_P(test_case_name, test_name, Body, CV__TEST_BODY_IMPL)
|
||||
|
||||
|
||||
#define CV_TEST_EXPECT_EXCEPTION_MESSAGE(statement, msg) \
|
||||
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
|
||||
if (::testing::internal::AlwaysTrue()) { \
|
||||
const char* msg_ = msg; \
|
||||
bool hasException = false; \
|
||||
try { \
|
||||
GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
|
||||
} \
|
||||
catch (const cv::Exception& e) { \
|
||||
if (NULL == strstr(e.what(), msg_)) \
|
||||
ADD_FAILURE() << "Unexpected cv::Exception is raised: " << #statement << "\n Expected message substring: '" << msg_ << "'. Actual message:\n" << e.what(); \
|
||||
hasException = true; \
|
||||
} \
|
||||
catch (const std::exception& e) { \
|
||||
ADD_FAILURE() << "Unexpected std::exception is raised: " << #statement << "\n" << e.what(); \
|
||||
hasException = true; \
|
||||
} \
|
||||
catch (...) { \
|
||||
ADD_FAILURE() << "Unexpected C++ exception is raised: " << #statement; \
|
||||
hasException = true; \
|
||||
} \
|
||||
if (!hasException) { \
|
||||
goto GTEST_CONCAT_TOKEN_(gtest_label_test_, __LINE__); \
|
||||
} \
|
||||
} else \
|
||||
GTEST_CONCAT_TOKEN_(gtest_label_test_, __LINE__): \
|
||||
ADD_FAILURE() << "Failed: Expected: " #statement " throws an '" << msg << "' exception.\n" \
|
||||
" Actual: it doesn't."
|
||||
|
||||
|
||||
#endif // OPENCV_TS_EXT_HPP
|
22233
3rdparty/opencv-4.5.4/modules/ts/include/opencv2/ts/ts_gtest.h
vendored
Normal file
22233
3rdparty/opencv-4.5.4/modules/ts/include/opencv2/ts/ts_gtest.h
vendored
Normal file
File diff suppressed because it is too large
Load Diff
731
3rdparty/opencv-4.5.4/modules/ts/include/opencv2/ts/ts_perf.hpp
vendored
Normal file
731
3rdparty/opencv-4.5.4/modules/ts/include/opencv2/ts/ts_perf.hpp
vendored
Normal file
@ -0,0 +1,731 @@
|
||||
#ifndef OPENCV_TS_PERF_HPP
|
||||
#define OPENCV_TS_PERF_HPP
|
||||
|
||||
#include "opencv2/ts.hpp"
|
||||
|
||||
#include "ts_ext.hpp"
|
||||
|
||||
#include <functional>
|
||||
|
||||
#if !(defined(LOGD) || defined(LOGI) || defined(LOGW) || defined(LOGE))
|
||||
# if defined(__ANDROID__) && defined(USE_ANDROID_LOGGING)
|
||||
# include <android/log.h>
|
||||
|
||||
# define PERF_TESTS_LOG_TAG "OpenCV_perf"
|
||||
# define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, PERF_TESTS_LOG_TAG, __VA_ARGS__))
|
||||
# define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, PERF_TESTS_LOG_TAG, __VA_ARGS__))
|
||||
# define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, PERF_TESTS_LOG_TAG, __VA_ARGS__))
|
||||
# define LOGE(...) ((void)__android_log_print(ANDROID_LOG_ERROR, PERF_TESTS_LOG_TAG, __VA_ARGS__))
|
||||
# else
|
||||
# define LOGD(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
|
||||
# define LOGI(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
|
||||
# define LOGW(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
|
||||
# define LOGE(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
// declare major namespaces to avoid errors on unknown namespace
|
||||
namespace cv { namespace cuda {} namespace ocl {} }
|
||||
namespace cvtest { }
|
||||
|
||||
namespace perf
|
||||
{
|
||||
|
||||
// Tuple stuff from Google Tests
|
||||
using testing::get;
|
||||
using testing::make_tuple;
|
||||
using testing::tuple;
|
||||
using testing::tuple_size;
|
||||
using testing::tuple_element;
|
||||
|
||||
class TestBase;
|
||||
|
||||
/*****************************************************************************************\
|
||||
* Predefined typical frame sizes and typical test parameters *
|
||||
\*****************************************************************************************/
|
||||
const static cv::Size szQVGA = cv::Size(320, 240);
|
||||
const static cv::Size szVGA = cv::Size(640, 480);
|
||||
const static cv::Size szSVGA = cv::Size(800, 600);
|
||||
const static cv::Size szXGA = cv::Size(1024, 768);
|
||||
const static cv::Size szSXGA = cv::Size(1280, 1024);
|
||||
const static cv::Size szWQHD = cv::Size(2560, 1440);
|
||||
|
||||
const static cv::Size sznHD = cv::Size(640, 360);
|
||||
const static cv::Size szqHD = cv::Size(960, 540);
|
||||
const static cv::Size sz240p = szQVGA;
|
||||
const static cv::Size sz720p = cv::Size(1280, 720);
|
||||
const static cv::Size sz1080p = cv::Size(1920, 1080);
|
||||
const static cv::Size sz1440p = szWQHD;
|
||||
const static cv::Size sz2160p = cv::Size(3840, 2160);//UHDTV1 4K
|
||||
const static cv::Size sz4320p = cv::Size(7680, 4320);//UHDTV2 8K
|
||||
|
||||
const static cv::Size sz3MP = cv::Size(2048, 1536);
|
||||
const static cv::Size sz5MP = cv::Size(2592, 1944);
|
||||
const static cv::Size sz2K = cv::Size(2048, 2048);
|
||||
|
||||
const static cv::Size szODD = cv::Size(127, 61);
|
||||
|
||||
const static cv::Size szSmall24 = cv::Size(24, 24);
|
||||
const static cv::Size szSmall32 = cv::Size(32, 32);
|
||||
const static cv::Size szSmall64 = cv::Size(64, 64);
|
||||
const static cv::Size szSmall128 = cv::Size(128, 128);
|
||||
|
||||
#define SZ_ALL_VGA ::testing::Values(::perf::szQVGA, ::perf::szVGA, ::perf::szSVGA)
|
||||
#define SZ_ALL_GA ::testing::Values(::perf::szQVGA, ::perf::szVGA, ::perf::szSVGA, ::perf::szXGA, ::perf::szSXGA)
|
||||
#define SZ_ALL_HD ::testing::Values(::perf::sznHD, ::perf::szqHD, ::perf::sz720p, ::perf::sz1080p)
|
||||
#define SZ_ALL_SMALL ::testing::Values(::perf::szSmall24, ::perf::szSmall32, ::perf::szSmall64, ::perf::szSmall128)
|
||||
#define SZ_ALL ::testing::Values(::perf::szQVGA, ::perf::szVGA, ::perf::szSVGA, ::perf::szXGA, ::perf::szSXGA, ::perf::sznHD, ::perf::szqHD, ::perf::sz720p, ::perf::sz1080p)
|
||||
#define SZ_TYPICAL ::testing::Values(::perf::szVGA, ::perf::szqHD, ::perf::sz720p, ::perf::szODD)
|
||||
|
||||
|
||||
#define TYPICAL_MAT_SIZES ::perf::szVGA, ::perf::sz720p, ::perf::sz1080p, ::perf::szODD
|
||||
#define TYPICAL_MAT_TYPES CV_8UC1, CV_8UC4, CV_32FC1
|
||||
#define TYPICAL_MATS testing::Combine( testing::Values( TYPICAL_MAT_SIZES ), testing::Values( TYPICAL_MAT_TYPES ) )
|
||||
#define TYPICAL_MATS_C1 testing::Combine( testing::Values( TYPICAL_MAT_SIZES ), testing::Values( CV_8UC1, CV_32FC1 ) )
|
||||
#define TYPICAL_MATS_C4 testing::Combine( testing::Values( TYPICAL_MAT_SIZES ), testing::Values( CV_8UC4 ) )
|
||||
|
||||
|
||||
/*****************************************************************************************\
|
||||
* MatType - printable wrapper over integer 'type' of Mat *
|
||||
\*****************************************************************************************/
|
||||
class MatType
|
||||
{
|
||||
public:
|
||||
MatType(int val=0) : _type(val) {}
|
||||
operator int() const {return _type;}
|
||||
|
||||
private:
|
||||
int _type;
|
||||
};
|
||||
|
||||
/*****************************************************************************************\
|
||||
* CV_ENUM and CV_FLAGS - macro to create printable wrappers for defines and enums *
|
||||
\*****************************************************************************************/
|
||||
|
||||
#define CV_ENUM(class_name, ...) \
|
||||
namespace { \
|
||||
using namespace cv;using namespace cv::cuda; using namespace cv::ocl; \
|
||||
struct class_name { \
|
||||
class_name(int val = 0) : val_(val) {} \
|
||||
operator int() const { return val_; } \
|
||||
void PrintTo(std::ostream* os) const { \
|
||||
const int vals[] = { __VA_ARGS__ }; \
|
||||
const char* svals = #__VA_ARGS__; \
|
||||
for(int i = 0, pos = 0; i < (int)(sizeof(vals)/sizeof(int)); ++i) { \
|
||||
while(isspace(svals[pos]) || svals[pos] == ',') ++pos; \
|
||||
int start = pos; \
|
||||
while(!(isspace(svals[pos]) || svals[pos] == ',' || svals[pos] == 0)) \
|
||||
++pos; \
|
||||
if (val_ == vals[i]) { \
|
||||
*os << std::string(svals + start, svals + pos); \
|
||||
return; \
|
||||
} \
|
||||
} \
|
||||
*os << "UNKNOWN"; \
|
||||
} \
|
||||
static ::testing::internal::ParamGenerator<class_name> all() { \
|
||||
const class_name vals[] = { __VA_ARGS__ }; \
|
||||
return ::testing::ValuesIn(vals); \
|
||||
} \
|
||||
private: int val_; \
|
||||
}; \
|
||||
static inline void PrintTo(const class_name& t, std::ostream* os) { t.PrintTo(os); } }
|
||||
|
||||
#define CV_FLAGS(class_name, ...) \
|
||||
namespace { \
|
||||
struct class_name { \
|
||||
class_name(int val = 0) : val_(val) {} \
|
||||
operator int() const { return val_; } \
|
||||
void PrintTo(std::ostream* os) const { \
|
||||
using namespace cv;using namespace cv::cuda; using namespace cv::ocl; \
|
||||
const int vals[] = { __VA_ARGS__ }; \
|
||||
const char* svals = #__VA_ARGS__; \
|
||||
int value = val_; \
|
||||
bool first = true; \
|
||||
for(int i = 0, pos = 0; i < (int)(sizeof(vals)/sizeof(int)); ++i) { \
|
||||
while(isspace(svals[pos]) || svals[pos] == ',') ++pos; \
|
||||
int start = pos; \
|
||||
while(!(isspace(svals[pos]) || svals[pos] == ',' || svals[pos] == 0)) \
|
||||
++pos; \
|
||||
if ((value & vals[i]) == vals[i]) { \
|
||||
value &= ~vals[i]; \
|
||||
if (first) first = false; else *os << "|"; \
|
||||
*os << std::string(svals + start, svals + pos); \
|
||||
if (!value) return; \
|
||||
} \
|
||||
} \
|
||||
if (first) *os << "UNKNOWN"; \
|
||||
} \
|
||||
private: int val_; \
|
||||
}; \
|
||||
static inline void PrintTo(const class_name& t, std::ostream* os) { t.PrintTo(os); } }
|
||||
|
||||
CV_ENUM(MatDepth, CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F, CV_16F)
|
||||
|
||||
/*****************************************************************************************\
*                      Regression control utility for performance testing                 *
\*****************************************************************************************/
// How the 'eps' tolerance of the Regression checks below is interpreted:
// as an absolute difference or as a difference relative to the reference value.
enum ERROR_TYPE
{
    ERROR_ABSOLUTE = 0,
    ERROR_RELATIVE = 1
};
// Singleton that records reference output of performance tests into a
// cv::FileStorage and verifies later runs against it (see storageIn/storageOut
// and write()/verify() below).  Normally used through the SANITY_CHECK* macros.
class Regression
{
public:
    // Register 'array' under 'name' for the given test with tolerance 'eps'
    // interpreted per 'err'.  Returns the singleton to allow chaining.
    static Regression& add(TestBase* test, const std::string& name, cv::InputArray array, double eps = DBL_EPSILON, ERROR_TYPE err = ERROR_ABSOLUTE);
    // Same, for cv::Moments.
    static Regression& addMoments(TestBase* test, const std::string& name, const cv::Moments & array, double eps = DBL_EPSILON, ERROR_TYPE err = ERROR_ABSOLUTE);
    // Same, for a vector of keypoints.
    static Regression& addKeypoints(TestBase* test, const std::string& name, const std::vector<cv::KeyPoint>& array, double eps = DBL_EPSILON, ERROR_TYPE err = ERROR_ABSOLUTE);
    // Same, for a vector of descriptor matches.
    static Regression& addMatches(TestBase* test, const std::string& name, const std::vector<cv::DMatch>& array, double eps = DBL_EPSILON, ERROR_TYPE err = ERROR_ABSOLUTE);
    // Bind the singleton to a test suite; 'ext' selects the storage format.
    static void Init(const std::string& testSuitName, const std::string& ext = ".xml");

    // Chained registration of additional named arrays.
    Regression& operator() (const std::string& name, cv::InputArray array, double eps = DBL_EPSILON, ERROR_TYPE err = ERROR_ABSOLUTE);

private:
    static Regression& instance();   // lazily-created singleton accessor

    Regression();
    ~Regression();

    Regression(const Regression&);            // non-copyable
    Regression& operator=(const Regression&); // non-assignable

    cv::RNG regRNG;//own random numbers generator to make collection and verification work identical
    std::string storageInPath;       // reference data read from here
    std::string storageOutPath;      // newly collected data written here
    cv::FileStorage storageIn;
    cv::FileStorage storageOut;
    cv::FileNode rootIn;
    std::string currentTestNodeName;
    std::string suiteName;

    cv::FileStorage& write();        // lazily opens storageOut for writing

    static std::string getCurrentTestNodeName();
    static bool isVector(cv::InputArray a);
    static double getElem(cv::Mat& m, int x, int y, int cn = 0);

    void init(const std::string& testSuitName, const std::string& ext);
    void write(cv::InputArray array);
    void write(cv::Mat m);
    // Compare 'array'/'actual' against the stored reference 'node'.
    void verify(cv::FileNode node, cv::InputArray array, double eps, ERROR_TYPE err);
    void verify(cv::FileNode node, cv::Mat actual, double eps, std::string argname, ERROR_TYPE err);
};
// Convenience wrappers over perf::Regression: record/verify the named value in
// the current test.  SANITY_CHECK_NOTHING() only marks the test as verified
// without comparing any data.
#define SANITY_CHECK(array, ...) ::perf::Regression::add(this, #array, array , ## __VA_ARGS__)
#define SANITY_CHECK_MOMENTS(array, ...) ::perf::Regression::addMoments(this, #array, array , ## __VA_ARGS__)
#define SANITY_CHECK_KEYPOINTS(array, ...) ::perf::Regression::addKeypoints(this, #array, array , ## __VA_ARGS__)
#define SANITY_CHECK_MATCHES(array, ...) ::perf::Regression::addMatches(this, #array, array , ## __VA_ARGS__)
#define SANITY_CHECK_NOTHING() this->setVerified()
// Helper for CUDA performance tests.
class GpuPerf
{
public:
    // Reports whether the CUDA target is selected for this run
    // (declared here, defined in the ts module implementation).
    static bool targetDevice();
};

#define PERF_RUN_CUDA()  ::perf::GpuPerf::targetDevice()
/*****************************************************************************************\
*                            Container for performance metrics                            *
\*****************************************************************************************/
// Summary statistics of one performance test run (filled by TestBase::calcMetrics()).
typedef struct performance_metrics
{
    size_t bytesIn;        // total declared input size, bytes
    size_t bytesOut;       // total declared output size, bytes
    unsigned int samples;  // number of collected time samples
    unsigned int outliers; // samples excluded from the statistics
    double gmean;          // geometric mean of sample times
    double gstddev;//stddev for log(time)
    double mean;
    double stddev;
    double median;
    double min;
    double frequency;      // tick frequency used to convert samples to seconds
    int terminationReason; // one of the TERM_* values below

    // Why the measurement loop stopped.
    enum
    {
        TERM_ITERATIONS = 0, // iteration limit reached
        TERM_TIME = 1,       // time limit reached
        TERM_INTERRUPT = 2,
        TERM_EXCEPTION = 3,
        TERM_SKIP_TEST = 4, // there are some limitations and test should be skipped
        TERM_UNKNOWN = -1
    };

    performance_metrics();
    void clear();
} performance_metrics;
/*****************************************************************************************\
*                           Strategy for performance measuring                            *
\*****************************************************************************************/
// Selects the sampling strategy used by TestBase (see getCurrentPerformanceStrategy()).
enum PERF_STRATEGY
{
    PERF_STRATEGY_DEFAULT = -1, // fall back to the module-wide strategy
    PERF_STRATEGY_BASE = 0,
    PERF_STRATEGY_SIMPLE = 1
};
/*****************************************************************************************\
*                            Base fixture for performance tests                           *
\*****************************************************************************************/
#ifdef CV_COLLECT_IMPL_DATA
// Implementation collection processing class.
// Accumulates and shapes implementation data reported by cv::getImpl():
// which accelerated code paths (IPP/ICV/OpenCL/...) were hit and by which functions.
typedef struct ImplData
{
    bool ipp;     // plain IPP code path was used
    bool icv;     // IPP/ICV code path was used (HAVE_IPP_ICV builds)
    bool ipp_mt;  // multi-threaded IPP code path was used
    bool ocl;     // OpenCL code path was used
    bool plain;   // no acceleration flags were reported
    std::vector<int> implCode;       // CV_IMPL_* code per recorded call
    std::vector<cv::String> funName; // function name per recorded call

    ImplData()
    {
        Reset();
    }

    // Clear collected data and reset the global implementation collector.
    // NOTE(review): 'plain' is not reset here — confirm this is intentional.
    void Reset()
    {
        cv::setImpl(0);
        ipp = icv = ocl = ipp_mt = false;
        implCode.clear();
        funName.clear();
    }

    // Fetch collected calls from the global collector and decode the flags.
    void GetImpl()
    {
        flagsToVars(cv::getImpl(implCode, funName));
    }

    // Names of all recorded calls attributed to the implementation code 'impl'.
    std::vector<cv::String> GetCallsForImpl(int impl)
    {
        std::vector<cv::String> out;

        for(int i = 0; i < (int)implCode.size(); i++)
        {
            if(impl == implCode[i])
                out.push_back(funName[i]);
        }
        return out;
    }

    // Remove duplicate (code, name) entries, keeping first-occurrence order.
    // O(n^2), acceptable for the small number of recorded calls.
    void ShapeUp()
    {
        std::vector<int> savedCode;
        std::vector<cv::String> savedName;

        for(int i = 0; i < (int)implCode.size(); i++)
        {
            bool match = false;
            for(int j = 0; j < (int)savedCode.size(); j++)
            {
                if(implCode[i] == savedCode[j] && !funName[i].compare(savedName[j]))
                {
                    match = true;
                    break;
                }
            }
            if(!match)
            {
                savedCode.push_back(implCode[i]);
                savedName.push_back(funName[i]);
            }
        }

        implCode = savedCode;
        funName = savedName;
    }

    // convert flags register to more handy variables
    void flagsToVars(int flags)
    {
#if defined(HAVE_IPP_ICV)
        // In ICV builds the CV_IMPL_IPP bit denotes the ICV flavor of IPP.
        ipp = 0;
        icv = ((flags&CV_IMPL_IPP) > 0);
#else
        ipp = ((flags&CV_IMPL_IPP) > 0);
        icv = 0;
#endif
        ipp_mt = ((flags&CV_IMPL_MT) > 0);
        ocl = ((flags&CV_IMPL_OCL) > 0);
        plain = (flags == 0);
    }

} ImplData;
#endif
#ifdef ENABLE_INSTRUMENTATION
// Access to the instrumentation call tree collected during a test run.
// (Name "InstumentData" kept as-is: it is part of the public interface.)
class InstumentData
{
public:
    static ::cv::String treeToString(); // render the call tree as a string
    static void printTree();            // dump the call tree to output
};
#endif
// Base googletest fixture for all OpenCV performance tests.
// Drives the measurement loop (see TEST_CYCLE*/PERF_SAMPLE_* macros), collects
// per-iteration timings into 'times' and summarizes them as performance_metrics.
class TestBase: public ::testing::Test
{
public:
    TestBase();

    // Parse the perf-test command line before RUN_ALL_TESTS(); the second
    // overload also declares which implementations are available.
    static void Init(int argc, const char* const argv[]);
    static void Init(const std::vector<std::string> & availableImpls,
                     int argc, const char* const argv[]);
    static void RecordRunParameters();  // record run properties into the test log
    static std::string getDataPath(const std::string& relativePath);
    static std::string getSelectedImpl();

    static enum PERF_STRATEGY getCurrentModulePerformanceStrategy();
    // Sets the module-wide strategy; returns the previous one.
    static enum PERF_STRATEGY setModulePerformanceStrategy(enum PERF_STRATEGY strategy);

    // Thrown to skip a perf test from inside the measurement machinery.
    class PerfSkipTestException: public cvtest::SkipTestException
    {
    public:
        int dummy; // workaround for MacOSX Xcode 7.3 bug (don't make class "empty")
        PerfSkipTestException() : dummy(0) {}
    };

protected:
    virtual void PerfTestBody() = 0;  // generated by the PERF_TEST* macros

    virtual void SetUp() CV_OVERRIDE;
    virtual void TearDown() CV_OVERRIDE;

    bool startTimer(); // bool is dummy for conditional loop
    void stopTimer();
    bool next();       // advance the measurement loop; false when sampling is done

    PERF_STRATEGY getCurrentPerformanceStrategy() const;

    // How declared input/output arrays are initialized before measurement.
    enum WarmUpType
    {
        WARMUP_READ,
        WARMUP_WRITE,
        WARMUP_RNG,
        WARMUP_NONE
    };

    void reportMetrics(bool toJUnitXML = false);
    static void warmup(cv::InputOutputArray a, WarmUpType wtype = WARMUP_READ);

    performance_metrics& calcMetrics();  // compute statistics over collected samples

    void RunPerfTestBody();              // runs PerfTestBody() with error handling

#ifdef CV_COLLECT_IMPL_DATA
    ImplData implConf;    // per-test implementation usage data
#endif
#ifdef ENABLE_INSTRUMENTATION
    InstumentData instrConf;
#endif

private:
    typedef std::vector<std::pair<int, cv::Size> > SizeVector;
    typedef std::vector<int64> TimeVector;

    SizeVector inputData;    // declared inputs: (type, size) pairs
    SizeVector outputData;   // declared outputs: (type, size) pairs
    unsigned int getTotalInputSize() const;
    unsigned int getTotalOutputSize() const;

    enum PERF_STRATEGY testStrategy;  // per-test strategy override

    TimeVector times;        // per-iteration time samples
    int64 lastTime;
    int64 totalTime;
    int64 timeLimit;
    static int64 timeLimitDefault;
    static unsigned int iterationsLimitDefault;

    unsigned int minIters;
    unsigned int nIters;
    unsigned int currentIter;
    unsigned int runsPerIteration;
    unsigned int perfValidationStage;

    performance_metrics metrics;
    void validateMetrics();

    static int64 _timeadjustment;  // measurement overhead, determined by _calibrate()
    static int64 _calibrate();

    static void warmup_impl(cv::Mat m, WarmUpType wtype);
    static int getSizeInBytes(cv::InputArray a);
    static cv::Size getSize(cv::InputArray a);
    static void declareArray(SizeVector& sizes, cv::InputOutputArray a, WarmUpType wtype);

    // Fluent helper exposed via 'declare' to describe test inputs/outputs,
    // iteration counts, time limits and runs: declare.in(a, b).out(c).time(0.5);
    class _declareHelper
    {
    public:
        _declareHelper& in(cv::InputOutputArray a1, WarmUpType wtype = WARMUP_READ);
        _declareHelper& in(cv::InputOutputArray a1, cv::InputOutputArray a2, WarmUpType wtype = WARMUP_READ);
        _declareHelper& in(cv::InputOutputArray a1, cv::InputOutputArray a2, cv::InputOutputArray a3, WarmUpType wtype = WARMUP_READ);
        _declareHelper& in(cv::InputOutputArray a1, cv::InputOutputArray a2, cv::InputOutputArray a3, cv::InputOutputArray a4, WarmUpType wtype = WARMUP_READ);

        _declareHelper& out(cv::InputOutputArray a1, WarmUpType wtype = WARMUP_WRITE);
        _declareHelper& out(cv::InputOutputArray a1, cv::InputOutputArray a2, WarmUpType wtype = WARMUP_WRITE);
        _declareHelper& out(cv::InputOutputArray a1, cv::InputOutputArray a2, cv::InputOutputArray a3, WarmUpType wtype = WARMUP_WRITE);
        _declareHelper& out(cv::InputOutputArray a1, cv::InputOutputArray a2, cv::InputOutputArray a3, cv::InputOutputArray a4, WarmUpType wtype = WARMUP_WRITE);

        _declareHelper& iterations(unsigned int n);
        _declareHelper& time(double timeLimitSecs);
        _declareHelper& tbb_threads(int n = -1);
        _declareHelper& runs(unsigned int runsNumber);

        _declareHelper& strategy(enum PERF_STRATEGY s);
    private:
        TestBase* test;
        _declareHelper(TestBase* t);              // only TestBase may construct it
        _declareHelper(const _declareHelper&);
        _declareHelper& operator=(const _declareHelper&);
        friend class TestBase;
    };
    friend class _declareHelper;

    bool verified;   // set via setVerified() / the SANITY_CHECK* macros

public:
    _declareHelper declare;

    void setVerified() { this->verified = true; }
};
// Parameterized perf fixture: TestBase combined with googletest's parameter interface.
template<typename T> class TestBaseWithParam: public TestBase, public ::testing::WithParamInterface<T> {};

// Common (size, mat type) parameterization used by many perf tests.
typedef tuple<cv::Size, MatType> Size_MatType_t;
typedef TestBaseWithParam<Size_MatType_t> Size_MatType;

/*****************************************************************************************\
*                              Print functions for googletest                             *
\*****************************************************************************************/
void PrintTo(const MatType& t, std::ostream* os);
} //namespace perf

namespace cv
{

// googletest pretty-printers for common OpenCV types used as test parameters.
void PrintTo(const String& str, ::std::ostream* os);
void PrintTo(const Size& sz, ::std::ostream* os);

} //namespace cv
/*****************************************************************************************\
*                        Macro definitions for performance tests                          *
\*****************************************************************************************/

// Shared body of all PERF_TEST* macros: runs the fixture's RunPerfTestBody()
// under tracing, converts SkipTestException into a "[ SKIP ]" message, and
// always performs cvtest tear-down afterwards.
#define CV__PERF_TEST_BODY_IMPL(name) \
    { \
       CV__TEST_NAMESPACE_CHECK \
       CV__TRACE_APP_FUNCTION_NAME("PERF_TEST: " name); \
       try { \
       ::cvtest::testSetUp(); \
       RunPerfTestBody(); \
       } \
       catch (cvtest::details::SkipTestExceptionBase& e) \
       { \
         printf("[     SKIP ] %s\n", e.what()); \
       } \
       ::cvtest::testTearDown(); \
    }
// Builds a unique namespace name for a PERF_TEST_F test so its local fixture
// cannot clash with other tests.
#define PERF_PROXY_NAMESPACE_NAME_(test_case_name, test_name) \
  test_case_name##_##test_name##_perf_namespace_proxy
// Defines a performance test.
// The fixture is ::perf::TestBase; the body is wrapped by CV__PERF_TEST_BODY_IMPL.
//
// The first parameter is the name of the test case, and the second
// parameter is the name of the test within the test case.
//
// The user should put his test code between braces after using this
// macro.  Example:
//
//   PERF_TEST(FooTest, InitializesCorrectly) {
//     Foo foo;
//     EXPECT_TRUE(foo.StatusIsOK());
//   }
#define PERF_TEST(test_case_name, test_name)\
    TEST_(test_case_name, test_name, ::perf::TestBase, PerfTestBody, CV__PERF_TEST_BODY_IMPL)
// Defines a performance test that uses a test fixture.
//
// The first parameter is the name of the test fixture class, which
// also doubles as the test case name.  The second parameter is the
// name of the test within the test case.
//
// A test fixture class must be declared earlier.  The user should put
// his test code between braces after using this macro.  Example:
//
//   class FooTest : public ::perf::TestBase {
//    protected:
//     virtual void SetUp() { TestBase::SetUp(); b_.AddElement(3); }
//
//     Foo a_;
//     Foo b_;
//   };
//
//   PERF_TEST_F(FooTest, InitializesCorrectly) {
//     EXPECT_TRUE(a_.StatusIsOK());
//   }
//
//   PERF_TEST_F(FooTest, ReturnsElementCountCorrectly) {
//     EXPECT_EQ(0, a_.size());
//     EXPECT_EQ(1, b_.size());
//   }
//
// Implementation note: the macro wraps everything in a unique proxy namespace
// and shadows 'TestBase' there so that fixtures deriving from perf::TestBase
// directly (instead of a user fixture) fail to compile with a clear hint.
#define PERF_TEST_F(fixture, testname) \
    namespace PERF_PROXY_NAMESPACE_NAME_(fixture, testname) {\
     class TestBase {/*compile error for this class means that you are trying to use perf::TestBase as a fixture*/};\
     class fixture : public ::fixture {\
      public:\
       fixture() {}\
      protected:\
       virtual void PerfTestBody();\
     };\
     TEST_F(fixture, testname){ CV__PERF_TEST_BODY_IMPL(#fixture "_" #testname); }\
    }\
    void PERF_PROXY_NAMESPACE_NAME_(fixture, testname)::fixture::PerfTestBody()
// Defines a parametrized performance test.
//
// @Note PERF_TEST_P() below violates behavior of original Google Tests - there is no tests instantiation in original TEST_P()
// This macro is intended for usage with separate INSTANTIATE_TEST_CASE_P macro
#define PERF_TEST_P_(test_case_name, test_name) CV__TEST_P(test_case_name, test_name, PerfTestBody, CV__PERF_TEST_BODY_IMPL)
// Defines a parametrized performance test and instantiates it with 'params'.
//
// @Note Original TEST_P() macro doesn't instantiate tests with parameters. To keep original usage use PERF_TEST_P_() macro
//
// The first parameter is the name of the test fixture class, which
// also doubles as the test case name.  The second parameter is the
// name of the test within the test case.
//
// The user should put his test code between braces after using this
// macro.  Example:
//
//   typedef ::perf::TestBaseWithParam<cv::Size> FooTest;
//
//   PERF_TEST_P(FooTest, DoTestingRight, ::testing::Values(::perf::szVGA, ::perf::sz720p) {
//     cv::Mat b(GetParam(), CV_8U, cv::Scalar(10));
//     cv::Mat a(GetParam(), CV_8U, cv::Scalar(20));
//     cv::Mat c(GetParam(), CV_8U, cv::Scalar(0));
//
//     declare.in(a, b).out(c).time(0.5);
//
//     TEST_CYCLE() cv::add(a, b, c);
//
//     SANITY_CHECK(c);
//   }
#define PERF_TEST_P(fixture, name, params)  \
    class fixture##_##name : public fixture {\
     public:\
      fixture##_##name() {}\
     protected:\
      virtual void PerfTestBody();\
    };\
    CV__TEST_P(fixture##_##name, name, PerfTestBodyDummy, CV__PERF_TEST_BODY_IMPL){} \
    INSTANTIATE_TEST_CASE_P(/*none*/, fixture##_##name, params);\
    void fixture##_##name::PerfTestBody()
// Expands the optional extra statements passed to the CV_PERF_TEST_MAIN*
// macros; Visual Studio 2005 needs the loop wrapper to accept the expansion.
#ifndef __CV_TEST_EXEC_ARGS
#if defined(_MSC_VER) && (_MSC_VER <= 1400)
#define __CV_TEST_EXEC_ARGS(...)    \
    while (++argc >= (--argc,-1)) {__VA_ARGS__; break;} /*this ugly construction is needed for VS 2005*/
#else
#define __CV_TEST_EXEC_ARGS(...)    \
    __VA_ARGS__;
#endif
#endif
// Common body of the perf-test main(): initializes the regression storage,
// the TestBase machinery with the available implementation list, googletest,
// records run parameters, runs optional user statements, then all tests.
#define CV_PERF_TEST_MAIN_INTERNALS(modulename, impls, ...)	\
    CV_TRACE_FUNCTION(); \
    { CV_TRACE_REGION("INIT"); \
    ::perf::Regression::Init(#modulename); \
    ::perf::TestBase::Init(std::vector<std::string>(impls, impls + sizeof impls / sizeof *impls), \
                           argc, argv); \
    ::testing::InitGoogleTest(&argc, argv); \
    ::testing::UnitTest::GetInstance()->listeners().Append(new cvtest::SystemInfoCollector); \
    ::testing::Test::RecordProperty("cv_module_name", #modulename); \
    ::perf::TestBase::RecordRunParameters(); \
    __CV_TEST_EXEC_ARGS(__VA_ARGS__) \
    } \
    return RUN_ALL_TESTS();
// impls must be an array, not a pointer; "plain" should always be one of the implementations
#define CV_PERF_TEST_MAIN_WITH_IMPLS(modulename, impls, ...) \
int main(int argc, char **argv)\
{\
    CV_PERF_TEST_MAIN_INTERNALS(modulename, impls, __VA_ARGS__)\
}

// Standard perf-test entry point: only the "plain" implementation is declared.
#define CV_PERF_TEST_MAIN(modulename, ...) \
int main(int argc, char **argv)\
{\
    const char * plain_only[] = { "plain" };\
    CV_PERF_TEST_MAIN_INTERNALS(modulename, plain_only, __VA_ARGS__)\
}
// Measurement-loop macros: each iteration of the for-loop is one timed sample
// (next() advances the sampling, startTimer()/stopTimer() bracket the work).

//! deprecated
#define TEST_CYCLE_N(n) for(declare.iterations(n); next() && startTimer(); stopTimer())
//! deprecated
#define TEST_CYCLE() for(; next() && startTimer(); stopTimer())
//! deprecated
#define TEST_CYCLE_MULTIRUN(runsNum) for(declare.runs(runsNum); next() && startTimer(); stopTimer()) for(int r = 0; r < runsNum; ++r)

// Preferred form: wrap the measured code between these two markers.
#define PERF_SAMPLE_BEGIN() \
    for(; next() && startTimer(); stopTimer()) \
    { \
        CV_TRACE_REGION("iteration");
#define PERF_SAMPLE_END() \
    }
namespace perf
{
// Strict-weak-ordering comparators for sorting OpenCV entities in perf tests.
namespace comparators
{
template<typename T>
|
||||
struct RectLess_
|
||||
{
|
||||
bool operator()(const cv::Rect_<T>& r1, const cv::Rect_<T>& r2) const
|
||||
{
|
||||
return r1.x < r2.x ||
|
||||
(r1.x == r2.x && r1.y < r2.y) ||
|
||||
(r1.x == r2.x && r1.y == r2.y && r1.width < r2.width) ||
|
||||
(r1.x == r2.x && r1.y == r2.y && r1.width == r2.width && r1.height < r2.height);
|
||||
}
|
||||
};
|
||||
|
||||
// Comparator for the common integer-rectangle case.
typedef RectLess_<int> RectLess;
struct KeypointGreater
|
||||
{
|
||||
bool operator()(const cv::KeyPoint& kp1, const cv::KeyPoint& kp2) const
|
||||
{
|
||||
if (kp1.response > kp2.response) return true;
|
||||
if (kp1.response < kp2.response) return false;
|
||||
if (kp1.size > kp2.size) return true;
|
||||
if (kp1.size < kp2.size) return false;
|
||||
if (kp1.octave > kp2.octave) return true;
|
||||
if (kp1.octave < kp2.octave) return false;
|
||||
if (kp1.pt.y < kp2.pt.y) return false;
|
||||
if (kp1.pt.y > kp2.pt.y) return true;
|
||||
return kp1.pt.x < kp2.pt.x;
|
||||
}
|
||||
};
|
||||
|
||||
} //namespace comparators

// Sorts keypoints and keeps 'descriptors' aligned with them
// (defined in the ts module; presumably ordered by comparators::KeypointGreater — confirm in the implementation).
void sort(std::vector<cv::KeyPoint>& pts, cv::InputOutputArray descriptors);
} //namespace perf

#endif //OPENCV_TS_PERF_HPP