feat: switch backend to PaddleOCR-NCNN, switch project to CMake

1. Migrated the project backend to the PaddleOCR-NCNN algorithm; basic compatibility tests have passed
2. The project is now organized with CMake (a minimal illustrative sketch follows below); to better support third-party libraries, a QMake project will no longer be provided
3. Reorganized the rights/license statement files and the code project layout to minimize infringement risk
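For reference, a minimal CMakeLists.txt sketch of the kind of organization point 2 describes; the project name, source path, and the assumption that ncnn and OpenCV are installed with CMake package configs are illustrative, not taken from this commit:

    cmake_minimum_required(VERSION 3.10)
    project(paddleocr_ncnn_demo CXX)        # hypothetical project name
    set(CMAKE_CXX_STANDARD 11)
    # both dependencies are assumed to provide CMake package configs when installed
    find_package(OpenCV REQUIRED)
    find_package(ncnn REQUIRED)
    add_executable(ocr_demo src/main.cpp)   # illustrative source file
    target_link_libraries(ocr_demo PRIVATE ${OpenCV_LIBS} ncnn)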

Log: switch backend to PaddleOCR-NCNN, switch project to CMake
Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
wangzhengyang
2022-05-10 09:54:44 +08:00
parent ecdd171c6f
commit 718c41634f
10018 changed files with 3593797 additions and 186748 deletions


@@ -0,0 +1,145 @@
#include "../test_precomp.hpp"
#include "opencv2/ts/ocl_test.hpp"
#ifdef HAVE_OPENCL
namespace opencv_test {
namespace ocl {
//////////////////////////Mog2_Update///////////////////////////////////
namespace
{
IMPLEMENT_PARAM_CLASS(UseGray, bool)
IMPLEMENT_PARAM_CLASS(DetectShadow, bool)
IMPLEMENT_PARAM_CLASS(UseFloat, bool)
}
PARAM_TEST_CASE(Mog2_Update, UseGray, DetectShadow, UseFloat)
{
bool useGray;
bool detectShadow;
bool useFloat;
virtual void SetUp()
{
useGray = GET_PARAM(0);
detectShadow = GET_PARAM(1);
useFloat = GET_PARAM(2);
}
};
OCL_TEST_P(Mog2_Update, Accuracy)
{
string inputFile = string(TS::ptr()->get_data_path()) + "video/768x576.avi";
VideoCapture cap(inputFile);
if (!cap.isOpened())
throw SkipTestException("Video file can not be opened");
Ptr<BackgroundSubtractorMOG2> mog2_cpu = createBackgroundSubtractorMOG2();
Ptr<BackgroundSubtractorMOG2> mog2_ocl = createBackgroundSubtractorMOG2();
mog2_cpu->setDetectShadows(detectShadow);
mog2_ocl->setDetectShadows(detectShadow);
Mat frame, foreground;
UMat u_foreground;
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (useGray)
{
Mat temp;
cvtColor(frame, temp, COLOR_BGR2GRAY);
swap(temp, frame);
}
if (useFloat)
{
Mat temp;
frame.convertTo(temp, CV_32F);
swap(temp, frame);
}
OCL_OFF(mog2_cpu->apply(frame, foreground));
OCL_ON (mog2_ocl->apply(frame, u_foreground));
if (detectShadow)
EXPECT_MAT_SIMILAR(foreground, u_foreground, 15e-3);
else
EXPECT_MAT_NEAR(foreground, u_foreground, 0);
}
}
//////////////////////////Mog2_getBackgroundImage///////////////////////////////////
PARAM_TEST_CASE(Mog2_getBackgroundImage, DetectShadow, UseFloat)
{
bool detectShadow;
bool useFloat;
virtual void SetUp()
{
detectShadow = GET_PARAM(0);
useFloat = GET_PARAM(1);
}
};
OCL_TEST_P(Mog2_getBackgroundImage, Accuracy)
{
string inputFile = string(TS::ptr()->get_data_path()) + "video/768x576.avi";
VideoCapture cap(inputFile);
if (!cap.isOpened())
throw SkipTestException("Video file can not be opened");
Ptr<BackgroundSubtractorMOG2> mog2_cpu = createBackgroundSubtractorMOG2();
Ptr<BackgroundSubtractorMOG2> mog2_ocl = createBackgroundSubtractorMOG2();
mog2_cpu->setDetectShadows(detectShadow);
mog2_ocl->setDetectShadows(detectShadow);
Mat frame, foreground;
UMat u_foreground;
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (useFloat)
{
Mat temp;
frame.convertTo(temp, CV_32F);
swap(temp, frame);
}
OCL_OFF(mog2_cpu->apply(frame, foreground));
OCL_ON (mog2_ocl->apply(frame, u_foreground));
}
Mat background;
OCL_OFF(mog2_cpu->getBackgroundImage(background));
UMat u_background;
OCL_ON (mog2_ocl->getBackgroundImage(u_background));
EXPECT_MAT_NEAR(background, u_background, 1.0);
}
///////////////////////////////////////////////////////////////////////////////////////////
OCL_INSTANTIATE_TEST_CASE_P(OCL_Video, Mog2_Update, Combine(
Values(UseGray(true),UseGray(false)),
Values(DetectShadow(true), DetectShadow(false)),
Values(UseFloat(false),UseFloat(true)))
);
OCL_INSTANTIATE_TEST_CASE_P(OCL_Video, Mog2_getBackgroundImage, Combine(
Values(DetectShadow(true), DetectShadow(false)),
Values(UseFloat(false),UseFloat(true)))
);
}}// namespace opencv_test::ocl
#endif


@@ -0,0 +1,85 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../test_precomp.hpp"
#include "opencv2/ts/ocl_test.hpp"
#ifdef HAVE_OPENCL
namespace opencv_test { namespace {
CV_ENUM(DIS_TestPresets, DISOpticalFlow::PRESET_ULTRAFAST, DISOpticalFlow::PRESET_FAST, DISOpticalFlow::PRESET_MEDIUM);
typedef ocl::TSTestWithParam<DIS_TestPresets> OCL_DenseOpticalFlow_DIS;
OCL_TEST_P(OCL_DenseOpticalFlow_DIS, Mat)
{
int preset = (int)GetParam();
Mat frame1, frame2, GT;
frame1 = imread(TS::ptr()->get_data_path() + "optflow/RubberWhale1.png");
frame2 = imread(TS::ptr()->get_data_path() + "optflow/RubberWhale2.png");
CV_Assert(!frame1.empty() && !frame2.empty());
cvtColor(frame1, frame1, COLOR_BGR2GRAY);
cvtColor(frame2, frame2, COLOR_BGR2GRAY);
{
Mat flow;
UMat ocl_flow;
Ptr<DenseOpticalFlow> algo = DISOpticalFlow::create(preset);
OCL_OFF(algo->calc(frame1, frame2, flow));
OCL_ON(algo->calc(frame1, frame2, ocl_flow));
ASSERT_EQ(flow.rows, ocl_flow.rows);
ASSERT_EQ(flow.cols, ocl_flow.cols);
EXPECT_MAT_SIMILAR(flow, ocl_flow, 6e-3);
}
}
OCL_INSTANTIATE_TEST_CASE_P(Video, OCL_DenseOpticalFlow_DIS,
DIS_TestPresets::all());
}} // namespace
#endif // HAVE_OPENCL


@@ -0,0 +1,120 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../test_precomp.hpp"
#include "opencv2/ts/ocl_test.hpp"
#ifdef HAVE_OPENCL
namespace opencv_test {
namespace ocl {
/////////////////////////////////////////////////////////////////////////////////////////////////
// FarnebackOpticalFlow
namespace
{
IMPLEMENT_PARAM_CLASS(PyrScale, double)
IMPLEMENT_PARAM_CLASS(PolyN, int)
CV_FLAGS(FarnebackOptFlowFlags, 0, OPTFLOW_FARNEBACK_GAUSSIAN)
IMPLEMENT_PARAM_CLASS(UseInitFlow, bool)
}
PARAM_TEST_CASE(FarnebackOpticalFlow, PyrScale, PolyN, FarnebackOptFlowFlags, UseInitFlow)
{
int numLevels;
int winSize;
int numIters;
double pyrScale;
int polyN;
int flags;
bool useInitFlow;
virtual void SetUp()
{
numLevels = 5;
winSize = 13;
numIters = 10;
pyrScale = GET_PARAM(0);
polyN = GET_PARAM(1);
flags = GET_PARAM(2);
useInitFlow = GET_PARAM(3);
}
};
OCL_TEST_P(FarnebackOpticalFlow, Mat)
{
cv::Mat frame0 = readImage("optflow/RubberWhale1.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
cv::Mat frame1 = readImage("optflow/RubberWhale2.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
double polySigma = polyN <= 5 ? 1.1 : 1.5;
cv::Mat flow; cv::UMat uflow;
if (useInitFlow)
{
OCL_ON(cv::calcOpticalFlowFarneback(frame0, frame1, uflow, pyrScale, numLevels, winSize, numIters, polyN, polySigma, flags));
uflow.copyTo(flow);
flags |= cv::OPTFLOW_USE_INITIAL_FLOW;
}
OCL_OFF(cv::calcOpticalFlowFarneback(frame0, frame1, flow, pyrScale, numLevels, winSize, numIters, polyN, polySigma, flags));
OCL_ON(cv::calcOpticalFlowFarneback(frame0, frame1, uflow, pyrScale, numLevels, winSize, numIters, polyN, polySigma, flags));
EXPECT_MAT_SIMILAR(flow, uflow, 0.1);
}
OCL_INSTANTIATE_TEST_CASE_P(Video, FarnebackOpticalFlow,
Combine(
Values(PyrScale(0.3), PyrScale(0.5), PyrScale(0.8)),
Values(PolyN(5), PolyN(7)),
Values(FarnebackOptFlowFlags(0), FarnebackOptFlowFlags(cv::OPTFLOW_FARNEBACK_GAUSSIAN)),
Values(UseInitFlow(false), UseInitFlow(true))
)
);
} } // namespace opencv_test::ocl
#endif // HAVE_OPENCL


@@ -0,0 +1,161 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../test_precomp.hpp"
#include "opencv2/ts/ocl_test.hpp"
#ifdef HAVE_OPENCL
namespace opencv_test { namespace ocl {
/////////////////////////////////////////////////////////////////////////////////////////////////
// PyrLKOpticalFlow
PARAM_TEST_CASE(PyrLKOpticalFlow, int, int)
{
Size winSize;
int maxLevel;
TermCriteria criteria;
int flags;
double minEigThreshold;
virtual void SetUp()
{
winSize = Size(GET_PARAM(0), GET_PARAM(0));
maxLevel = GET_PARAM(1);
criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01);
flags = 0;
minEigThreshold = 1e-4f;
}
};
OCL_TEST_P(PyrLKOpticalFlow, Mat)
{
static const int npoints = 1000;
static const float eps = 0.03f;
static const float erreps = 0.1f;
cv::Mat frame0 = readImage("optflow/RubberWhale1.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
UMat umatFrame0; frame0.copyTo(umatFrame0);
cv::Mat frame1 = readImage("optflow/RubberWhale2.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
UMat umatFrame1; frame1.copyTo(umatFrame1);
// SKIP unstable tests
#ifdef __linux__
if (cvtest::skipUnstableTests && ocl::useOpenCL())
{
if (ocl::Device::getDefault().isIntel())
throw cvtest::SkipTestException("Skip unstable test");
}
#endif
std::vector<cv::Point2f> pts;
cv::goodFeaturesToTrack(frame0, pts, npoints, 0.01, 0.0);
std::vector<cv::Point2f> cpuNextPts;
std::vector<unsigned char> cpuStatusCPU;
std::vector<float> cpuErr;
OCL_OFF(cv::calcOpticalFlowPyrLK(frame0, frame1, pts, cpuNextPts, cpuStatusCPU, cpuErr, winSize, maxLevel, criteria, flags, minEigThreshold));
UMat umatNextPts, umatStatus, umatErr;
OCL_ON(cv::calcOpticalFlowPyrLK(umatFrame0, umatFrame1, pts, umatNextPts, umatStatus, umatErr, winSize, maxLevel, criteria, flags, minEigThreshold));
std::vector<cv::Point2f> nextPts; umatNextPts.reshape(2, 1).copyTo(nextPts);
std::vector<unsigned char> status; umatStatus.reshape(1, 1).copyTo(status);
std::vector<float> err; umatErr.reshape(1, 1).copyTo(err);
ASSERT_EQ(cpuNextPts.size(), nextPts.size());
ASSERT_EQ(cpuStatusCPU.size(), status.size());
size_t mismatch = 0;
size_t errmatch = 0;
for (size_t i = 0; i < nextPts.size(); ++i)
{
if (status[i] != cpuStatusCPU[i])
{
++mismatch;
continue;
}
if (status[i])
{
cv::Point2i a = nextPts[i];
cv::Point2i b = cpuNextPts[i];
bool eq = std::abs(a.x - b.x) < 1 && std::abs(a.y - b.y) < 1;
float errdiff = 0.0f;
if (!eq || errdiff > 1e-1)
{
++mismatch;
continue;
}
eq = std::abs(cpuErr[i] - err[i]) <= (0.01 * std::max(1.0f, cpuErr[i]));
if(!eq)
++errmatch;
}
}
double bad_ratio = static_cast<double>(mismatch) / (nextPts.size());
double err_ratio = static_cast<double>(errmatch) / (nextPts.size());
ASSERT_LE(bad_ratio, eps);
ASSERT_LE(err_ratio, erreps);
}
OCL_INSTANTIATE_TEST_CASE_P(Video, PyrLKOpticalFlow,
Combine(
Values(11, 15, 21, 25),
Values(3, 5)
)
);
}} // namespace
#endif // HAVE_OPENCL


@@ -0,0 +1,175 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
namespace opencv_test { namespace {
static string getDataDir() { return TS::ptr()->get_data_path(); }
static string getRubberWhaleFrame1() { return getDataDir() + "optflow/RubberWhale1.png"; }
static string getRubberWhaleFrame2() { return getDataDir() + "optflow/RubberWhale2.png"; }
static string getRubberWhaleGroundTruth() { return getDataDir() + "optflow/RubberWhale.flo"; }
static bool isFlowCorrect(float u) { return !cvIsNaN(u) && (fabs(u) < 1e9); }
static float calcRMSE(Mat flow1, Mat flow2)
{
float sum = 0;
int counter = 0;
const int rows = flow1.rows;
const int cols = flow1.cols;
for (int y = 0; y < rows; ++y)
{
for (int x = 0; x < cols; ++x)
{
Vec2f flow1_at_point = flow1.at<Vec2f>(y, x);
Vec2f flow2_at_point = flow2.at<Vec2f>(y, x);
float u1 = flow1_at_point[0];
float v1 = flow1_at_point[1];
float u2 = flow2_at_point[0];
float v2 = flow2_at_point[1];
if (isFlowCorrect(u1) && isFlowCorrect(u2) && isFlowCorrect(v1) && isFlowCorrect(v2))
{
sum += (u1 - u2) * (u1 - u2) + (v1 - v2) * (v1 - v2);
counter++;
}
}
}
return (float)sqrt(sum / (1e-9 + counter));
}
bool readRubberWhale(Mat &dst_frame_1, Mat &dst_frame_2, Mat &dst_GT)
{
const string frame1_path = getRubberWhaleFrame1();
const string frame2_path = getRubberWhaleFrame2();
const string gt_flow_path = getRubberWhaleGroundTruth();
dst_frame_1 = imread(frame1_path);
dst_frame_2 = imread(frame2_path);
dst_GT = readOpticalFlow(gt_flow_path);
if (dst_frame_1.empty() || dst_frame_2.empty() || dst_GT.empty())
return false;
else
return true;
}
TEST(DenseOpticalFlow_DIS, ReferenceAccuracy)
{
Mat frame1, frame2, GT;
ASSERT_TRUE(readRubberWhale(frame1, frame2, GT));
int presets[] = {DISOpticalFlow::PRESET_ULTRAFAST, DISOpticalFlow::PRESET_FAST, DISOpticalFlow::PRESET_MEDIUM};
float target_RMSE[] = {0.86f, 0.74f, 0.49f};
cvtColor(frame1, frame1, COLOR_BGR2GRAY);
cvtColor(frame2, frame2, COLOR_BGR2GRAY);
Ptr<DenseOpticalFlow> algo;
// iterate over presets:
for (int i = 0; i < 3; i++)
{
Mat flow;
algo = DISOpticalFlow::create(presets[i]);
algo->calc(frame1, frame2, flow);
ASSERT_EQ(GT.rows, flow.rows);
ASSERT_EQ(GT.cols, flow.cols);
EXPECT_LE(calcRMSE(GT, flow), target_RMSE[i]);
}
}
TEST(DenseOpticalFlow_DIS, InvalidImgSize_CoarsestLevelLessThanZero)
{
cv::Ptr<cv::DISOpticalFlow> of = cv::DISOpticalFlow::create();
const int mat_size = 10;
cv::Mat x(mat_size, mat_size, CV_8UC1, 42);
cv::Mat y(mat_size, mat_size, CV_8UC1, 42);
cv::Mat flow;
ASSERT_THROW(of->calc(x, y, flow), cv::Exception);
}
// make sure that autoSelectPatchSizeAndScales() works properly.
TEST(DenseOpticalFlow_DIS, InvalidImgSize_CoarsestLevelLessThanFinestLevel)
{
cv::Ptr<cv::DISOpticalFlow> of = cv::DISOpticalFlow::create();
const int mat_size = 80;
cv::Mat x(mat_size, mat_size, CV_8UC1, 42);
cv::Mat y(mat_size, mat_size, CV_8UC1, 42);
cv::Mat flow;
of->calc(x, y, flow);
ASSERT_EQ(flow.rows, mat_size);
ASSERT_EQ(flow.cols, mat_size);
}
TEST(DenseOpticalFlow_VariationalRefinement, ReferenceAccuracy)
{
Mat frame1, frame2, GT;
ASSERT_TRUE(readRubberWhale(frame1, frame2, GT));
float target_RMSE = 0.86f;
cvtColor(frame1, frame1, COLOR_BGR2GRAY);
cvtColor(frame2, frame2, COLOR_BGR2GRAY);
Ptr<VariationalRefinement> var_ref;
var_ref = VariationalRefinement::create();
var_ref->setAlpha(20.0f);
var_ref->setDelta(5.0f);
var_ref->setGamma(10.0f);
var_ref->setSorIterations(25);
var_ref->setFixedPointIterations(25);
Mat flow(frame1.size(), CV_32FC2);
flow.setTo(0.0f);
var_ref->calc(frame1, frame2, flow);
ASSERT_EQ(GT.rows, flow.rows);
ASSERT_EQ(GT.cols, flow.cols);
EXPECT_LE(calcRMSE(GT, flow), target_RMSE);
}
}} // namespace


@@ -0,0 +1,160 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
namespace opencv_test { namespace {
typedef tuple<Size> OFParams;
typedef TestWithParam<OFParams> DenseOpticalFlow_DIS;
typedef TestWithParam<OFParams> DenseOpticalFlow_VariationalRefinement;
TEST_P(DenseOpticalFlow_DIS, MultithreadReproducibility)
{
double MAX_DIF = 0.01;
double MAX_MEAN_DIF = 0.001;
int loopsCount = 2;
RNG rng(0);
OFParams params = GetParam();
Size size = get<0>(params);
int nThreads = cv::getNumThreads();
if (nThreads == 1)
throw SkipTestException("Single thread environment");
for (int iter = 0; iter <= loopsCount; iter++)
{
Mat frame1(size, CV_8U);
randu(frame1, 0, 255);
Mat frame2(size, CV_8U);
randu(frame2, 0, 255);
Ptr<DISOpticalFlow> algo = DISOpticalFlow::create();
int psz = rng.uniform(4, 16);
int pstr = rng.uniform(1, psz - 1);
int grad_iter = rng.uniform(1, 64);
int var_iter = rng.uniform(0, 10);
bool use_mean_normalization = !!rng.uniform(0, 2);
bool use_spatial_propagation = !!rng.uniform(0, 2);
algo->setFinestScale(0);
algo->setPatchSize(psz);
algo->setPatchStride(pstr);
algo->setGradientDescentIterations(grad_iter);
algo->setVariationalRefinementIterations(var_iter);
algo->setUseMeanNormalization(use_mean_normalization);
algo->setUseSpatialPropagation(use_spatial_propagation);
cv::setNumThreads(nThreads);
Mat resMultiThread;
algo->calc(frame1, frame2, resMultiThread);
cv::setNumThreads(1);
Mat resSingleThread;
algo->calc(frame1, frame2, resSingleThread);
EXPECT_LE(cv::norm(resSingleThread, resMultiThread, NORM_INF), MAX_DIF);
EXPECT_LE(cv::norm(resSingleThread, resMultiThread, NORM_L1), MAX_MEAN_DIF * frame1.total());
// resulting flow should be within the frame bounds:
double min_val, max_val;
minMaxLoc(resMultiThread, &min_val, &max_val);
EXPECT_LE(abs(min_val), sqrt( static_cast<double>(size.height * size.height + size.width * size.width)) );
EXPECT_LE(abs(max_val), sqrt( static_cast<double>(size.height * size.height + size.width * size.width)) );
}
}
INSTANTIATE_TEST_CASE_P(FullSet, DenseOpticalFlow_DIS, Values(szODD, szQVGA));
TEST_P(DenseOpticalFlow_VariationalRefinement, MultithreadReproducibility)
{
double MAX_DIF = 0.01;
double MAX_MEAN_DIF = 0.001;
float input_flow_rad = 5.0;
int loopsCount = 2;
RNG rng(0);
OFParams params = GetParam();
Size size = get<0>(params);
int nThreads = cv::getNumThreads();
if (nThreads == 1)
throw SkipTestException("Single thread environment");
for (int iter = 0; iter <= loopsCount; iter++)
{
Mat frame1(size, CV_8U);
randu(frame1, 0, 255);
Mat frame2(size, CV_8U);
randu(frame2, 0, 255);
Mat flow(size, CV_32FC2);
randu(flow, -input_flow_rad, input_flow_rad);
Ptr<VariationalRefinement> var = VariationalRefinement::create();
var->setAlpha(rng.uniform(1.0f, 100.0f));
var->setGamma(rng.uniform(0.1f, 10.0f));
var->setDelta(rng.uniform(0.1f, 10.0f));
var->setSorIterations(rng.uniform(1, 20));
var->setFixedPointIterations(rng.uniform(1, 20));
var->setOmega(rng.uniform(1.01f, 1.99f));
cv::setNumThreads(nThreads);
Mat resMultiThread;
flow.copyTo(resMultiThread);
var->calc(frame1, frame2, resMultiThread);
cv::setNumThreads(1);
Mat resSingleThread;
flow.copyTo(resSingleThread);
var->calc(frame1, frame2, resSingleThread);
EXPECT_LE(cv::norm(resSingleThread, resMultiThread, NORM_INF), MAX_DIF);
EXPECT_LE(cv::norm(resSingleThread, resMultiThread, NORM_L1), MAX_MEAN_DIF * frame1.total());
// resulting flow should be within the frame bounds:
double min_val, max_val;
minMaxLoc(resMultiThread, &min_val, &max_val);
EXPECT_LE(abs(min_val), sqrt( static_cast<double>(size.height * size.height + size.width * size.width)) );
EXPECT_LE(abs(max_val), sqrt( static_cast<double>(size.height * size.height + size.width * size.width)) );
}
}
INSTANTIATE_TEST_CASE_P(FullSet, DenseOpticalFlow_VariationalRefinement, Values(szODD, szQVGA));
}} // namespace


@@ -0,0 +1,249 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
namespace opencv_test { namespace {
class CV_AccumBaseTest : public cvtest::ArrayTest
{
public:
CV_AccumBaseTest();
protected:
void get_test_array_types_and_sizes( int test_case_idx, vector<vector<Size> >& sizes, vector<vector<int> >& types );
double get_success_error_level( int test_case_idx, int i, int j );
double alpha;
};
CV_AccumBaseTest::CV_AccumBaseTest()
{
test_array[INPUT].push_back(NULL);
test_array[INPUT_OUTPUT].push_back(NULL);
test_array[REF_INPUT_OUTPUT].push_back(NULL);
test_array[MASK].push_back(NULL);
optional_mask = true;
element_wise_relative_error = false;
} // ctor
void CV_AccumBaseTest::get_test_array_types_and_sizes( int test_case_idx,
vector<vector<Size> >& sizes, vector<vector<int> >& types )
{
RNG& rng = ts->get_rng();
int depth = cvtest::randInt(rng) % 4, cn = cvtest::randInt(rng) & 1 ? 3 : 1;
int accdepth = (int)(cvtest::randInt(rng) % 2 + 1);
int i, input_count = (int)test_array[INPUT].size();
cvtest::ArrayTest::get_test_array_types_and_sizes( test_case_idx, sizes, types );
depth = depth == 0 ? CV_8U : depth == 1 ? CV_16U : depth == 2 ? CV_32F : CV_64F;
accdepth = accdepth == 1 ? CV_32F : CV_64F;
accdepth = MAX(accdepth, depth);
for( i = 0; i < input_count; i++ )
types[INPUT][i] = CV_MAKETYPE(depth,cn);
types[INPUT_OUTPUT][0] = types[REF_INPUT_OUTPUT][0] = CV_MAKETYPE(accdepth,cn);
alpha = cvtest::randReal(rng);
}
double CV_AccumBaseTest::get_success_error_level( int /*test_case_idx*/, int /*i*/, int /*j*/ )
{
return test_mat[INPUT_OUTPUT][0].depth() < CV_64F ||
test_mat[INPUT][0].depth() == CV_32F ? FLT_EPSILON*100 : DBL_EPSILON*1000;
}
/// acc
class CV_AccTest : public CV_AccumBaseTest
{
public:
CV_AccTest() { }
protected:
void run_func();
void prepare_to_validation( int );
};
void CV_AccTest::run_func(void)
{
cvAcc( test_array[INPUT][0], test_array[INPUT_OUTPUT][0], test_array[MASK][0] );
}
void CV_AccTest::prepare_to_validation( int )
{
const Mat& src = test_mat[INPUT][0];
Mat& dst = test_mat[REF_INPUT_OUTPUT][0];
const Mat& mask = test_array[MASK][0] ? test_mat[MASK][0] : Mat();
Mat temp;
cvtest::add( src, 1, dst, 1, cvScalarAll(0.), temp, dst.type() );
cvtest::copy( temp, dst, mask );
}
/// square acc
class CV_SquareAccTest : public CV_AccumBaseTest
{
public:
CV_SquareAccTest();
protected:
void run_func();
void prepare_to_validation( int );
};
CV_SquareAccTest::CV_SquareAccTest()
{
}
void CV_SquareAccTest::run_func()
{
cvSquareAcc( test_array[INPUT][0], test_array[INPUT_OUTPUT][0], test_array[MASK][0] );
}
void CV_SquareAccTest::prepare_to_validation( int )
{
const Mat& src = test_mat[INPUT][0];
Mat& dst = test_mat[REF_INPUT_OUTPUT][0];
const Mat& mask = test_array[MASK][0] ? test_mat[MASK][0] : Mat();
Mat temp;
cvtest::convert( src, temp, dst.type() );
cvtest::multiply( temp, temp, temp, 1 );
cvtest::add( temp, 1, dst, 1, cvScalarAll(0.), temp, dst.depth() );
cvtest::copy( temp, dst, mask );
}
/// multiply acc
class CV_MultiplyAccTest : public CV_AccumBaseTest
{
public:
CV_MultiplyAccTest();
protected:
void run_func();
void prepare_to_validation( int );
};
CV_MultiplyAccTest::CV_MultiplyAccTest()
{
test_array[INPUT].push_back(NULL);
}
void CV_MultiplyAccTest::run_func()
{
cvMultiplyAcc( test_array[INPUT][0], test_array[INPUT][1],
test_array[INPUT_OUTPUT][0], test_array[MASK][0] );
}
void CV_MultiplyAccTest::prepare_to_validation( int )
{
const Mat& src1 = test_mat[INPUT][0];
const Mat& src2 = test_mat[INPUT][1];
Mat& dst = test_mat[REF_INPUT_OUTPUT][0];
const Mat& mask = test_array[MASK][0] ? test_mat[MASK][0] : Mat();
Mat temp1, temp2;
cvtest::convert( src1, temp1, dst.type() );
cvtest::convert( src2, temp2, dst.type() );
cvtest::multiply( temp1, temp2, temp1, 1 );
cvtest::add( temp1, 1, dst, 1, cvScalarAll(0.), temp1, dst.depth() );
cvtest::copy( temp1, dst, mask );
}
/// running average
class CV_RunningAvgTest : public CV_AccumBaseTest
{
public:
CV_RunningAvgTest();
protected:
void run_func();
void prepare_to_validation( int );
};
CV_RunningAvgTest::CV_RunningAvgTest()
{
}
void CV_RunningAvgTest::run_func()
{
cvRunningAvg( test_array[INPUT][0], test_array[INPUT_OUTPUT][0],
alpha, test_array[MASK][0] );
}
void CV_RunningAvgTest::prepare_to_validation( int )
{
const Mat& src = test_mat[INPUT][0];
Mat& dst = test_mat[REF_INPUT_OUTPUT][0];
Mat temp;
const Mat& mask = test_array[MASK][0] ? test_mat[MASK][0] : Mat();
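// quantize alpha and (1 - alpha) through the accumulator depth so the reference
// uses the same weight precision as cvRunningAvg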
double a[1], b[1];
int accdepth = test_mat[INPUT_OUTPUT][0].depth();
CvMat A = cvMat(1,1,accdepth,a), B = cvMat(1,1,accdepth,b);
cvSetReal1D( &A, 0, alpha);
cvSetReal1D( &B, 0, 1 - cvGetReal1D(&A, 0));
cvtest::convert( src, temp, dst.type() );
cvtest::add( src, cvGetReal1D(&A, 0), dst, cvGetReal1D(&B, 0), cvScalarAll(0.), temp, temp.depth() );
cvtest::copy( temp, dst, mask );
}
TEST(Video_Acc, accuracy) { CV_AccTest test; test.safe_run(); }
TEST(Video_AccSquared, accuracy) { CV_SquareAccTest test; test.safe_run(); }
TEST(Video_AccProduct, accuracy) { CV_MultiplyAccTest test; test.safe_run(); }
TEST(Video_RunningAvg, accuracy) { CV_RunningAvgTest test; test.safe_run(); }
}} // namespace


@@ -0,0 +1,466 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#include "opencv2/video/tracking.hpp"
namespace opencv_test { namespace {
using namespace cv;
class CV_TrackBaseTest : public cvtest::BaseTest
{
public:
CV_TrackBaseTest();
virtual ~CV_TrackBaseTest();
void clear();
protected:
int read_params( const cv::FileStorage& fs );
void run_func(void);
int prepare_test_case( int test_case_idx );
int validate_test_results( int test_case_idx );
void generate_object();
int min_log_size, max_log_size;
Mat img;
RotatedRect box0;
Size img_size;
TermCriteria criteria;
int img_type;
};
CV_TrackBaseTest::CV_TrackBaseTest()
{
img = 0;
test_case_count = 100;
min_log_size = 5;
max_log_size = 8;
}
CV_TrackBaseTest::~CV_TrackBaseTest()
{
clear();
}
void CV_TrackBaseTest::clear()
{
img.release();
cvtest::BaseTest::clear();
}
int CV_TrackBaseTest::read_params( const cv::FileStorage& fs )
{
int code = cvtest::BaseTest::read_params( fs );
if( code < 0 )
return code;
read( find_param( fs, "test_case_count" ), test_case_count, test_case_count );
read( find_param( fs, "min_log_size" ), min_log_size, min_log_size );
read( find_param( fs, "max_log_size" ), max_log_size, max_log_size );
min_log_size = cvtest::clipInt( min_log_size, 1, 10 );
max_log_size = cvtest::clipInt( max_log_size, 1, 10 );
if( min_log_size > max_log_size )
{
std::swap( min_log_size, max_log_size );
}
return 0;
}
void CV_TrackBaseTest::generate_object()
{
int x, y;
double cx = box0.center.x;
double cy = box0.center.y;
double width = box0.size.width*0.5;
double height = box0.size.height*0.5;
double angle = box0.angle*CV_PI/180.;
double a = sin(angle), b = -cos(angle);
double inv_ww = 1./(width*width), inv_hh = 1./(height*height);
img = Mat::zeros( img_size.height, img_size.width, img_type );
// use the straightforward algorithm: for every pixel check if it is inside the ellipse
for( y = 0; y < img_size.height; y++ )
{
uchar* ptr = img.ptr(y);
float* fl = (float*)ptr;
double x_ = (y - cy)*b, y_ = (y - cy)*a;
for( x = 0; x < img_size.width; x++ )
{
double x1 = (x - cx)*a - x_;
double y1 = (x - cx)*b + y_;
if( x1*x1*inv_hh + y1*y1*inv_ww <= 1. )
{
if( img_type == CV_8U )
ptr[x] = (uchar)1;
else
fl[x] = (float)1.f;
}
}
}
}
int CV_TrackBaseTest::prepare_test_case( int test_case_idx )
{
RNG& rng = ts->get_rng();
cvtest::BaseTest::prepare_test_case( test_case_idx );
float m;
clear();
box0.size.width = (float)exp((cvtest::randReal(rng) * (max_log_size - min_log_size) + min_log_size)*CV_LOG2);
box0.size.height = (float)exp((cvtest::randReal(rng) * (max_log_size - min_log_size) + min_log_size)*CV_LOG2);
box0.angle = (float)(cvtest::randReal(rng)*180.);
if( box0.size.width > box0.size.height )
{
std::swap( box0.size.width, box0.size.height );
}
m = MAX( box0.size.width, box0.size.height );
img_size.width = cvRound(cvtest::randReal(rng)*m*0.5 + m + 1);
img_size.height = cvRound(cvtest::randReal(rng)*m*0.5 + m + 1);
img_type = cvtest::randInt(rng) % 2 ? CV_32F : CV_8U;
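// note: the randomly chosen depth is immediately overridden below, so only CV_8U images are exercised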
img_type = CV_8U;
box0.center.x = (float)(img_size.width*0.5 + (cvtest::randReal(rng)-0.5)*(img_size.width - m));
box0.center.y = (float)(img_size.height*0.5 + (cvtest::randReal(rng)-0.5)*(img_size.height - m));
criteria = TermCriteria( TermCriteria::EPS + TermCriteria::MAX_ITER, 10, 0.1 );
generate_object();
return 1;
}
void CV_TrackBaseTest::run_func(void)
{
}
int CV_TrackBaseTest::validate_test_results( int /*test_case_idx*/ )
{
return 0;
}
///////////////////////// CamShift //////////////////////////////
class CV_CamShiftTest : public CV_TrackBaseTest
{
public:
CV_CamShiftTest();
protected:
void run_func(void);
int prepare_test_case( int test_case_idx );
int validate_test_results( int test_case_idx );
void generate_object();
RotatedRect box;
Rect init_rect;
int area0;
};
CV_CamShiftTest::CV_CamShiftTest()
{
}
int CV_CamShiftTest::prepare_test_case( int test_case_idx )
{
RNG& rng = ts->get_rng();
double m;
int code = CV_TrackBaseTest::prepare_test_case( test_case_idx );
int i, area;
if( code <= 0 )
return code;
area0 = countNonZero(img);
for(i = 0; i < 100; i++)
{
m = MAX(box0.size.width,box0.size.height)*0.8;
init_rect.x = cvFloor(box0.center.x - m*(0.45 + cvtest::randReal(rng)*0.2));
init_rect.y = cvFloor(box0.center.y - m*(0.45 + cvtest::randReal(rng)*0.2));
init_rect.width = cvCeil(box0.center.x + m*(0.45 + cvtest::randReal(rng)*0.2) - init_rect.x);
init_rect.height = cvCeil(box0.center.y + m*(0.45 + cvtest::randReal(rng)*0.2) - init_rect.y);
if( init_rect.x < 0 || init_rect.y < 0 ||
init_rect.x + init_rect.width >= img_size.width ||
init_rect.y + init_rect.height >= img_size.height )
continue;
Mat temp = img(init_rect);
area = countNonZero( temp );
if( area >= 0.1*area0 )
break;
}
return i < 100 ? code : 0;
}
void CV_CamShiftTest::run_func(void)
{
box = CamShift( img, init_rect, criteria );
}
int CV_CamShiftTest::validate_test_results( int /*test_case_idx*/ )
{
int code = cvtest::TS::OK;
double m = MAX(box0.size.width, box0.size.height);
double diff_angle;
if( cvIsNaN(box.size.width) || cvIsInf(box.size.width) || box.size.width <= 0 ||
cvIsNaN(box.size.height) || cvIsInf(box.size.height) || box.size.height <= 0 ||
cvIsNaN(box.center.x) || cvIsInf(box.center.x) ||
cvIsNaN(box.center.y) || cvIsInf(box.center.y) ||
cvIsNaN(box.angle) || cvIsInf(box.angle) || box.angle < -180 || box.angle > 180 )
{
ts->printf( cvtest::TS::LOG, "Invalid CvBox2D or CvConnectedComp was returned by cvCamShift\n" );
code = cvtest::TS::FAIL_INVALID_OUTPUT;
goto _exit_;
}
box.angle = (float)(180 - box.angle);
if( fabs(box.size.width - box0.size.width) > box0.size.width*0.2 ||
fabs(box.size.height - box0.size.height) > box0.size.height*0.3 )
{
ts->printf( cvtest::TS::LOG, "Incorrect CvBox2D size (=%.1f x %.1f, should be %.1f x %.1f)\n",
box.size.width, box.size.height, box0.size.width, box0.size.height );
code = cvtest::TS::FAIL_BAD_ACCURACY;
goto _exit_;
}
if( fabs(box.center.x - box0.center.x) > m*0.1 ||
fabs(box.center.y - box0.center.y) > m*0.1 )
{
ts->printf( cvtest::TS::LOG, "Incorrect CvBox2D position (=(%.1f, %.1f), should be (%.1f, %.1f))\n",
box.center.x, box.center.y, box0.center.x, box0.center.y );
code = cvtest::TS::FAIL_BAD_ACCURACY;
goto _exit_;
}
if( box.angle < 0 )
box.angle += 180;
diff_angle = fabs(box0.angle - box.angle);
diff_angle = MIN( diff_angle, fabs(box0.angle - box.angle + 180));
if( fabs(diff_angle) > 30 && box0.size.height > box0.size.width*1.2 )
{
ts->printf( cvtest::TS::LOG, "Incorrect CvBox2D angle (=%1.f, should be %1.f)\n",
box.angle, box0.angle );
code = cvtest::TS::FAIL_BAD_ACCURACY;
goto _exit_;
}
_exit_:
if( code < 0 )
{
#if 0 //defined _DEBUG && defined _WIN32
IplImage* dst = cvCreateImage( img_size, 8, 3 );
cvNamedWindow( "test", 1 );
cvCmpS( img, 0, img, CV_CMP_GT );
cvCvtColor( img, dst, CV_GRAY2BGR );
cvRectangle( dst, cvPoint(init_rect.x, init_rect.y),
cvPoint(init_rect.x + init_rect.width, init_rect.y + init_rect.height),
CV_RGB(255,0,0), 3, 8, 0 );
cvEllipseBox( dst, box, CV_RGB(0,255,0), 3, 8, 0 );
cvShowImage( "test", dst );
cvReleaseImage( &dst );
cvWaitKey();
#endif
ts->set_failed_test_info( code );
}
return code;
}
///////////////////////// MeanShift //////////////////////////////
class CV_MeanShiftTest : public CV_TrackBaseTest
{
public:
CV_MeanShiftTest();
protected:
void run_func(void);
int prepare_test_case( int test_case_idx );
int validate_test_results( int test_case_idx );
void generate_object();
Rect init_rect, rect;
int area0, area;
};
CV_MeanShiftTest::CV_MeanShiftTest()
{
}
int CV_MeanShiftTest::prepare_test_case( int test_case_idx )
{
RNG& rng = ts->get_rng();
double m;
int code = CV_TrackBaseTest::prepare_test_case( test_case_idx );
int i;
if( code <= 0 )
return code;
area0 = countNonZero(img);
for(i = 0; i < 100; i++)
{
m = (box0.size.width + box0.size.height)*0.5;
init_rect.x = cvFloor(box0.center.x - m*(0.4 + cvtest::randReal(rng)*0.2));
init_rect.y = cvFloor(box0.center.y - m*(0.4 + cvtest::randReal(rng)*0.2));
init_rect.width = cvCeil(box0.center.x + m*(0.4 + cvtest::randReal(rng)*0.2) - init_rect.x);
init_rect.height = cvCeil(box0.center.y + m*(0.4 + cvtest::randReal(rng)*0.2) - init_rect.y);
if( init_rect.x < 0 || init_rect.y < 0 ||
init_rect.x + init_rect.width >= img_size.width ||
init_rect.y + init_rect.height >= img_size.height )
continue;
Mat temp = img(init_rect);
area = countNonZero( temp );
if( area >= 0.5*area0 )
break;
}
return i < 100 ? code : 0;
}
void CV_MeanShiftTest::run_func(void)
{
rect = init_rect;
meanShift( img, rect, criteria );
}
int CV_MeanShiftTest::validate_test_results( int /*test_case_idx*/ )
{
int code = cvtest::TS::OK;
Point2f c;
double m = MAX(box0.size.width, box0.size.height), delta;
c.x = (float)(rect.x + rect.width*0.5);
c.y = (float)(rect.y + rect.height*0.5);
if( fabs(c.x - box0.center.x) > m*0.1 ||
fabs(c.y - box0.center.y) > m*0.1 )
{
ts->printf( cvtest::TS::LOG, "Incorrect CvBox2D position (=(%.1f, %.1f), should be (%.1f, %.1f))\n",
c.x, c.y, box0.center.x, box0.center.y );
code = cvtest::TS::FAIL_BAD_ACCURACY;
goto _exit_;
}
delta = m*0.7;
if( rect.x < box0.center.x - delta ||
rect.y < box0.center.y - delta ||
rect.x + rect.width > box0.center.x + delta ||
rect.y + rect.height > box0.center.y + delta )
{
ts->printf( cvtest::TS::LOG,
"Incorrect CvConnectedComp ((%d,%d,%d,%d) is not within (%.1f,%.1f,%.1f,%.1f))\n",
rect.x, rect.y, rect.x + rect.width, rect.y + rect.height,
box0.center.x - delta, box0.center.y - delta, box0.center.x + delta, box0.center.y + delta );
code = cvtest::TS::FAIL_BAD_ACCURACY;
}
_exit_:
if( code < 0 )
{
#if 0// defined _DEBUG && defined _WIN32
IplImage* dst = cvCreateImage( img_size, 8, 3 );
cvNamedWindow( "test", 1 );
cvCmpS( img, 0, img, CV_CMP_GT );
cvCvtColor( img, dst, CV_GRAY2BGR );
cvRectangle( dst, cvPoint(init_rect.x, init_rect.y),
cvPoint(init_rect.x + init_rect.width, init_rect.y + init_rect.height),
CV_RGB(255,0,0), 3, 8, 0 );
cvRectangle( dst, cvPoint(comp.rect.x, comp.rect.y),
cvPoint(comp.rect.x + comp.rect.width, comp.rect.y + comp.rect.height),
CV_RGB(0,255,0), 3, 8, 0 );
cvShowImage( "test", dst );
cvReleaseImage( &dst );
cvWaitKey();
#endif
ts->set_failed_test_info( code );
}
return code;
}
TEST(Video_CAMShift, accuracy) { CV_CamShiftTest test; test.safe_run(); }
TEST(Video_MeanShift, accuracy) { CV_MeanShiftTest test; test.safe_run(); }
}} // namespace
/* End of file. */


@@ -0,0 +1,522 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
namespace opencv_test { namespace {
class CV_ECC_BaseTest : public cvtest::BaseTest
{
public:
CV_ECC_BaseTest();
protected:
double computeRMS(const Mat& mat1, const Mat& mat2);
bool isMapCorrect(const Mat& mat);
double MAX_RMS_ECC;//upper bound for RMS error
int ntests;//number of tests per motion type
int ECC_iterations;//number of iterations for ECC
double ECC_epsilon; //we choose a negative value, so that
// ECC_iterations are always executed
};
CV_ECC_BaseTest::CV_ECC_BaseTest()
{
MAX_RMS_ECC=0.1;
ntests = 3;
ECC_iterations = 50;
ECC_epsilon = -1; //-> negative value means that ECC_Iterations will be executed
}
bool CV_ECC_BaseTest::isMapCorrect(const Mat& map)
{
bool tr = true;
float mapVal;
for(int i =0; i<map.rows; i++)
for(int j=0; j<map.cols; j++){
mapVal = map.at<float>(i, j);
tr = tr & (!cvIsNaN(mapVal) && (fabs(mapVal) < 1e9));
}
return tr;
}
double CV_ECC_BaseTest::computeRMS(const Mat& mat1, const Mat& mat2){
CV_Assert(mat1.rows == mat2.rows);
CV_Assert(mat1.cols == mat2.cols);
Mat errorMat;
subtract(mat1, mat2, errorMat);
return sqrt(errorMat.dot(errorMat)/(mat1.rows*mat1.cols));
}
class CV_ECC_Test_Translation : public CV_ECC_BaseTest
{
public:
CV_ECC_Test_Translation();
protected:
void run(int);
bool testTranslation(int);
};
CV_ECC_Test_Translation::CV_ECC_Test_Translation(){}
bool CV_ECC_Test_Translation::testTranslation(int from)
{
Mat img = imread( string(ts->get_data_path()) + "shared/fruits.png", 0);
if (img.empty())
{
ts->printf( ts->LOG, "test image can not be read");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return false;
}
Mat testImg;
resize(img, testImg, Size(216, 216), 0, 0, INTER_LINEAR_EXACT);
cv::RNG rng = ts->get_rng();
int progress=0;
for (int k=from; k<ntests; k++){
ts->update_context( this, k, true );
progress = update_progress(progress, k, ntests, 0);
Mat translationGround = (Mat_<float>(2,3) << 1, 0, (rng.uniform(10.f, 20.f)),
0, 1, (rng.uniform(10.f, 20.f)));
Mat warpedImage;
warpAffine(testImg, warpedImage, translationGround,
Size(200,200), INTER_LINEAR + WARP_INVERSE_MAP);
Mat mapTranslation = (Mat_<float>(2,3) << 1, 0, 0, 0, 1, 0);
findTransformECC(warpedImage, testImg, mapTranslation, 0,
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, ECC_iterations, ECC_epsilon));
if (!isMapCorrect(mapTranslation)){
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
return false;
}
if (computeRMS(mapTranslation, translationGround)>MAX_RMS_ECC){
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
ts->printf( ts->LOG, "RMS = %f",
computeRMS(mapTranslation, translationGround));
return false;
}
}
return true;
}
void CV_ECC_Test_Translation::run(int from)
{
if (!testTranslation(from))
return;
ts->set_failed_test_info(cvtest::TS::OK);
}
class CV_ECC_Test_Euclidean : public CV_ECC_BaseTest
{
public:
CV_ECC_Test_Euclidean();
protected:
void run(int);
bool testEuclidean(int);
};
CV_ECC_Test_Euclidean::CV_ECC_Test_Euclidean() { }
bool CV_ECC_Test_Euclidean::testEuclidean(int from)
{
Mat img = imread( string(ts->get_data_path()) + "shared/fruits.png", 0);
if (img.empty())
{
ts->printf( ts->LOG, "test image can not be read");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return false;
}
Mat testImg;
resize(img, testImg, Size(216, 216), 0, 0, INTER_LINEAR_EXACT);
cv::RNG rng = ts->get_rng();
int progress = 0;
for (int k=from; k<ntests; k++){
ts->update_context( this, k, true );
progress = update_progress(progress, k, ntests, 0);
double angle = CV_PI/30 + CV_PI*rng.uniform((double)-2.f, (double)2.f)/180;
Mat euclideanGround = (Mat_<float>(2,3) << cos(angle), -sin(angle), (rng.uniform(10.f, 20.f)),
sin(angle), cos(angle), (rng.uniform(10.f, 20.f)));
Mat warpedImage;
warpAffine(testImg, warpedImage, euclideanGround,
Size(200,200), INTER_LINEAR + WARP_INVERSE_MAP);
Mat mapEuclidean = (Mat_<float>(2,3) << 1, 0, 0, 0, 1, 0);
findTransformECC(warpedImage, testImg, mapEuclidean, 1,
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, ECC_iterations, ECC_epsilon));
if (!isMapCorrect(mapEuclidean)){
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
return false;
}
if (computeRMS(mapEuclidean, euclideanGround)>MAX_RMS_ECC){
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
ts->printf( ts->LOG, "RMS = %f",
computeRMS(mapEuclidean, euclideanGround));
return false;
}
}
return true;
}
void CV_ECC_Test_Euclidean::run(int from)
{
if (!testEuclidean(from))
return;
ts->set_failed_test_info(cvtest::TS::OK);
}
class CV_ECC_Test_Affine : public CV_ECC_BaseTest
{
public:
CV_ECC_Test_Affine();
protected:
void run(int);
bool testAffine(int);
};
CV_ECC_Test_Affine::CV_ECC_Test_Affine(){}
bool CV_ECC_Test_Affine::testAffine(int from)
{
Mat img = imread( string(ts->get_data_path()) + "shared/fruits.png", 0);
if (img.empty())
{
ts->printf( ts->LOG, "test image can not be read");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return false;
}
Mat testImg;
resize(img, testImg, Size(216, 216), 0, 0, INTER_LINEAR_EXACT);
cv::RNG rng = ts->get_rng();
int progress = 0;
for (int k=from; k<ntests; k++){
ts->update_context( this, k, true );
progress = update_progress(progress, k, ntests, 0);
Mat affineGround = (Mat_<float>(2,3) << (1-rng.uniform(-0.05f, 0.05f)),
(rng.uniform(-0.03f, 0.03f)), (rng.uniform(10.f, 20.f)),
(rng.uniform(-0.03f, 0.03f)), (1-rng.uniform(-0.05f, 0.05f)),
(rng.uniform(10.f, 20.f)));
Mat warpedImage;
warpAffine(testImg, warpedImage, affineGround,
Size(200,200), INTER_LINEAR + WARP_INVERSE_MAP);
Mat mapAffine = (Mat_<float>(2,3) << 1, 0, 0, 0, 1, 0);
findTransformECC(warpedImage, testImg, mapAffine, 2,
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, ECC_iterations, ECC_epsilon));
if (!isMapCorrect(mapAffine)){
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
return false;
}
if (computeRMS(mapAffine, affineGround)>MAX_RMS_ECC){
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
ts->printf( ts->LOG, "RMS = %f",
computeRMS(mapAffine, affineGround));
return false;
}
}
return true;
}
void CV_ECC_Test_Affine::run(int from)
{
if (!testAffine(from))
return;
ts->set_failed_test_info(cvtest::TS::OK);
}
class CV_ECC_Test_Homography : public CV_ECC_BaseTest
{
public:
CV_ECC_Test_Homography();
protected:
void run(int);
bool testHomography(int);
};
CV_ECC_Test_Homography::CV_ECC_Test_Homography(){}
bool CV_ECC_Test_Homography::testHomography(int from)
{
Mat img = imread( string(ts->get_data_path()) + "shared/fruits.png", 0);
if (img.empty())
{
ts->printf( ts->LOG, "test image can not be read");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return false;
}
Mat testImg;
resize(img, testImg, Size(216, 216), 0, 0, INTER_LINEAR_EXACT);
cv::RNG rng = ts->get_rng();
int progress = 0;
for (int k=from; k<ntests; k++){
ts->update_context( this, k, true );
progress = update_progress(progress, k, ntests, 0);
Mat homoGround = (Mat_<float>(3,3) << (1-rng.uniform(-0.05f, 0.05f)),
(rng.uniform(-0.03f, 0.03f)), (rng.uniform(10.f, 20.f)),
(rng.uniform(-0.03f, 0.03f)), (1-rng.uniform(-0.05f, 0.05f)),(rng.uniform(10.f, 20.f)),
(rng.uniform(0.0001f, 0.0003f)), (rng.uniform(0.0001f, 0.0003f)), 1.f);
Mat warpedImage;
warpPerspective(testImg, warpedImage, homoGround,
Size(200,200), INTER_LINEAR + WARP_INVERSE_MAP);
Mat mapHomography = Mat::eye(3, 3, CV_32F);
findTransformECC(warpedImage, testImg, mapHomography, 3,
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, ECC_iterations, ECC_epsilon));
if (!isMapCorrect(mapHomography)){
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
return false;
}
if (computeRMS(mapHomography, homoGround)>MAX_RMS_ECC){
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
ts->printf( ts->LOG, "RMS = %f",
computeRMS(mapHomography, homoGround));
return false;
}
}
return true;
}
void CV_ECC_Test_Homography::run(int from)
{
if (!testHomography(from))
return;
ts->set_failed_test_info(cvtest::TS::OK);
}
class CV_ECC_Test_Mask : public CV_ECC_BaseTest
{
public:
CV_ECC_Test_Mask();
protected:
void run(int);
bool testMask(int);
};
CV_ECC_Test_Mask::CV_ECC_Test_Mask(){}
bool CV_ECC_Test_Mask::testMask(int from)
{
Mat img = imread( string(ts->get_data_path()) + "shared/fruits.png", IMREAD_GRAYSCALE);
if (img.empty())
{
ts->printf( ts->LOG, "test image cannot be read");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return false;
}
Mat scaledImage;
resize(img, scaledImage, Size(216, 216), 0, 0, INTER_LINEAR_EXACT );
Mat_<float> testImg;
scaledImage.convertTo(testImg, testImg.type());
cv::RNG rng = ts->get_rng();
int progress=0;
for (int k=from; k<ntests; k++){
ts->update_context( this, k, true );
progress = update_progress(progress, k, ntests, 0);
Mat translationGround = (Mat_<float>(2,3) << 1, 0, (rng.uniform(10.f, 20.f)),
0, 1, (rng.uniform(10.f, 20.f)));
Mat warpedImage;
warpAffine(testImg, warpedImage, translationGround,
Size(200,200), INTER_LINEAR + WARP_INVERSE_MAP);
Mat mapTranslation = (Mat_<float>(2,3) << 1, 0, 0, 0, 1, 0);
Mat_<unsigned char> mask = Mat_<unsigned char>::ones(testImg.rows, testImg.cols);
for (int i=testImg.rows*2/3; i<testImg.rows; i++) {
for (int j=testImg.cols*2/3; j<testImg.cols; j++) {
testImg(i, j) = 0;
mask(i, j) = 0;
}
}
findTransformECC(warpedImage, testImg, mapTranslation, 0,
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, ECC_iterations, ECC_epsilon), mask);
if (!isMapCorrect(mapTranslation)){
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
return false;
}
if (computeRMS(mapTranslation, translationGround)>MAX_RMS_ECC){
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
ts->printf( ts->LOG, "RMS = %f",
computeRMS(mapTranslation, translationGround));
return false;
}
// Test with a non-default Gaussian blur size (gaussFiltSize = 1).
findTransformECC(warpedImage, testImg, mapTranslation, 0,
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, ECC_iterations, ECC_epsilon), mask, 1);
if (!isMapCorrect(mapTranslation)){
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
return false;
}
if (computeRMS(mapTranslation, translationGround)>MAX_RMS_ECC){
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
ts->printf( ts->LOG, "RMS = %f",
computeRMS(mapTranslation, translationGround));
return false;
}
}
return true;
}
void CV_ECC_Test_Mask::run(int from)
{
if (!testMask(from))
return;
ts->set_failed_test_info(cvtest::TS::OK);
}
TEST(Video_ECC_Test_Compute, accuracy)
{
Mat testImg = (Mat_<float>(3, 3) << 1, 0, 0, 1, 0, 0, 1, 0, 0);
Mat warpedImage = (Mat_<float>(3, 3) << 0, 1, 0, 0, 1, 0, 0, 1, 0);
Mat_<unsigned char> mask = Mat_<unsigned char>::ones(testImg.rows, testImg.cols);
double ecc = computeECC(warpedImage, testImg, mask);
EXPECT_NEAR(ecc, -0.5f, 1e-5f);
}
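// Editorial note, not part of the original test: the expected value follows from ECC being the
// correlation coefficient of the zero-mean images. Both 3 x 3 inputs have mean 1/3, each zero-mean
// row is (2/3, -1/3, -1/3) for testImg and (-1/3, 2/3, -1/3) for warpedImage, so the inner product
// over all nine pixels is -1 while each image has L2 norm sqrt(2), giving -1 / (sqrt(2) * sqrt(2)) = -0.5.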
TEST(Video_ECC_Test_Compute, bug_14657)
{
/*
* Simple test case - a 2 x 2 matrix with values 10, 10, 10, 6. When the mean (36 / 4 = 9) is subtracted,
* the unsigned int case saturates to 1, 1, 1, 0 - compare to 1, 1, 1, -3 in the signed case.
* For this reason, when the same matrix was provided as both the input and the template, the result was not 1 as expected.
* (An illustrative sketch of this saturation follows the test below.)
*/
Mat img = (Mat_<uint8_t>(2, 2) << 10, 10, 10, 6);
EXPECT_NEAR(computeECC(img, img), 1.0f, 1e-5f);
}
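// Illustrative sketch of the saturation described above (editorial note, not part of the test suite;
// assumes only core OpenCV calls):
//
//     Mat m = (Mat_<uint8_t>(2, 2) << 10, 10, 10, 6);
//     Mat d8, d32;
//     subtract(m, mean(m), d8);                         // CV_8U output saturates: 1, 1, 1, 0
//     subtract(m, mean(m), d32, noArray(), CV_32F);     // signed output keeps -3:  1, 1, 1, -3
//
// With the unsigned intermediate, the zero-mean versions of identical input and template differ,
// which is why computeECC() returned a value below 1 before the fix.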
TEST(Video_ECC_Translation, accuracy) { CV_ECC_Test_Translation test; test.safe_run();}
TEST(Video_ECC_Euclidean, accuracy) { CV_ECC_Test_Euclidean test; test.safe_run(); }
TEST(Video_ECC_Affine, accuracy) { CV_ECC_Test_Affine test; test.safe_run(); }
TEST(Video_ECC_Homography, accuracy) { CV_ECC_Test_Homography test; test.safe_run(); }
TEST(Video_ECC_Mask, accuracy) { CV_ECC_Test_Mask test; test.safe_run(); }
}} // namespace

View File

@ -0,0 +1,184 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
// this is test for a deprecated function. let's ignore deprecated warnings in this file
#if defined(__clang__)
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#elif defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#elif defined(_MSC_VER)
#pragma warning( disable : 4996)
#endif
namespace opencv_test { namespace {
class CV_RigidTransform_Test : public cvtest::BaseTest
{
public:
CV_RigidTransform_Test();
~CV_RigidTransform_Test();
protected:
void run(int);
bool testNPoints(int);
bool testImage();
};
CV_RigidTransform_Test::CV_RigidTransform_Test()
{
}
CV_RigidTransform_Test::~CV_RigidTransform_Test() {}
struct WrapAff2D
{
const double *F;
WrapAff2D(const Mat& aff) : F(aff.ptr<double>()) {}
Point2f operator()(const Point2f& p)
{
return Point2f( (float)(p.x * F[0] + p.y * F[1] + F[2]),
(float)(p.x * F[3] + p.y * F[4] + F[5]) );
}
};
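// Editorial note, not part of the original test: WrapAff2D applies the 2x3 affine matrix
// [F0 F1 F2; F3 F4 F5] to a point. For example, with aff = (Mat_<double>(2, 3) << 1, 0, 2, 0, 1, 3)
// the point (1, 1) maps to (1*1 + 1*0 + 2, 1*0 + 1*1 + 3) = (3, 4), i.e. a pure translation by (2, 3).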
bool CV_RigidTransform_Test::testNPoints(int from)
{
cv::RNG rng = ts->get_rng();
int progress = 0;
int k, ntests = 10000;
for( k = from; k < ntests; k++ )
{
ts->update_context( this, k, true );
progress = update_progress(progress, k, ntests, 0);
Mat aff(2, 3, CV_64F);
rng.fill(aff, RNG::UNIFORM, Scalar(-2), Scalar(2));
int n = (unsigned)rng % 100 + 10;
Mat fpts(1, n, CV_32FC2);
Mat tpts(1, n, CV_32FC2);
rng.fill(fpts, RNG::UNIFORM, Scalar(0,0), Scalar(10,10));
std::transform(fpts.ptr<Point2f>(), fpts.ptr<Point2f>() + n, tpts.ptr<Point2f>(), WrapAff2D(aff));
Mat noise(1, n, CV_32FC2);
rng.fill(noise, RNG::NORMAL, Scalar::all(0), Scalar::all(0.001*(n<=7 ? 0 : n <= 30 ? 1 : 10)));
tpts += noise;
Mat aff_est = estimateRigidTransform(fpts, tpts, true);
double thres = 0.1*cvtest::norm(aff, NORM_L2);
double d = cvtest::norm(aff_est, aff, NORM_L2);
if (d > thres)
{
double dB=0, nB=0;
if (n <= 4)
{
Mat A = fpts.reshape(1, 3);
Mat B = A - repeat(A.row(0), 3, 1), Bt = B.t();
B = Bt*B;
dB = cv::determinant(B);
nB = cvtest::norm(B, NORM_L2);
if( fabs(dB) < 0.01*nB )
continue;
}
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
ts->printf( cvtest::TS::LOG, "Threshold = %f, norm of difference = %f", thres, d );
return false;
}
}
return true;
}
bool CV_RigidTransform_Test::testImage()
{
Mat img;
Mat testImg = imread( string(ts->get_data_path()) + "shared/graffiti.png", IMREAD_COLOR);
if (testImg.empty())
{
ts->printf( ts->LOG, "test image cannot be read");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return false;
}
pyrDown(testImg, img);
Mat aff = cv::getRotationMatrix2D(Point(img.cols/2, img.rows/2), 1, 0.99);
aff.ptr<double>()[2]+=3;
aff.ptr<double>()[5]+=3;
Mat rotated;
warpAffine(img, rotated, aff, img.size());
Mat aff_est = estimateRigidTransform(img, rotated, true);
const double thres = 0.033;
if (cvtest::norm(aff_est, aff, NORM_INF) > thres)
{
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
ts->printf( cvtest::TS::LOG, "Threshold = %f, norm of difference = %f", thres,
cvtest::norm(aff_est, aff, NORM_INF) );
return false;
}
return true;
}
void CV_RigidTransform_Test::run( int start_from )
{
cvtest::DefaultRngAuto dra;
if (!testNPoints(start_from))
return;
if (!testImage())
return;
ts->set_failed_test_info(cvtest::TS::OK);
}
TEST(Video_RigidFlow, accuracy) { CV_RigidTransform_Test test; test.safe_run(); }
}} // namespace

View File

@ -0,0 +1,113 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#include "opencv2/video/tracking.hpp"
namespace opencv_test { namespace {
class CV_KalmanTest : public cvtest::BaseTest
{
public:
CV_KalmanTest();
protected:
void run(int);
};
CV_KalmanTest::CV_KalmanTest()
{
}
void CV_KalmanTest::run( int )
{
int code = cvtest::TS::OK;
const int Dim = 7;
const int Steps = 100;
const double max_init = 1;
const double max_noise = 0.1;
const double EPSILON = 1.000;
RNG& rng = ts->get_rng();
int i, j;
cv::Mat Sample(Dim,1,CV_32F);
cv::Mat Temp(Dim,1,CV_32F);
cv::KalmanFilter Kalm(Dim, Dim);
Kalm.transitionMatrix = cv::Mat::eye(Dim, Dim, CV_32F);
Kalm.measurementMatrix = cv::Mat::eye(Dim, Dim, CV_32F);
Kalm.processNoiseCov = cv::Mat::eye(Dim, Dim, CV_32F);
Kalm.errorCovPre = cv::Mat::eye(Dim, Dim, CV_32F);
Kalm.errorCovPost = cv::Mat::eye(Dim, Dim, CV_32F);
Kalm.measurementNoiseCov = cv::Mat::zeros(Dim, Dim, CV_32F);
Kalm.statePre = cv::Mat::zeros(Dim, 1, CV_32F);
Kalm.statePost = cv::Mat::zeros(Dim, 1, CV_32F);
cvtest::randUni(rng, Sample, Scalar::all(-max_init), Scalar::all(max_init));
Kalm.correct(Sample);
for(i = 0; i<Steps; i++)
{
Kalm.predict();
const Mat& Dyn = Kalm.transitionMatrix;
for(j = 0; j<Dim; j++)
{
float t = 0;
for(int k=0; k<Dim; k++)
{
t += Dyn.at<float>(j,k)*Sample.at<float>(k);
}
Temp.at<float>(j) = (float)(t+(cvtest::randReal(rng)*2-1)*max_noise);
}
Temp.copyTo(Sample);
Kalm.correct(Temp);
}
Mat _state_post = Kalm.statePost;
code = cvtest::cmpEps2( ts, Sample, _state_post, EPSILON, false, "The final estimated state" );
if( code < 0 )
ts->set_failed_test_info( code );
}
TEST(Video_Kalman, accuracy) { CV_KalmanTest test; test.safe_run(); }
}} // namespace
/* End of file. */

View File

@ -0,0 +1,25 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
#if defined(HAVE_HPX)
#include <hpx/hpx_main.hpp>
#endif
static
void initTests()
{
const char* extraTestDataPath =
#ifdef WINRT
NULL;
#else
getenv("OPENCV_DNN_TEST_DATA_PATH");
#endif
if (extraTestDataPath)
cvtest::addDataSearchPath(extraTestDataPath);
cvtest::addDataSearchSubDirectory(""); // override "cv" prefix below to access without "../dnn" hacks
}
CV_TEST_MAIN("cv", initTests())

View File

@ -0,0 +1,257 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
namespace opencv_test { namespace {
/* ///////////////////// pyrlk_test ///////////////////////// */
class CV_OptFlowPyrLKTest : public cvtest::BaseTest
{
public:
CV_OptFlowPyrLKTest();
protected:
void run(int);
};
CV_OptFlowPyrLKTest::CV_OptFlowPyrLKTest() {}
void CV_OptFlowPyrLKTest::run( int )
{
int code = cvtest::TS::OK;
const double success_error_level = 0.3;
const int bad_points_max = 8;
/* test parameters */
double max_err = 0., sum_err = 0;
int pt_cmpd = 0;
int pt_exceed = 0;
int merr_i = 0, merr_j = 0, merr_k = 0, merr_nan = 0;
char filename[1000];
cv::Point2f *v = 0, *v2 = 0;
cv::Mat _u, _v, _v2;
cv::Mat imgI, imgJ;
int n = 0, i = 0;
for(;;)
{
sprintf( filename, "%soptflow/%s", ts->get_data_path().c_str(), "lk_prev.dat" );
{
FileStorage fs(filename, FileStorage::READ);
fs["points"] >> _u;
if( _u.empty() )
{
ts->printf( cvtest::TS::LOG, "could not read %s\n", filename );
code = cvtest::TS::FAIL_MISSING_TEST_DATA;
break;
}
}
sprintf( filename, "%soptflow/%s", ts->get_data_path().c_str(), "lk_next.dat" );
{
FileStorage fs(filename, FileStorage::READ);
fs["points"] >> _v;
if( _v.empty() )
{
ts->printf( cvtest::TS::LOG, "could not read %s\n", filename );
code = cvtest::TS::FAIL_MISSING_TEST_DATA;
break;
}
}
if( _u.cols != 2 || _u.type() != CV_32F ||
_v.cols != 2 || _v.type() != CV_32F ||
_v.rows != _u.rows )
{
ts->printf( cvtest::TS::LOG, "the loaded matrices of points are not valid\n" );
code = cvtest::TS::FAIL_MISSING_TEST_DATA;
break;
}
/* read first image */
sprintf( filename, "%soptflow/%s", ts->get_data_path().c_str(), "rock_1.bmp" );
imgI = cv::imread( filename, cv::IMREAD_UNCHANGED );
if( imgI.empty() )
{
ts->printf( cvtest::TS::LOG, "could not read %s\n", filename );
code = cvtest::TS::FAIL_MISSING_TEST_DATA;
break;
}
/* read second image */
sprintf( filename, "%soptflow/%s", ts->get_data_path().c_str(), "rock_2.bmp" );
imgJ = cv::imread( filename, cv::IMREAD_UNCHANGED );
if( imgJ.empty() )
{
ts->printf( cvtest::TS::LOG, "could not read %s\n", filename );
code = cvtest::TS::FAIL_MISSING_TEST_DATA;
break;
}
n = _u.rows;
std::vector<uchar> status(n, (uchar)0);
/* calculate flow */
calcOpticalFlowPyrLK(imgI, imgJ, _u, _v2, status, cv::noArray(), Size( 41, 41 ), 4,
TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS, 30, 0.01f ), 0 );
v = (cv::Point2f*)_v.ptr();
v2 = (cv::Point2f*)_v2.ptr();
/* compare results */
for( i = 0; i < n; i++ )
{
if( status[i] != 0 )
{
double err;
if( cvIsNaN(v[i].x) || cvIsNaN(v[i].y) )
{
merr_j++;
continue;
}
if( cvIsNaN(v2[i].x) || cvIsNaN(v2[i].y) )
{
merr_nan++;
continue;
}
err = fabs(v2[i].x - v[i].x) + fabs(v2[i].y - v[i].y);
if( err > max_err )
{
max_err = err;
merr_i = i;
}
pt_exceed += err > success_error_level;
sum_err += err;
pt_cmpd++;
}
else
{
if( !cvIsNaN( v[i].x ))
{
merr_i = i;
merr_k++;
ts->printf( cvtest::TS::LOG, "The algorithm lost the point #%d\n", i );
code = cvtest::TS::FAIL_BAD_ACCURACY;
break;
}
}
}
if( i < n )
break;
if( pt_exceed > bad_points_max )
{
ts->printf( cvtest::TS::LOG,
"The number of poorly tracked points is too big (>=%d)\n", pt_exceed );
code = cvtest::TS::FAIL_BAD_ACCURACY;
break;
}
if( max_err > 1 )
{
ts->printf( cvtest::TS::LOG, "Maximum tracking error is too big (=%g) at %d\n", max_err, merr_i );
code = cvtest::TS::FAIL_BAD_ACCURACY;
break;
}
if( merr_nan > 0 )
{
ts->printf( cvtest::TS::LOG, "NAN tracking result with status != 0 (%d times)\n", merr_nan );
code = cvtest::TS::FAIL_BAD_ACCURACY;
}
break;
}
if( code < 0 )
ts->set_failed_test_info( code );
}
TEST(Video_OpticalFlowPyrLK, accuracy) { CV_OptFlowPyrLKTest test; test.safe_run(); }
TEST(Video_OpticalFlowPyrLK, submat)
{
// see bug #2075
std::string path = cvtest::TS::ptr()->get_data_path() + "../cv/shared/lena.png";
cv::Mat lenaImg = cv::imread(path);
ASSERT_FALSE(lenaImg.empty());
cv::Mat wholeImage;
cv::resize(lenaImg, wholeImage, cv::Size(1024, 1024), 0, 0, cv::INTER_LINEAR_EXACT);
cv::Mat img1 = wholeImage(cv::Rect(0, 0, 640, 360)).clone();
cv::Mat img2 = wholeImage(cv::Rect(40, 60, 640, 360));
std::vector<uchar> status;
std::vector<float> error;
std::vector<cv::Point2f> prev;
std::vector<cv::Point2f> next;
cv::RNG rng(123123);
for(int i = 0; i < 50; ++i)
{
int x = rng.uniform(0, 640);
int y = rng.uniform(0, 360);
prev.push_back(cv::Point2f((float)x, (float)y));
}
ASSERT_NO_THROW(cv::calcOpticalFlowPyrLK(img1, img2, prev, next, status, error));
}
}} // namespace

View File

@ -0,0 +1,15 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
#include "opencv2/ts.hpp"
#include "opencv2/video.hpp"
#include <opencv2/ts/ts_perf.hpp>
namespace opencv_test {
using namespace perf;
}
#endif

View File

@ -0,0 +1,129 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
//#define DEBUG_TEST
#ifdef DEBUG_TEST
#include <opencv2/highgui.hpp>
#endif
namespace opencv_test { namespace {
//using namespace cv::tracking;
#define TESTSET_NAMES testing::Values("david", "dudek", "faceocc2")
const string TRACKING_DIR = "tracking";
const string FOLDER_IMG = "data";
const string FOLDER_OMIT_INIT = "initOmit";
#include "test_trackers.impl.hpp"
//[TESTDATA]
PARAM_TEST_CASE(DistanceAndOverlap, string)
{
string dataset;
virtual void SetUp()
{
dataset = GET_PARAM(0);
}
};
TEST_P(DistanceAndOverlap, MIL)
{
TrackerTest<Tracker, Rect> test(TrackerMIL::create(), dataset, 30, .65f, NoTransform);
test.run();
}
TEST_P(DistanceAndOverlap, Shifted_Data_MIL)
{
TrackerTest<Tracker, Rect> test(TrackerMIL::create(), dataset, 30, .6f, CenterShiftLeft);
test.run();
}
/***************************************************************************************/
//Tests with scaled initial window
TEST_P(DistanceAndOverlap, Scaled_Data_MIL)
{
TrackerTest<Tracker, Rect> test(TrackerMIL::create(), dataset, 30, .7f, Scale_1_1);
test.run();
}
TEST_P(DistanceAndOverlap, GOTURN)
{
std::string model = cvtest::findDataFile("dnn/gsoc2016-goturn/goturn.prototxt");
std::string weights = cvtest::findDataFile("dnn/gsoc2016-goturn/goturn.caffemodel", false);
cv::TrackerGOTURN::Params params;
params.modelTxt = model;
params.modelBin = weights;
TrackerTest<Tracker, Rect> test(TrackerGOTURN::create(params), dataset, 35, .35f, NoTransform);
test.run();
}
INSTANTIATE_TEST_CASE_P(Tracking, DistanceAndOverlap, TESTSET_NAMES);
TEST(GOTURN, memory_usage)
{
cv::Rect roi(145, 70, 85, 85);
std::string model = cvtest::findDataFile("dnn/gsoc2016-goturn/goturn.prototxt");
std::string weights = cvtest::findDataFile("dnn/gsoc2016-goturn/goturn.caffemodel", false);
cv::TrackerGOTURN::Params params;
params.modelTxt = model;
params.modelBin = weights;
cv::Ptr<Tracker> tracker = TrackerGOTURN::create(params);
string inputVideo = cvtest::findDataFile("tracking/david/data/david.webm");
cv::VideoCapture video(inputVideo);
ASSERT_TRUE(video.isOpened()) << inputVideo;
cv::Mat frame;
video >> frame;
ASSERT_FALSE(frame.empty()) << inputVideo;
tracker->init(frame, roi);
string ground_truth_bb;
for (int nframes = 0; nframes < 15; ++nframes)
{
std::cout << "Frame: " << nframes << std::endl;
video >> frame;
bool res = tracker->update(frame, roi);
ASSERT_TRUE(res);
std::cout << "Predicted ROI: " << roi << std::endl;
}
}
TEST(DaSiamRPN, memory_usage)
{
cv::Rect roi(145, 70, 85, 85);
std::string model = cvtest::findDataFile("dnn/onnx/models/dasiamrpn_model.onnx", false);
std::string kernel_r1 = cvtest::findDataFile("dnn/onnx/models/dasiamrpn_kernel_r1.onnx", false);
std::string kernel_cls1 = cvtest::findDataFile("dnn/onnx/models/dasiamrpn_kernel_cls1.onnx", false);
cv::TrackerDaSiamRPN::Params params;
params.model = model;
params.kernel_r1 = kernel_r1;
params.kernel_cls1 = kernel_cls1;
cv::Ptr<Tracker> tracker = TrackerDaSiamRPN::create(params);
string inputVideo = cvtest::findDataFile("tracking/david/data/david.webm");
cv::VideoCapture video(inputVideo);
ASSERT_TRUE(video.isOpened()) << inputVideo;
cv::Mat frame;
video >> frame;
ASSERT_FALSE(frame.empty()) << inputVideo;
tracker->init(frame, roi);
string ground_truth_bb;
for (int nframes = 0; nframes < 15; ++nframes)
{
std::cout << "Frame: " << nframes << std::endl;
video >> frame;
bool res = tracker->update(frame, roi);
ASSERT_TRUE(res);
std::cout << "Predicted ROI: " << roi << std::endl;
}
}
}} // namespace opencv_test::

View File

@ -0,0 +1,369 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
/*
* The Evaluation Methodologies are partially based on:
* ====================================================================================================================
* [OTB] Y. Wu, J. Lim, and M.-H. Yang, "Online object tracking: A benchmark," in Computer Vision and Pattern Recognition (CVPR), 2013
*
*/
enum BBTransformations
{
NoTransform = 0,
CenterShiftLeft = 1,
CenterShiftRight = 2,
CenterShiftUp = 3,
CenterShiftDown = 4,
CornerShiftTopLeft = 5,
CornerShiftTopRight = 6,
CornerShiftBottomLeft = 7,
CornerShiftBottomRight = 8,
Scale_0_8 = 9,
Scale_0_9 = 10,
Scale_1_1 = 11,
Scale_1_2 = 12
};
namespace {
std::vector<std::string> splitString(const std::string& s_, const std::string& delimiter)
{
std::string s = s_;
std::vector<string> token;
size_t pos = 0;
while ((pos = s.find(delimiter)) != std::string::npos)
{
token.push_back(s.substr(0, pos));
s.erase(0, pos + delimiter.length());
}
token.push_back(s);
return token;
}
float calcDistance(const Rect& a, const Rect& b)
{
Point2f p_a((float)(a.x + a.width / 2), (float)(a.y + a.height / 2));
Point2f p_b((float)(b.x + b.width / 2), (float)(b.y + b.height / 2));
Point2f diff = p_a - p_b;
return sqrt(diff.dot(diff));
}
float calcOverlap(const Rect& a, const Rect& b)
{
float rectIntersectionArea = (float)(a & b).area();
return rectIntersectionArea / (a.area() + b.area() - rectIntersectionArea);
}
} // namespace
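// Editorial note, not part of the original sources: calcOverlap() is the usual intersection-over-union
// (IoU) measure. For example, with a = Rect(0, 0, 10, 10) and b = Rect(5, 0, 10, 10) the intersection
// area is 50 and the union is 150, so calcOverlap(a, b) == 1/3, while calcDistance(a, b) is the
// distance between the rectangle centers, here 5.0. TrackerTest below compares the per-sequence means
// of these two quantities against its distance and overlap thresholds.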
template <typename Tracker, typename ROI_t = Rect2d>
class TrackerTest
{
public:
TrackerTest(const Ptr<Tracker>& tracker, const string& video, float distanceThreshold,
float overlapThreshold, int shift = NoTransform, int segmentIdx = 1, int numSegments = 10);
~TrackerTest() {}
void run();
protected:
void checkDataTest();
void distanceAndOverlapTest();
Ptr<Tracker> tracker;
string video;
std::vector<Rect> bbs;
int startFrame;
string suffix;
string prefix;
float overlapThreshold;
float distanceThreshold;
int segmentIdx;
int shift;
int numSegments;
int gtStartFrame;
int endFrame;
vector<int> validSequence;
private:
Rect applyShift(const Rect& bb);
};
template <typename Tracker, typename ROI_t>
TrackerTest<Tracker, ROI_t>::TrackerTest(const Ptr<Tracker>& _tracker, const string& _video, float _distanceThreshold,
float _overlapThreshold, int _shift, int _segmentIdx, int _numSegments)
: tracker(_tracker)
, video(_video)
, overlapThreshold(_overlapThreshold)
, distanceThreshold(_distanceThreshold)
, segmentIdx(_segmentIdx)
, shift(_shift)
, numSegments(_numSegments)
{
// nothing
}
template <typename Tracker, typename ROI_t>
Rect TrackerTest<Tracker, ROI_t>::applyShift(const Rect& bb_)
{
Rect bb = bb_;
Point center(bb.x + (bb.width / 2), bb.y + (bb.height / 2));
int xLimit = bb.x + bb.width - 1;
int yLimit = bb.y + bb.height - 1;
int h = 0;
int w = 0;
float ratio = 1.0;
switch (shift)
{
case CenterShiftLeft:
bb.x = bb.x - (int)ceil(0.1 * bb.width);
break;
case CenterShiftRight:
bb.x = bb.x + (int)ceil(0.1 * bb.width);
break;
case CenterShiftUp:
bb.y = bb.y - (int)ceil(0.1 * bb.height);
break;
case CenterShiftDown:
bb.y = bb.y + (int)ceil(0.1 * bb.height);
break;
case CornerShiftTopLeft:
bb.x = (int)cvRound(bb.x - 0.1 * bb.width);
bb.y = (int)cvRound(bb.y - 0.1 * bb.height);
bb.width = xLimit - bb.x + 1;
bb.height = yLimit - bb.y + 1;
break;
case CornerShiftTopRight:
xLimit = (int)cvRound(xLimit + 0.1 * bb.width);
bb.y = (int)cvRound(bb.y - 0.1 * bb.height);
bb.width = xLimit - bb.x + 1;
bb.height = yLimit - bb.y + 1;
break;
case CornerShiftBottomLeft:
bb.x = (int)cvRound(bb.x - 0.1 * bb.width);
yLimit = (int)cvRound(yLimit + 0.1 * bb.height);
bb.width = xLimit - bb.x + 1;
bb.height = yLimit - bb.y + 1;
break;
case CornerShiftBottomRight:
xLimit = (int)cvRound(xLimit + 0.1 * bb.width);
yLimit = (int)cvRound(yLimit + 0.1 * bb.height);
bb.width = xLimit - bb.x + 1;
bb.height = yLimit - bb.y + 1;
break;
case Scale_0_8:
ratio = 0.8f;
w = (int)(ratio * bb.width);
h = (int)(ratio * bb.height);
bb = Rect(center.x - (w / 2), center.y - (h / 2), w, h);
break;
case Scale_0_9:
ratio = 0.9f;
w = (int)(ratio * bb.width);
h = (int)(ratio * bb.height);
bb = Rect(center.x - (w / 2), center.y - (h / 2), w, h);
break;
case Scale_1_1:
ratio = 1.1f;
w = (int)(ratio * bb.width);
h = (int)(ratio * bb.height);
bb = Rect(center.x - (w / 2), center.y - (h / 2), w, h);
break;
case Scale_1_2:
ratio = 1.2f;
w = (int)(ratio * bb.width);
h = (int)(ratio * bb.height);
bb = Rect(center.x - (w / 2), center.y - (h / 2), w, h);
break;
default:
break;
}
return bb;
}
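// Editorial note, not part of the original sources: applyShift() perturbs the ground-truth
// initialization window before the tracker is initialized in distanceAndOverlapTest(). For
// bb = Rect(100, 100, 50, 40):
//     CenterShiftLeft -> Rect(95, 100, 50, 40)   (x shifted left by ceil(0.1 * width) = 5)
//     Scale_0_8       -> Rect(105, 104, 40, 32)  (80% of the size around the same center (125, 120))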
template <typename Tracker, typename ROI_t>
void TrackerTest<Tracker, ROI_t>::distanceAndOverlapTest()
{
bool initialized = false;
int fc = (startFrame - gtStartFrame);
bbs.at(fc) = applyShift(bbs.at(fc));
Rect currentBBi = bbs.at(fc);
ROI_t currentBB(currentBBi);
float sumDistance = 0;
float sumOverlap = 0;
string folder = cvtest::TS::ptr()->get_data_path() + "/" + TRACKING_DIR + "/" + video + "/" + FOLDER_IMG;
string videoPath = folder + "/" + video + ".webm";
VideoCapture c;
c.open(videoPath);
if (!c.isOpened())
throw SkipTestException("Can't open video file");
#if 0
c.set(CAP_PROP_POS_FRAMES, startFrame);
#else
if (startFrame)
std::cout << "startFrame = " << startFrame << std::endl;
for (int i = 0; i < startFrame; i++)
{
Mat dummy_frame;
c >> dummy_frame;
ASSERT_FALSE(dummy_frame.empty()) << i << ": " << videoPath;
}
#endif
for (int frameCounter = startFrame; frameCounter < endFrame; frameCounter++)
{
Mat frame;
c >> frame;
ASSERT_FALSE(frame.empty()) << "frameCounter=" << frameCounter << " video=" << videoPath;
if (!initialized)
{
tracker->init(frame, currentBB);
std::cout << "frame size = " << frame.size() << std::endl;
initialized = true;
}
else
{
if (frameCounter >= (int)bbs.size())
break;
tracker->update(frame, currentBB);
}
float curDistance = calcDistance(currentBB, bbs.at(fc));
float curOverlap = calcOverlap(currentBB, bbs.at(fc));
#ifdef DEBUG_TEST
Mat result;
repeat(frame, 1, 2, result);
rectangle(result, currentBB, Scalar(0, 255, 0), 1);
Rect roi2(frame.cols, 0, frame.cols, frame.rows);
rectangle(result(roi2), bbs.at(fc), Scalar(0, 0, 255), 1);
imshow("result", result);
waitKey(1);
#endif
sumDistance += curDistance;
sumOverlap += curOverlap;
fc++;
}
float meanDistance = sumDistance / (endFrame - startFrame);
float meanOverlap = sumOverlap / (endFrame - startFrame);
EXPECT_LE(meanDistance, distanceThreshold);
EXPECT_GE(meanOverlap, overlapThreshold);
}
template <typename Tracker, typename ROI_t>
void TrackerTest<Tracker, ROI_t>::checkDataTest()
{
FileStorage fs;
fs.open(cvtest::TS::ptr()->get_data_path() + TRACKING_DIR + "/" + video + "/" + video + ".yml", FileStorage::READ);
fs["start"] >> startFrame;
fs["prefix"] >> prefix;
fs["suffix"] >> suffix;
fs.release();
string gtFile = cvtest::TS::ptr()->get_data_path() + TRACKING_DIR + "/" + video + "/gt.txt";
std::ifstream gt;
//open the ground truth
gt.open(gtFile.c_str());
ASSERT_TRUE(gt.is_open()) << gtFile;
string line;
int bbCounter = 0;
while (getline(gt, line))
{
bbCounter++;
}
gt.close();
int seqLength = bbCounter;
for (int i = startFrame; i < seqLength; i++)
{
validSequence.push_back(i);
}
//exclude from the image sequence the frames where the target is occluded or out of view
string omitFile = cvtest::TS::ptr()->get_data_path() + TRACKING_DIR + "/" + video + "/" + FOLDER_OMIT_INIT + "/" + video + ".txt";
std::ifstream omit;
omit.open(omitFile.c_str());
if (omit.is_open())
{
string omitLine;
while (getline(omit, omitLine))
{
vector<string> tokens = splitString(omitLine, " ");
int s_start = atoi(tokens.at(0).c_str());
int s_end = atoi(tokens.at(1).c_str());
for (int k = s_start; k <= s_end; k++)
{
std::vector<int>::iterator position = std::find(validSequence.begin(), validSequence.end(), k);
if (position != validSequence.end())
validSequence.erase(position);
}
}
}
omit.close();
gtStartFrame = startFrame;
//compute the start and end frames of each segment
int numFrame = (int)(validSequence.size() / numSegments);
startFrame += (segmentIdx - 1) * numFrame;
endFrame = startFrame + numFrame;
std::ifstream gt2;
//open the ground truth
gt2.open(gtFile.c_str());
ASSERT_TRUE(gt2.is_open()) << gtFile;
string line2;
int bbCounter2 = 0;
while (getline(gt2, line2))
{
vector<string> tokens = splitString(line2, ",");
ASSERT_EQ((size_t)4, tokens.size()) << "Incorrect ground truth file " << gtFile;
Rect bb(atoi(tokens.at(0).c_str()), atoi(tokens.at(1).c_str()), atoi(tokens.at(2).c_str()), atoi(tokens.at(3).c_str()));
bbs.push_back(bb);
bbCounter2++;
}
gt2.close();
if (segmentIdx == numSegments)
endFrame = (int)bbs.size();
}
template <typename Tracker, typename ROI_t>
void TrackerTest<Tracker, ROI_t>::run()
{
srand(1); // FIXIT remove that, ensure that there is no "rand()" in implementation
ASSERT_TRUE(tracker);
checkDataTest();
//check for failure
if (::testing::Test::HasFatalFailure())
return;
distanceAndOverlapTest();
}