feat: 切换后端至PaddleOCR-NCNN,切换工程为CMake
1.项目后端整体迁移至PaddleOCR-NCNN算法,已通过基本的兼容性测试 2.工程改为使用CMake组织,后续为了更好地兼容第三方库,不再提供QMake工程 3.重整权利声明文件,重整代码工程,确保最小化侵权风险 Log: 切换后端至PaddleOCR-NCNN,切换工程为CMake Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
This commit is contained in:
194
3rdparty/opencv-4.5.4/modules/stitching/src/autocalib.cpp
vendored
Normal file
194
3rdparty/opencv-4.5.4/modules/stitching/src/autocalib.cpp
vendored
Normal file
@ -0,0 +1,194 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
#include "opencv2/core/hal/hal.hpp"
|
||||
|
||||
using namespace cv;
|
||||
|
||||
namespace {
|
||||
|
||||
static inline bool decomposeCholesky(double* A, size_t astep, int m)
|
||||
{
|
||||
if (!hal::Cholesky64f(A, astep, m, 0, 0, 0))
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
|
||||
namespace cv {
|
||||
namespace detail {
|
||||
|
||||
void focalsFromHomography(const Mat& H, double &f0, double &f1, bool &f0_ok, bool &f1_ok)
|
||||
{
|
||||
CV_Assert(H.type() == CV_64F && H.size() == Size(3, 3));
|
||||
|
||||
const double* h = H.ptr<double>();
|
||||
|
||||
double d1, d2; // Denominators
|
||||
double v1, v2; // Focal squares value candidates
|
||||
|
||||
f1_ok = true;
|
||||
d1 = h[6] * h[7];
|
||||
d2 = (h[7] - h[6]) * (h[7] + h[6]);
|
||||
v1 = -(h[0] * h[1] + h[3] * h[4]) / d1;
|
||||
v2 = (h[0] * h[0] + h[3] * h[3] - h[1] * h[1] - h[4] * h[4]) / d2;
|
||||
if (v1 < v2) std::swap(v1, v2);
|
||||
if (v1 > 0 && v2 > 0) f1 = std::sqrt(std::abs(d1) > std::abs(d2) ? v1 : v2);
|
||||
else if (v1 > 0) f1 = std::sqrt(v1);
|
||||
else f1_ok = false;
|
||||
|
||||
f0_ok = true;
|
||||
d1 = h[0] * h[3] + h[1] * h[4];
|
||||
d2 = h[0] * h[0] + h[1] * h[1] - h[3] * h[3] - h[4] * h[4];
|
||||
v1 = -h[2] * h[5] / d1;
|
||||
v2 = (h[5] * h[5] - h[2] * h[2]) / d2;
|
||||
if (v1 < v2) std::swap(v1, v2);
|
||||
if (v1 > 0 && v2 > 0) f0 = std::sqrt(std::abs(d1) > std::abs(d2) ? v1 : v2);
|
||||
else if (v1 > 0) f0 = std::sqrt(v1);
|
||||
else f0_ok = false;
|
||||
}
|
||||
|
||||
|
||||
void estimateFocal(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,
|
||||
std::vector<double> &focals)
|
||||
{
|
||||
const int num_images = static_cast<int>(features.size());
|
||||
focals.resize(num_images);
|
||||
|
||||
std::vector<double> all_focals;
|
||||
|
||||
for (int i = 0; i < num_images; ++i)
|
||||
{
|
||||
for (int j = 0; j < num_images; ++j)
|
||||
{
|
||||
const MatchesInfo &m = pairwise_matches[i*num_images + j];
|
||||
if (m.H.empty())
|
||||
continue;
|
||||
double f0, f1;
|
||||
bool f0ok, f1ok;
|
||||
focalsFromHomography(m.H, f0, f1, f0ok, f1ok);
|
||||
if (f0ok && f1ok)
|
||||
all_focals.push_back(std::sqrt(f0 * f1));
|
||||
}
|
||||
}
|
||||
|
||||
if (static_cast<int>(all_focals.size()) >= num_images - 1)
|
||||
{
|
||||
double median;
|
||||
|
||||
std::sort(all_focals.begin(), all_focals.end());
|
||||
if (all_focals.size() % 2 == 1)
|
||||
median = all_focals[all_focals.size() / 2];
|
||||
else
|
||||
median = (all_focals[all_focals.size() / 2 - 1] + all_focals[all_focals.size() / 2]) * 0.5;
|
||||
|
||||
for (int i = 0; i < num_images; ++i)
|
||||
focals[i] = median;
|
||||
}
|
||||
else
|
||||
{
|
||||
LOGLN("Can't estimate focal length, will use naive approach");
|
||||
double focals_sum = 0;
|
||||
for (int i = 0; i < num_images; ++i)
|
||||
focals_sum += features[i].img_size.width + features[i].img_size.height;
|
||||
for (int i = 0; i < num_images; ++i)
|
||||
focals[i] = focals_sum / num_images;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool calibrateRotatingCamera(const std::vector<Mat> &Hs, Mat &K)
|
||||
{
|
||||
int m = static_cast<int>(Hs.size());
|
||||
CV_Assert(m >= 1);
|
||||
|
||||
std::vector<Mat> Hs_(m);
|
||||
for (int i = 0; i < m; ++i)
|
||||
{
|
||||
CV_Assert(Hs[i].size() == Size(3, 3) && Hs[i].type() == CV_64F);
|
||||
Hs_[i] = Hs[i] / std::pow(determinant(Hs[i]), 1./3.);
|
||||
}
|
||||
|
||||
const int idx_map[3][3] = {{0, 1, 2}, {1, 3, 4}, {2, 4, 5}};
|
||||
Mat_<double> A(6*m, 6);
|
||||
A.setTo(0);
|
||||
|
||||
int eq_idx = 0;
|
||||
for (int k = 0; k < m; ++k)
|
||||
{
|
||||
Mat_<double> H(Hs_[k]);
|
||||
for (int i = 0; i < 3; ++i)
|
||||
{
|
||||
for (int j = i; j < 3; ++j, ++eq_idx)
|
||||
{
|
||||
for (int l = 0; l < 3; ++l)
|
||||
{
|
||||
for (int s = 0; s < 3; ++s)
|
||||
{
|
||||
int idx = idx_map[l][s];
|
||||
A(eq_idx, idx) += H(i,l) * H(j,s);
|
||||
}
|
||||
}
|
||||
A(eq_idx, idx_map[i][j]) -= 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Mat_<double> wcoef;
|
||||
SVD::solveZ(A, wcoef);
|
||||
|
||||
Mat_<double> W(3,3);
|
||||
for (int i = 0; i < 3; ++i)
|
||||
for (int j = i; j < 3; ++j)
|
||||
W(i,j) = W(j,i) = wcoef(idx_map[i][j], 0) / wcoef(5,0);
|
||||
if (!decomposeCholesky(W.ptr<double>(), W.step, 3))
|
||||
return false;
|
||||
W(0,1) = W(0,2) = W(1,2) = 0;
|
||||
K = W.t();
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace detail
|
||||
} // namespace cv
|
903
3rdparty/opencv-4.5.4/modules/stitching/src/blenders.cpp
vendored
Normal file
903
3rdparty/opencv-4.5.4/modules/stitching/src/blenders.cpp
vendored
Normal file
@ -0,0 +1,903 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
#include "opencl_kernels_stitching.hpp"
|
||||
|
||||
#ifdef HAVE_CUDA
|
||||
namespace cv { namespace cuda { namespace device
|
||||
{
|
||||
namespace blend
|
||||
{
|
||||
void addSrcWeightGpu16S(const PtrStep<short> src, const PtrStep<short> src_weight,
|
||||
PtrStep<short> dst, PtrStep<short> dst_weight, cv::Rect &rc);
|
||||
void addSrcWeightGpu32F(const PtrStep<short> src, const PtrStepf src_weight,
|
||||
PtrStep<short> dst, PtrStepf dst_weight, cv::Rect &rc);
|
||||
void normalizeUsingWeightMapGpu16S(const PtrStep<short> weight, PtrStep<short> src,
|
||||
const int width, const int height);
|
||||
void normalizeUsingWeightMapGpu32F(const PtrStepf weight, PtrStep<short> src,
|
||||
const int width, const int height);
|
||||
}
|
||||
}}}
|
||||
#endif
|
||||
|
||||
namespace cv {
|
||||
namespace detail {
|
||||
|
||||
static const float WEIGHT_EPS = 1e-5f;
|
||||
|
||||
Ptr<Blender> Blender::createDefault(int type, bool try_gpu)
|
||||
{
|
||||
if (type == NO)
|
||||
return makePtr<Blender>();
|
||||
if (type == FEATHER)
|
||||
return makePtr<FeatherBlender>(try_gpu);
|
||||
if (type == MULTI_BAND)
|
||||
return makePtr<MultiBandBlender>(try_gpu);
|
||||
CV_Error(Error::StsBadArg, "unsupported blending method");
|
||||
}
|
||||
|
||||
|
||||
void Blender::prepare(const std::vector<Point> &corners, const std::vector<Size> &sizes)
|
||||
{
|
||||
prepare(resultRoi(corners, sizes));
|
||||
}
|
||||
|
||||
|
||||
void Blender::prepare(Rect dst_roi)
|
||||
{
|
||||
dst_.create(dst_roi.size(), CV_16SC3);
|
||||
dst_.setTo(Scalar::all(0));
|
||||
dst_mask_.create(dst_roi.size(), CV_8U);
|
||||
dst_mask_.setTo(Scalar::all(0));
|
||||
dst_roi_ = dst_roi;
|
||||
}
|
||||
|
||||
|
||||
void Blender::feed(InputArray _img, InputArray _mask, Point tl)
|
||||
{
|
||||
Mat img = _img.getMat();
|
||||
Mat mask = _mask.getMat();
|
||||
Mat dst = dst_.getMat(ACCESS_RW);
|
||||
Mat dst_mask = dst_mask_.getMat(ACCESS_RW);
|
||||
|
||||
CV_Assert(img.type() == CV_16SC3);
|
||||
CV_Assert(mask.type() == CV_8U);
|
||||
int dx = tl.x - dst_roi_.x;
|
||||
int dy = tl.y - dst_roi_.y;
|
||||
|
||||
for (int y = 0; y < img.rows; ++y)
|
||||
{
|
||||
const Point3_<short> *src_row = img.ptr<Point3_<short> >(y);
|
||||
Point3_<short> *dst_row = dst.ptr<Point3_<short> >(dy + y);
|
||||
const uchar *mask_row = mask.ptr<uchar>(y);
|
||||
uchar *dst_mask_row = dst_mask.ptr<uchar>(dy + y);
|
||||
|
||||
for (int x = 0; x < img.cols; ++x)
|
||||
{
|
||||
if (mask_row[x])
|
||||
dst_row[dx + x] = src_row[x];
|
||||
dst_mask_row[dx + x] |= mask_row[x];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Blender::blend(InputOutputArray dst, InputOutputArray dst_mask)
|
||||
{
|
||||
UMat mask;
|
||||
compare(dst_mask_, 0, mask, CMP_EQ);
|
||||
dst_.setTo(Scalar::all(0), mask);
|
||||
dst.assign(dst_);
|
||||
dst_mask.assign(dst_mask_);
|
||||
dst_.release();
|
||||
dst_mask_.release();
|
||||
}
|
||||
|
||||
void FeatherBlender::prepare(Rect dst_roi)
|
||||
{
|
||||
Blender::prepare(dst_roi);
|
||||
dst_weight_map_.create(dst_roi.size(), CV_32F);
|
||||
dst_weight_map_.setTo(0);
|
||||
}
|
||||
|
||||
|
||||
void FeatherBlender::feed(InputArray _img, InputArray mask, Point tl)
|
||||
{
|
||||
Mat img = _img.getMat();
|
||||
Mat dst = dst_.getMat(ACCESS_RW);
|
||||
|
||||
CV_Assert(img.type() == CV_16SC3);
|
||||
CV_Assert(mask.type() == CV_8U);
|
||||
|
||||
createWeightMap(mask, sharpness_, weight_map_);
|
||||
Mat weight_map = weight_map_.getMat(ACCESS_READ);
|
||||
Mat dst_weight_map = dst_weight_map_.getMat(ACCESS_RW);
|
||||
|
||||
int dx = tl.x - dst_roi_.x;
|
||||
int dy = tl.y - dst_roi_.y;
|
||||
|
||||
for (int y = 0; y < img.rows; ++y)
|
||||
{
|
||||
const Point3_<short>* src_row = img.ptr<Point3_<short> >(y);
|
||||
Point3_<short>* dst_row = dst.ptr<Point3_<short> >(dy + y);
|
||||
const float* weight_row = weight_map.ptr<float>(y);
|
||||
float* dst_weight_row = dst_weight_map.ptr<float>(dy + y);
|
||||
|
||||
for (int x = 0; x < img.cols; ++x)
|
||||
{
|
||||
dst_row[dx + x].x += static_cast<short>(src_row[x].x * weight_row[x]);
|
||||
dst_row[dx + x].y += static_cast<short>(src_row[x].y * weight_row[x]);
|
||||
dst_row[dx + x].z += static_cast<short>(src_row[x].z * weight_row[x]);
|
||||
dst_weight_row[dx + x] += weight_row[x];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void FeatherBlender::blend(InputOutputArray dst, InputOutputArray dst_mask)
|
||||
{
|
||||
normalizeUsingWeightMap(dst_weight_map_, dst_);
|
||||
compare(dst_weight_map_, WEIGHT_EPS, dst_mask_, CMP_GT);
|
||||
Blender::blend(dst, dst_mask);
|
||||
}
|
||||
|
||||
|
||||
Rect FeatherBlender::createWeightMaps(const std::vector<UMat> &masks, const std::vector<Point> &corners,
|
||||
std::vector<UMat> &weight_maps)
|
||||
{
|
||||
weight_maps.resize(masks.size());
|
||||
for (size_t i = 0; i < masks.size(); ++i)
|
||||
createWeightMap(masks[i], sharpness_, weight_maps[i]);
|
||||
|
||||
Rect dst_roi = resultRoi(corners, masks);
|
||||
Mat weights_sum(dst_roi.size(), CV_32F);
|
||||
weights_sum.setTo(0);
|
||||
|
||||
for (size_t i = 0; i < weight_maps.size(); ++i)
|
||||
{
|
||||
Rect roi(corners[i].x - dst_roi.x, corners[i].y - dst_roi.y,
|
||||
weight_maps[i].cols, weight_maps[i].rows);
|
||||
add(weights_sum(roi), weight_maps[i], weights_sum(roi));
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < weight_maps.size(); ++i)
|
||||
{
|
||||
Rect roi(corners[i].x - dst_roi.x, corners[i].y - dst_roi.y,
|
||||
weight_maps[i].cols, weight_maps[i].rows);
|
||||
Mat tmp = weights_sum(roi);
|
||||
tmp.setTo(1, tmp < std::numeric_limits<float>::epsilon());
|
||||
divide(weight_maps[i], tmp, weight_maps[i]);
|
||||
}
|
||||
|
||||
return dst_roi;
|
||||
}
|
||||
|
||||
|
||||
MultiBandBlender::MultiBandBlender(int try_gpu, int num_bands, int weight_type)
|
||||
{
|
||||
num_bands_ = 0;
|
||||
setNumBands(num_bands);
|
||||
|
||||
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
|
||||
can_use_gpu_ = try_gpu && cuda::getCudaEnabledDeviceCount();
|
||||
gpu_feed_idx_ = 0;
|
||||
#else
|
||||
CV_UNUSED(try_gpu);
|
||||
can_use_gpu_ = false;
|
||||
#endif
|
||||
|
||||
CV_Assert(weight_type == CV_32F || weight_type == CV_16S);
|
||||
weight_type_ = weight_type;
|
||||
}
|
||||
|
||||
void MultiBandBlender::prepare(Rect dst_roi)
|
||||
{
|
||||
dst_roi_final_ = dst_roi;
|
||||
|
||||
// Crop unnecessary bands
|
||||
double max_len = static_cast<double>(std::max(dst_roi.width, dst_roi.height));
|
||||
num_bands_ = std::min(actual_num_bands_, static_cast<int>(ceil(std::log(max_len) / std::log(2.0))));
|
||||
|
||||
// Add border to the final image, to ensure sizes are divided by (1 << num_bands_)
|
||||
dst_roi.width += ((1 << num_bands_) - dst_roi.width % (1 << num_bands_)) % (1 << num_bands_);
|
||||
dst_roi.height += ((1 << num_bands_) - dst_roi.height % (1 << num_bands_)) % (1 << num_bands_);
|
||||
|
||||
Blender::prepare(dst_roi);
|
||||
|
||||
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
|
||||
if (can_use_gpu_)
|
||||
{
|
||||
gpu_initialized_ = false;
|
||||
gpu_feed_idx_ = 0;
|
||||
|
||||
gpu_tl_points_.clear();
|
||||
gpu_weight_pyr_gauss_vec_.clear();
|
||||
gpu_src_pyr_laplace_vec_.clear();
|
||||
gpu_ups_.clear();
|
||||
gpu_imgs_with_border_.clear();
|
||||
|
||||
gpu_dst_pyr_laplace_.resize(num_bands_ + 1);
|
||||
gpu_dst_pyr_laplace_[0].create(dst_roi.size(), CV_16SC3);
|
||||
gpu_dst_pyr_laplace_[0].setTo(Scalar::all(0));
|
||||
|
||||
gpu_dst_band_weights_.resize(num_bands_ + 1);
|
||||
gpu_dst_band_weights_[0].create(dst_roi.size(), weight_type_);
|
||||
gpu_dst_band_weights_[0].setTo(0);
|
||||
|
||||
for (int i = 1; i <= num_bands_; ++i)
|
||||
{
|
||||
gpu_dst_pyr_laplace_[i].create((gpu_dst_pyr_laplace_[i - 1].rows + 1) / 2,
|
||||
(gpu_dst_pyr_laplace_[i - 1].cols + 1) / 2, CV_16SC3);
|
||||
gpu_dst_band_weights_[i].create((gpu_dst_band_weights_[i - 1].rows + 1) / 2,
|
||||
(gpu_dst_band_weights_[i - 1].cols + 1) / 2, weight_type_);
|
||||
gpu_dst_pyr_laplace_[i].setTo(Scalar::all(0));
|
||||
gpu_dst_band_weights_[i].setTo(0);
|
||||
}
|
||||
}
|
||||
else
|
||||
#endif
|
||||
{
|
||||
dst_pyr_laplace_.resize(num_bands_ + 1);
|
||||
dst_pyr_laplace_[0] = dst_;
|
||||
|
||||
dst_band_weights_.resize(num_bands_ + 1);
|
||||
dst_band_weights_[0].create(dst_roi.size(), weight_type_);
|
||||
dst_band_weights_[0].setTo(0);
|
||||
|
||||
for (int i = 1; i <= num_bands_; ++i)
|
||||
{
|
||||
dst_pyr_laplace_[i].create((dst_pyr_laplace_[i - 1].rows + 1) / 2,
|
||||
(dst_pyr_laplace_[i - 1].cols + 1) / 2, CV_16SC3);
|
||||
dst_band_weights_[i].create((dst_band_weights_[i - 1].rows + 1) / 2,
|
||||
(dst_band_weights_[i - 1].cols + 1) / 2, weight_type_);
|
||||
dst_pyr_laplace_[i].setTo(Scalar::all(0));
|
||||
dst_band_weights_[i].setTo(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef HAVE_OPENCL
|
||||
static bool ocl_MultiBandBlender_feed(InputArray _src, InputArray _weight,
|
||||
InputOutputArray _dst, InputOutputArray _dst_weight)
|
||||
{
|
||||
String buildOptions = "-D DEFINE_feed";
|
||||
ocl::buildOptionsAddMatrixDescription(buildOptions, "src", _src);
|
||||
ocl::buildOptionsAddMatrixDescription(buildOptions, "weight", _weight);
|
||||
ocl::buildOptionsAddMatrixDescription(buildOptions, "dst", _dst);
|
||||
ocl::buildOptionsAddMatrixDescription(buildOptions, "dstWeight", _dst_weight);
|
||||
ocl::Kernel k("feed", ocl::stitching::multibandblend_oclsrc, buildOptions);
|
||||
if (k.empty())
|
||||
return false;
|
||||
|
||||
UMat src = _src.getUMat();
|
||||
|
||||
k.args(ocl::KernelArg::ReadOnly(src),
|
||||
ocl::KernelArg::ReadOnly(_weight.getUMat()),
|
||||
ocl::KernelArg::ReadWrite(_dst.getUMat()),
|
||||
ocl::KernelArg::ReadWrite(_dst_weight.getUMat())
|
||||
);
|
||||
|
||||
size_t globalsize[2] = {(size_t)src.cols, (size_t)src.rows };
|
||||
return k.run(2, globalsize, NULL, false);
|
||||
}
|
||||
#endif
|
||||
|
||||
void MultiBandBlender::feed(InputArray _img, InputArray mask, Point tl)
|
||||
{
|
||||
#if ENABLE_LOG
|
||||
int64 t = getTickCount();
|
||||
#endif
|
||||
|
||||
UMat img;
|
||||
|
||||
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
|
||||
// If using gpu save the top left coordinate when running first time after prepare
|
||||
if (can_use_gpu_)
|
||||
{
|
||||
if (!gpu_initialized_)
|
||||
{
|
||||
gpu_tl_points_.push_back(tl);
|
||||
}
|
||||
else
|
||||
{
|
||||
tl = gpu_tl_points_[gpu_feed_idx_];
|
||||
}
|
||||
}
|
||||
// If _img is not a GpuMat get it as UMat from the InputArray object.
|
||||
// If it is GpuMat make a dummy object with right dimensions but no data and
|
||||
// get _img as a GpuMat
|
||||
if (!_img.isGpuMat())
|
||||
#endif
|
||||
{
|
||||
img = _img.getUMat();
|
||||
}
|
||||
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
|
||||
else
|
||||
{
|
||||
gpu_img_ = _img.getGpuMat();
|
||||
img = UMat(gpu_img_.rows, gpu_img_.cols, gpu_img_.type());
|
||||
}
|
||||
#endif
|
||||
|
||||
CV_Assert(img.type() == CV_16SC3 || img.type() == CV_8UC3);
|
||||
CV_Assert(mask.type() == CV_8U);
|
||||
|
||||
// Keep source image in memory with small border
|
||||
int gap = 3 * (1 << num_bands_);
|
||||
Point tl_new(std::max(dst_roi_.x, tl.x - gap),
|
||||
std::max(dst_roi_.y, tl.y - gap));
|
||||
Point br_new(std::min(dst_roi_.br().x, tl.x + img.cols + gap),
|
||||
std::min(dst_roi_.br().y, tl.y + img.rows + gap));
|
||||
|
||||
// Ensure coordinates of top-left, bottom-right corners are divided by (1 << num_bands_).
|
||||
// After that scale between layers is exactly 2.
|
||||
//
|
||||
// We do it to avoid interpolation problems when keeping sub-images only. There is no such problem when
|
||||
// image is bordered to have size equal to the final image size, but this is too memory hungry approach.
|
||||
tl_new.x = dst_roi_.x + (((tl_new.x - dst_roi_.x) >> num_bands_) << num_bands_);
|
||||
tl_new.y = dst_roi_.y + (((tl_new.y - dst_roi_.y) >> num_bands_) << num_bands_);
|
||||
int width = br_new.x - tl_new.x;
|
||||
int height = br_new.y - tl_new.y;
|
||||
width += ((1 << num_bands_) - width % (1 << num_bands_)) % (1 << num_bands_);
|
||||
height += ((1 << num_bands_) - height % (1 << num_bands_)) % (1 << num_bands_);
|
||||
br_new.x = tl_new.x + width;
|
||||
br_new.y = tl_new.y + height;
|
||||
int dy = std::max(br_new.y - dst_roi_.br().y, 0);
|
||||
int dx = std::max(br_new.x - dst_roi_.br().x, 0);
|
||||
tl_new.x -= dx; br_new.x -= dx;
|
||||
tl_new.y -= dy; br_new.y -= dy;
|
||||
|
||||
int top = tl.y - tl_new.y;
|
||||
int left = tl.x - tl_new.x;
|
||||
int bottom = br_new.y - tl.y - img.rows;
|
||||
int right = br_new.x - tl.x - img.cols;
|
||||
|
||||
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
|
||||
if (can_use_gpu_)
|
||||
{
|
||||
if (!gpu_initialized_)
|
||||
{
|
||||
gpu_imgs_with_border_.push_back(cuda::GpuMat());
|
||||
gpu_weight_pyr_gauss_vec_.push_back(std::vector<cuda::GpuMat>(num_bands_+1));
|
||||
gpu_src_pyr_laplace_vec_.push_back(std::vector<cuda::GpuMat>(num_bands_+1));
|
||||
gpu_ups_.push_back(std::vector<cuda::GpuMat>(num_bands_));
|
||||
}
|
||||
|
||||
// If _img is not GpuMat upload it to gpu else gpu_img_ was set already
|
||||
if (!_img.isGpuMat())
|
||||
{
|
||||
gpu_img_.upload(img);
|
||||
}
|
||||
|
||||
// Create the source image Laplacian pyramid
|
||||
cuda::copyMakeBorder(gpu_img_, gpu_imgs_with_border_[gpu_feed_idx_], top, bottom,
|
||||
left, right, BORDER_REFLECT);
|
||||
gpu_imgs_with_border_[gpu_feed_idx_].convertTo(gpu_src_pyr_laplace_vec_[gpu_feed_idx_][0], CV_16S);
|
||||
for (int i = 0; i < num_bands_; ++i)
|
||||
cuda::pyrDown(gpu_src_pyr_laplace_vec_[gpu_feed_idx_][i],
|
||||
gpu_src_pyr_laplace_vec_[gpu_feed_idx_][i + 1]);
|
||||
for (int i = 0; i < num_bands_; ++i)
|
||||
{
|
||||
cuda::pyrUp(gpu_src_pyr_laplace_vec_[gpu_feed_idx_][i + 1], gpu_ups_[gpu_feed_idx_][i]);
|
||||
cuda::subtract(gpu_src_pyr_laplace_vec_[gpu_feed_idx_][i],
|
||||
gpu_ups_[gpu_feed_idx_][i],
|
||||
gpu_src_pyr_laplace_vec_[gpu_feed_idx_][i]);
|
||||
}
|
||||
|
||||
// Create the weight map Gaussian pyramid only if not yet initialized
|
||||
if (!gpu_initialized_)
|
||||
{
|
||||
if (mask.isGpuMat())
|
||||
{
|
||||
gpu_mask_ = mask.getGpuMat();
|
||||
}
|
||||
else
|
||||
{
|
||||
gpu_mask_.upload(mask);
|
||||
}
|
||||
|
||||
if (weight_type_ == CV_32F)
|
||||
{
|
||||
gpu_mask_.convertTo(gpu_weight_map_, CV_32F, 1. / 255.);
|
||||
}
|
||||
else // weight_type_ == CV_16S
|
||||
{
|
||||
gpu_mask_.convertTo(gpu_weight_map_, CV_16S);
|
||||
cuda::compare(gpu_mask_, 0, gpu_add_mask_, CMP_NE);
|
||||
cuda::add(gpu_weight_map_, Scalar::all(1), gpu_weight_map_, gpu_add_mask_);
|
||||
}
|
||||
cuda::copyMakeBorder(gpu_weight_map_, gpu_weight_pyr_gauss_vec_[gpu_feed_idx_][0], top,
|
||||
bottom, left, right, BORDER_CONSTANT);
|
||||
for (int i = 0; i < num_bands_; ++i)
|
||||
cuda::pyrDown(gpu_weight_pyr_gauss_vec_[gpu_feed_idx_][i],
|
||||
gpu_weight_pyr_gauss_vec_[gpu_feed_idx_][i + 1]);
|
||||
}
|
||||
|
||||
int y_tl = tl_new.y - dst_roi_.y;
|
||||
int y_br = br_new.y - dst_roi_.y;
|
||||
int x_tl = tl_new.x - dst_roi_.x;
|
||||
int x_br = br_new.x - dst_roi_.x;
|
||||
|
||||
// Add weighted layer of the source image to the final Laplacian pyramid layer
|
||||
for (int i = 0; i <= num_bands_; ++i)
|
||||
{
|
||||
Rect rc(x_tl, y_tl, x_br - x_tl, y_br - y_tl);
|
||||
cuda::GpuMat &_src_pyr_laplace = gpu_src_pyr_laplace_vec_[gpu_feed_idx_][i];
|
||||
cuda::GpuMat _dst_pyr_laplace = gpu_dst_pyr_laplace_[i](rc);
|
||||
cuda::GpuMat &_weight_pyr_gauss = gpu_weight_pyr_gauss_vec_[gpu_feed_idx_][i];
|
||||
cuda::GpuMat _dst_band_weights = gpu_dst_band_weights_[i](rc);
|
||||
|
||||
using namespace cv::cuda::device::blend;
|
||||
if (weight_type_ == CV_32F)
|
||||
{
|
||||
addSrcWeightGpu32F(_src_pyr_laplace, _weight_pyr_gauss, _dst_pyr_laplace, _dst_band_weights, rc);
|
||||
}
|
||||
else
|
||||
{
|
||||
addSrcWeightGpu16S(_src_pyr_laplace, _weight_pyr_gauss, _dst_pyr_laplace, _dst_band_weights, rc);
|
||||
}
|
||||
x_tl /= 2; y_tl /= 2;
|
||||
x_br /= 2; y_br /= 2;
|
||||
}
|
||||
++gpu_feed_idx_;
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
// Create the source image Laplacian pyramid
|
||||
UMat img_with_border;
|
||||
copyMakeBorder(_img, img_with_border, top, bottom, left, right,
|
||||
BORDER_REFLECT);
|
||||
LOGLN(" Add border to the source image, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
|
||||
#if ENABLE_LOG
|
||||
t = getTickCount();
|
||||
#endif
|
||||
|
||||
std::vector<UMat> src_pyr_laplace;
|
||||
createLaplacePyr(img_with_border, num_bands_, src_pyr_laplace);
|
||||
|
||||
LOGLN(" Create the source image Laplacian pyramid, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
|
||||
#if ENABLE_LOG
|
||||
t = getTickCount();
|
||||
#endif
|
||||
|
||||
// Create the weight map Gaussian pyramid
|
||||
UMat weight_map;
|
||||
std::vector<UMat> weight_pyr_gauss(num_bands_ + 1);
|
||||
|
||||
if (weight_type_ == CV_32F)
|
||||
{
|
||||
mask.getUMat().convertTo(weight_map, CV_32F, 1./255.);
|
||||
}
|
||||
else // weight_type_ == CV_16S
|
||||
{
|
||||
mask.getUMat().convertTo(weight_map, CV_16S);
|
||||
UMat add_mask;
|
||||
compare(mask, 0, add_mask, CMP_NE);
|
||||
add(weight_map, Scalar::all(1), weight_map, add_mask);
|
||||
}
|
||||
|
||||
copyMakeBorder(weight_map, weight_pyr_gauss[0], top, bottom, left, right, BORDER_CONSTANT);
|
||||
|
||||
for (int i = 0; i < num_bands_; ++i)
|
||||
pyrDown(weight_pyr_gauss[i], weight_pyr_gauss[i + 1]);
|
||||
|
||||
LOGLN(" Create the weight map Gaussian pyramid, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
|
||||
#if ENABLE_LOG
|
||||
t = getTickCount();
|
||||
#endif
|
||||
|
||||
int y_tl = tl_new.y - dst_roi_.y;
|
||||
int y_br = br_new.y - dst_roi_.y;
|
||||
int x_tl = tl_new.x - dst_roi_.x;
|
||||
int x_br = br_new.x - dst_roi_.x;
|
||||
|
||||
// Add weighted layer of the source image to the final Laplacian pyramid layer
|
||||
for (int i = 0; i <= num_bands_; ++i)
|
||||
{
|
||||
Rect rc(x_tl, y_tl, x_br - x_tl, y_br - y_tl);
|
||||
#ifdef HAVE_OPENCL
|
||||
if ( !cv::ocl::isOpenCLActivated() ||
|
||||
!ocl_MultiBandBlender_feed(src_pyr_laplace[i], weight_pyr_gauss[i],
|
||||
dst_pyr_laplace_[i](rc), dst_band_weights_[i](rc)) )
|
||||
#endif
|
||||
{
|
||||
Mat _src_pyr_laplace = src_pyr_laplace[i].getMat(ACCESS_READ);
|
||||
Mat _dst_pyr_laplace = dst_pyr_laplace_[i](rc).getMat(ACCESS_RW);
|
||||
Mat _weight_pyr_gauss = weight_pyr_gauss[i].getMat(ACCESS_READ);
|
||||
Mat _dst_band_weights = dst_band_weights_[i](rc).getMat(ACCESS_RW);
|
||||
if (weight_type_ == CV_32F)
|
||||
{
|
||||
for (int y = 0; y < rc.height; ++y)
|
||||
{
|
||||
const Point3_<short>* src_row = _src_pyr_laplace.ptr<Point3_<short> >(y);
|
||||
Point3_<short>* dst_row = _dst_pyr_laplace.ptr<Point3_<short> >(y);
|
||||
const float* weight_row = _weight_pyr_gauss.ptr<float>(y);
|
||||
float* dst_weight_row = _dst_band_weights.ptr<float>(y);
|
||||
|
||||
for (int x = 0; x < rc.width; ++x)
|
||||
{
|
||||
dst_row[x].x += static_cast<short>(src_row[x].x * weight_row[x]);
|
||||
dst_row[x].y += static_cast<short>(src_row[x].y * weight_row[x]);
|
||||
dst_row[x].z += static_cast<short>(src_row[x].z * weight_row[x]);
|
||||
dst_weight_row[x] += weight_row[x];
|
||||
}
|
||||
}
|
||||
}
|
||||
else // weight_type_ == CV_16S
|
||||
{
|
||||
for (int y = 0; y < y_br - y_tl; ++y)
|
||||
{
|
||||
const Point3_<short>* src_row = _src_pyr_laplace.ptr<Point3_<short> >(y);
|
||||
Point3_<short>* dst_row = _dst_pyr_laplace.ptr<Point3_<short> >(y);
|
||||
const short* weight_row = _weight_pyr_gauss.ptr<short>(y);
|
||||
short* dst_weight_row = _dst_band_weights.ptr<short>(y);
|
||||
|
||||
for (int x = 0; x < x_br - x_tl; ++x)
|
||||
{
|
||||
dst_row[x].x += short((src_row[x].x * weight_row[x]) >> 8);
|
||||
dst_row[x].y += short((src_row[x].y * weight_row[x]) >> 8);
|
||||
dst_row[x].z += short((src_row[x].z * weight_row[x]) >> 8);
|
||||
dst_weight_row[x] += weight_row[x];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
#ifdef HAVE_OPENCL
|
||||
else
|
||||
{
|
||||
CV_IMPL_ADD(CV_IMPL_OCL);
|
||||
}
|
||||
#endif
|
||||
|
||||
x_tl /= 2; y_tl /= 2;
|
||||
x_br /= 2; y_br /= 2;
|
||||
}
|
||||
|
||||
LOGLN(" Add weighted layer of the source image to the final Laplacian pyramid layer, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
|
||||
}
|
||||
|
||||
|
||||
void MultiBandBlender::blend(InputOutputArray dst, InputOutputArray dst_mask)
|
||||
{
|
||||
Rect dst_rc(0, 0, dst_roi_final_.width, dst_roi_final_.height);
|
||||
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
|
||||
if (can_use_gpu_)
|
||||
{
|
||||
if (!gpu_initialized_)
|
||||
{
|
||||
gpu_ups_.push_back(std::vector<cuda::GpuMat>(num_bands_+1));
|
||||
}
|
||||
|
||||
for (int i = 0; i <= num_bands_; ++i)
|
||||
{
|
||||
cuda::GpuMat dst_i = gpu_dst_pyr_laplace_[i];
|
||||
cuda::GpuMat weight_i = gpu_dst_band_weights_[i];
|
||||
|
||||
using namespace ::cv::cuda::device::blend;
|
||||
if (weight_type_ == CV_32F)
|
||||
{
|
||||
normalizeUsingWeightMapGpu32F(weight_i, dst_i, weight_i.cols, weight_i.rows);
|
||||
}
|
||||
else
|
||||
{
|
||||
normalizeUsingWeightMapGpu16S(weight_i, dst_i, weight_i.cols, weight_i.rows);
|
||||
}
|
||||
}
|
||||
|
||||
// Restore image from Laplacian pyramid
|
||||
for (size_t i = num_bands_; i > 0; --i)
|
||||
{
|
||||
cuda::pyrUp(gpu_dst_pyr_laplace_[i], gpu_ups_[gpu_ups_.size()-1][num_bands_-i]);
|
||||
cuda::add(gpu_ups_[gpu_ups_.size()-1][num_bands_-i],
|
||||
gpu_dst_pyr_laplace_[i - 1],
|
||||
gpu_dst_pyr_laplace_[i - 1]);
|
||||
}
|
||||
|
||||
// If dst is GpuMat do masking on gpu and return dst as a GpuMat
|
||||
// else download the image to cpu and return it as an ordinary Mat
|
||||
if (dst.isGpuMat())
|
||||
{
|
||||
cuda::GpuMat &gpu_dst = dst.getGpuMatRef();
|
||||
|
||||
cuda::compare(gpu_dst_band_weights_[0](dst_rc), WEIGHT_EPS, gpu_dst_mask_, CMP_GT);
|
||||
|
||||
cuda::compare(gpu_dst_mask_, 0, gpu_mask_, CMP_EQ);
|
||||
|
||||
gpu_dst_pyr_laplace_[0](dst_rc).setTo(Scalar::all(0), gpu_mask_);
|
||||
gpu_dst_pyr_laplace_[0](dst_rc).convertTo(gpu_dst, CV_16S);
|
||||
|
||||
}
|
||||
else
|
||||
{
|
||||
gpu_dst_pyr_laplace_[0](dst_rc).download(dst_);
|
||||
Mat dst_band_weights_0;
|
||||
gpu_dst_band_weights_[0].download(dst_band_weights_0);
|
||||
|
||||
compare(dst_band_weights_0(dst_rc), WEIGHT_EPS, dst_mask_, CMP_GT);
|
||||
Blender::blend(dst, dst_mask);
|
||||
}
|
||||
|
||||
// Set destination Mats to 0 so new image can be blended
|
||||
for (size_t i = 0; i < (size_t)(num_bands_ + 1); ++i)
|
||||
{
|
||||
gpu_dst_band_weights_[i].setTo(0);
|
||||
gpu_dst_pyr_laplace_[i].setTo(Scalar::all(0));
|
||||
}
|
||||
gpu_feed_idx_ = 0;
|
||||
gpu_initialized_ = true;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
{
|
||||
cv::UMat dst_band_weights_0;
|
||||
|
||||
for (int i = 0; i <= num_bands_; ++i)
|
||||
normalizeUsingWeightMap(dst_band_weights_[i], dst_pyr_laplace_[i]);
|
||||
|
||||
restoreImageFromLaplacePyr(dst_pyr_laplace_);
|
||||
|
||||
dst_ = dst_pyr_laplace_[0](dst_rc);
|
||||
dst_band_weights_0 = dst_band_weights_[0];
|
||||
|
||||
dst_pyr_laplace_.clear();
|
||||
dst_band_weights_.clear();
|
||||
|
||||
compare(dst_band_weights_0(dst_rc), WEIGHT_EPS, dst_mask_, CMP_GT);
|
||||
|
||||
Blender::blend(dst, dst_mask);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
// Auxiliary functions
|
||||
|
||||
#ifdef HAVE_OPENCL
|
||||
static bool ocl_normalizeUsingWeightMap(InputArray _weight, InputOutputArray _mat)
|
||||
{
|
||||
String buildOptions = "-D DEFINE_normalizeUsingWeightMap";
|
||||
ocl::buildOptionsAddMatrixDescription(buildOptions, "mat", _mat);
|
||||
ocl::buildOptionsAddMatrixDescription(buildOptions, "weight", _weight);
|
||||
ocl::Kernel k("normalizeUsingWeightMap", ocl::stitching::multibandblend_oclsrc, buildOptions);
|
||||
if (k.empty())
|
||||
return false;
|
||||
|
||||
UMat mat = _mat.getUMat();
|
||||
|
||||
k.args(ocl::KernelArg::ReadWrite(mat),
|
||||
ocl::KernelArg::ReadOnly(_weight.getUMat())
|
||||
);
|
||||
|
||||
size_t globalsize[2] = {(size_t)mat.cols, (size_t)mat.rows };
|
||||
return k.run(2, globalsize, NULL, false);
|
||||
}
|
||||
#endif
|
||||
|
||||
void normalizeUsingWeightMap(InputArray _weight, InputOutputArray _src)
|
||||
{
|
||||
Mat src;
|
||||
Mat weight;
|
||||
|
||||
#ifdef HAVE_OPENCL
|
||||
if ( !cv::ocl::isOpenCLActivated() ||
|
||||
!ocl_normalizeUsingWeightMap(_weight, _src) )
|
||||
#endif
|
||||
{
|
||||
src = _src.getMat();
|
||||
weight = _weight.getMat();
|
||||
|
||||
CV_Assert(src.type() == CV_16SC3);
|
||||
|
||||
if (weight.type() == CV_32FC1)
|
||||
{
|
||||
for (int y = 0; y < src.rows; ++y)
|
||||
{
|
||||
Point3_<short> *row = src.ptr<Point3_<short> >(y);
|
||||
const float *weight_row = weight.ptr<float>(y);
|
||||
|
||||
for (int x = 0; x < src.cols; ++x)
|
||||
{
|
||||
row[x].x = static_cast<short>(row[x].x / (weight_row[x] + WEIGHT_EPS));
|
||||
row[x].y = static_cast<short>(row[x].y / (weight_row[x] + WEIGHT_EPS));
|
||||
row[x].z = static_cast<short>(row[x].z / (weight_row[x] + WEIGHT_EPS));
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
CV_Assert(weight.type() == CV_16SC1);
|
||||
|
||||
for (int y = 0; y < src.rows; ++y)
|
||||
{
|
||||
const short *weight_row = weight.ptr<short>(y);
|
||||
Point3_<short> *row = src.ptr<Point3_<short> >(y);
|
||||
|
||||
for (int x = 0; x < src.cols; ++x)
|
||||
{
|
||||
int w = weight_row[x] + 1;
|
||||
row[x].x = static_cast<short>((row[x].x << 8) / w);
|
||||
row[x].y = static_cast<short>((row[x].y << 8) / w);
|
||||
row[x].z = static_cast<short>((row[x].z << 8) / w);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
#ifdef HAVE_OPENCL
|
||||
else
|
||||
{
|
||||
CV_IMPL_ADD(CV_IMPL_OCL);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
void createWeightMap(InputArray mask, float sharpness, InputOutputArray weight)
|
||||
{
|
||||
CV_Assert(mask.type() == CV_8U);
|
||||
distanceTransform(mask, weight, DIST_L1, 3);
|
||||
UMat tmp;
|
||||
multiply(weight, sharpness, tmp);
|
||||
threshold(tmp, weight, 1.f, 1.f, THRESH_TRUNC);
|
||||
}
|
||||
|
||||
|
||||
void createLaplacePyr(InputArray img, int num_levels, std::vector<UMat> &pyr)
|
||||
{
|
||||
pyr.resize(num_levels + 1);
|
||||
|
||||
if(img.depth() == CV_8U)
|
||||
{
|
||||
if(num_levels == 0)
|
||||
{
|
||||
img.getUMat().convertTo(pyr[0], CV_16S);
|
||||
return;
|
||||
}
|
||||
|
||||
UMat downNext;
|
||||
UMat current = img.getUMat();
|
||||
pyrDown(img, downNext);
|
||||
|
||||
for(int i = 1; i < num_levels; ++i)
|
||||
{
|
||||
UMat lvl_up;
|
||||
UMat lvl_down;
|
||||
|
||||
pyrDown(downNext, lvl_down);
|
||||
pyrUp(downNext, lvl_up, current.size());
|
||||
subtract(current, lvl_up, pyr[i-1], noArray(), CV_16S);
|
||||
|
||||
current = downNext;
|
||||
downNext = lvl_down;
|
||||
}
|
||||
|
||||
{
|
||||
UMat lvl_up;
|
||||
pyrUp(downNext, lvl_up, current.size());
|
||||
subtract(current, lvl_up, pyr[num_levels-1], noArray(), CV_16S);
|
||||
|
||||
downNext.convertTo(pyr[num_levels], CV_16S);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
pyr[0] = img.getUMat();
|
||||
for (int i = 0; i < num_levels; ++i)
|
||||
pyrDown(pyr[i], pyr[i + 1]);
|
||||
UMat tmp;
|
||||
for (int i = 0; i < num_levels; ++i)
|
||||
{
|
||||
pyrUp(pyr[i + 1], tmp, pyr[i].size());
|
||||
subtract(pyr[i], tmp, pyr[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void createLaplacePyrGpu(InputArray img, int num_levels, std::vector<UMat> &pyr)
|
||||
{
|
||||
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
|
||||
pyr.resize(num_levels + 1);
|
||||
|
||||
std::vector<cuda::GpuMat> gpu_pyr(num_levels + 1);
|
||||
gpu_pyr[0].upload(img);
|
||||
for (int i = 0; i < num_levels; ++i)
|
||||
cuda::pyrDown(gpu_pyr[i], gpu_pyr[i + 1]);
|
||||
|
||||
cuda::GpuMat tmp;
|
||||
for (int i = 0; i < num_levels; ++i)
|
||||
{
|
||||
cuda::pyrUp(gpu_pyr[i + 1], tmp);
|
||||
cuda::subtract(gpu_pyr[i], tmp, gpu_pyr[i]);
|
||||
gpu_pyr[i].download(pyr[i]);
|
||||
}
|
||||
|
||||
gpu_pyr[num_levels].download(pyr[num_levels]);
|
||||
#else
|
||||
CV_UNUSED(img);
|
||||
CV_UNUSED(num_levels);
|
||||
CV_UNUSED(pyr);
|
||||
CV_Error(Error::StsNotImplemented, "CUDA optimization is unavailable");
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
void restoreImageFromLaplacePyr(std::vector<UMat> &pyr)
|
||||
{
|
||||
if (pyr.empty())
|
||||
return;
|
||||
UMat tmp;
|
||||
for (size_t i = pyr.size() - 1; i > 0; --i)
|
||||
{
|
||||
pyrUp(pyr[i], tmp, pyr[i - 1].size());
|
||||
add(tmp, pyr[i - 1], pyr[i - 1]);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void restoreImageFromLaplacePyrGpu(std::vector<UMat> &pyr)
|
||||
{
|
||||
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING)
|
||||
if (pyr.empty())
|
||||
return;
|
||||
|
||||
std::vector<cuda::GpuMat> gpu_pyr(pyr.size());
|
||||
for (size_t i = 0; i < pyr.size(); ++i)
|
||||
gpu_pyr[i].upload(pyr[i]);
|
||||
|
||||
cuda::GpuMat tmp;
|
||||
for (size_t i = pyr.size() - 1; i > 0; --i)
|
||||
{
|
||||
cuda::pyrUp(gpu_pyr[i], tmp);
|
||||
cuda::add(tmp, gpu_pyr[i - 1], gpu_pyr[i - 1]);
|
||||
}
|
||||
|
||||
gpu_pyr[0].download(pyr[0]);
|
||||
#else
|
||||
CV_UNUSED(pyr);
|
||||
CV_Error(Error::StsNotImplemented, "CUDA optimization is unavailable");
|
||||
#endif
|
||||
}
|
||||
|
||||
} // namespace detail
|
||||
} // namespace cv
|
73
3rdparty/opencv-4.5.4/modules/stitching/src/camera.cpp
vendored
Normal file
73
3rdparty/opencv-4.5.4/modules/stitching/src/camera.cpp
vendored
Normal file
@ -0,0 +1,73 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
namespace cv {
|
||||
namespace detail {
|
||||
|
||||
CameraParams::CameraParams() : focal(1), aspect(1), ppx(0), ppy(0),
|
||||
R(Mat::eye(3, 3, CV_64F)), t(Mat::zeros(3, 1, CV_64F)) {}
|
||||
|
||||
CameraParams::CameraParams(const CameraParams &other) { *this = other; }
|
||||
|
||||
CameraParams& CameraParams::operator =(const CameraParams &other)
|
||||
{
|
||||
focal = other.focal;
|
||||
ppx = other.ppx;
|
||||
ppy = other.ppy;
|
||||
aspect = other.aspect;
|
||||
R = other.R.clone();
|
||||
t = other.t.clone();
|
||||
return *this;
|
||||
}
|
||||
|
||||
Mat CameraParams::K() const
|
||||
{
|
||||
Mat_<double> k = Mat::eye(3, 3, CV_64F);
|
||||
k(0,0) = focal; k(0,2) = ppx;
|
||||
k(1,1) = focal * aspect; k(1,2) = ppy;
|
||||
return std::move(k);
|
||||
}
|
||||
|
||||
} // namespace detail
|
||||
} // namespace cv
|
221
3rdparty/opencv-4.5.4/modules/stitching/src/cuda/build_warp_maps.cu
vendored
Normal file
221
3rdparty/opencv-4.5.4/modules/stitching/src/cuda/build_warp_maps.cu
vendored
Normal file
@ -0,0 +1,221 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#if !defined CUDA_DISABLER
|
||||
|
||||
#include "opencv2/core/cuda/common.hpp"
|
||||
#include "opencv2/core/cuda/vec_traits.hpp"
|
||||
#include "opencv2/core/cuda/vec_math.hpp"
|
||||
#include "opencv2/core/cuda/saturate_cast.hpp"
|
||||
#include "opencv2/core/cuda/border_interpolate.hpp"
|
||||
|
||||
namespace cv { namespace cuda { namespace device
|
||||
{
|
||||
namespace imgproc
|
||||
{
|
||||
// TODO use intrinsics like __sinf and so on
|
||||
|
||||
namespace build_warp_maps
|
||||
{
|
||||
|
||||
__constant__ float ck_rinv[9];
|
||||
__constant__ float cr_kinv[9];
|
||||
__constant__ float ct[3];
|
||||
__constant__ float cscale;
|
||||
}
|
||||
|
||||
|
||||
class PlaneMapper
|
||||
{
|
||||
public:
|
||||
static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y)
|
||||
{
|
||||
using namespace build_warp_maps;
|
||||
|
||||
float x_ = u / cscale - ct[0];
|
||||
float y_ = v / cscale - ct[1];
|
||||
|
||||
float z;
|
||||
x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * (1 - ct[2]);
|
||||
y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * (1 - ct[2]);
|
||||
z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * (1 - ct[2]);
|
||||
|
||||
x /= z;
|
||||
y /= z;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
class CylindricalMapper
|
||||
{
|
||||
public:
|
||||
static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y)
|
||||
{
|
||||
using namespace build_warp_maps;
|
||||
|
||||
u /= cscale;
|
||||
float x_ = ::sinf(u);
|
||||
float y_ = v / cscale;
|
||||
float z_ = ::cosf(u);
|
||||
|
||||
float z;
|
||||
x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_;
|
||||
y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_;
|
||||
z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_;
|
||||
|
||||
if (z > 0) { x /= z; y /= z; }
|
||||
else x = y = -1;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
class SphericalMapper
|
||||
{
|
||||
public:
|
||||
static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y)
|
||||
{
|
||||
using namespace build_warp_maps;
|
||||
|
||||
v /= cscale;
|
||||
u /= cscale;
|
||||
|
||||
float sinv = ::sinf(v);
|
||||
float x_ = sinv * ::sinf(u);
|
||||
float y_ = -::cosf(v);
|
||||
float z_ = sinv * ::cosf(u);
|
||||
|
||||
float z;
|
||||
x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_;
|
||||
y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_;
|
||||
z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_;
|
||||
|
||||
if (z > 0) { x /= z; y /= z; }
|
||||
else x = y = -1;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
template <typename Mapper>
|
||||
__global__ void buildWarpMapsKernel(int tl_u, int tl_v, int cols, int rows,
|
||||
PtrStepf map_x, PtrStepf map_y)
|
||||
{
|
||||
int du = blockIdx.x * blockDim.x + threadIdx.x;
|
||||
int dv = blockIdx.y * blockDim.y + threadIdx.y;
|
||||
if (du < cols && dv < rows)
|
||||
{
|
||||
float u = tl_u + du;
|
||||
float v = tl_v + dv;
|
||||
float x, y;
|
||||
Mapper::mapBackward(u, v, x, y);
|
||||
map_x.ptr(dv)[du] = x;
|
||||
map_y.ptr(dv)[du] = y;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void buildWarpPlaneMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
|
||||
const float k_rinv[9], const float r_kinv[9], const float t[3],
|
||||
float scale, cudaStream_t stream)
|
||||
{
|
||||
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
|
||||
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
|
||||
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ct, t, 3*sizeof(float)));
|
||||
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));
|
||||
|
||||
int cols = map_x.cols;
|
||||
int rows = map_x.rows;
|
||||
|
||||
dim3 threads(32, 8);
|
||||
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
|
||||
|
||||
buildWarpMapsKernel<PlaneMapper><<<grid,threads>>>(tl_u, tl_v, cols, rows, map_x, map_y);
|
||||
cudaSafeCall(cudaGetLastError());
|
||||
if (stream == 0)
|
||||
cudaSafeCall(cudaDeviceSynchronize());
|
||||
}
|
||||
|
||||
|
||||
void buildWarpCylindricalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
|
||||
const float k_rinv[9], const float r_kinv[9], float scale,
|
||||
cudaStream_t stream)
|
||||
{
|
||||
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
|
||||
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
|
||||
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));
|
||||
|
||||
int cols = map_x.cols;
|
||||
int rows = map_x.rows;
|
||||
|
||||
dim3 threads(32, 8);
|
||||
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
|
||||
|
||||
buildWarpMapsKernel<CylindricalMapper><<<grid,threads>>>(tl_u, tl_v, cols, rows, map_x, map_y);
|
||||
cudaSafeCall(cudaGetLastError());
|
||||
if (stream == 0)
|
||||
cudaSafeCall(cudaDeviceSynchronize());
|
||||
}
|
||||
|
||||
|
||||
void buildWarpSphericalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
|
||||
const float k_rinv[9], const float r_kinv[9], float scale,
|
||||
cudaStream_t stream)
|
||||
{
|
||||
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
|
||||
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
|
||||
cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));
|
||||
|
||||
int cols = map_x.cols;
|
||||
int rows = map_x.rows;
|
||||
|
||||
dim3 threads(32, 8);
|
||||
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
|
||||
|
||||
buildWarpMapsKernel<SphericalMapper><<<grid,threads>>>(tl_u, tl_v, cols, rows, map_x, map_y);
|
||||
cudaSafeCall(cudaGetLastError());
|
||||
if (stream == 0)
|
||||
cudaSafeCall(cudaDeviceSynchronize());
|
||||
}
|
||||
} // namespace imgproc
|
||||
}}} // namespace cv { namespace cuda { namespace cudev {
|
||||
|
||||
|
||||
#endif /* CUDA_DISABLER */
|
112
3rdparty/opencv-4.5.4/modules/stitching/src/cuda/multiband_blend.cu
vendored
Normal file
112
3rdparty/opencv-4.5.4/modules/stitching/src/cuda/multiband_blend.cu
vendored
Normal file
@ -0,0 +1,112 @@
|
||||
#if !defined CUDA_DISABLER
|
||||
|
||||
#include "opencv2/core/cuda/common.hpp"
|
||||
#include "opencv2/core/types.hpp"
|
||||
|
||||
namespace cv { namespace cuda { namespace device
|
||||
{
|
||||
namespace blend
|
||||
{
|
||||
__global__ void addSrcWeightKernel16S(const PtrStep<short> src, const PtrStep<short> src_weight,
|
||||
PtrStep<short> dst, PtrStep<short> dst_weight, int rows, int cols)
|
||||
{
|
||||
int x = blockIdx.x * blockDim.x + threadIdx.x;
|
||||
int y = blockIdx.y * blockDim.y + threadIdx.y;
|
||||
|
||||
if (y < rows && x < cols)
|
||||
{
|
||||
const short3 v = ((const short3*)src.ptr(y))[x];
|
||||
short w = src_weight.ptr(y)[x];
|
||||
((short3*)dst.ptr(y))[x].x += short((v.x * w) >> 8);
|
||||
((short3*)dst.ptr(y))[x].y += short((v.y * w) >> 8);
|
||||
((short3*)dst.ptr(y))[x].z += short((v.z * w) >> 8);
|
||||
dst_weight.ptr(y)[x] += w;
|
||||
}
|
||||
}
|
||||
|
||||
void addSrcWeightGpu16S(const PtrStep<short> src, const PtrStep<short> src_weight,
|
||||
PtrStep<short> dst, PtrStep<short> dst_weight, cv::Rect &rc)
|
||||
{
|
||||
dim3 threads(16, 16);
|
||||
dim3 grid(divUp(rc.width, threads.x), divUp(rc.height, threads.y));
|
||||
addSrcWeightKernel16S<<<grid, threads>>>(src, src_weight, dst, dst_weight, rc.height, rc.width);
|
||||
cudaSafeCall(cudaGetLastError());
|
||||
}
|
||||
|
||||
__global__ void addSrcWeightKernel32F(const PtrStep<short> src, const PtrStepf src_weight,
|
||||
PtrStep<short> dst, PtrStepf dst_weight, int rows, int cols)
|
||||
{
|
||||
int x = blockIdx.x * blockDim.x + threadIdx.x;
|
||||
int y = blockIdx.y * blockDim.y + threadIdx.y;
|
||||
|
||||
if (y < rows && x < cols)
|
||||
{
|
||||
const short3 v = ((const short3*)src.ptr(y))[x];
|
||||
float w = src_weight.ptr(y)[x];
|
||||
((short3*)dst.ptr(y))[x].x += static_cast<short>(v.x * w);
|
||||
((short3*)dst.ptr(y))[x].y += static_cast<short>(v.y * w);
|
||||
((short3*)dst.ptr(y))[x].z += static_cast<short>(v.z * w);
|
||||
dst_weight.ptr(y)[x] += w;
|
||||
}
|
||||
}
|
||||
|
||||
void addSrcWeightGpu32F(const PtrStep<short> src, const PtrStepf src_weight,
|
||||
PtrStep<short> dst, PtrStepf dst_weight, cv::Rect &rc)
|
||||
{
|
||||
dim3 threads(16, 16);
|
||||
dim3 grid(divUp(rc.width, threads.x), divUp(rc.height, threads.y));
|
||||
addSrcWeightKernel32F<<<grid, threads>>>(src, src_weight, dst, dst_weight, rc.height, rc.width);
|
||||
cudaSafeCall(cudaGetLastError());
|
||||
}
|
||||
|
||||
__global__ void normalizeUsingWeightKernel16S(const PtrStep<short> weight, PtrStep<short> src,
|
||||
const int width, const int height)
|
||||
{
|
||||
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
|
||||
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
|
||||
|
||||
if (x < width && y < height)
|
||||
{
|
||||
const short3 v = ((short3*)src.ptr(y))[x];
|
||||
short w = weight.ptr(y)[x];
|
||||
((short3*)src.ptr(y))[x] = make_short3(short((v.x << 8) / w),
|
||||
short((v.y << 8) / w), short((v.z << 8) / w));
|
||||
}
|
||||
}
|
||||
|
||||
void normalizeUsingWeightMapGpu16S(const PtrStep<short> weight, PtrStep<short> src,
|
||||
const int width, const int height)
|
||||
{
|
||||
dim3 threads(16, 16);
|
||||
dim3 grid(divUp(width, threads.x), divUp(height, threads.y));
|
||||
normalizeUsingWeightKernel16S<<<grid, threads>>> (weight, src, width, height);
|
||||
}
|
||||
|
||||
__global__ void normalizeUsingWeightKernel32F(const PtrStepf weight, PtrStep<short> src,
|
||||
const int width, const int height)
|
||||
{
|
||||
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
|
||||
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
|
||||
|
||||
if (x < width && y < height)
|
||||
{
|
||||
const float WEIGHT_EPS = 1e-5f;
|
||||
const short3 v = ((short3*)src.ptr(y))[x];
|
||||
float w = weight.ptr(y)[x];
|
||||
((short3*)src.ptr(y))[x] = make_short3(static_cast<short>(v.x / (w + WEIGHT_EPS)),
|
||||
static_cast<short>(v.y / (w + WEIGHT_EPS)),
|
||||
static_cast<short>(v.z / (w + WEIGHT_EPS)));
|
||||
}
|
||||
}
|
||||
|
||||
void normalizeUsingWeightMapGpu32F(const PtrStepf weight, PtrStep<short> src,
|
||||
const int width, const int height)
|
||||
{
|
||||
dim3 threads(16, 16);
|
||||
dim3 grid(divUp(width, threads.x), divUp(height, threads.y));
|
||||
normalizeUsingWeightKernel32F<<<grid, threads>>> (weight, src, width, height);
|
||||
}
|
||||
}
|
||||
}}}
|
||||
|
||||
#endif
|
619
3rdparty/opencv-4.5.4/modules/stitching/src/exposure_compensate.cpp
vendored
Normal file
619
3rdparty/opencv-4.5.4/modules/stitching/src/exposure_compensate.cpp
vendored
Normal file
@ -0,0 +1,619 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
#ifdef HAVE_EIGEN
|
||||
#include <Eigen/Core>
|
||||
#include <Eigen/Dense>
|
||||
#endif
|
||||
|
||||
namespace cv {
|
||||
namespace detail {
|
||||
|
||||
Ptr<ExposureCompensator> ExposureCompensator::createDefault(int type)
|
||||
{
|
||||
Ptr<ExposureCompensator> e;
|
||||
if (type == NO)
|
||||
e = makePtr<NoExposureCompensator>();
|
||||
else if (type == GAIN)
|
||||
e = makePtr<GainCompensator>();
|
||||
else if (type == GAIN_BLOCKS)
|
||||
e = makePtr<BlocksGainCompensator>();
|
||||
else if (type == CHANNELS)
|
||||
e = makePtr<ChannelsCompensator>();
|
||||
else if (type == CHANNELS_BLOCKS)
|
||||
e = makePtr<BlocksChannelsCompensator>();
|
||||
|
||||
if (e.get() != nullptr)
|
||||
return e;
|
||||
|
||||
CV_Error(Error::StsBadArg, "unsupported exposure compensation method");
|
||||
}
|
||||
|
||||
|
||||
void ExposureCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
|
||||
const std::vector<UMat> &masks)
|
||||
{
|
||||
std::vector<std::pair<UMat,uchar> > level_masks;
|
||||
for (size_t i = 0; i < masks.size(); ++i)
|
||||
level_masks.push_back(std::make_pair(masks[i], (uchar)255));
|
||||
feed(corners, images, level_masks);
|
||||
}
|
||||
|
||||
|
||||
void GainCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
|
||||
const std::vector<std::pair<UMat,uchar> > &masks)
|
||||
{
|
||||
LOGLN("Exposure compensation...");
|
||||
#if ENABLE_LOG
|
||||
int64 t = getTickCount();
|
||||
#endif
|
||||
|
||||
const int num_images = static_cast<int>(images.size());
|
||||
Mat accumulated_gains;
|
||||
prepareSimilarityMask(corners, images);
|
||||
|
||||
for (int n = 0; n < nr_feeds_; ++n)
|
||||
{
|
||||
if (n > 0)
|
||||
{
|
||||
// Apply previous iteration gains
|
||||
for (int i = 0; i < num_images; ++i)
|
||||
apply(i, corners[i], images[i], masks[i].first);
|
||||
}
|
||||
|
||||
singleFeed(corners, images, masks);
|
||||
|
||||
if (n == 0)
|
||||
accumulated_gains = gains_.clone();
|
||||
else
|
||||
multiply(accumulated_gains, gains_, accumulated_gains);
|
||||
}
|
||||
gains_ = accumulated_gains;
|
||||
|
||||
LOGLN("Exposure compensation, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
|
||||
}
|
||||
|
||||
void GainCompensator::singleFeed(const std::vector<Point> &corners, const std::vector<UMat> &images,
|
||||
const std::vector<std::pair<UMat,uchar> > &masks)
|
||||
{
|
||||
CV_Assert(corners.size() == images.size() && images.size() == masks.size());
|
||||
|
||||
if (images.size() == 0)
|
||||
return;
|
||||
|
||||
const int num_channels = images[0].channels();
|
||||
CV_Assert(std::all_of(images.begin(), images.end(),
|
||||
[num_channels](const UMat& image) { return image.channels() == num_channels; }));
|
||||
CV_Assert(num_channels == 1 || num_channels == 3);
|
||||
|
||||
const int num_images = static_cast<int>(images.size());
|
||||
Mat_<int> N(num_images, num_images); N.setTo(0);
|
||||
Mat_<double> I(num_images, num_images); I.setTo(0);
|
||||
Mat_<bool> skip(num_images, 1); skip.setTo(true);
|
||||
|
||||
Mat subimg1, subimg2;
|
||||
Mat_<uchar> submask1, submask2, intersect;
|
||||
|
||||
std::vector<UMat>::iterator similarity_it = similarities_.begin();
|
||||
|
||||
for (int i = 0; i < num_images; ++i)
|
||||
{
|
||||
for (int j = i; j < num_images; ++j)
|
||||
{
|
||||
Rect roi;
|
||||
if (overlapRoi(corners[i], corners[j], images[i].size(), images[j].size(), roi))
|
||||
{
|
||||
subimg1 = images[i](Rect(roi.tl() - corners[i], roi.br() - corners[i])).getMat(ACCESS_READ);
|
||||
subimg2 = images[j](Rect(roi.tl() - corners[j], roi.br() - corners[j])).getMat(ACCESS_READ);
|
||||
|
||||
submask1 = masks[i].first(Rect(roi.tl() - corners[i], roi.br() - corners[i])).getMat(ACCESS_READ);
|
||||
submask2 = masks[j].first(Rect(roi.tl() - corners[j], roi.br() - corners[j])).getMat(ACCESS_READ);
|
||||
intersect = (submask1 == masks[i].second) & (submask2 == masks[j].second);
|
||||
|
||||
if (!similarities_.empty())
|
||||
{
|
||||
CV_Assert(similarity_it != similarities_.end());
|
||||
UMat similarity = *similarity_it++;
|
||||
// in-place operation has an issue. don't remove the swap
|
||||
// detail https://github.com/opencv/opencv/issues/19184
|
||||
Mat_<uchar> intersect_updated;
|
||||
bitwise_and(intersect, similarity, intersect_updated);
|
||||
std::swap(intersect, intersect_updated);
|
||||
}
|
||||
|
||||
int intersect_count = countNonZero(intersect);
|
||||
N(i, j) = N(j, i) = std::max(1, intersect_count);
|
||||
|
||||
// Don't compute Isums if subimages do not intersect anyway
|
||||
if (intersect_count == 0)
|
||||
continue;
|
||||
|
||||
// Don't skip images that intersect with at least one other image
|
||||
if (i != j)
|
||||
{
|
||||
skip(i, 0) = false;
|
||||
skip(j, 0) = false;
|
||||
}
|
||||
|
||||
double Isum1 = 0, Isum2 = 0;
|
||||
for (int y = 0; y < roi.height; ++y)
|
||||
{
|
||||
if (num_channels == 3)
|
||||
{
|
||||
const Vec<uchar, 3>* r1 = subimg1.ptr<Vec<uchar, 3> >(y);
|
||||
const Vec<uchar, 3>* r2 = subimg2.ptr<Vec<uchar, 3> >(y);
|
||||
for (int x = 0; x < roi.width; ++x)
|
||||
{
|
||||
if (intersect(y, x))
|
||||
{
|
||||
Isum1 += norm(r1[x]);
|
||||
Isum2 += norm(r2[x]);
|
||||
}
|
||||
}
|
||||
}
|
||||
else // if (num_channels == 1)
|
||||
{
|
||||
const uchar* r1 = subimg1.ptr<uchar>(y);
|
||||
const uchar* r2 = subimg2.ptr<uchar>(y);
|
||||
for (int x = 0; x < roi.width; ++x)
|
||||
{
|
||||
if (intersect(y, x))
|
||||
{
|
||||
Isum1 += r1[x];
|
||||
Isum2 += r2[x];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
I(i, j) = Isum1 / N(i, j);
|
||||
I(j, i) = Isum2 / N(i, j);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (getUpdateGain() || gains_.rows != num_images)
|
||||
{
|
||||
double alpha = 0.01;
|
||||
double beta = 100;
|
||||
int num_eq = num_images - countNonZero(skip);
|
||||
gains_.create(num_images, 1);
|
||||
gains_.setTo(1);
|
||||
|
||||
// No images to process; gains were all set to one above, so stop here
|
||||
if (num_eq == 0)
|
||||
return;
|
||||
|
||||
Mat_<double> A(num_eq, num_eq); A.setTo(0);
|
||||
Mat_<double> b(num_eq, 1); b.setTo(0);
|
||||
for (int i = 0, ki = 0; i < num_images; ++i)
|
||||
{
|
||||
if (skip(i, 0))
|
||||
continue;
|
||||
|
||||
for (int j = 0, kj = 0; j < num_images; ++j)
|
||||
{
|
||||
if (skip(j, 0))
|
||||
continue;
|
||||
|
||||
b(ki, 0) += beta * N(i, j);
|
||||
A(ki, ki) += beta * N(i, j);
|
||||
if (j != i)
|
||||
{
|
||||
A(ki, ki) += 2 * alpha * I(i, j) * I(i, j) * N(i, j);
|
||||
A(ki, kj) -= 2 * alpha * I(i, j) * I(j, i) * N(i, j);
|
||||
}
|
||||
++kj;
|
||||
}
|
||||
++ki;
|
||||
}
|
||||
|
||||
Mat_<double> l_gains;
|
||||
|
||||
#ifdef HAVE_EIGEN
|
||||
Eigen::MatrixXf eigen_A, eigen_b, eigen_x;
|
||||
cv2eigen(A, eigen_A);
|
||||
cv2eigen(b, eigen_b);
|
||||
|
||||
Eigen::LLT<Eigen::MatrixXf> solver(eigen_A);
|
||||
#if ENABLE_LOG
|
||||
if (solver.info() != Eigen::ComputationInfo::Success)
|
||||
LOGLN("Failed to solve exposure compensation system");
|
||||
#endif
|
||||
eigen_x = solver.solve(eigen_b);
|
||||
|
||||
Mat_<float> l_gains_float;
|
||||
eigen2cv(eigen_x, l_gains_float);
|
||||
l_gains_float.convertTo(l_gains, CV_64FC1);
|
||||
#else
|
||||
solve(A, b, l_gains);
|
||||
#endif
|
||||
CV_CheckTypeEQ(l_gains.type(), CV_64FC1, "");
|
||||
|
||||
for (int i = 0, j = 0; i < num_images; ++i)
|
||||
{
|
||||
// Only assign non-skipped gains. Other gains are already set to 1
|
||||
if (!skip(i, 0))
|
||||
gains_.at<double>(i, 0) = l_gains(j++, 0);
|
||||
}
|
||||
}
|
||||
}
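// Reading of the system assembled above (up to constant scale factors): the gains g_i are the
// least-squares minimizers of
//     sum_{i,j} N(i,j) * ( alpha * (g_i * I(i,j) - g_j * I(j,i))^2 + beta * (g_i - 1)^2 ),
// i.e. overlapping regions should agree in mean intensity while every gain is pulled towards 1
// (alpha = 0.01, beta = 100 above).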
|
||||
|
||||
|
||||
void GainCompensator::apply(int index, Point /*corner*/, InputOutputArray image, InputArray /*mask*/)
|
||||
{
|
||||
CV_INSTRUMENT_REGION();
|
||||
|
||||
multiply(image, gains_(index, 0), image);
|
||||
}
|
||||
|
||||
|
||||
std::vector<double> GainCompensator::gains() const
|
||||
{
|
||||
std::vector<double> gains_vec(gains_.rows);
|
||||
for (int i = 0; i < gains_.rows; ++i)
|
||||
gains_vec[i] = gains_(i, 0);
|
||||
return gains_vec;
|
||||
}
|
||||
|
||||
void GainCompensator::getMatGains(std::vector<Mat>& umv)
|
||||
{
|
||||
umv.clear();
|
||||
for (int i = 0; i < gains_.rows; ++i)
|
||||
umv.push_back(Mat(1,1,CV_64FC1,Scalar(gains_(i, 0))));
|
||||
}
|
||||
void GainCompensator::setMatGains(std::vector<Mat>& umv)
|
||||
{
|
||||
gains_=Mat_<double>(static_cast<int>(umv.size()),1);
|
||||
for (int i = 0; i < static_cast<int>(umv.size()); i++)
|
||||
{
|
||||
int type = umv[i].type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
|
||||
CV_CheckType(type, depth == CV_64F && cn == 1, "Only double images are supported for gain");
|
||||
CV_Assert(umv[i].rows == 1 && umv[i].cols == 1);
|
||||
gains_(i, 0) = umv[i].at<double>(0, 0);
|
||||
}
|
||||
}
|
||||
|
||||
void GainCompensator::prepareSimilarityMask(
|
||||
const std::vector<Point> &corners, const std::vector<UMat> &images)
|
||||
{
|
||||
if (similarity_threshold_ >= 1)
|
||||
{
|
||||
LOGLN(" skipping similarity mask: disabled");
|
||||
return;
|
||||
}
|
||||
if (!similarities_.empty())
|
||||
{
|
||||
LOGLN(" skipping similarity mask: already set");
|
||||
return;
|
||||
}
|
||||
|
||||
LOGLN(" calculating similarity mask");
|
||||
const int num_images = static_cast<int>(images.size());
|
||||
for (int i = 0; i < num_images; ++i)
|
||||
{
|
||||
for (int j = i; j < num_images; ++j)
|
||||
{
|
||||
Rect roi;
|
||||
if (overlapRoi(corners[i], corners[j], images[i].size(), images[j].size(), roi))
|
||||
{
|
||||
UMat subimg1 = images[i](Rect(roi.tl() - corners[i], roi.br() - corners[i]));
|
||||
UMat subimg2 = images[j](Rect(roi.tl() - corners[j], roi.br() - corners[j]));
|
||||
UMat similarity = buildSimilarityMask(subimg1, subimg2);
|
||||
similarities_.push_back(similarity);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
UMat GainCompensator::buildSimilarityMask(InputArray src_array1, InputArray src_array2)
|
||||
{
|
||||
CV_Assert(src_array1.rows() == src_array2.rows() && src_array1.cols() == src_array2.cols());
|
||||
CV_Assert(src_array1.type() == src_array2.type());
|
||||
CV_Assert(src_array1.type() == CV_8UC3 || src_array1.type() == CV_8UC1);
|
||||
|
||||
Mat src1 = src_array1.getMat();
|
||||
Mat src2 = src_array2.getMat();
|
||||
|
||||
UMat umat_similarity(src1.rows, src1.cols, CV_8UC1);
|
||||
Mat similarity = umat_similarity.getMat(ACCESS_WRITE);
|
||||
|
||||
if (src1.channels() == 3)
|
||||
{
|
||||
for (int y = 0; y < similarity.rows; ++y)
|
||||
{
|
||||
for (int x = 0; x < similarity.cols; ++x)
|
||||
{
|
||||
Vec<float, 3> vec_diff =
|
||||
Vec<float, 3>(*src1.ptr<Vec<uchar, 3>>(y, x))
|
||||
- Vec<float, 3>(*src2.ptr<Vec<uchar, 3>>(y, x));
|
||||
double diff = norm(vec_diff * (1.f / 255.f));
|
||||
|
||||
*similarity.ptr<uchar>(y, x) = diff <= similarity_threshold_ ? 255 : 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
else // if (src1.channels() == 1)
|
||||
{
|
||||
for (int y = 0; y < similarity.rows; ++y)
|
||||
{
|
||||
for (int x = 0; x < similarity.cols; ++x)
|
||||
{
|
||||
float diff = std::abs(static_cast<int>(*src1.ptr<uchar>(y, x))
|
||||
- static_cast<int>(*src2.ptr<uchar>(y, x))) / 255.f;
|
||||
|
||||
*similarity.ptr<uchar>(y, x) = diff <= similarity_threshold_ ? 255 : 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
similarity.release();
|
||||
|
||||
Mat kernel = getStructuringElement(MORPH_RECT, Size(3,3));
|
||||
UMat umat_erode;
|
||||
erode(umat_similarity, umat_erode, kernel);
|
||||
dilate(umat_erode, umat_similarity, kernel);
|
||||
|
||||
return umat_similarity;
|
||||
}
|
||||
|
||||
void ChannelsCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
|
||||
const std::vector<std::pair<UMat,uchar> > &masks)
|
||||
{
|
||||
std::array<std::vector<UMat>, 3> images_channels;
|
||||
|
||||
// Split channels of each input image
|
||||
for (const UMat& image: images)
|
||||
{
|
||||
std::vector<UMat> image_channels;
|
||||
image_channels.resize(3);
|
||||
split(image, image_channels);
|
||||
|
||||
for (int i = 0; i < int(images_channels.size()); ++i)
|
||||
images_channels[i].emplace_back(std::move(image_channels[i]));
|
||||
}
|
||||
|
||||
// For each channel, feed the channel of each image in a GainCompensator
|
||||
gains_.clear();
|
||||
gains_.resize(images.size());
|
||||
|
||||
GainCompensator compensator(getNrFeeds());
|
||||
compensator.setSimilarityThreshold(getSimilarityThreshold());
|
||||
compensator.prepareSimilarityMask(corners, images);
|
||||
|
||||
for (int c = 0; c < 3; ++c)
|
||||
{
|
||||
const std::vector<UMat>& channels = images_channels[c];
|
||||
|
||||
compensator.feed(corners, channels, masks);
|
||||
|
||||
std::vector<double> gains = compensator.gains();
|
||||
for (int i = 0; i < int(gains.size()); ++i)
|
||||
gains_.at(i)[c] = gains[i];
|
||||
}
|
||||
}
|
||||
|
||||
void ChannelsCompensator::apply(int index, Point /*corner*/, InputOutputArray image, InputArray /*mask*/)
|
||||
{
|
||||
CV_INSTRUMENT_REGION();
|
||||
|
||||
multiply(image, gains_.at(index), image);
|
||||
}
|
||||
|
||||
void ChannelsCompensator::getMatGains(std::vector<Mat>& umv)
|
||||
{
|
||||
umv.clear();
|
||||
for (int i = 0; i < static_cast<int>(gains_.size()); ++i)
|
||||
{
|
||||
Mat m;
|
||||
Mat(gains_[i]).copyTo(m);
|
||||
umv.push_back(m);
|
||||
}
|
||||
}
|
||||
|
||||
void ChannelsCompensator::setMatGains(std::vector<Mat>& umv)
|
||||
{
|
||||
for (int i = 0; i < static_cast<int>(umv.size()); i++)
|
||||
{
|
||||
Scalar s;
|
||||
umv[i].copyTo(s);
|
||||
gains_.push_back(s);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template<class Compensator>
|
||||
void BlocksCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
|
||||
const std::vector<std::pair<UMat,uchar> > &masks)
|
||||
{
|
||||
CV_Assert(corners.size() == images.size() && images.size() == masks.size());
|
||||
|
||||
const int num_images = static_cast<int>(images.size());
|
||||
|
||||
std::vector<Size> bl_per_imgs(num_images);
|
||||
std::vector<Point> block_corners;
|
||||
std::vector<UMat> block_images;
|
||||
std::vector<std::pair<UMat,uchar> > block_masks;
|
||||
|
||||
// Construct blocks for gain compensator
|
||||
for (int img_idx = 0; img_idx < num_images; ++img_idx)
|
||||
{
|
||||
Size bl_per_img((images[img_idx].cols + bl_width_ - 1) / bl_width_,
|
||||
(images[img_idx].rows + bl_height_ - 1) / bl_height_);
|
||||
int bl_width = (images[img_idx].cols + bl_per_img.width - 1) / bl_per_img.width;
|
||||
int bl_height = (images[img_idx].rows + bl_per_img.height - 1) / bl_per_img.height;
|
||||
bl_per_imgs[img_idx] = bl_per_img;
|
||||
for (int by = 0; by < bl_per_img.height; ++by)
|
||||
{
|
||||
for (int bx = 0; bx < bl_per_img.width; ++bx)
|
||||
{
|
||||
Point bl_tl(bx * bl_width, by * bl_height);
|
||||
Point bl_br(std::min(bl_tl.x + bl_width, images[img_idx].cols),
|
||||
std::min(bl_tl.y + bl_height, images[img_idx].rows));
|
||||
|
||||
block_corners.push_back(corners[img_idx] + bl_tl);
|
||||
block_images.push_back(images[img_idx](Rect(bl_tl, bl_br)));
|
||||
block_masks.push_back(std::make_pair(masks[img_idx].first(Rect(bl_tl, bl_br)),
|
||||
masks[img_idx].second));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (getUpdateGain() || int(gain_maps_.size()) != num_images)
|
||||
{
|
||||
Compensator compensator;
|
||||
compensator.setNrFeeds(getNrFeeds());
|
||||
compensator.setSimilarityThreshold(getSimilarityThreshold());
|
||||
compensator.feed(block_corners, block_images, block_masks);
|
||||
|
||||
gain_maps_.clear();
|
||||
gain_maps_.resize(num_images);
|
||||
|
||||
Mat_<float> ker(1, 3);
|
||||
ker(0, 0) = 0.25; ker(0, 1) = 0.5; ker(0, 2) = 0.25;
|
||||
|
||||
int bl_idx = 0;
|
||||
for (int img_idx = 0; img_idx < num_images; ++img_idx)
|
||||
{
|
||||
Size bl_per_img = bl_per_imgs[img_idx];
|
||||
UMat gain_map = getGainMap(compensator, bl_idx, bl_per_img);
|
||||
bl_idx += bl_per_img.width*bl_per_img.height;
|
||||
|
||||
for (int i=0; i<nr_gain_filtering_iterations_; ++i)
|
||||
{
|
||||
UMat tmp;
|
||||
sepFilter2D(gain_map, tmp, CV_32F, ker, ker);
|
||||
swap(gain_map, tmp);
|
||||
}
|
||||
|
||||
gain_maps_[img_idx] = gain_map;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
UMat BlocksCompensator::getGainMap(const GainCompensator& compensator, int bl_idx, Size bl_per_img)
|
||||
{
|
||||
std::vector<double> gains = compensator.gains();
|
||||
|
||||
UMat u_gain_map(bl_per_img, CV_32F);
|
||||
Mat_<float> gain_map = u_gain_map.getMat(ACCESS_WRITE);
|
||||
|
||||
for (int by = 0; by < bl_per_img.height; ++by)
|
||||
for (int bx = 0; bx < bl_per_img.width; ++bx, ++bl_idx)
|
||||
gain_map(by, bx) = static_cast<float>(gains[bl_idx]);
|
||||
|
||||
return u_gain_map;
|
||||
}
|
||||
|
||||
UMat BlocksCompensator::getGainMap(const ChannelsCompensator& compensator, int bl_idx, Size bl_per_img)
|
||||
{
|
||||
std::vector<Scalar> gains = compensator.gains();
|
||||
|
||||
UMat u_gain_map(bl_per_img, CV_32FC3);
|
||||
Mat_<Vec3f> gain_map = u_gain_map.getMat(ACCESS_WRITE);
|
||||
|
||||
for (int by = 0; by < bl_per_img.height; ++by)
|
||||
for (int bx = 0; bx < bl_per_img.width; ++bx, ++bl_idx)
|
||||
for (int c = 0; c < 3; ++c)
|
||||
gain_map(by, bx)[c] = static_cast<float>(gains[bl_idx][c]);
|
||||
|
||||
return u_gain_map;
|
||||
}
|
||||
|
||||
void BlocksCompensator::apply(int index, Point /*corner*/, InputOutputArray _image, InputArray /*mask*/)
|
||||
{
|
||||
CV_INSTRUMENT_REGION();
|
||||
|
||||
CV_Assert(_image.type() == CV_8UC3);
|
||||
|
||||
UMat u_gain_map;
|
||||
if (gain_maps_.at(index).size() == _image.size())
|
||||
u_gain_map = gain_maps_.at(index);
|
||||
else
|
||||
resize(gain_maps_.at(index), u_gain_map, _image.size(), 0, 0, INTER_LINEAR);
|
||||
|
||||
if (u_gain_map.channels() != 3)
|
||||
{
|
||||
std::vector<UMat> gains_channels;
|
||||
gains_channels.push_back(u_gain_map);
|
||||
gains_channels.push_back(u_gain_map);
|
||||
gains_channels.push_back(u_gain_map);
|
||||
merge(gains_channels, u_gain_map);
|
||||
}
|
||||
|
||||
multiply(_image, u_gain_map, _image, 1, _image.type());
|
||||
}
|
||||
|
||||
void BlocksCompensator::getMatGains(std::vector<Mat>& umv)
|
||||
{
|
||||
umv.clear();
|
||||
for (int i = 0; i < static_cast<int>(gain_maps_.size()); ++i)
|
||||
{
|
||||
Mat m;
|
||||
gain_maps_[i].copyTo(m);
|
||||
umv.push_back(m);
|
||||
}
|
||||
}
|
||||
|
||||
void BlocksCompensator::setMatGains(std::vector<Mat>& umv)
|
||||
{
|
||||
for (int i = 0; i < static_cast<int>(umv.size()); i++)
|
||||
{
|
||||
UMat m;
|
||||
umv[i].copyTo(m);
|
||||
gain_maps_.push_back(m);
|
||||
}
|
||||
}
|
||||
|
||||
void BlocksGainCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
|
||||
const std::vector<std::pair<UMat,uchar> > &masks)
|
||||
{
|
||||
BlocksCompensator::feed<GainCompensator>(corners, images, masks);
|
||||
}
|
||||
|
||||
void BlocksChannelsCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
|
||||
const std::vector<std::pair<UMat,uchar> > &masks)
|
||||
{
|
||||
BlocksCompensator::feed<ChannelsCompensator>(corners, images, masks);
|
||||
}
|
||||
|
||||
|
||||
} // namespace detail
|
||||
} // namespace cv
|
567
3rdparty/opencv-4.5.4/modules/stitching/src/matchers.cpp
vendored
Normal file
@ -0,0 +1,567 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
#include "opencv2/core/opencl/ocl_defs.hpp"
|
||||
|
||||
using namespace cv;
|
||||
using namespace cv::detail;
|
||||
using namespace cv::cuda;
|
||||
|
||||
#ifdef HAVE_OPENCV_CUDAIMGPROC
|
||||
# include "opencv2/cudaimgproc.hpp"
|
||||
#endif
|
||||
|
||||
namespace {
|
||||
|
||||
struct DistIdxPair
|
||||
{
|
||||
bool operator<(const DistIdxPair &other) const { return dist < other.dist; }
|
||||
double dist;
|
||||
int idx;
|
||||
};
|
||||
|
||||
|
||||
struct MatchPairsBody : ParallelLoopBody
|
||||
{
|
||||
MatchPairsBody(FeaturesMatcher &_matcher, const std::vector<ImageFeatures> &_features,
|
||||
std::vector<MatchesInfo> &_pairwise_matches, std::vector<std::pair<int,int> > &_near_pairs)
|
||||
: matcher(_matcher), features(_features),
|
||||
pairwise_matches(_pairwise_matches), near_pairs(_near_pairs) {}
|
||||
|
||||
void operator ()(const Range &r) const CV_OVERRIDE
|
||||
{
|
||||
cv::RNG rng = cv::theRNG(); // save entry rng state
|
||||
const int num_images = static_cast<int>(features.size());
|
||||
for (int i = r.start; i < r.end; ++i)
|
||||
{
|
||||
cv::theRNG() = cv::RNG(rng.state + i); // force "stable" RNG seed for each processed pair
|
||||
|
||||
int from = near_pairs[i].first;
|
||||
int to = near_pairs[i].second;
|
||||
int pair_idx = from*num_images + to;
|
||||
|
||||
matcher(features[from], features[to], pairwise_matches[pair_idx]);
|
||||
pairwise_matches[pair_idx].src_img_idx = from;
|
||||
pairwise_matches[pair_idx].dst_img_idx = to;
|
||||
|
||||
size_t dual_pair_idx = to*num_images + from;
|
||||
|
||||
pairwise_matches[dual_pair_idx] = pairwise_matches[pair_idx];
|
||||
pairwise_matches[dual_pair_idx].src_img_idx = to;
|
||||
pairwise_matches[dual_pair_idx].dst_img_idx = from;
|
||||
|
||||
if (!pairwise_matches[pair_idx].H.empty())
|
||||
pairwise_matches[dual_pair_idx].H = pairwise_matches[pair_idx].H.inv();
|
||||
|
||||
for (size_t j = 0; j < pairwise_matches[dual_pair_idx].matches.size(); ++j)
|
||||
std::swap(pairwise_matches[dual_pair_idx].matches[j].queryIdx,
|
||||
pairwise_matches[dual_pair_idx].matches[j].trainIdx);
|
||||
LOG(".");
|
||||
}
|
||||
}
|
||||
|
||||
FeaturesMatcher &matcher;
|
||||
const std::vector<ImageFeatures> &features;
|
||||
std::vector<MatchesInfo> &pairwise_matches;
|
||||
std::vector<std::pair<int,int> > &near_pairs;
|
||||
|
||||
private:
|
||||
void operator =(const MatchPairsBody&);
|
||||
};
|
||||
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
typedef std::set<std::pair<int,int> > MatchesSet;
|
||||
|
||||
// These two classes are aimed to find features matches only, not to
|
||||
// estimate homography
|
||||
|
||||
class CpuMatcher CV_FINAL : public FeaturesMatcher
|
||||
{
|
||||
public:
|
||||
CpuMatcher(float match_conf) : FeaturesMatcher(true), match_conf_(match_conf) {}
|
||||
void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info) CV_OVERRIDE;
|
||||
|
||||
private:
|
||||
float match_conf_;
|
||||
};
|
||||
|
||||
#ifdef HAVE_OPENCV_CUDAFEATURES2D
|
||||
class GpuMatcher CV_FINAL : public FeaturesMatcher
|
||||
{
|
||||
public:
|
||||
GpuMatcher(float match_conf) : match_conf_(match_conf) {}
|
||||
void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info);
|
||||
|
||||
void collectGarbage();
|
||||
|
||||
private:
|
||||
float match_conf_;
|
||||
GpuMat descriptors1_, descriptors2_;
|
||||
GpuMat train_idx_, distance_, all_dist_;
|
||||
std::vector< std::vector<DMatch> > pair_matches;
|
||||
};
|
||||
#endif
|
||||
|
||||
|
||||
void CpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info)
|
||||
{
|
||||
CV_INSTRUMENT_REGION();
|
||||
|
||||
CV_Assert(features1.descriptors.type() == features2.descriptors.type());
|
||||
CV_Assert(features2.descriptors.depth() == CV_8U || features2.descriptors.depth() == CV_32F);
|
||||
|
||||
matches_info.matches.clear();
|
||||
|
||||
Ptr<cv::DescriptorMatcher> matcher;
|
||||
#if 0 // TODO check this
|
||||
if (ocl::isOpenCLActivated())
|
||||
{
|
||||
matcher = makePtr<BFMatcher>((int)NORM_L2);
|
||||
}
|
||||
else
|
||||
#endif
|
||||
{
|
||||
Ptr<flann::IndexParams> indexParams = makePtr<flann::KDTreeIndexParams>();
|
||||
Ptr<flann::SearchParams> searchParams = makePtr<flann::SearchParams>();
|
||||
|
||||
if (features2.descriptors.depth() == CV_8U)
|
||||
{
|
||||
indexParams->setAlgorithm(cvflann::FLANN_INDEX_LSH);
|
||||
searchParams->setAlgorithm(cvflann::FLANN_INDEX_LSH);
|
||||
}
|
||||
|
||||
matcher = makePtr<FlannBasedMatcher>(indexParams, searchParams);
|
||||
}
|
||||
std::vector< std::vector<DMatch> > pair_matches;
|
||||
MatchesSet matches;
|
||||
|
||||
// Find 1->2 matches
|
||||
matcher->knnMatch(features1.descriptors, features2.descriptors, pair_matches, 2);
|
||||
for (size_t i = 0; i < pair_matches.size(); ++i)
|
||||
{
|
||||
if (pair_matches[i].size() < 2)
|
||||
continue;
|
||||
const DMatch& m0 = pair_matches[i][0];
|
||||
const DMatch& m1 = pair_matches[i][1];
|
||||
if (m0.distance < (1.f - match_conf_) * m1.distance)
|
||||
{
|
||||
matches_info.matches.push_back(m0);
|
||||
matches.insert(std::make_pair(m0.queryIdx, m0.trainIdx));
|
||||
}
|
||||
}
|
||||
LOG("\n1->2 matches: " << matches_info.matches.size() << endl);
|
||||
|
||||
// Find 2->1 matches
|
||||
pair_matches.clear();
|
||||
matcher->knnMatch(features2.descriptors, features1.descriptors, pair_matches, 2);
|
||||
for (size_t i = 0; i < pair_matches.size(); ++i)
|
||||
{
|
||||
if (pair_matches[i].size() < 2)
|
||||
continue;
|
||||
const DMatch& m0 = pair_matches[i][0];
|
||||
const DMatch& m1 = pair_matches[i][1];
|
||||
if (m0.distance < (1.f - match_conf_) * m1.distance)
|
||||
if (matches.find(std::make_pair(m0.trainIdx, m0.queryIdx)) == matches.end())
|
||||
matches_info.matches.push_back(DMatch(m0.trainIdx, m0.queryIdx, m0.distance));
|
||||
}
|
||||
LOG("1->2 & 2->1 matches: " << matches_info.matches.size() << endl);
|
||||
}
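// Note on the ratio test above: with, e.g., match_conf_ = 0.3f the best match is kept only when
// m0.distance < 0.7 * m1.distance, i.e. it must be clearly better than the runner-up.
// Example: d0 = 50, d1 = 80 passes (50 < 56), while d0 = 60, d1 = 80 is rejected.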
|
||||
|
||||
#ifdef HAVE_OPENCV_CUDAFEATURES2D
|
||||
void GpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info)
|
||||
{
|
||||
CV_INSTRUMENT_REGION();
|
||||
|
||||
matches_info.matches.clear();
|
||||
|
||||
ensureSizeIsEnough(features1.descriptors.size(), features1.descriptors.type(), descriptors1_);
|
||||
ensureSizeIsEnough(features2.descriptors.size(), features2.descriptors.type(), descriptors2_);
|
||||
|
||||
descriptors1_.upload(features1.descriptors);
|
||||
descriptors2_.upload(features2.descriptors);
|
||||
|
||||
//TODO: NORM_L1 allows to avoid matcher crashes for ORB features, but is not absolutely correct for them.
|
||||
// The best choice for ORB features is NORM_HAMMING, but it is incorrect for SURF features.
|
||||
// More accurate fix in this place should be done in the future -- the type of the norm
|
||||
// should be either a parameter of this method, or a field of the class.
|
||||
Ptr<cuda::DescriptorMatcher> matcher = cuda::DescriptorMatcher::createBFMatcher(NORM_L1);
|
||||
|
||||
MatchesSet matches;
|
||||
|
||||
// Find 1->2 matches
|
||||
pair_matches.clear();
|
||||
matcher->knnMatch(descriptors1_, descriptors2_, pair_matches, 2);
|
||||
for (size_t i = 0; i < pair_matches.size(); ++i)
|
||||
{
|
||||
if (pair_matches[i].size() < 2)
|
||||
continue;
|
||||
const DMatch& m0 = pair_matches[i][0];
|
||||
const DMatch& m1 = pair_matches[i][1];
|
||||
if (m0.distance < (1.f - match_conf_) * m1.distance)
|
||||
{
|
||||
matches_info.matches.push_back(m0);
|
||||
matches.insert(std::make_pair(m0.queryIdx, m0.trainIdx));
|
||||
}
|
||||
}
|
||||
|
||||
// Find 2->1 matches
|
||||
pair_matches.clear();
|
||||
matcher->knnMatch(descriptors2_, descriptors1_, pair_matches, 2);
|
||||
for (size_t i = 0; i < pair_matches.size(); ++i)
|
||||
{
|
||||
if (pair_matches[i].size() < 2)
|
||||
continue;
|
||||
const DMatch& m0 = pair_matches[i][0];
|
||||
const DMatch& m1 = pair_matches[i][1];
|
||||
if (m0.distance < (1.f - match_conf_) * m1.distance)
|
||||
if (matches.find(std::make_pair(m0.trainIdx, m0.queryIdx)) == matches.end())
|
||||
matches_info.matches.push_back(DMatch(m0.trainIdx, m0.queryIdx, m0.distance));
|
||||
}
|
||||
}
|
||||
|
||||
void GpuMatcher::collectGarbage()
|
||||
{
|
||||
descriptors1_.release();
|
||||
descriptors2_.release();
|
||||
train_idx_.release();
|
||||
distance_.release();
|
||||
all_dist_.release();
|
||||
std::vector< std::vector<DMatch> >().swap(pair_matches);
|
||||
}
|
||||
#endif
|
||||
|
||||
} // namespace
|
||||
|
||||
|
||||
namespace cv {
|
||||
namespace detail {
|
||||
|
||||
void computeImageFeatures(
|
||||
const Ptr<Feature2D> &featuresFinder,
|
||||
InputArrayOfArrays images,
|
||||
std::vector<ImageFeatures> &features,
|
||||
InputArrayOfArrays masks)
|
||||
{
|
||||
// compute all features
|
||||
std::vector<std::vector<KeyPoint>> keypoints;
|
||||
std::vector<UMat> descriptors;
|
||||
// TODO: replace this with one call to the new overload of detectAndCompute
|
||||
featuresFinder->detect(images, keypoints, masks);
|
||||
featuresFinder->compute(images, keypoints, descriptors);
|
||||
|
||||
// store to ImageFeatures
|
||||
size_t count = images.total();
|
||||
features.resize(count);
|
||||
CV_Assert(count == keypoints.size() && count == descriptors.size());
|
||||
for (size_t i = 0; i < count; ++i)
|
||||
{
|
||||
features[i].img_size = images.size(int(i));
|
||||
features[i].keypoints = std::move(keypoints[i]);
|
||||
features[i].descriptors = std::move(descriptors[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void computeImageFeatures(
|
||||
const Ptr<Feature2D> &featuresFinder,
|
||||
InputArray image,
|
||||
ImageFeatures &features,
|
||||
InputArray mask)
|
||||
{
|
||||
features.img_size = image.size();
|
||||
featuresFinder->detectAndCompute(image, mask, features.keypoints, features.descriptors);
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
MatchesInfo::MatchesInfo() : src_img_idx(-1), dst_img_idx(-1), num_inliers(0), confidence(0) {}
|
||||
|
||||
MatchesInfo::MatchesInfo(const MatchesInfo &other) { *this = other; }
|
||||
|
||||
MatchesInfo& MatchesInfo::operator =(const MatchesInfo &other)
|
||||
{
|
||||
src_img_idx = other.src_img_idx;
|
||||
dst_img_idx = other.dst_img_idx;
|
||||
matches = other.matches;
|
||||
inliers_mask = other.inliers_mask;
|
||||
num_inliers = other.num_inliers;
|
||||
H = other.H.clone();
|
||||
confidence = other.confidence;
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
void FeaturesMatcher::operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
|
||||
const UMat &mask)
|
||||
{
|
||||
const int num_images = static_cast<int>(features.size());
|
||||
|
||||
CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.cols == num_images && mask.rows));
|
||||
Mat_<uchar> mask_(mask.getMat(ACCESS_READ));
|
||||
if (mask_.empty())
|
||||
mask_ = Mat::ones(num_images, num_images, CV_8U);
|
||||
|
||||
std::vector<std::pair<int,int> > near_pairs;
|
||||
for (int i = 0; i < num_images - 1; ++i)
|
||||
for (int j = i + 1; j < num_images; ++j)
|
||||
if (features[i].keypoints.size() > 0 && features[j].keypoints.size() > 0 && mask_(i, j))
|
||||
near_pairs.push_back(std::make_pair(i, j));
|
||||
|
||||
pairwise_matches.clear(); // clear history values
|
||||
pairwise_matches.resize(num_images * num_images);
|
||||
MatchPairsBody body(*this, features, pairwise_matches, near_pairs);
|
||||
|
||||
if (is_thread_safe_)
|
||||
parallel_for_(Range(0, static_cast<int>(near_pairs.size())), body);
|
||||
else
|
||||
body(Range(0, static_cast<int>(near_pairs.size())));
|
||||
LOGLN_CHAT("");
|
||||
}
|
||||
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
BestOf2NearestMatcher::BestOf2NearestMatcher(bool try_use_gpu, float match_conf, int num_matches_thresh1, int num_matches_thresh2)
|
||||
{
|
||||
CV_UNUSED(try_use_gpu);
|
||||
|
||||
#ifdef HAVE_OPENCV_CUDAFEATURES2D
|
||||
if (try_use_gpu && getCudaEnabledDeviceCount() > 0)
|
||||
{
|
||||
impl_ = makePtr<GpuMatcher>(match_conf);
|
||||
}
|
||||
else
|
||||
#endif
|
||||
{
|
||||
impl_ = makePtr<CpuMatcher>(match_conf);
|
||||
}
|
||||
|
||||
is_thread_safe_ = impl_->isThreadSafe();
|
||||
num_matches_thresh1_ = num_matches_thresh1;
|
||||
num_matches_thresh2_ = num_matches_thresh2;
|
||||
}
|
||||
|
||||
Ptr<BestOf2NearestMatcher> BestOf2NearestMatcher::create(bool try_use_gpu, float match_conf, int num_matches_thresh1, int num_matches_thresh2)
|
||||
{
|
||||
return makePtr<BestOf2NearestMatcher>(try_use_gpu, match_conf, num_matches_thresh1, num_matches_thresh2);
|
||||
}
|
||||
|
||||
|
||||
|
||||
void BestOf2NearestMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2,
|
||||
MatchesInfo &matches_info)
|
||||
{
|
||||
CV_INSTRUMENT_REGION();
|
||||
|
||||
(*impl_)(features1, features2, matches_info);
|
||||
|
||||
// Check if it makes sense to find homography
|
||||
if (matches_info.matches.size() < static_cast<size_t>(num_matches_thresh1_))
|
||||
return;
|
||||
|
||||
// Construct point-point correspondences for homography estimation
|
||||
Mat src_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
|
||||
Mat dst_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
|
||||
for (size_t i = 0; i < matches_info.matches.size(); ++i)
|
||||
{
|
||||
const DMatch& m = matches_info.matches[i];
|
||||
|
||||
Point2f p = features1.keypoints[m.queryIdx].pt;
|
||||
p.x -= features1.img_size.width * 0.5f;
|
||||
p.y -= features1.img_size.height * 0.5f;
|
||||
src_points.at<Point2f>(0, static_cast<int>(i)) = p;
|
||||
|
||||
p = features2.keypoints[m.trainIdx].pt;
|
||||
p.x -= features2.img_size.width * 0.5f;
|
||||
p.y -= features2.img_size.height * 0.5f;
|
||||
dst_points.at<Point2f>(0, static_cast<int>(i)) = p;
|
||||
}
|
||||
|
||||
// Find pair-wise motion
|
||||
matches_info.H = findHomography(src_points, dst_points, matches_info.inliers_mask, RANSAC);
|
||||
if (matches_info.H.empty() || std::abs(determinant(matches_info.H)) < std::numeric_limits<double>::epsilon())
|
||||
return;
|
||||
|
||||
// Find number of inliers
|
||||
matches_info.num_inliers = 0;
|
||||
for (size_t i = 0; i < matches_info.inliers_mask.size(); ++i)
|
||||
if (matches_info.inliers_mask[i])
|
||||
matches_info.num_inliers++;
|
||||
|
||||
// These coeffs are from paper M. Brown and D. Lowe. "Automatic Panoramic Image Stitching
|
||||
// using Invariant Features"
|
||||
matches_info.confidence = matches_info.num_inliers / (8 + 0.3 * matches_info.matches.size());
|
||||
|
||||
// Set zero confidence to remove matches between too close images, as they don't provide
|
||||
// additional information anyway. The threshold was set experimentally.
|
||||
matches_info.confidence = matches_info.confidence > 3. ? 0. : matches_info.confidence;
|
||||
|
||||
// Check if we should try to refine motion
|
||||
if (matches_info.num_inliers < num_matches_thresh2_)
|
||||
return;
|
||||
|
||||
// Construct point-point correspondences for inliers only
|
||||
src_points.create(1, matches_info.num_inliers, CV_32FC2);
|
||||
dst_points.create(1, matches_info.num_inliers, CV_32FC2);
|
||||
int inlier_idx = 0;
|
||||
for (size_t i = 0; i < matches_info.matches.size(); ++i)
|
||||
{
|
||||
if (!matches_info.inliers_mask[i])
|
||||
continue;
|
||||
|
||||
const DMatch& m = matches_info.matches[i];
|
||||
|
||||
Point2f p = features1.keypoints[m.queryIdx].pt;
|
||||
p.x -= features1.img_size.width * 0.5f;
|
||||
p.y -= features1.img_size.height * 0.5f;
|
||||
src_points.at<Point2f>(0, inlier_idx) = p;
|
||||
|
||||
p = features2.keypoints[m.trainIdx].pt;
|
||||
p.x -= features2.img_size.width * 0.5f;
|
||||
p.y -= features2.img_size.height * 0.5f;
|
||||
dst_points.at<Point2f>(0, inlier_idx) = p;
|
||||
|
||||
inlier_idx++;
|
||||
}
|
||||
|
||||
// Rerun motion estimation on inliers only
|
||||
matches_info.H = findHomography(src_points, dst_points, RANSAC);
|
||||
}
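// Worked example for the confidence heuristic above: 100 tentative matches with 60 RANSAC
// inliers give confidence = 60 / (8 + 0.3 * 100) = 60 / 38 ~ 1.58 and the pair is kept,
// whereas 300 matches with 299 inliers give 299 / 98 ~ 3.05 > 3, so the confidence is reset
// to 0 and the near-identical pair is discarded.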
|
||||
|
||||
void BestOf2NearestMatcher::collectGarbage()
|
||||
{
|
||||
impl_->collectGarbage();
|
||||
}
|
||||
|
||||
|
||||
BestOf2NearestRangeMatcher::BestOf2NearestRangeMatcher(int range_width, bool try_use_gpu, float match_conf, int num_matches_thresh1, int num_matches_thresh2): BestOf2NearestMatcher(try_use_gpu, match_conf, num_matches_thresh1, num_matches_thresh2)
|
||||
{
|
||||
range_width_ = range_width;
|
||||
}
|
||||
|
||||
|
||||
void BestOf2NearestRangeMatcher::operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
|
||||
const UMat &mask)
|
||||
{
|
||||
const int num_images = static_cast<int>(features.size());
|
||||
|
||||
CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.cols == num_images && mask.rows));
|
||||
Mat_<uchar> mask_(mask.getMat(ACCESS_READ));
|
||||
if (mask_.empty())
|
||||
mask_ = Mat::ones(num_images, num_images, CV_8U);
|
||||
|
||||
std::vector<std::pair<int,int> > near_pairs;
|
||||
for (int i = 0; i < num_images - 1; ++i)
|
||||
for (int j = i + 1; j < std::min(num_images, i + 1 + range_width_); ++j)
|
||||
if (features[i].keypoints.size() > 0 && features[j].keypoints.size() > 0 && mask_(i, j))
|
||||
near_pairs.push_back(std::make_pair(i, j));
|
||||
|
||||
pairwise_matches.resize(num_images * num_images);
|
||||
MatchPairsBody body(*this, features, pairwise_matches, near_pairs);
|
||||
|
||||
if (is_thread_safe_)
|
||||
parallel_for_(Range(0, static_cast<int>(near_pairs.size())), body);
|
||||
else
|
||||
body(Range(0, static_cast<int>(near_pairs.size())));
|
||||
LOGLN_CHAT("");
|
||||
}
|
||||
|
||||
|
||||
void AffineBestOf2NearestMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2,
|
||||
MatchesInfo &matches_info)
|
||||
{
|
||||
(*impl_)(features1, features2, matches_info);
|
||||
|
||||
// Check if it makes sense to find transform
|
||||
if (matches_info.matches.size() < static_cast<size_t>(num_matches_thresh1_))
|
||||
return;
|
||||
|
||||
// Construct point-point correspondences for transform estimation
|
||||
Mat src_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
|
||||
Mat dst_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
|
||||
for (size_t i = 0; i < matches_info.matches.size(); ++i)
|
||||
{
|
||||
const cv::DMatch &m = matches_info.matches[i];
|
||||
src_points.at<Point2f>(0, static_cast<int>(i)) = features1.keypoints[m.queryIdx].pt;
|
||||
dst_points.at<Point2f>(0, static_cast<int>(i)) = features2.keypoints[m.trainIdx].pt;
|
||||
}
|
||||
|
||||
// Find pair-wise motion
|
||||
if (full_affine_)
|
||||
matches_info.H = estimateAffine2D(src_points, dst_points, matches_info.inliers_mask);
|
||||
else
|
||||
matches_info.H = estimateAffinePartial2D(src_points, dst_points, matches_info.inliers_mask);
|
||||
|
||||
if (matches_info.H.empty()) {
|
||||
// could not find transformation
|
||||
matches_info.confidence = 0;
|
||||
matches_info.num_inliers = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
// Find number of inliers
|
||||
matches_info.num_inliers = 0;
|
||||
for (size_t i = 0; i < matches_info.inliers_mask.size(); ++i)
|
||||
if (matches_info.inliers_mask[i])
|
||||
matches_info.num_inliers++;
|
||||
|
||||
// These coeffs are from paper M. Brown and D. Lowe. "Automatic Panoramic
|
||||
// Image Stitching using Invariant Features"
|
||||
matches_info.confidence =
|
||||
matches_info.num_inliers / (8 + 0.3 * matches_info.matches.size());
|
||||
|
||||
/* should we remove matches between too close images? */
|
||||
// matches_info.confidence = matches_info.confidence > 3. ? 0. : matches_info.confidence;
|
||||
|
||||
// extend H to represent linear transformation in homogeneous coordinates
|
||||
matches_info.H.push_back(Mat::zeros(1, 3, CV_64F));
|
||||
matches_info.H.at<double>(2, 2) = 1;
|
||||
}
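// After the extension above H is a full 3x3 matrix in homogeneous form,
//     [ a  b  tx ]
//     [ c  d  ty ]
//     [ 0  0   1 ]
// with (a, b, c, d) unconstrained for estimateAffine2D and restricted to a 4-DoF scaled
// rotation for estimateAffinePartial2D, so downstream code can treat it like a homography.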
|
||||
|
||||
|
||||
} // namespace detail
|
||||
} // namespace cv
|
1213
3rdparty/opencv-4.5.4/modules/stitching/src/motion_estimators.cpp
vendored
Normal file
File diff suppressed because it is too large
298
3rdparty/opencv-4.5.4/modules/stitching/src/opencl/multibandblend.cl
vendored
Normal file
@ -0,0 +1,298 @@
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2014, Itseez, Inc, all rights reserved.
|
||||
|
||||
//
|
||||
// Common preprocessors macro
|
||||
//
|
||||
|
||||
//
|
||||
// TODO: Move this common code into "header" file
|
||||
//
|
||||
|
||||
#ifndef NL // New Line: for preprocessor debugging
|
||||
#define NL
|
||||
#endif
|
||||
|
||||
#define REF(x) x
|
||||
#define __CAT(x, y) x##y
|
||||
#define CAT(x, y) __CAT(x, y)
|
||||
|
||||
//
|
||||
// All matrices come with this description ("name" is the name of a matrix):
|
||||
// * name_CN - number of channels (1,2,3,4)
|
||||
// * name_DEPTH - numeric value of CV_MAT_DEPTH(type). See CV_8U, CV_32S, etc macro below.
|
||||
//
|
||||
// Currently we also pass these attributes (to reduce this macro block):
|
||||
// * name_T - datatype (int, float, uchar4, float4)
|
||||
// * name_T1 - datatype for one channel (int, float, uchar).
|
||||
// It is equal to result of "T1(name_T)" macro
|
||||
// * name_TSIZE - CV_ELEM_SIZE(type).
|
||||
// We can't use sizeof(name_T) here, because sizeof(float3) is usually equal to 8, not 6.
|
||||
// * name_T1SIZE - CV_ELEM_SIZE1(type)
|
||||
//
|
||||
|
||||
//
|
||||
// Usage sample:
|
||||
//
|
||||
// #define workType TYPE(float, src_CN)
|
||||
// #define convertToWorkType CONVERT_TO(workType)
|
||||
// #define convertWorkTypeToDstType CONVERT(workType, dst_T)
|
||||
//
|
||||
// __kernel void kernelFn(DECLARE_MAT_ARG(src), DECLARE_MAT_ARG(dst))
|
||||
// {
|
||||
// const int x = get_global_id(0);
|
||||
// const int y = get_global_id(1);
|
||||
//
|
||||
// if (x < srcWidth && y < srcHeight)
|
||||
// {
|
||||
// int src_byteOffset = MAT_BYTE_OFFSET(src, x, y);
|
||||
// int dst_byteOffset = MAT_BYTE_OFFSET(dst, x, y);
|
||||
// workType value = convertToWorkType(LOAD_MAT_AT(src, src_byteOffset));
|
||||
//
|
||||
// ... value processing ...
|
||||
//
|
||||
// STORE_MAT_AT(dst, dst_byteOffset, convertWorkTypeToDstType(value));
|
||||
// }
|
||||
// }
|
||||
//
|
||||
|
||||
#define DECLARE_MAT_ARG(name) \
|
||||
__global uchar* restrict name ## Ptr, \
|
||||
int name ## StepBytes, \
|
||||
int name ## Offset, \
|
||||
int name ## Height, \
|
||||
int name ## Width NL
|
||||
|
||||
#define MAT_BYTE_OFFSET(name, x, y) mad24((y)/* + name ## OffsetY*/, name ## StepBytes, ((x)/* + name ## OffsetX*/) * (int)(name ## _TSIZE) + name ## Offset)
|
||||
#define MAT_RELATIVE_BYTE_OFFSET(name, x, y) mad24(y, name ## StepBytes, (x) * (int)(name ## _TSIZE))
|
||||
|
||||
#define __LOAD_MAT_AT(name, byteOffset) *((const __global name ## _T*)(name ## Ptr + (byteOffset)))
|
||||
#define __vload_CN__(name_cn) vload ## name_cn
|
||||
#define __vload_CN_(name_cn) __vload_CN__(name_cn)
|
||||
#define __vload_CN(name) __vload_CN_(name ## _CN)
|
||||
#define __LOAD_MAT_AT_vload(name, byteOffset) __vload_CN(name)(0, ((const __global name ## _T1*)(name ## Ptr + (byteOffset))))
|
||||
#define __LOAD_MAT_AT_1 __LOAD_MAT_AT
|
||||
#define __LOAD_MAT_AT_2 __LOAD_MAT_AT
|
||||
#define __LOAD_MAT_AT_3 __LOAD_MAT_AT_vload
|
||||
#define __LOAD_MAT_AT_4 __LOAD_MAT_AT
|
||||
#define __LOAD_MAT_AT_CN__(name_cn) __LOAD_MAT_AT_ ## name_cn
|
||||
#define __LOAD_MAT_AT_CN_(name_cn) __LOAD_MAT_AT_CN__(name_cn)
|
||||
#define __LOAD_MAT_AT_CN(name) __LOAD_MAT_AT_CN_(name ## _CN)
|
||||
#define LOAD_MAT_AT(name, byteOffset) __LOAD_MAT_AT_CN(name)(name, byteOffset)
|
||||
|
||||
#define __STORE_MAT_AT(name, byteOffset, v) *((__global name ## _T*)(name ## Ptr + (byteOffset))) = v
|
||||
#define __vstore_CN__(name_cn) vstore ## name_cn
|
||||
#define __vstore_CN_(name_cn) __vstore_CN__(name_cn)
|
||||
#define __vstore_CN(name) __vstore_CN_(name ## _CN)
|
||||
#define __STORE_MAT_AT_vstore(name, byteOffset, v) __vstore_CN(name)(v, 0, ((__global name ## _T1*)(name ## Ptr + (byteOffset))))
|
||||
#define __STORE_MAT_AT_1 __STORE_MAT_AT
|
||||
#define __STORE_MAT_AT_2 __STORE_MAT_AT
|
||||
#define __STORE_MAT_AT_3 __STORE_MAT_AT_vstore
|
||||
#define __STORE_MAT_AT_4 __STORE_MAT_AT
|
||||
#define __STORE_MAT_AT_CN__(name_cn) __STORE_MAT_AT_ ## name_cn
|
||||
#define __STORE_MAT_AT_CN_(name_cn) __STORE_MAT_AT_CN__(name_cn)
|
||||
#define __STORE_MAT_AT_CN(name) __STORE_MAT_AT_CN_(name ## _CN)
|
||||
#define STORE_MAT_AT(name, byteOffset, v) __STORE_MAT_AT_CN(name)(name, byteOffset, v)
|
||||
|
||||
#define T1_uchar uchar
|
||||
#define T1_uchar2 uchar
|
||||
#define T1_uchar3 uchar
|
||||
#define T1_uchar4 uchar
|
||||
#define T1_char char
|
||||
#define T1_char2 char
|
||||
#define T1_char3 char
|
||||
#define T1_char4 char
|
||||
#define T1_ushort ushort
|
||||
#define T1_ushort2 ushort
|
||||
#define T1_ushort3 ushort
|
||||
#define T1_ushort4 ushort
|
||||
#define T1_short short
|
||||
#define T1_short2 short
|
||||
#define T1_short3 short
|
||||
#define T1_short4 short
|
||||
#define T1_int int
|
||||
#define T1_int2 int
|
||||
#define T1_int3 int
|
||||
#define T1_int4 int
|
||||
#define T1_float float
|
||||
#define T1_float2 float
|
||||
#define T1_float3 float
|
||||
#define T1_float4 float
|
||||
#define T1_double double
|
||||
#define T1_double2 double
|
||||
#define T1_double3 double
|
||||
#define T1_double4 double
|
||||
#define T1(type) REF(CAT(T1_, REF(type)))
|
||||
|
||||
#define uchar1 uchar
|
||||
#define char1 char
|
||||
#define short1 short
|
||||
#define ushort1 ushort
|
||||
#define int1 int
|
||||
#define float1 float
|
||||
#define double1 double
|
||||
#define TYPE(type, cn) REF(CAT(REF(type), REF(cn)))
|
||||
|
||||
#define __CONVERT_MODE_uchar_uchar __NO_CONVERT
|
||||
#define __CONVERT_MODE_uchar_char __CONVERT_sat
|
||||
#define __CONVERT_MODE_uchar_ushort __CONVERT
|
||||
#define __CONVERT_MODE_uchar_short __CONVERT
|
||||
#define __CONVERT_MODE_uchar_int __CONVERT
|
||||
#define __CONVERT_MODE_uchar_float __CONVERT
|
||||
#define __CONVERT_MODE_uchar_double __CONVERT
|
||||
#define __CONVERT_MODE_char_uchar __CONVERT_sat
|
||||
#define __CONVERT_MODE_char_char __NO_CONVERT
|
||||
#define __CONVERT_MODE_char_ushort __CONVERT_sat
|
||||
#define __CONVERT_MODE_char_short __CONVERT
|
||||
#define __CONVERT_MODE_char_int __CONVERT
|
||||
#define __CONVERT_MODE_char_float __CONVERT
|
||||
#define __CONVERT_MODE_char_double __CONVERT
|
||||
#define __CONVERT_MODE_ushort_uchar __CONVERT_sat
|
||||
#define __CONVERT_MODE_ushort_char __CONVERT_sat
|
||||
#define __CONVERT_MODE_ushort_ushort __NO_CONVERT
|
||||
#define __CONVERT_MODE_ushort_short __CONVERT_sat
|
||||
#define __CONVERT_MODE_ushort_int __CONVERT
|
||||
#define __CONVERT_MODE_ushort_float __CONVERT
|
||||
#define __CONVERT_MODE_ushort_double __CONVERT
|
||||
#define __CONVERT_MODE_short_uchar __CONVERT_sat
|
||||
#define __CONVERT_MODE_short_char __CONVERT_sat
|
||||
#define __CONVERT_MODE_short_ushort __CONVERT_sat
|
||||
#define __CONVERT_MODE_short_short __NO_CONVERT
|
||||
#define __CONVERT_MODE_short_int __CONVERT
|
||||
#define __CONVERT_MODE_short_float __CONVERT
|
||||
#define __CONVERT_MODE_short_double __CONVERT
|
||||
#define __CONVERT_MODE_int_uchar __CONVERT_sat
|
||||
#define __CONVERT_MODE_int_char __CONVERT_sat
|
||||
#define __CONVERT_MODE_int_ushort __CONVERT_sat
|
||||
#define __CONVERT_MODE_int_short __CONVERT_sat
|
||||
#define __CONVERT_MODE_int_int __NO_CONVERT
|
||||
#define __CONVERT_MODE_int_float __CONVERT
|
||||
#define __CONVERT_MODE_int_double __CONVERT
|
||||
#define __CONVERT_MODE_float_uchar __CONVERT_sat_rte
|
||||
#define __CONVERT_MODE_float_char __CONVERT_sat_rte
|
||||
#define __CONVERT_MODE_float_ushort __CONVERT_sat_rte
|
||||
#define __CONVERT_MODE_float_short __CONVERT_sat_rte
|
||||
#define __CONVERT_MODE_float_int __CONVERT_rte
|
||||
#define __CONVERT_MODE_float_float __NO_CONVERT
|
||||
#define __CONVERT_MODE_float_double __CONVERT
|
||||
#define __CONVERT_MODE_double_uchar __CONVERT_sat_rte
|
||||
#define __CONVERT_MODE_double_char __CONVERT_sat_rte
|
||||
#define __CONVERT_MODE_double_ushort __CONVERT_sat_rte
|
||||
#define __CONVERT_MODE_double_short __CONVERT_sat_rte
|
||||
#define __CONVERT_MODE_double_int __CONVERT_rte
|
||||
#define __CONVERT_MODE_double_float __CONVERT
|
||||
#define __CONVERT_MODE_double_double __NO_CONVERT
|
||||
#define __CONVERT_MODE(srcType, dstType) CAT(__CONVERT_MODE_, CAT(REF(T1(srcType)), CAT(_, REF(T1(dstType)))))
|
||||
|
||||
#define __ROUND_MODE__NO_CONVERT
|
||||
#define __ROUND_MODE__CONVERT // nothing
|
||||
#define __ROUND_MODE__CONVERT_rte _rte
|
||||
#define __ROUND_MODE__CONVERT_sat _sat
|
||||
#define __ROUND_MODE__CONVERT_sat_rte _sat_rte
|
||||
#define ROUND_MODE(srcType, dstType) CAT(__ROUND_MODE_, __CONVERT_MODE(srcType, dstType))
|
||||
|
||||
#define __CONVERT_ROUND(dstType, roundMode) CAT(CAT(convert_, REF(dstType)), roundMode)
|
||||
#define __NO_CONVERT(dstType) // nothing
|
||||
#define __CONVERT(dstType) __CONVERT_ROUND(dstType,)
|
||||
#define __CONVERT_rte(dstType) __CONVERT_ROUND(dstType,_rte)
|
||||
#define __CONVERT_sat(dstType) __CONVERT_ROUND(dstType,_sat)
|
||||
#define __CONVERT_sat_rte(dstType) __CONVERT_ROUND(dstType,_sat_rte)
|
||||
#define CONVERT(srcType, dstType) REF(__CONVERT_MODE(srcType,dstType))(dstType)
|
||||
#define CONVERT_TO(dstType) __CONVERT_ROUND(dstType,)
|
||||
|
||||
// OpenCV depths
|
||||
#define CV_8U 0
|
||||
#define CV_8S 1
|
||||
#define CV_16U 2
|
||||
#define CV_16S 3
|
||||
#define CV_32S 4
|
||||
#define CV_32F 5
|
||||
#define CV_64F 6
|
||||
|
||||
//
|
||||
// End of common preprocessors macro
|
||||
//
|
||||
|
||||
|
||||
|
||||
#if defined(DEFINE_feed)
|
||||
|
||||
#define workType TYPE(weight_T1, src_CN)
|
||||
|
||||
#if src_DEPTH == 3 && src_CN == 3
|
||||
#define convertSrcToWorkType convert_float3
|
||||
#else
|
||||
#define convertSrcToWorkType CONVERT_TO(workType)
|
||||
#endif
|
||||
|
||||
#if dst_DEPTH == 3 && dst_CN == 3
|
||||
#define convertToDstType convert_short3
|
||||
#else
|
||||
#define convertToDstType CONVERT_TO(dst_T) // sat_rte provides incompatible results with CPU path
|
||||
#endif
|
||||
|
||||
__kernel void feed(
|
||||
DECLARE_MAT_ARG(src), DECLARE_MAT_ARG(weight),
|
||||
DECLARE_MAT_ARG(dst), DECLARE_MAT_ARG(dstWeight)
|
||||
)
|
||||
{
|
||||
const int x = get_global_id(0);
|
||||
const int y = get_global_id(1);
|
||||
|
||||
if (x < srcWidth && y < srcHeight)
|
||||
{
|
||||
int src_byteOffset = MAT_BYTE_OFFSET(src, x, y);
|
||||
int weight_byteOffset = MAT_BYTE_OFFSET(weight, x, y);
|
||||
int dst_byteOffset = MAT_BYTE_OFFSET(dst, x, y);
|
||||
int dstWeight_byteOffset = MAT_BYTE_OFFSET(dstWeight, x, y);
|
||||
|
||||
weight_T w = LOAD_MAT_AT(weight, weight_byteOffset);
|
||||
workType src_value = convertSrcToWorkType(LOAD_MAT_AT(src, src_byteOffset));
|
||||
STORE_MAT_AT(dst, dst_byteOffset, LOAD_MAT_AT(dst, dst_byteOffset) + convertToDstType(src_value * w));
|
||||
STORE_MAT_AT(dstWeight, dstWeight_byteOffset, LOAD_MAT_AT(dstWeight, dstWeight_byteOffset) + w);
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(DEFINE_normalizeUsingWeightMap)
|
||||
|
||||
#if mat_DEPTH == 3 && mat_CN == 3
|
||||
#define workType float3
|
||||
#define convertSrcToWorkType convert_float3
|
||||
#define convertToDstType convert_short3
|
||||
#else
|
||||
#define workType TYPE(weight_T1, mat_CN)
|
||||
#define convertSrcToWorkType CONVERT_TO(workType)
|
||||
#define convertToDstType CONVERT_TO(mat_T) // sat_rte provides incompatible results with CPU path
|
||||
#endif
|
||||
|
||||
#if weight_DEPTH >= CV_32F
|
||||
#define WEIGHT_EPS 1e-5f
|
||||
#else
|
||||
#define WEIGHT_EPS 0
|
||||
#endif
|
||||
|
||||
__kernel void normalizeUsingWeightMap(
|
||||
DECLARE_MAT_ARG(mat), DECLARE_MAT_ARG(weight)
|
||||
)
|
||||
{
|
||||
const int x = get_global_id(0);
|
||||
const int y = get_global_id(1);
|
||||
|
||||
if (x < matWidth && y < matHeight)
|
||||
{
|
||||
int mat_byteOffset = MAT_BYTE_OFFSET(mat, x, y);
|
||||
int weight_byteOffset = MAT_BYTE_OFFSET(weight, x, y);
|
||||
|
||||
weight_T w = LOAD_MAT_AT(weight, weight_byteOffset);
|
||||
workType value = convertSrcToWorkType(LOAD_MAT_AT(mat, mat_byteOffset));
|
||||
value = value / (w + WEIGHT_EPS);
|
||||
STORE_MAT_AT(mat, mat_byteOffset, convertToDstType(value));
|
||||
}
|
||||
}
|
||||
|
||||
#endif
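// Expansion example (illustrative): compiling this kernel with mat_CN == 3, mat_DEPTH == CV_16S (3)
// and weight_T1 == float resolves workType to float3, convertSrcToWorkType to convert_float3 and
// convertToDstType to convert_short3, while weight_DEPTH >= CV_32F selects WEIGHT_EPS = 1e-5f;
// each short3 pixel is therefore normalized in float and converted back without saturation.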
169
3rdparty/opencv-4.5.4/modules/stitching/src/opencl/warpers.cl
vendored
Normal file
@ -0,0 +1,169 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
|
||||
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// @Authors
|
||||
// Peng Xiao, pengxiao@multicorewareinc.com
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors as is and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
__kernel void buildWarpPlaneMaps(__global uchar * xmapptr, int xmap_step, int xmap_offset,
|
||||
__global uchar * ymapptr, int ymap_step, int ymap_offset, int rows, int cols,
|
||||
__constant float * ck_rinv, __constant float * ct,
|
||||
int tl_u, int tl_v, float scale, int rowsPerWI)
|
||||
{
|
||||
int du = get_global_id(0);
|
||||
int dv0 = get_global_id(1) * rowsPerWI;
|
||||
|
||||
if (du < cols)
|
||||
{
|
||||
int xmap_index = mad24(dv0, xmap_step, mad24(du, (int)sizeof(float), xmap_offset));
|
||||
int ymap_index = mad24(dv0, ymap_step, mad24(du, (int)sizeof(float), ymap_offset));
|
||||
|
||||
float u = tl_u + du;
|
||||
float x_ = fma(u, scale, -ct[0]);
|
||||
float ct1 = 1 - ct[2];
|
||||
|
||||
for (int dv = dv0, dv1 = min(rows, dv0 + rowsPerWI); dv < dv1; ++dv, xmap_index += xmap_step,
|
||||
ymap_index += ymap_step)
|
||||
{
|
||||
__global float * xmap = (__global float *)(xmapptr + xmap_index);
|
||||
__global float * ymap = (__global float *)(ymapptr + ymap_index);
|
||||
|
||||
float v = tl_v + dv;
|
||||
float y_ = fma(v, scale, -ct[1]);
|
||||
|
||||
float x = fma(ck_rinv[0], x_, fma(ck_rinv[1], y_, ck_rinv[2] * ct1));
|
||||
float y = fma(ck_rinv[3], x_, fma(ck_rinv[4], y_, ck_rinv[5] * ct1));
|
||||
float z = fma(ck_rinv[6], x_, fma(ck_rinv[7], y_, ck_rinv[8] * ct1));
|
||||
|
||||
if (z != 0)
|
||||
x /= z, y /= z;
|
||||
else
|
||||
x = y = -1;
|
||||
|
||||
xmap[0] = x;
|
||||
ymap[0] = y;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
__kernel void buildWarpCylindricalMaps(__global uchar * xmapptr, int xmap_step, int xmap_offset,
|
||||
__global uchar * ymapptr, int ymap_step, int ymap_offset, int rows, int cols,
|
||||
__constant float * ck_rinv, int tl_u, int tl_v, float scale, int rowsPerWI)
|
||||
{
|
||||
int du = get_global_id(0);
|
||||
int dv0 = get_global_id(1) * rowsPerWI;
|
||||
|
||||
if (du < cols)
|
||||
{
|
||||
int xmap_index = mad24(dv0, xmap_step, mad24(du, (int)sizeof(float), xmap_offset));
|
||||
int ymap_index = mad24(dv0, ymap_step, mad24(du, (int)sizeof(float), ymap_offset));
|
||||
|
||||
float u = (tl_u + du) * scale;
|
||||
float x_, z_;
|
||||
x_ = sincos(u, &z_);
|
||||
|
||||
for (int dv = dv0, dv1 = min(rows, dv0 + rowsPerWI); dv < dv1; ++dv, xmap_index += xmap_step,
|
||||
ymap_index += ymap_step)
|
||||
{
|
||||
__global float * xmap = (__global float *)(xmapptr + xmap_index);
|
||||
__global float * ymap = (__global float *)(ymapptr + ymap_index);
|
||||
|
||||
float y_ = (tl_v + dv) * scale;
|
||||
|
||||
float x, y, z;
|
||||
x = fma(ck_rinv[0], x_, fma(ck_rinv[1], y_, ck_rinv[2] * z_));
|
||||
y = fma(ck_rinv[3], x_, fma(ck_rinv[4], y_, ck_rinv[5] * z_));
|
||||
z = fma(ck_rinv[6], x_, fma(ck_rinv[7], y_, ck_rinv[8] * z_));
|
||||
|
||||
if (z > 0)
|
||||
x /= z, y /= z;
|
||||
else
|
||||
x = y = -1;
|
||||
|
||||
xmap[0] = x;
|
||||
ymap[0] = y;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
__kernel void buildWarpSphericalMaps(__global uchar * xmapptr, int xmap_step, int xmap_offset,
|
||||
__global uchar * ymapptr, int ymap_step, int ymap_offset, int rows, int cols,
|
||||
__constant float * ck_rinv, int tl_u, int tl_v, float scale, int rowsPerWI)
|
||||
{
|
||||
int du = get_global_id(0);
|
||||
int dv0 = get_global_id(1) * rowsPerWI;
|
||||
|
||||
if (du < cols)
|
||||
{
|
||||
int xmap_index = mad24(dv0, xmap_step, mad24(du, (int)sizeof(float), xmap_offset));
|
||||
int ymap_index = mad24(dv0, ymap_step, mad24(du, (int)sizeof(float), ymap_offset));
|
||||
|
||||
float u = (tl_u + du) * scale;
|
||||
float cosu, sinu = sincos(u, &cosu);
|
||||
|
||||
for (int dv = dv0, dv1 = min(rows, dv0 + rowsPerWI); dv < dv1; ++dv, xmap_index += xmap_step,
|
||||
ymap_index += ymap_step)
|
||||
{
|
||||
__global float * xmap = (__global float *)(xmapptr + xmap_index);
|
||||
__global float * ymap = (__global float *)(ymapptr + ymap_index);
|
||||
|
||||
float v = (tl_v + dv) * scale;
|
||||
|
||||
float cosv, sinv = sincos(v, &cosv);
|
||||
float x_ = sinv * sinu;
|
||||
float y_ = -cosv;
|
||||
float z_ = sinv * cosu;
|
||||
|
||||
float x, y, z;
|
||||
x = fma(ck_rinv[0], x_, fma(ck_rinv[1], y_, ck_rinv[2] * z_));
|
||||
y = fma(ck_rinv[3], x_, fma(ck_rinv[4], y_, ck_rinv[5] * z_));
|
||||
z = fma(ck_rinv[6], x_, fma(ck_rinv[7], y_, ck_rinv[8] * z_));
|
||||
|
||||
if (z > 0)
|
||||
x /= z, y /= z;
|
||||
else
|
||||
x = y = -1;
|
||||
|
||||
xmap[0] = x;
|
||||
ymap[0] = y;
|
||||
}
|
||||
}
|
||||
}
|
94
3rdparty/opencv-4.5.4/modules/stitching/src/precomp.hpp
vendored
Normal file
@ -0,0 +1,94 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifndef __OPENCV_STITCHING_PRECOMP_H__
|
||||
#define __OPENCV_STITCHING_PRECOMP_H__
|
||||
|
||||
#include "opencv2/opencv_modules.hpp"
|
||||
|
||||
#include <vector>
|
||||
#include <algorithm>
|
||||
#include <utility>
|
||||
#include <set>
|
||||
#include <functional>
|
||||
#include <sstream>
|
||||
#include <iostream>
|
||||
#include <cmath>
|
||||
#include "opencv2/core.hpp"
|
||||
#include "opencv2/core/ocl.hpp"
|
||||
#include "opencv2/core/utility.hpp"
|
||||
#include "opencv2/stitching.hpp"
|
||||
#include "opencv2/stitching/detail/autocalib.hpp"
|
||||
#include "opencv2/stitching/detail/blenders.hpp"
|
||||
#include "opencv2/stitching/detail/timelapsers.hpp"
|
||||
#include "opencv2/stitching/detail/camera.hpp"
|
||||
#include "opencv2/stitching/detail/exposure_compensate.hpp"
|
||||
#include "opencv2/stitching/detail/matchers.hpp"
|
||||
#include "opencv2/stitching/detail/motion_estimators.hpp"
|
||||
#include "opencv2/stitching/detail/seam_finders.hpp"
|
||||
#include "opencv2/stitching/detail/util.hpp"
|
||||
#include "opencv2/stitching/detail/warpers.hpp"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/features2d.hpp"
|
||||
#include "opencv2/calib3d.hpp"
|
||||
|
||||
#ifdef HAVE_OPENCV_CUDAARITHM
|
||||
# include "opencv2/cudaarithm.hpp"
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_OPENCV_CUDAWARPING
|
||||
# include "opencv2/cudawarping.hpp"
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_OPENCV_CUDAFEATURES2D
|
||||
# include "opencv2/cudafeatures2d.hpp"
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_OPENCV_CUDALEGACY
|
||||
# include "opencv2/cudalegacy.hpp"
|
||||
#endif
|
||||
|
||||
#include "opencv2/core/private.hpp"
|
||||
|
||||
#include "util_log.hpp"
|
||||
|
||||
#endif
|
1717
3rdparty/opencv-4.5.4/modules/stitching/src/seam_finders.cpp
vendored
Normal file
File diff suppressed because it is too large
654
3rdparty/opencv-4.5.4/modules/stitching/src/stitcher.cpp
vendored
Normal file
@ -0,0 +1,654 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
namespace cv {
|
||||
|
||||
#if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1900/*MSVS 2015*/)
|
||||
// Stitcher::ORIG_RESOL is initialized in stitching.hpp.
|
||||
#else
|
||||
const double Stitcher::ORIG_RESOL = -1.0;
|
||||
#endif
|
||||
|
||||
Ptr<Stitcher> Stitcher::create(Mode mode)
|
||||
{
|
||||
Ptr<Stitcher> stitcher = makePtr<Stitcher>();
|
||||
|
||||
stitcher->setRegistrationResol(0.6);
|
||||
stitcher->setSeamEstimationResol(0.1);
|
||||
stitcher->setCompositingResol(ORIG_RESOL);
|
||||
stitcher->setPanoConfidenceThresh(1);
|
||||
stitcher->setSeamFinder(makePtr<detail::GraphCutSeamFinder>(detail::GraphCutSeamFinderBase::COST_COLOR));
|
||||
stitcher->setBlender(makePtr<detail::MultiBandBlender>(false));
|
||||
stitcher->setFeaturesFinder(ORB::create());
|
||||
stitcher->setInterpolationFlags(INTER_LINEAR);
|
||||
|
||||
stitcher->work_scale_ = 1;
|
||||
stitcher->seam_scale_ = 1;
|
||||
stitcher->seam_work_aspect_ = 1;
|
||||
stitcher->warped_image_scale_ = 1;
|
||||
|
||||
switch (mode)
|
||||
{
|
||||
case PANORAMA: // PANORAMA is the default
|
||||
// mostly already setup
|
||||
stitcher->setEstimator(makePtr<detail::HomographyBasedEstimator>());
|
||||
stitcher->setWaveCorrection(true);
|
||||
stitcher->setWaveCorrectKind(detail::WAVE_CORRECT_HORIZ);
|
||||
stitcher->setFeaturesMatcher(makePtr<detail::BestOf2NearestMatcher>(false));
|
||||
stitcher->setBundleAdjuster(makePtr<detail::BundleAdjusterRay>());
|
||||
stitcher->setWarper(makePtr<SphericalWarper>());
|
||||
stitcher->setExposureCompensator(makePtr<detail::BlocksGainCompensator>());
|
||||
break;
|
||||
|
||||
case SCANS:
|
||||
stitcher->setEstimator(makePtr<detail::AffineBasedEstimator>());
|
||||
stitcher->setWaveCorrection(false);
|
||||
stitcher->setFeaturesMatcher(makePtr<detail::AffineBestOf2NearestMatcher>(false, false));
|
||||
stitcher->setBundleAdjuster(makePtr<detail::BundleAdjusterAffinePartial>());
|
||||
stitcher->setWarper(makePtr<AffineWarper>());
|
||||
stitcher->setExposureCompensator(makePtr<detail::NoExposureCompensator>());
|
||||
break;
|
||||
|
||||
default:
|
||||
CV_Error(Error::StsBadArg, "Invalid stitching mode. Must be one of Stitcher::Mode");
|
||||
break;
|
||||
}
|
||||
|
||||
return stitcher;
|
||||
}
|
||||
|
||||
|
||||
Stitcher::Status Stitcher::estimateTransform(InputArrayOfArrays images, InputArrayOfArrays masks)
|
||||
{
|
||||
CV_INSTRUMENT_REGION();
|
||||
|
||||
images.getUMatVector(imgs_);
|
||||
masks.getUMatVector(masks_);
|
||||
|
||||
Status status;
|
||||
|
||||
if ((status = matchImages()) != OK)
|
||||
return status;
|
||||
|
||||
if ((status = estimateCameraParams()) != OK)
|
||||
return status;
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
|
||||
Stitcher::Status Stitcher::composePanorama(OutputArray pano)
|
||||
{
|
||||
CV_INSTRUMENT_REGION();
|
||||
|
||||
return composePanorama(std::vector<UMat>(), pano);
|
||||
}
|
||||
|
||||
|
||||
Stitcher::Status Stitcher::composePanorama(InputArrayOfArrays images, OutputArray pano)
|
||||
{
|
||||
CV_INSTRUMENT_REGION();
|
||||
|
||||
LOGLN("Warping images (auxiliary)... ");
|
||||
|
||||
std::vector<UMat> imgs;
|
||||
images.getUMatVector(imgs);
|
||||
if (!imgs.empty())
|
||||
{
|
||||
CV_Assert(imgs.size() == imgs_.size());
|
||||
|
||||
UMat img;
|
||||
seam_est_imgs_.resize(imgs.size());
|
||||
|
||||
for (size_t i = 0; i < imgs.size(); ++i)
|
||||
{
|
||||
imgs_[i] = imgs[i];
|
||||
resize(imgs[i], img, Size(), seam_scale_, seam_scale_, INTER_LINEAR_EXACT);
|
||||
seam_est_imgs_[i] = img.clone();
|
||||
}
|
||||
|
||||
std::vector<UMat> seam_est_imgs_subset;
|
||||
std::vector<UMat> imgs_subset;
|
||||
|
||||
for (size_t i = 0; i < indices_.size(); ++i)
|
||||
{
|
||||
imgs_subset.push_back(imgs_[indices_[i]]);
|
||||
seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
|
||||
}
|
||||
|
||||
seam_est_imgs_ = seam_est_imgs_subset;
|
||||
imgs_ = imgs_subset;
|
||||
}
|
||||
|
||||
UMat pano_;
|
||||
|
||||
#if ENABLE_LOG
|
||||
int64 t = getTickCount();
|
||||
#endif
|
||||
|
||||
std::vector<Point> corners(imgs_.size());
|
||||
std::vector<UMat> masks_warped(imgs_.size());
|
||||
std::vector<UMat> images_warped(imgs_.size());
|
||||
std::vector<Size> sizes(imgs_.size());
|
||||
std::vector<UMat> masks(imgs_.size());
|
||||
|
||||
// Prepare image masks
|
||||
for (size_t i = 0; i < imgs_.size(); ++i)
|
||||
{
|
||||
masks[i].create(seam_est_imgs_[i].size(), CV_8U);
|
||||
masks[i].setTo(Scalar::all(255));
|
||||
}
|
||||
|
||||
// Warp images and their masks
|
||||
Ptr<detail::RotationWarper> w = warper_->create(float(warped_image_scale_ * seam_work_aspect_));
|
||||
for (size_t i = 0; i < imgs_.size(); ++i)
|
||||
{
|
||||
Mat_<float> K;
|
||||
cameras_[i].K().convertTo(K, CV_32F);
|
||||
K(0,0) *= (float)seam_work_aspect_;
|
||||
K(0,2) *= (float)seam_work_aspect_;
|
||||
K(1,1) *= (float)seam_work_aspect_;
|
||||
K(1,2) *= (float)seam_work_aspect_;
|
||||
|
||||
corners[i] = w->warp(seam_est_imgs_[i], K, cameras_[i].R, interp_flags_, BORDER_REFLECT, images_warped[i]);
|
||||
sizes[i] = images_warped[i].size();
|
||||
|
||||
w->warp(masks[i], K, cameras_[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
|
||||
}
|
||||
|
||||
|
||||
LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
|
||||
|
||||
// Compensate exposure before finding seams
|
||||
exposure_comp_->feed(corners, images_warped, masks_warped);
|
||||
for (size_t i = 0; i < imgs_.size(); ++i)
|
||||
exposure_comp_->apply(int(i), corners[i], images_warped[i], masks_warped[i]);
|
||||
|
||||
// Find seams
|
||||
std::vector<UMat> images_warped_f(imgs_.size());
|
||||
for (size_t i = 0; i < imgs_.size(); ++i)
|
||||
images_warped[i].convertTo(images_warped_f[i], CV_32F);
|
||||
seam_finder_->find(images_warped_f, corners, masks_warped);
|
||||
|
||||
// Release unused memory
|
||||
seam_est_imgs_.clear();
|
||||
images_warped.clear();
|
||||
images_warped_f.clear();
|
||||
masks.clear();
|
||||
|
||||
LOGLN("Compositing...");
|
||||
#if ENABLE_LOG
|
||||
t = getTickCount();
|
||||
#endif
|
||||
|
||||
UMat img_warped, img_warped_s;
|
||||
UMat dilated_mask, seam_mask, mask, mask_warped;
|
||||
|
||||
//double compose_seam_aspect = 1;
|
||||
double compose_work_aspect = 1;
|
||||
bool is_blender_prepared = false;
|
||||
|
||||
double compose_scale = 1;
|
||||
bool is_compose_scale_set = false;
|
||||
|
||||
std::vector<detail::CameraParams> cameras_scaled(cameras_);
|
||||
|
||||
UMat full_img, img;
|
||||
for (size_t img_idx = 0; img_idx < imgs_.size(); ++img_idx)
|
||||
{
|
||||
LOGLN("Compositing image #" << indices_[img_idx] + 1);
|
||||
#if ENABLE_LOG
|
||||
int64 compositing_t = getTickCount();
|
||||
#endif
|
||||
|
||||
// Read image and resize it if necessary
|
||||
full_img = imgs_[img_idx];
|
||||
if (!is_compose_scale_set)
|
||||
{
|
||||
if (compose_resol_ > 0)
|
||||
compose_scale = std::min(1.0, std::sqrt(compose_resol_ * 1e6 / full_img.size().area()));
|
||||
is_compose_scale_set = true;
|
||||
|
||||
// Compute relative scales
|
||||
//compose_seam_aspect = compose_scale / seam_scale_;
|
||||
compose_work_aspect = compose_scale / work_scale_;
|
||||
|
||||
// Update warped image scale
|
||||
float warp_scale = static_cast<float>(warped_image_scale_ * compose_work_aspect);
|
||||
w = warper_->create(warp_scale);
|
||||
|
||||
// Update corners and sizes
|
||||
for (size_t i = 0; i < imgs_.size(); ++i)
|
||||
{
|
||||
// Update intrinsics
|
||||
cameras_scaled[i].ppx *= compose_work_aspect;
|
||||
cameras_scaled[i].ppy *= compose_work_aspect;
|
||||
cameras_scaled[i].focal *= compose_work_aspect;
|
||||
|
||||
// Update corner and size
|
||||
Size sz = full_img_sizes_[i];
|
||||
if (std::abs(compose_scale - 1) > 1e-1)
|
||||
{
|
||||
sz.width = cvRound(full_img_sizes_[i].width * compose_scale);
|
||||
sz.height = cvRound(full_img_sizes_[i].height * compose_scale);
|
||||
}
|
||||
|
||||
Mat K;
|
||||
cameras_scaled[i].K().convertTo(K, CV_32F);
|
||||
Rect roi = w->warpRoi(sz, K, cameras_scaled[i].R);
|
||||
corners[i] = roi.tl();
|
||||
sizes[i] = roi.size();
|
||||
}
|
||||
}
|
||||
if (std::abs(compose_scale - 1) > 1e-1)
|
||||
{
|
||||
#if ENABLE_LOG
|
||||
int64 resize_t = getTickCount();
|
||||
#endif
|
||||
resize(full_img, img, Size(), compose_scale, compose_scale, INTER_LINEAR_EXACT);
|
||||
LOGLN(" resize time: " << ((getTickCount() - resize_t) / getTickFrequency()) << " sec");
|
||||
}
|
||||
else
|
||||
img = full_img;
|
||||
full_img.release();
|
||||
Size img_size = img.size();
|
||||
|
||||
LOGLN(" after resize time: " << ((getTickCount() - compositing_t) / getTickFrequency()) << " sec");
|
||||
|
||||
Mat K;
|
||||
cameras_scaled[img_idx].K().convertTo(K, CV_32F);
|
||||
|
||||
#if ENABLE_LOG
|
||||
int64 pt = getTickCount();
|
||||
#endif
|
||||
// Warp the current image
|
||||
w->warp(img, K, cameras_[img_idx].R, interp_flags_, BORDER_REFLECT, img_warped);
|
||||
LOGLN(" warp the current image: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
|
||||
#if ENABLE_LOG
|
||||
pt = getTickCount();
|
||||
#endif
|
||||
|
||||
// Warp the current image mask
|
||||
mask.create(img_size, CV_8U);
|
||||
mask.setTo(Scalar::all(255));
|
||||
w->warp(mask, K, cameras_[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);
|
||||
LOGLN(" warp the current image mask: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
|
||||
#if ENABLE_LOG
|
||||
pt = getTickCount();
|
||||
#endif
|
||||
|
||||
// Compensate exposure
|
||||
exposure_comp_->apply((int)img_idx, corners[img_idx], img_warped, mask_warped);
|
||||
LOGLN(" compensate exposure: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
|
||||
#if ENABLE_LOG
|
||||
pt = getTickCount();
|
||||
#endif
|
||||
|
||||
img_warped.convertTo(img_warped_s, CV_16S);
|
||||
img_warped.release();
|
||||
img.release();
|
||||
mask.release();
|
||||
|
||||
// Make sure seam mask has proper size
|
||||
dilate(masks_warped[img_idx], dilated_mask, Mat());
|
||||
resize(dilated_mask, seam_mask, mask_warped.size(), 0, 0, INTER_LINEAR_EXACT);
|
||||
|
||||
bitwise_and(seam_mask, mask_warped, mask_warped);
|
||||
|
||||
LOGLN(" other: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
|
||||
#if ENABLE_LOG
|
||||
pt = getTickCount();
|
||||
#endif
|
||||
|
||||
if (!is_blender_prepared)
|
||||
{
|
||||
blender_->prepare(corners, sizes);
|
||||
is_blender_prepared = true;
|
||||
}
|
||||
|
||||
LOGLN(" other2: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
|
||||
|
||||
LOGLN(" feed...");
|
||||
#if ENABLE_LOG
|
||||
int64 feed_t = getTickCount();
|
||||
#endif
|
||||
// Blend the current image
|
||||
blender_->feed(img_warped_s, mask_warped, corners[img_idx]);
|
||||
LOGLN(" feed time: " << ((getTickCount() - feed_t) / getTickFrequency()) << " sec");
|
||||
LOGLN("Compositing ## time: " << ((getTickCount() - compositing_t) / getTickFrequency()) << " sec");
|
||||
}
|
||||
|
||||
#if ENABLE_LOG
|
||||
int64 blend_t = getTickCount();
|
||||
#endif
|
||||
UMat result;
|
||||
blender_->blend(result, result_mask_);
|
||||
LOGLN("blend time: " << ((getTickCount() - blend_t) / getTickFrequency()) << " sec");
|
||||
|
||||
LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
|
||||
|
||||
// Preliminary result is in CV_16SC3 format, but all values are in [0,255] range,
|
||||
// so convert it to avoid user confusing
|
||||
result.convertTo(pano, CV_8U);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
|
||||
Stitcher::Status Stitcher::stitch(InputArrayOfArrays images, OutputArray pano)
|
||||
{
|
||||
return stitch(images, noArray(), pano);
|
||||
}
|
||||
|
||||
|
||||
Stitcher::Status Stitcher::stitch(InputArrayOfArrays images, InputArrayOfArrays masks, OutputArray pano)
|
||||
{
|
||||
CV_INSTRUMENT_REGION();
|
||||
|
||||
Status status = estimateTransform(images, masks);
|
||||
if (status != OK)
|
||||
return status;
|
||||
return composePanorama(pano);
|
||||
}
|
||||
|
||||
|
||||
Stitcher::Status Stitcher::matchImages()
|
||||
{
|
||||
if ((int)imgs_.size() < 2)
|
||||
{
|
||||
LOGLN("Need more images");
|
||||
return ERR_NEED_MORE_IMGS;
|
||||
}
|
||||
|
||||
work_scale_ = 1;
|
||||
seam_work_aspect_ = 1;
|
||||
seam_scale_ = 1;
|
||||
bool is_work_scale_set = false;
|
||||
bool is_seam_scale_set = false;
|
||||
features_.resize(imgs_.size());
|
||||
seam_est_imgs_.resize(imgs_.size());
|
||||
full_img_sizes_.resize(imgs_.size());
|
||||
|
||||
LOGLN("Finding features...");
|
||||
#if ENABLE_LOG
|
||||
int64 t = getTickCount();
|
||||
#endif
|
||||
|
||||
std::vector<UMat> feature_find_imgs(imgs_.size());
|
||||
std::vector<UMat> feature_find_masks(masks_.size());
|
||||
|
||||
for (size_t i = 0; i < imgs_.size(); ++i)
|
||||
{
|
||||
full_img_sizes_[i] = imgs_[i].size();
|
||||
if (registr_resol_ < 0)
|
||||
{
|
||||
feature_find_imgs[i] = imgs_[i];
|
||||
work_scale_ = 1;
|
||||
is_work_scale_set = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (!is_work_scale_set)
|
||||
{
|
||||
work_scale_ = std::min(1.0, std::sqrt(registr_resol_ * 1e6 / full_img_sizes_[i].area()));
|
||||
is_work_scale_set = true;
|
||||
}
|
||||
resize(imgs_[i], feature_find_imgs[i], Size(), work_scale_, work_scale_, INTER_LINEAR_EXACT);
|
||||
}
|
||||
if (!is_seam_scale_set)
|
||||
{
|
||||
seam_scale_ = std::min(1.0, std::sqrt(seam_est_resol_ * 1e6 / full_img_sizes_[i].area()));
|
||||
seam_work_aspect_ = seam_scale_ / work_scale_;
|
||||
is_seam_scale_set = true;
|
||||
}
|
||||
|
||||
if (!masks_.empty())
|
||||
{
|
||||
resize(masks_[i], feature_find_masks[i], Size(), work_scale_, work_scale_, INTER_NEAREST);
|
||||
}
|
||||
features_[i].img_idx = (int)i;
|
||||
LOGLN("Features in image #" << i+1 << ": " << features_[i].keypoints.size());
|
||||
|
||||
resize(imgs_[i], seam_est_imgs_[i], Size(), seam_scale_, seam_scale_, INTER_LINEAR_EXACT);
|
||||
}
|
||||
|
||||
// find features possibly in parallel
|
||||
detail::computeImageFeatures(features_finder_, feature_find_imgs, features_, feature_find_masks);
|
||||
|
||||
// Do it to save memory
|
||||
feature_find_imgs.clear();
|
||||
feature_find_masks.clear();
|
||||
|
||||
LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
|
||||
|
||||
LOG("Pairwise matching");
|
||||
#if ENABLE_LOG
|
||||
t = getTickCount();
|
||||
#endif
|
||||
(*features_matcher_)(features_, pairwise_matches_, matching_mask_);
|
||||
features_matcher_->collectGarbage();
|
||||
LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
|
||||
|
||||
// Leave only images we are sure are from the same panorama
|
||||
indices_ = detail::leaveBiggestComponent(features_, pairwise_matches_, (float)conf_thresh_);
|
||||
std::vector<UMat> seam_est_imgs_subset;
|
||||
std::vector<UMat> imgs_subset;
|
||||
std::vector<Size> full_img_sizes_subset;
|
||||
for (size_t i = 0; i < indices_.size(); ++i)
|
||||
{
|
||||
imgs_subset.push_back(imgs_[indices_[i]]);
|
||||
seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
|
||||
full_img_sizes_subset.push_back(full_img_sizes_[indices_[i]]);
|
||||
}
|
||||
seam_est_imgs_ = seam_est_imgs_subset;
|
||||
imgs_ = imgs_subset;
|
||||
full_img_sizes_ = full_img_sizes_subset;
|
||||
|
||||
if ((int)imgs_.size() < 2)
|
||||
{
|
||||
LOGLN("Need more images");
|
||||
return ERR_NEED_MORE_IMGS;
|
||||
}
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
|
||||
Stitcher::Status Stitcher::estimateCameraParams()
|
||||
{
|
||||
// estimate homography in global frame
|
||||
if (!(*estimator_)(features_, pairwise_matches_, cameras_))
|
||||
return ERR_HOMOGRAPHY_EST_FAIL;
|
||||
|
||||
for (size_t i = 0; i < cameras_.size(); ++i)
|
||||
{
|
||||
Mat R;
|
||||
cameras_[i].R.convertTo(R, CV_32F);
|
||||
cameras_[i].R = R;
|
||||
//LOGLN("Initial intrinsic parameters #" << indices_[i] + 1 << ":\n " << cameras_[i].K());
|
||||
}
|
||||
|
||||
bundle_adjuster_->setConfThresh(conf_thresh_);
|
||||
if (!(*bundle_adjuster_)(features_, pairwise_matches_, cameras_))
|
||||
return ERR_CAMERA_PARAMS_ADJUST_FAIL;
|
||||
|
||||
// Find median focal length and use it as final image scale
|
||||
std::vector<double> focals;
|
||||
for (size_t i = 0; i < cameras_.size(); ++i)
|
||||
{
|
||||
//LOGLN("Camera #" << indices_[i] + 1 << ":\n" << cameras_[i].K());
|
||||
focals.push_back(cameras_[i].focal);
|
||||
}
|
||||
|
||||
std::sort(focals.begin(), focals.end());
|
||||
if (focals.size() % 2 == 1)
|
||||
warped_image_scale_ = static_cast<float>(focals[focals.size() / 2]);
|
||||
else
|
||||
warped_image_scale_ = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;
|
||||
|
||||
if (do_wave_correct_)
|
||||
{
|
||||
std::vector<Mat> rmats;
|
||||
for (size_t i = 0; i < cameras_.size(); ++i)
|
||||
rmats.push_back(cameras_[i].R.clone());
|
||||
detail::waveCorrect(rmats, wave_correct_kind_);
|
||||
for (size_t i = 0; i < cameras_.size(); ++i)
|
||||
cameras_[i].R = rmats[i];
|
||||
}
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
Stitcher::Status Stitcher::setTransform(InputArrayOfArrays images, const std::vector<detail::CameraParams> &cameras)
|
||||
{
|
||||
std::vector<int> component;
|
||||
for (int i = 0; i < (int)images.total(); i++)
|
||||
component.push_back(i);
|
||||
|
||||
return setTransform(images, cameras, component);
|
||||
}
|
||||
|
||||
|
||||
Stitcher::Status Stitcher::setTransform(
|
||||
InputArrayOfArrays images, const std::vector<detail::CameraParams> &cameras, const std::vector<int> &component)
|
||||
{
|
||||
// CV_Assert(images.size() == cameras.size());
|
||||
|
||||
images.getUMatVector(imgs_);
|
||||
masks_.clear();
|
||||
|
||||
if ((int)imgs_.size() < 2)
|
||||
{
|
||||
LOGLN("Need more images");
|
||||
return ERR_NEED_MORE_IMGS;
|
||||
}
|
||||
|
||||
work_scale_ = 1;
|
||||
seam_work_aspect_ = 1;
|
||||
seam_scale_ = 1;
|
||||
bool is_work_scale_set = false;
|
||||
bool is_seam_scale_set = false;
|
||||
seam_est_imgs_.resize(imgs_.size());
|
||||
full_img_sizes_.resize(imgs_.size());
|
||||
|
||||
|
||||
for (size_t i = 0; i < imgs_.size(); ++i)
|
||||
{
|
||||
full_img_sizes_[i] = imgs_[i].size();
|
||||
if (registr_resol_ < 0)
|
||||
{
|
||||
work_scale_ = 1;
|
||||
is_work_scale_set = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (!is_work_scale_set)
|
||||
{
|
||||
work_scale_ = std::min(1.0, std::sqrt(registr_resol_ * 1e6 / full_img_sizes_[i].area()));
|
||||
is_work_scale_set = true;
|
||||
}
|
||||
}
|
||||
if (!is_seam_scale_set)
|
||||
{
|
||||
seam_scale_ = std::min(1.0, std::sqrt(seam_est_resol_ * 1e6 / full_img_sizes_[i].area()));
|
||||
seam_work_aspect_ = seam_scale_ / work_scale_;
|
||||
is_seam_scale_set = true;
|
||||
}
|
||||
|
||||
resize(imgs_[i], seam_est_imgs_[i], Size(), seam_scale_, seam_scale_, INTER_LINEAR_EXACT);
|
||||
}
|
||||
|
||||
features_.clear();
|
||||
pairwise_matches_.clear();
|
||||
|
||||
indices_ = component;
|
||||
std::vector<UMat> seam_est_imgs_subset;
|
||||
std::vector<UMat> imgs_subset;
|
||||
std::vector<Size> full_img_sizes_subset;
|
||||
for (size_t i = 0; i < indices_.size(); ++i)
|
||||
{
|
||||
imgs_subset.push_back(imgs_[indices_[i]]);
|
||||
seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
|
||||
full_img_sizes_subset.push_back(full_img_sizes_[indices_[i]]);
|
||||
}
|
||||
seam_est_imgs_ = seam_est_imgs_subset;
|
||||
imgs_ = imgs_subset;
|
||||
full_img_sizes_ = full_img_sizes_subset;
|
||||
|
||||
if ((int)imgs_.size() < 2)
|
||||
{
|
||||
LOGLN("Need more images");
|
||||
return ERR_NEED_MORE_IMGS;
|
||||
}
|
||||
|
||||
cameras_ = cameras;
|
||||
|
||||
std::vector<double> focals;
|
||||
for (size_t i = 0; i < cameras.size(); ++i)
|
||||
focals.push_back(cameras_[i].focal);
|
||||
|
||||
std::sort(focals.begin(), focals.end());
|
||||
if (focals.size() % 2 == 1)
|
||||
warped_image_scale_ = static_cast<float>(focals[focals.size() / 2]);
|
||||
else
|
||||
warped_image_scale_ = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;
|
||||
|
||||
return Status::OK;
|
||||
}
|
||||
|
||||
|
||||
CV_DEPRECATED Ptr<Stitcher> createStitcher(bool /*ignored*/)
|
||||
{
|
||||
CV_INSTRUMENT_REGION();
|
||||
|
||||
return Stitcher::create(Stitcher::PANORAMA);
|
||||
}
|
||||
|
||||
CV_DEPRECATED Ptr<Stitcher> createStitcherScans(bool /*ignored*/)
|
||||
{
|
||||
CV_INSTRUMENT_REGION();
|
||||
|
||||
return Stitcher::create(Stitcher::SCANS);
|
||||
}
|
||||
} // namespace cv
|
108
3rdparty/opencv-4.5.4/modules/stitching/src/timelapsers.cpp
vendored
Normal file
@ -0,0 +1,108 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
#include "opencl_kernels_stitching.hpp"
|
||||
|
||||
namespace cv {
|
||||
namespace detail {
|
||||
|
||||
Ptr<Timelapser> Timelapser::createDefault(int type)
|
||||
{
|
||||
if (type == AS_IS)
|
||||
return makePtr<Timelapser>();
|
||||
if (type == CROP)
|
||||
return makePtr<TimelapserCrop>();
|
||||
CV_Error(Error::StsBadArg, "unsupported timelapsing method");
|
||||
}
|
||||
|
||||
|
||||
void Timelapser::initialize(const std::vector<Point> &corners, const std::vector<Size> &sizes)
|
||||
{
|
||||
dst_roi_ = resultRoi(corners, sizes);
|
||||
dst_.create(dst_roi_.size(), CV_16SC3);
|
||||
}
|
||||
|
||||
void Timelapser::process(InputArray _img, InputArray /*_mask*/, Point tl)
|
||||
{
|
||||
CV_INSTRUMENT_REGION();
|
||||
|
||||
dst_.setTo(Scalar::all(0));
|
||||
|
||||
Mat img = _img.getMat();
|
||||
Mat dst = dst_.getMat(ACCESS_RW);
|
||||
|
||||
CV_Assert(img.type() == CV_16SC3);
|
||||
int dx = tl.x - dst_roi_.x;
|
||||
int dy = tl.y - dst_roi_.y;
|
||||
|
||||
for (int y = 0; y < img.rows; ++y)
|
||||
{
|
||||
const Point3_<short> *src_row = img.ptr<Point3_<short> >(y);
|
||||
|
||||
for (int x = 0; x < img.cols; ++x)
|
||||
{
|
||||
if (test_point(Point(tl.x + x, tl.y + y)))
|
||||
{
|
||||
Point3_<short> *dst_row = dst.ptr<Point3_<short> >(dy + y);
|
||||
dst_row[dx + x] = src_row[x];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool Timelapser::test_point(Point pt)
|
||||
{
|
||||
return dst_roi_.contains(pt);
|
||||
}
|
||||
|
||||
|
||||
void TimelapserCrop::initialize(const std::vector<Point> &corners, const std::vector<Size> &sizes)
|
||||
{
|
||||
dst_roi_ = resultRoiIntersection(corners, sizes);
|
||||
dst_.create(dst_roi_.size(), CV_16SC3);
|
||||
}
|
||||
|
||||
|
||||
} // namespace detail
|
||||
} // namespace cv
|
188
3rdparty/opencv-4.5.4/modules/stitching/src/util.cpp
vendored
Normal file
@ -0,0 +1,188 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
namespace cv {
|
||||
namespace detail {
|
||||
|
||||
void DisjointSets::createOneElemSets(int n)
|
||||
{
|
||||
rank_.assign(n, 0);
|
||||
size.assign(n, 1);
|
||||
parent.resize(n);
|
||||
for (int i = 0; i < n; ++i)
|
||||
parent[i] = i;
|
||||
}
|
||||
|
||||
|
||||
int DisjointSets::findSetByElem(int elem)
|
||||
{
|
||||
int set = elem;
|
||||
while (set != parent[set])
|
||||
set = parent[set];
|
||||
int next;
|
||||
while (elem != parent[elem])
|
||||
{
|
||||
next = parent[elem];
|
||||
parent[elem] = set;
|
||||
elem = next;
|
||||
}
|
||||
return set;
|
||||
}
|
||||
|
||||
|
||||
int DisjointSets::mergeSets(int set1, int set2)
|
||||
{
|
||||
if (rank_[set1] < rank_[set2])
|
||||
{
|
||||
parent[set1] = set2;
|
||||
size[set2] += size[set1];
|
||||
return set2;
|
||||
}
|
||||
if (rank_[set2] < rank_[set1])
|
||||
{
|
||||
parent[set2] = set1;
|
||||
size[set1] += size[set2];
|
||||
return set1;
|
||||
}
|
||||
parent[set1] = set2;
|
||||
rank_[set2]++;
|
||||
size[set2] += size[set1];
|
||||
return set2;
|
||||
}
|
||||
|
||||
|
||||
void Graph::addEdge(int from, int to, float weight)
|
||||
{
|
||||
edges_[from].push_back(GraphEdge(from, to, weight));
|
||||
}
|
||||
|
||||
|
||||
bool overlapRoi(Point tl1, Point tl2, Size sz1, Size sz2, Rect &roi)
|
||||
{
|
||||
int x_tl = std::max(tl1.x, tl2.x);
|
||||
int y_tl = std::max(tl1.y, tl2.y);
|
||||
int x_br = std::min(tl1.x + sz1.width, tl2.x + sz2.width);
|
||||
int y_br = std::min(tl1.y + sz1.height, tl2.y + sz2.height);
|
||||
if (x_tl < x_br && y_tl < y_br)
|
||||
{
|
||||
roi = Rect(x_tl, y_tl, x_br - x_tl, y_br - y_tl);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
Rect resultRoi(const std::vector<Point> &corners, const std::vector<UMat> &images)
|
||||
{
|
||||
std::vector<Size> sizes(images.size());
|
||||
for (size_t i = 0; i < images.size(); ++i)
|
||||
sizes[i] = images[i].size();
|
||||
return resultRoi(corners, sizes);
|
||||
}
|
||||
|
||||
|
||||
Rect resultRoi(const std::vector<Point> &corners, const std::vector<Size> &sizes)
|
||||
{
|
||||
CV_Assert(sizes.size() == corners.size());
|
||||
Point tl(std::numeric_limits<int>::max(), std::numeric_limits<int>::max());
|
||||
Point br(std::numeric_limits<int>::min(), std::numeric_limits<int>::min());
|
||||
for (size_t i = 0; i < corners.size(); ++i)
|
||||
{
|
||||
tl.x = std::min(tl.x, corners[i].x);
|
||||
tl.y = std::min(tl.y, corners[i].y);
|
||||
br.x = std::max(br.x, corners[i].x + sizes[i].width);
|
||||
br.y = std::max(br.y, corners[i].y + sizes[i].height);
|
||||
}
|
||||
return Rect(tl, br);
|
||||
}
|
||||
|
||||
Rect resultRoiIntersection(const std::vector<Point> &corners, const std::vector<Size> &sizes)
|
||||
{
|
||||
CV_Assert(sizes.size() == corners.size());
|
||||
Point tl(std::numeric_limits<int>::min(), std::numeric_limits<int>::min());
|
||||
Point br(std::numeric_limits<int>::max(), std::numeric_limits<int>::max());
|
||||
for (size_t i = 0; i < corners.size(); ++i)
|
||||
{
|
||||
tl.x = std::max(tl.x, corners[i].x);
|
||||
tl.y = std::max(tl.y, corners[i].y);
|
||||
br.x = std::min(br.x, corners[i].x + sizes[i].width);
|
||||
br.y = std::min(br.y, corners[i].y + sizes[i].height);
|
||||
}
|
||||
return Rect(tl, br);
|
||||
}
|
||||
|
||||
|
||||
Point resultTl(const std::vector<Point> &corners)
|
||||
{
|
||||
Point tl(std::numeric_limits<int>::max(), std::numeric_limits<int>::max());
|
||||
for (size_t i = 0; i < corners.size(); ++i)
|
||||
{
|
||||
tl.x = std::min(tl.x, corners[i].x);
|
||||
tl.y = std::min(tl.y, corners[i].y);
|
||||
}
|
||||
return tl;
|
||||
}
|
||||
|
||||
|
||||
void selectRandomSubset(int count, int size, std::vector<int> &subset)
|
||||
{
|
||||
subset.clear();
|
||||
for (int i = 0; i < size; ++i)
|
||||
{
|
||||
if (randu<int>() % (size - i) < count)
|
||||
{
|
||||
subset.push_back(i);
|
||||
count--;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int& stitchingLogLevel()
|
||||
{
|
||||
static int _log_level=1;
|
||||
return _log_level;
|
||||
}
|
||||
|
||||
} // namespace detail
|
||||
} // namespace cv
|
58
3rdparty/opencv-4.5.4/modules/stitching/src/util_log.hpp
vendored
Normal file
@ -0,0 +1,58 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#ifndef __OPENCV_STITCHING_UTIL_LOG_HPP__
#define __OPENCV_STITCHING_UTIL_LOG_HPP__

#ifndef ENABLE_LOG
#define ENABLE_LOG 0
#endif

// TODO remove LOG macros, add logging class
#if ENABLE_LOG
#ifdef __ANDROID__
  #include <iostream>
  #include <sstream>
  #include <android/log.h>
  #define LOG_STITCHING_MSG(msg) \
    do { \
        std::stringstream _os; \
        _os << msg; \
        __android_log_print(ANDROID_LOG_DEBUG, "STITCHING", "%s", _os.str().c_str()); \
    } while(0);
#else
  #include <iostream>
  #define LOG_STITCHING_MSG(msg) for(;;) { std::cout << msg; std::cout.flush(); break; }
#endif
#else
  #define LOG_STITCHING_MSG(msg)
#endif

#define LOG_(_level, _msg) \
    for(;;) \
    { \
        using namespace std; \
        if ((_level) >= ::cv::detail::stitchingLogLevel()) \
        { \
            LOG_STITCHING_MSG(_msg); \
        } \
        break; \
    }


#define LOG(msg) LOG_(1, msg)
#define LOG_CHAT(msg) LOG_(0, msg)

#define LOGLN(msg) LOG(msg << std::endl)
#define LOGLN_CHAT(msg) LOG_CHAT(msg << std::endl)

//#if DEBUG_LOG_CHAT
//  #define LOG_CHAT(msg) LOG(msg)
//  #define LOGLN_CHAT(msg) LOGLN(msg)
//#else
//  #define LOG_CHAT(msg) do{}while(0)
//  #define LOGLN_CHAT(msg) do{}while(0)
//#endif

#endif // __OPENCV_STITCHING_UTIL_LOG_HPP__
561
3rdparty/opencv-4.5.4/modules/stitching/src/warpers.cpp
vendored
Normal file
@ -0,0 +1,561 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
#include "opencl_kernels_stitching.hpp"
|
||||
#include <iostream>
|
||||
namespace cv {
|
||||
|
||||
PyRotationWarper::PyRotationWarper(String warp_type, float scale)
|
||||
{
|
||||
Ptr<WarperCreator> warper_creator;
|
||||
if (warp_type == "plane")
|
||||
warper_creator = makePtr<cv::PlaneWarper>();
|
||||
else if (warp_type == "affine")
|
||||
warper_creator = makePtr<cv::AffineWarper>();
|
||||
else if (warp_type == "cylindrical")
|
||||
warper_creator = makePtr<cv::CylindricalWarper>();
|
||||
else if (warp_type == "spherical")
|
||||
warper_creator = makePtr<cv::SphericalWarper>();
|
||||
else if (warp_type == "fisheye")
|
||||
warper_creator = makePtr<cv::FisheyeWarper>();
|
||||
else if (warp_type == "stereographic")
|
||||
warper_creator = makePtr<cv::StereographicWarper>();
|
||||
else if (warp_type == "compressedPlaneA2B1")
|
||||
warper_creator = makePtr<cv::CompressedRectilinearWarper>(2.0f, 1.0f);
|
||||
else if (warp_type == "compressedPlaneA1.5B1")
|
||||
warper_creator = makePtr<cv::CompressedRectilinearWarper>(1.5f, 1.0f);
|
||||
else if (warp_type == "compressedPlanePortraitA2B1")
|
||||
warper_creator = makePtr<cv::CompressedRectilinearPortraitWarper>(2.0f, 1.0f);
|
||||
else if (warp_type == "compressedPlanePortraitA1.5B1")
|
||||
warper_creator = makePtr<cv::CompressedRectilinearPortraitWarper>(1.5f, 1.0f);
|
||||
else if (warp_type == "paniniA2B1")
|
||||
warper_creator = makePtr<cv::PaniniWarper>(2.0f, 1.0f);
|
||||
else if (warp_type == "paniniA1.5B1")
|
||||
warper_creator = makePtr<cv::PaniniWarper>(1.5f, 1.0f);
|
||||
else if (warp_type == "paniniPortraitA2B1")
|
||||
warper_creator = makePtr<cv::PaniniPortraitWarper>(2.0f, 1.0f);
|
||||
else if (warp_type == "paniniPortraitA1.5B1")
|
||||
warper_creator = makePtr<cv::PaniniPortraitWarper>(1.5f, 1.0f);
|
||||
else if (warp_type == "mercator")
|
||||
warper_creator = makePtr<cv::MercatorWarper>();
|
||||
else if (warp_type == "transverseMercator")
|
||||
warper_creator = makePtr<cv::TransverseMercatorWarper>();
|
||||
if (warper_creator.get() != nullptr)
|
||||
{
|
||||
rw = warper_creator->create(scale);
|
||||
|
||||
}
|
||||
else
|
||||
CV_Error(Error::StsError, "unknown warper :" + warp_type);
|
||||
}
|
||||
Point2f PyRotationWarper::warpPoint(const Point2f &pt, InputArray K, InputArray R)
|
||||
{
|
||||
return rw.get()->warpPoint(pt, K, R);
|
||||
}
|
||||
|
||||
#if CV_VERSION_MAJOR != 4
|
||||
Point2f PyRotationWarper::warpPointBackward(const Point2f& pt, InputArray K, InputArray R)
|
||||
{
|
||||
return rw.get()->warpPointBackward(pt, K, R);
|
||||
}
|
||||
#endif
|
||||
|
||||
Rect PyRotationWarper::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)
|
||||
{
|
||||
return rw.get()->buildMaps(src_size, K, R, xmap, ymap);
|
||||
}
|
||||
Point PyRotationWarper::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
|
||||
OutputArray dst)
|
||||
{
|
||||
if (rw.get() == nullptr)
|
||||
CV_Error(Error::StsError, "Warper is null");
|
||||
Point p = rw.get()->warp(src, K, R, interp_mode, border_mode, dst);
|
||||
return p;
|
||||
|
||||
}
|
||||
void PyRotationWarper::warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
|
||||
Size dst_size, OutputArray dst)
|
||||
{
|
||||
return rw.get()->warpBackward(src, K, R, interp_mode, border_mode, dst_size, dst);
|
||||
}
|
||||
Rect PyRotationWarper::warpRoi(Size src_size, InputArray K, InputArray R)
|
||||
{
|
||||
return rw.get()->warpRoi(src_size, K, R);
|
||||
}
|
||||
|
||||
namespace detail {
|
||||
|
||||
void ProjectorBase::setCameraParams(InputArray _K, InputArray _R, InputArray _T)
|
||||
{
|
||||
Mat K = _K.getMat(), R = _R.getMat(), T = _T.getMat();
|
||||
|
||||
CV_Assert(K.size() == Size(3, 3) && K.type() == CV_32F);
|
||||
CV_Assert(R.size() == Size(3, 3) && R.type() == CV_32F);
|
||||
CV_Assert((T.size() == Size(1, 3) || T.size() == Size(3, 1)) && T.type() == CV_32F);
|
||||
|
||||
Mat_<float> K_(K);
|
||||
k[0] = K_(0,0); k[1] = K_(0,1); k[2] = K_(0,2);
|
||||
k[3] = K_(1,0); k[4] = K_(1,1); k[5] = K_(1,2);
|
||||
k[6] = K_(2,0); k[7] = K_(2,1); k[8] = K_(2,2);
|
||||
|
||||
Mat_<float> Rinv = R.t();
|
||||
rinv[0] = Rinv(0,0); rinv[1] = Rinv(0,1); rinv[2] = Rinv(0,2);
|
||||
rinv[3] = Rinv(1,0); rinv[4] = Rinv(1,1); rinv[5] = Rinv(1,2);
|
||||
rinv[6] = Rinv(2,0); rinv[7] = Rinv(2,1); rinv[8] = Rinv(2,2);
|
||||
|
||||
Mat_<float> R_Kinv = R * K.inv();
|
||||
r_kinv[0] = R_Kinv(0,0); r_kinv[1] = R_Kinv(0,1); r_kinv[2] = R_Kinv(0,2);
|
||||
r_kinv[3] = R_Kinv(1,0); r_kinv[4] = R_Kinv(1,1); r_kinv[5] = R_Kinv(1,2);
|
||||
r_kinv[6] = R_Kinv(2,0); r_kinv[7] = R_Kinv(2,1); r_kinv[8] = R_Kinv(2,2);
|
||||
|
||||
Mat_<float> K_Rinv = K * Rinv;
|
||||
k_rinv[0] = K_Rinv(0,0); k_rinv[1] = K_Rinv(0,1); k_rinv[2] = K_Rinv(0,2);
|
||||
k_rinv[3] = K_Rinv(1,0); k_rinv[4] = K_Rinv(1,1); k_rinv[5] = K_Rinv(1,2);
|
||||
k_rinv[6] = K_Rinv(2,0); k_rinv[7] = K_Rinv(2,1); k_rinv[8] = K_Rinv(2,2);
|
||||
|
||||
Mat_<float> T_(T.reshape(0, 3));
|
||||
t[0] = T_(0,0); t[1] = T_(1,0); t[2] = T_(2,0);
|
||||
}

Point2f PlaneWarper::warpPoint(const Point2f &pt, InputArray K, InputArray R, InputArray T)
{
    projector_.setCameraParams(K, R, T);
    Point2f uv;
    projector_.mapForward(pt.x, pt.y, uv.x, uv.y);
    return uv;
}

Point2f PlaneWarper::warpPoint(const Point2f &pt, InputArray K, InputArray R)
{
    float tz[] = {0.f, 0.f, 0.f};
    Mat_<float> T(3, 1, tz);
    return warpPoint(pt, K, R, T);
}

Point2f PlaneWarper::warpPointBackward(const Point2f& pt, InputArray K, InputArray R, InputArray T)
{
    projector_.setCameraParams(K, R, T);
    Point2f xy;
    projector_.mapBackward(pt.x, pt.y, xy.x, xy.y);
    return xy;
}

Point2f PlaneWarper::warpPointBackward(const Point2f& pt, InputArray K, InputArray R)
{
    float tz[] = { 0.f, 0.f, 0.f };
    Mat_<float> T(3, 1, tz);
    return warpPointBackward(pt, K, R, T);
}

Rect PlaneWarper::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)
{
    return buildMaps(src_size, K, R, Mat::zeros(3, 1, CV_32FC1), xmap, ymap);
}

Rect PlaneWarper::buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, OutputArray _xmap, OutputArray _ymap)
{
    projector_.setCameraParams(K, R, T);

    Point dst_tl, dst_br;
    detectResultRoi(src_size, dst_tl, dst_br);

    Size dsize(dst_br.x - dst_tl.x + 1, dst_br.y - dst_tl.y + 1);
    _xmap.create(dsize, CV_32FC1);
    _ymap.create(dsize, CV_32FC1);

#ifdef HAVE_OPENCL
    if (ocl::isOpenCLActivated())
    {
        ocl::Kernel k("buildWarpPlaneMaps", ocl::stitching::warpers_oclsrc);
        if (!k.empty())
        {
            int rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;
            Mat k_rinv(1, 9, CV_32FC1, projector_.k_rinv), t(1, 3, CV_32FC1, projector_.t);
            UMat uxmap = _xmap.getUMat(), uymap = _ymap.getUMat(),
                 uk_rinv = k_rinv.getUMat(ACCESS_READ), ut = t.getUMat(ACCESS_READ);

            k.args(ocl::KernelArg::WriteOnlyNoSize(uxmap), ocl::KernelArg::WriteOnly(uymap),
                   ocl::KernelArg::PtrReadOnly(uk_rinv), ocl::KernelArg::PtrReadOnly(ut),
                   dst_tl.x, dst_tl.y, 1/projector_.scale, rowsPerWI);

            size_t globalsize[2] = { (size_t)dsize.width, ((size_t)dsize.height + rowsPerWI - 1) / rowsPerWI };
            if (k.run(2, globalsize, NULL, true))
            {
                CV_IMPL_ADD(CV_IMPL_OCL);
                return Rect(dst_tl, dst_br);
            }
        }
    }
#endif

    Mat xmap = _xmap.getMat(), ymap = _ymap.getMat();

    float x, y;
    for (int v = dst_tl.y; v <= dst_br.y; ++v)
    {
        for (int u = dst_tl.x; u <= dst_br.x; ++u)
        {
            projector_.mapBackward(static_cast<float>(u), static_cast<float>(v), x, y);
            xmap.at<float>(v - dst_tl.y, u - dst_tl.x) = x;
            ymap.at<float>(v - dst_tl.y, u - dst_tl.x) = y;
        }
    }

    return Rect(dst_tl, dst_br);
}
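
// buildMaps() above computes, for every pixel of the warped plane image, the source pixel
// it samples from: an OpenCL kernel ("buildWarpPlaneMaps") is tried first, and a plain CPU
// loop over projector_.mapBackward() is the fallback. The returned Rect is the bounding box
// of the warped image in destination coordinates; its top-left corner is the offset callers
// use when composing the warped images into a panorama.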

Point PlaneWarper::warp(InputArray src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode,
    OutputArray dst)
{
    UMat uxmap, uymap;
    Rect dst_roi = buildMaps(src.size(), K, R, T, uxmap, uymap);
    dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
    remap(src, dst, uxmap, uymap, interp_mode, border_mode);

    return dst_roi.tl();
}

Point PlaneWarper::warp(InputArray src, InputArray K, InputArray R,
                        int interp_mode, int border_mode, OutputArray dst)
{
    float tz[] = {0.f, 0.f, 0.f};
    Mat_<float> T(3, 1, tz);
    return warp(src, K, R, T, interp_mode, border_mode, dst);
}

Rect PlaneWarper::warpRoi(Size src_size, InputArray K, InputArray R, InputArray T)
{
    projector_.setCameraParams(K, R, T);

    Point dst_tl, dst_br;
    detectResultRoi(src_size, dst_tl, dst_br);

    return Rect(dst_tl, Point(dst_br.x + 1, dst_br.y + 1));
}

Rect PlaneWarper::warpRoi(Size src_size, InputArray K, InputArray R)
{
    float tz[] = {0.f, 0.f, 0.f};
    Mat_<float> T(3, 1, tz);
    return warpRoi(src_size, K, R, T);
}

void PlaneWarper::detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br)
{
    float tl_uf = std::numeric_limits<float>::max();
    float tl_vf = std::numeric_limits<float>::max();
    float br_uf = -std::numeric_limits<float>::max();
    float br_vf = -std::numeric_limits<float>::max();

    float u, v;

    projector_.mapForward(0, 0, u, v);
    tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
    br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);

    projector_.mapForward(0, static_cast<float>(src_size.height - 1), u, v);
    tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
    br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);

    projector_.mapForward(static_cast<float>(src_size.width - 1), 0, u, v);
    tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
    br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);

    projector_.mapForward(static_cast<float>(src_size.width - 1), static_cast<float>(src_size.height - 1), u, v);
    tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
    br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);

    dst_tl.x = static_cast<int>(tl_uf);
    dst_tl.y = static_cast<int>(tl_vf);
    dst_br.x = static_cast<int>(br_uf);
    dst_br.y = static_cast<int>(br_vf);
}
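
// For the plane projection the warped image of the source rectangle is a quadrilateral, so
// its bounding box is attained at the four source corners; mapping just those corners is
// enough to find the result ROI. Curved projections (spherical, cylindrical) cannot rely on
// this and instead walk the whole source border via detectResultRoiByBorder().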

Point2f AffineWarper::warpPoint(const Point2f &pt, InputArray K, InputArray H)
{
    Mat R, T;
    getRTfromHomogeneous(H, R, T);
    return PlaneWarper::warpPoint(pt, K, R, T);
}

Point2f AffineWarper::warpPointBackward(const Point2f& pt, InputArray K, InputArray H)
{
    Mat R, T;
    getRTfromHomogeneous(H, R, T);
    return PlaneWarper::warpPointBackward(pt, K, R, T);
}

Rect AffineWarper::buildMaps(Size src_size, InputArray K, InputArray H, OutputArray xmap, OutputArray ymap)
{
    Mat R, T;
    getRTfromHomogeneous(H, R, T);
    return PlaneWarper::buildMaps(src_size, K, R, T, xmap, ymap);
}

Point AffineWarper::warp(InputArray src, InputArray K, InputArray H,
                         int interp_mode, int border_mode, OutputArray dst)
{
    Mat R, T;
    getRTfromHomogeneous(H, R, T);
    return PlaneWarper::warp(src, K, R, T, interp_mode, border_mode, dst);
}

Rect AffineWarper::warpRoi(Size src_size, InputArray K, InputArray H)
{
    Mat R, T;
    getRTfromHomogeneous(H, R, T);
    return PlaneWarper::warpRoi(src_size, K, R, T);
}

void AffineWarper::getRTfromHomogeneous(InputArray H_, Mat &R, Mat &T)
{
    Mat H = H_.getMat();
    CV_Assert(H.size() == Size(3, 3) && H.type() == CV_32F);

    T = Mat::zeros(3, 1, CV_32F);
    R = H.clone();

    T.at<float>(0,0) = R.at<float>(0,2);
    T.at<float>(1,0) = R.at<float>(1,2);
    R.at<float>(0,2) = 0.f;
    R.at<float>(1,2) = 0.f;

    // we want to compensate transform to fit into plane warper
    R = R.t();
    T = (R * T) * -1;
}
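
// The affine transform H = [A | b; 0 0 1] is split above into a 3x3 linear part R (H with
// its translation column zeroed) and a translation T = (b_x, b_y, 0)^T. The final transpose
// and sign flip re-express that pair in the convention the plane warper expects, so the
// affine warp can reuse the PlaneWarper machinery unchanged.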

void SphericalWarper::detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br)
{
    detectResultRoiByBorder(src_size, dst_tl, dst_br);

    float tl_uf = static_cast<float>(dst_tl.x);
    float tl_vf = static_cast<float>(dst_tl.y);
    float br_uf = static_cast<float>(dst_br.x);
    float br_vf = static_cast<float>(dst_br.y);

    float x = projector_.rinv[1];
    float y = projector_.rinv[4];
    float z = projector_.rinv[7];
    if (y > 0.f)
    {
        float x_ = (projector_.k[0] * x + projector_.k[1] * y) / z + projector_.k[2];
        float y_ = projector_.k[4] * y / z + projector_.k[5];
        if (x_ > 0.f && x_ < src_size.width && y_ > 0.f && y_ < src_size.height)
        {
            tl_uf = std::min(tl_uf, 0.f); tl_vf = std::min(tl_vf, static_cast<float>(CV_PI * projector_.scale));
            br_uf = std::max(br_uf, 0.f); br_vf = std::max(br_vf, static_cast<float>(CV_PI * projector_.scale));
        }
    }

    x = projector_.rinv[1];
    y = -projector_.rinv[4];
    z = projector_.rinv[7];
    if (y > 0.f)
    {
        float x_ = (projector_.k[0] * x + projector_.k[1] * y) / z + projector_.k[2];
        float y_ = projector_.k[4] * y / z + projector_.k[5];
        if (x_ > 0.f && x_ < src_size.width && y_ > 0.f && y_ < src_size.height)
        {
            tl_uf = std::min(tl_uf, 0.f); tl_vf = std::min(tl_vf, static_cast<float>(0));
            br_uf = std::max(br_uf, 0.f); br_vf = std::max(br_vf, static_cast<float>(0));
        }
    }

    dst_tl.x = static_cast<int>(tl_uf);
    dst_tl.y = static_cast<int>(tl_vf);
    dst_br.x = static_cast<int>(br_uf);
    dst_br.y = static_cast<int>(br_vf);
}
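
// The two checks above handle the spherical poles: using entries of the cached rinv matrix
// they test whether a pole of the panorama sphere projects inside the source image, and if
// it does, the ROI obtained from the image border alone is widened vertically to include
// that pole's v coordinate (CV_PI * scale for one pole, 0 for the other).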

void SphericalPortraitWarper::detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br)
{
    detectResultRoiByBorder(src_size, dst_tl, dst_br);

    float tl_uf = static_cast<float>(dst_tl.x);
    float tl_vf = static_cast<float>(dst_tl.y);
    float br_uf = static_cast<float>(dst_br.x);
    float br_vf = static_cast<float>(dst_br.y);

    float x = projector_.rinv[0];
    float y = projector_.rinv[3];
    float z = projector_.rinv[6];
    if (y > 0.f)
    {
        float x_ = (projector_.k[0] * x + projector_.k[1] * y) / z + projector_.k[2];
        float y_ = projector_.k[4] * y / z + projector_.k[5];
        if (x_ > 0.f && x_ < src_size.width && y_ > 0.f && y_ < src_size.height)
        {
            tl_uf = std::min(tl_uf, 0.f); tl_vf = std::min(tl_vf, static_cast<float>(CV_PI * projector_.scale));
            br_uf = std::max(br_uf, 0.f); br_vf = std::max(br_vf, static_cast<float>(CV_PI * projector_.scale));
        }
    }

    x = projector_.rinv[0];
    y = -projector_.rinv[3];
    z = projector_.rinv[6];
    if (y > 0.f)
    {
        float x_ = (projector_.k[0] * x + projector_.k[1] * y) / z + projector_.k[2];
        float y_ = projector_.k[4] * y / z + projector_.k[5];
        if (x_ > 0.f && x_ < src_size.width && y_ > 0.f && y_ < src_size.height)
        {
            tl_uf = std::min(tl_uf, 0.f); tl_vf = std::min(tl_vf, static_cast<float>(0));
            br_uf = std::max(br_uf, 0.f); br_vf = std::max(br_vf, static_cast<float>(0));
        }
    }

    dst_tl.x = static_cast<int>(tl_uf);
    dst_tl.y = static_cast<int>(tl_vf);
    dst_br.x = static_cast<int>(br_uf);
    dst_br.y = static_cast<int>(br_vf);
}

/////////////////////////////////////////// SphericalWarper ////////////////////////////////////////

Rect SphericalWarper::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)
{
#ifdef HAVE_OPENCL
    if (ocl::isOpenCLActivated())
    {
        ocl::Kernel k("buildWarpSphericalMaps", ocl::stitching::warpers_oclsrc);
        if (!k.empty())
        {
            int rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;
            projector_.setCameraParams(K, R);

            Point dst_tl, dst_br;
            detectResultRoi(src_size, dst_tl, dst_br);

            Size dsize(dst_br.x - dst_tl.x + 1, dst_br.y - dst_tl.y + 1);
            xmap.create(dsize, CV_32FC1);
            ymap.create(dsize, CV_32FC1);

            Mat k_rinv(1, 9, CV_32FC1, projector_.k_rinv);
            UMat uxmap = xmap.getUMat(), uymap = ymap.getUMat(), uk_rinv = k_rinv.getUMat(ACCESS_READ);

            k.args(ocl::KernelArg::WriteOnlyNoSize(uxmap), ocl::KernelArg::WriteOnly(uymap),
                   ocl::KernelArg::PtrReadOnly(uk_rinv), dst_tl.x, dst_tl.y, 1/projector_.scale, rowsPerWI);

            size_t globalsize[2] = { (size_t)dsize.width, ((size_t)dsize.height + rowsPerWI - 1) / rowsPerWI };
            if (k.run(2, globalsize, NULL, true))
            {
                CV_IMPL_ADD(CV_IMPL_OCL);
                return Rect(dst_tl, dst_br);
            }
        }
    }
#endif
    return RotationWarperBase<SphericalProjector>::buildMaps(src_size, K, R, xmap, ymap);
}

Point SphericalWarper::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst)
{
    UMat uxmap, uymap;
    Rect dst_roi = buildMaps(src.size(), K, R, uxmap, uymap);

    dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
    remap(src, dst, uxmap, uymap, interp_mode, border_mode);

    return dst_roi.tl();
}

/////////////////////////////////////////// CylindricalWarper ////////////////////////////////////////

Rect CylindricalWarper::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap)
{
#ifdef HAVE_OPENCL
    if (ocl::isOpenCLActivated())
    {
        ocl::Kernel k("buildWarpCylindricalMaps", ocl::stitching::warpers_oclsrc);
        if (!k.empty())
        {
            int rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;
            projector_.setCameraParams(K, R);

            Point dst_tl, dst_br;
            detectResultRoi(src_size, dst_tl, dst_br);

            Size dsize(dst_br.x - dst_tl.x + 1, dst_br.y - dst_tl.y + 1);
            xmap.create(dsize, CV_32FC1);
            ymap.create(dsize, CV_32FC1);

            Mat k_rinv(1, 9, CV_32FC1, projector_.k_rinv);
            UMat uxmap = xmap.getUMat(), uymap = ymap.getUMat(), uk_rinv = k_rinv.getUMat(ACCESS_READ);

            k.args(ocl::KernelArg::WriteOnlyNoSize(uxmap), ocl::KernelArg::WriteOnly(uymap),
                   ocl::KernelArg::PtrReadOnly(uk_rinv), dst_tl.x, dst_tl.y, 1/projector_.scale,
                   rowsPerWI);

            size_t globalsize[2] = { (size_t)dsize.width, ((size_t)dsize.height + rowsPerWI - 1) / rowsPerWI };
            if (k.run(2, globalsize, NULL, true))
            {
                CV_IMPL_ADD(CV_IMPL_OCL);
                return Rect(dst_tl, dst_br);
            }
        }
    }
#endif
    return RotationWarperBase<CylindricalProjector>::buildMaps(src_size, K, R, xmap, ymap);
}

Point CylindricalWarper::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst)
{
    UMat uxmap, uymap;
    Rect dst_roi = buildMaps(src.size(), K, R, uxmap, uymap);

    dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
    remap(src, dst, uxmap, uymap, interp_mode, border_mode);

    return dst_roi.tl();
}

} // namespace detail
} // namespace cv
292
3rdparty/opencv-4.5.4/modules/stitching/src/warpers_cuda.cpp
vendored
Normal file
@ -0,0 +1,292 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"
#include "opencv2/core/private.cuda.hpp"

using namespace cv;
using namespace cv::cuda;

#ifdef HAVE_CUDA

namespace cv { namespace cuda { namespace device
{
    namespace imgproc
    {
        void buildWarpPlaneMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
                                const float k_rinv[9], const float r_kinv[9], const float t[3], float scale,
                                cudaStream_t stream);

        void buildWarpSphericalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
                                    const float k_rinv[9], const float r_kinv[9], float scale,
                                    cudaStream_t stream);

        void buildWarpCylindricalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
                                      const float k_rinv[9], const float r_kinv[9], float scale,
                                      cudaStream_t stream);
    }
}}}
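
// The declarations above correspond to CUDA kernels compiled from this module's .cu
// sources; the three static helpers below are host-side wrappers that validate K, R (and T
// for the plane warp), precompute K * R^T and R * K^-1 as contiguous float matrices,
// allocate the destination maps and launch the corresponding kernel on the given stream.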

static void buildWarpPlaneMaps(Size src_size, Rect dst_roi, InputArray _K, InputArray _R, InputArray _T,
                               float scale, OutputArray _map_x, OutputArray _map_y, Stream& stream = Stream::Null())
{
    CV_UNUSED(src_size);

    Mat K = _K.getMat();
    Mat R = _R.getMat();
    Mat T = _T.getMat();

    CV_Assert( K.size() == Size(3,3) && K.type() == CV_32FC1 );
    CV_Assert( R.size() == Size(3,3) && R.type() == CV_32FC1 );
    CV_Assert( (T.size() == Size(3,1) || T.size() == Size(1,3)) && T.type() == CV_32FC1 && T.isContinuous() );

    Mat K_Rinv = K * R.t();
    Mat R_Kinv = R * K.inv();
    CV_Assert( K_Rinv.isContinuous() );
    CV_Assert( R_Kinv.isContinuous() );

    _map_x.create(dst_roi.size(), CV_32FC1);
    _map_y.create(dst_roi.size(), CV_32FC1);

    GpuMat map_x = _map_x.getGpuMat();
    GpuMat map_y = _map_y.getGpuMat();

    device::imgproc::buildWarpPlaneMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(),
                                        T.ptr<float>(), scale, StreamAccessor::getStream(stream));
}

static void buildWarpSphericalMaps(Size src_size, Rect dst_roi, InputArray _K, InputArray _R, float scale,
                                   OutputArray _map_x, OutputArray _map_y, Stream& stream = Stream::Null())
{
    CV_UNUSED(src_size);

    Mat K = _K.getMat();
    Mat R = _R.getMat();

    CV_Assert( K.size() == Size(3,3) && K.type() == CV_32FC1 );
    CV_Assert( R.size() == Size(3,3) && R.type() == CV_32FC1 );

    Mat K_Rinv = K * R.t();
    Mat R_Kinv = R * K.inv();
    CV_Assert( K_Rinv.isContinuous() );
    CV_Assert( R_Kinv.isContinuous() );

    _map_x.create(dst_roi.size(), CV_32FC1);
    _map_y.create(dst_roi.size(), CV_32FC1);

    GpuMat map_x = _map_x.getGpuMat();
    GpuMat map_y = _map_y.getGpuMat();

    device::imgproc::buildWarpSphericalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(), scale, StreamAccessor::getStream(stream));
}

static void buildWarpCylindricalMaps(Size src_size, Rect dst_roi, InputArray _K, InputArray _R, float scale,
                                     OutputArray _map_x, OutputArray _map_y, Stream& stream = Stream::Null())
{
    CV_UNUSED(src_size);

    Mat K = _K.getMat();
    Mat R = _R.getMat();

    CV_Assert( K.size() == Size(3,3) && K.type() == CV_32FC1 );
    CV_Assert( R.size() == Size(3,3) && R.type() == CV_32FC1 );

    Mat K_Rinv = K * R.t();
    Mat R_Kinv = R * K.inv();
    CV_Assert( K_Rinv.isContinuous() );
    CV_Assert( R_Kinv.isContinuous() );

    _map_x.create(dst_roi.size(), CV_32FC1);
    _map_y.create(dst_roi.size(), CV_32FC1);

    GpuMat map_x = _map_x.getGpuMat();
    GpuMat map_y = _map_y.getGpuMat();

    device::imgproc::buildWarpCylindricalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(), scale, StreamAccessor::getStream(stream));
}

#endif

Rect cv::detail::PlaneWarperGpu::buildMaps(Size src_size, InputArray K, InputArray R,
                                           cuda::GpuMat & xmap, cuda::GpuMat & ymap)
{
    return buildMaps(src_size, K, R, Mat::zeros(3, 1, CV_32F), xmap, ymap);
}

Rect cv::detail::PlaneWarperGpu::buildMaps(Size src_size, InputArray K, InputArray R, InputArray T,
                                           cuda::GpuMat & xmap, cuda::GpuMat & ymap)
{
#ifndef HAVE_CUDA
    CV_UNUSED(src_size);
    CV_UNUSED(K);
    CV_UNUSED(R);
    CV_UNUSED(T);
    CV_UNUSED(xmap);
    CV_UNUSED(ymap);
    throw_no_cuda();
#else
    projector_.setCameraParams(K, R, T);

    Point dst_tl, dst_br;
    detectResultRoi(src_size, dst_tl, dst_br);

    ::buildWarpPlaneMaps(src_size, Rect(dst_tl, Point(dst_br.x + 1, dst_br.y + 1)),
                         K, R, T, projector_.scale, xmap, ymap);

    return Rect(dst_tl, dst_br);
#endif
}

Point cv::detail::PlaneWarperGpu::warp(const cuda::GpuMat & src, InputArray K, InputArray R,
                                       int interp_mode, int border_mode,
                                       cuda::GpuMat & dst)
{
    return warp(src, K, R, Mat::zeros(3, 1, CV_32F), interp_mode, border_mode, dst);
}

Point cv::detail::PlaneWarperGpu::warp(const cuda::GpuMat & src, InputArray K, InputArray R, InputArray T,
                                       int interp_mode, int border_mode,
                                       cuda::GpuMat & dst)
{
#ifndef HAVE_OPENCV_CUDAWARPING
    CV_UNUSED(src);
    CV_UNUSED(K);
    CV_UNUSED(R);
    CV_UNUSED(T);
    CV_UNUSED(interp_mode);
    CV_UNUSED(border_mode);
    CV_UNUSED(dst);
    throw_no_cuda();
#else
    Rect dst_roi = buildMaps(src.size(), K, R, T, d_xmap_, d_ymap_);
    dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
    cuda::remap(src, dst, d_xmap_, d_ymap_, interp_mode, border_mode);
    return dst_roi.tl();
#endif
}
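
// Each GPU warper follows the same two-step pattern as its CPU counterpart: buildMaps()
// computes the backward maps on the device (via the ::buildWarp*Maps helpers above) and
// warp() resamples the source with cuda::remap(). When OpenCV is built without CUDA (or
// without the cudawarping module providing remap), the CV_UNUSED/throw_no_cuda() branches
// keep the symbols defined but make the calls fail explicitly at run time.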

Rect cv::detail::SphericalWarperGpu::buildMaps(Size src_size, InputArray K, InputArray R, cuda::GpuMat & xmap, cuda::GpuMat & ymap)
{
#ifndef HAVE_CUDA
    CV_UNUSED(src_size);
    CV_UNUSED(K);
    CV_UNUSED(R);
    CV_UNUSED(xmap);
    CV_UNUSED(ymap);
    throw_no_cuda();
#else
    projector_.setCameraParams(K, R);

    Point dst_tl, dst_br;
    detectResultRoi(src_size, dst_tl, dst_br);

    ::buildWarpSphericalMaps(src_size, Rect(dst_tl, Point(dst_br.x + 1, dst_br.y + 1)),
                             K, R, projector_.scale, xmap, ymap);

    return Rect(dst_tl, dst_br);
#endif
}

Point cv::detail::SphericalWarperGpu::warp(const cuda::GpuMat & src, InputArray K, InputArray R,
                                           int interp_mode, int border_mode,
                                           cuda::GpuMat & dst)
{
#ifndef HAVE_OPENCV_CUDAWARPING
    CV_UNUSED(src);
    CV_UNUSED(K);
    CV_UNUSED(R);
    CV_UNUSED(interp_mode);
    CV_UNUSED(border_mode);
    CV_UNUSED(dst);
    throw_no_cuda();
#else
    Rect dst_roi = buildMaps(src.size(), K, R, d_xmap_, d_ymap_);
    dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
    cuda::remap(src, dst, d_xmap_, d_ymap_, interp_mode, border_mode);
    return dst_roi.tl();
#endif
}

Rect cv::detail::CylindricalWarperGpu::buildMaps(Size src_size, InputArray K, InputArray R,
                                                 cuda::GpuMat & xmap, cuda::GpuMat & ymap)
{
#ifndef HAVE_CUDA
    CV_UNUSED(src_size);
    CV_UNUSED(K);
    CV_UNUSED(R);
    CV_UNUSED(xmap);
    CV_UNUSED(ymap);
    throw_no_cuda();
#else
    projector_.setCameraParams(K, R);

    Point dst_tl, dst_br;
    detectResultRoi(src_size, dst_tl, dst_br);

    ::buildWarpCylindricalMaps(src_size, Rect(dst_tl, Point(dst_br.x + 1, dst_br.y + 1)),
                               K, R, projector_.scale, xmap, ymap);

    return Rect(dst_tl, dst_br);
#endif
}

Point cv::detail::CylindricalWarperGpu::warp(const cuda::GpuMat & src, InputArray K, InputArray R,
                                             int interp_mode, int border_mode,
                                             cuda::GpuMat & dst)
{
#ifndef HAVE_OPENCV_CUDAWARPING
    CV_UNUSED(src);
    CV_UNUSED(K);
    CV_UNUSED(R);
    CV_UNUSED(interp_mode);
    CV_UNUSED(border_mode);
    CV_UNUSED(dst);
    throw_no_cuda();
#else
    Rect dst_roi = buildMaps(src.size(), K, R, d_xmap_, d_ymap_);
    dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
    cuda::remap(src, dst, d_xmap_, d_ymap_, interp_mode, border_mode);
    return dst_roi.tl();
#endif
}