feat: 切换后端至PaddleOCR-NCNN,切换工程为CMake
1.项目后端整体迁移至PaddleOCR-NCNN算法,已通过基本的兼容性测试 2.工程改为使用CMake组织,后续为了更好地兼容第三方库,不再提供QMake工程 3.重整权利声明文件,重整代码工程,确保最小化侵权风险 Log: 切换后端至PaddleOCR-NCNN,切换工程为CMake Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
This commit is contained in:
65
3rdparty/opencv-4.5.4/modules/videoio/src/backend.hpp
vendored
Normal file
65
3rdparty/opencv-4.5.4/modules/videoio/src/backend.hpp
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#ifndef BACKEND_HPP_DEFINED
#define BACKEND_HPP_DEFINED

#include "cap_interface.hpp"
#include "opencv2/videoio/registry.hpp"

namespace cv {

// TODO: move to public interface
// TODO: allow runtime backend registration

// Abstract interface of a single Video I/O backend: a factory for capture
// (camera or file/URL) and writer instances implemented by that backend.
class IBackend
{
public:
    virtual ~IBackend() {}
    // Open a camera device by index; 'params' carries open-time properties.
    virtual Ptr<IVideoCapture> createCapture(int camera, const VideoCaptureParameters& params) const = 0;
    // Open a media file or stream URL.
    virtual Ptr<IVideoCapture> createCapture(const std::string &filename, const VideoCaptureParameters& params) const = 0;
    // Create a writer for the given file, FOURCC codec, frame rate and size.
    virtual Ptr<IVideoWriter> createWriter(const std::string& filename, int fourcc, double fps, const cv::Size& sz,
                                           const VideoWriterParameters& params) const = 0;
};

// Provides an IBackend instance; implementations may construct it lazily
// (e.g. plugin-based factories load the shared library on first use).
class IBackendFactory
{
public:
    virtual ~IBackendFactory() {}
    virtual Ptr<IBackend> getBackend() const = 0;
    // true for statically linked (built-in) backends, false for dynamically
    // loaded plugin backends.
    virtual bool isBuiltIn() const = 0;
};

//=============================================================================

// Function-pointer types wrapping plain backend entry points (with and
// without open-time parameter support) into the factory interface.
typedef Ptr<IVideoCapture> (*FN_createCaptureFile)(const std::string & filename);
typedef Ptr<IVideoCapture> (*FN_createCaptureCamera)(int camera);
typedef Ptr<IVideoCapture> (*FN_createCaptureFileWithParams)(const std::string & filename, const VideoCaptureParameters& params);
typedef Ptr<IVideoCapture> (*FN_createCaptureCameraWithParams)(int camera, const VideoCaptureParameters& params);
typedef Ptr<IVideoWriter> (*FN_createWriter)(const std::string& filename, int fourcc, double fps, const Size& sz,
                                             const VideoWriterParameters& params);
Ptr<IBackendFactory> createBackendFactory(FN_createCaptureFile createCaptureFile,
                                          FN_createCaptureCamera createCaptureCamera,
                                          FN_createWriter createWriter);
Ptr<IBackendFactory> createBackendFactory(FN_createCaptureFileWithParams createCaptureFile,
                                          FN_createCaptureCameraWithParams createCaptureCamera,
                                          FN_createWriter createWriter);

// Factory whose backend is loaded on demand from a dynamic plugin library
// matching 'baseName' (e.g. "ffmpeg" -> libopencv_videoio_ffmpeg*.so).
Ptr<IBackendFactory> createPluginBackendFactory(VideoCaptureAPIs id, const char* baseName);

// Apply open-time parameters one-by-one via setProperty() for plugins that
// do not support the "open with params" API level.
void applyParametersFallback(const Ptr<IVideoCapture>& cap, const VideoCaptureParameters& params);

// Query plugin description string and ABI/API levels; the factory must be a
// plugin-based one, otherwise these raise an error.
std::string getCapturePluginVersion(
    const Ptr<IBackendFactory>& backend_factory,
    CV_OUT int& version_ABI,
    CV_OUT int& version_API
);
std::string getWriterPluginVersion(
    const Ptr<IBackendFactory>& backend_factory,
    CV_OUT int& version_ABI,
    CV_OUT int& version_API
);

} // namespace cv::

#endif // BACKEND_HPP_DEFINED
782
3rdparty/opencv-4.5.4/modules/videoio/src/backend_plugin.cpp
vendored
Normal file
782
3rdparty/opencv-4.5.4/modules/videoio/src/backend_plugin.cpp
vendored
Normal file
@@ -0,0 +1,782 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
#include "backend.hpp"
|
||||
#include "plugin_api.hpp"
|
||||
#include "plugin_capture_api.hpp"
|
||||
#include "plugin_writer_api.hpp"
|
||||
|
||||
#include "opencv2/core/utils/configuration.private.hpp"
|
||||
#include "opencv2/core/utils/logger.hpp"
|
||||
|
||||
#include "opencv2/core/private.hpp"
|
||||
#include "videoio_registry.hpp"
|
||||
|
||||
//==================================================================================================
|
||||
// Dynamic backend implementation
|
||||
|
||||
#include "opencv2/core/utils/plugin_loader.private.hpp"
|
||||
|
||||
|
||||
#include "backend_plugin_legacy.impl.hpp"
|
||||
|
||||
|
||||
namespace cv { namespace impl {

#if OPENCV_HAVE_FILESYSTEM_SUPPORT && defined(ENABLE_PLUGINS)

using namespace cv::plugin::impl;  // plugin_loader.hpp

// Process-wide mutex guarding lazy plugin loading (function-local static:
// thread-safe initialization guaranteed since C++11).
static Mutex& getInitializationMutex()
{
    static Mutex initializationMutex;
    return initializationMutex;
}
|
||||
|
||||
|
||||
// IBackend implementation backed by a dynamically loaded plugin library.
// On construction it probes, in order: the v1 capture API, the v1 writer
// API and — only if neither was found — the legacy (preview) combined API.
// Any of the three API pointers may stay NULL if negotiation fails.
class PluginBackend: public IBackend
{
protected:

    // Resolve the capture plugin entry point and negotiate the highest API
    // level both sides support (walks candidate API versions downward until
    // the plugin accepts one). Leaves capture_api_ == NULL on failure.
    void initCaptureAPI()
    {
        const char* init_name = "opencv_videoio_capture_plugin_init_v1";
        FN_opencv_videoio_capture_plugin_init_t fn_init = reinterpret_cast<FN_opencv_videoio_capture_plugin_init_t>(lib_->getSymbol(init_name));
        if (fn_init)
        {
            CV_LOG_INFO(NULL, "Found entry: '" << init_name << "'");
            for (int supported_api_version = CAPTURE_API_VERSION; supported_api_version >= 0; supported_api_version--)
            {
                capture_api_ = fn_init(CAPTURE_ABI_VERSION, supported_api_version, NULL);
                if (capture_api_)
                    break;
            }
            if (!capture_api_)
            {
                CV_LOG_INFO(NULL, "Video I/O: plugin is incompatible (can't be initialized): " << lib_->getName());
                return;
            }
            // FFMPEG plugin is exempt from the OpenCV minor-version check
            if (!checkCompatibility(
                    capture_api_->api_header, CAPTURE_ABI_VERSION, CAPTURE_API_VERSION,
                    capture_api_->v0.id != CAP_FFMPEG))
            {
                capture_api_ = NULL;
                return;
            }
            CV_LOG_INFO(NULL, "Video I/O: plugin is ready to use '" << capture_api_->api_header.api_description << "'");
        }
        else
        {
            CV_LOG_INFO(NULL, "Video I/O: missing plugin init function: '" << init_name << "', file: " << lib_->getName());
        }
    }

    // Same negotiation as initCaptureAPI(), for the writer plugin API.
    void initWriterAPI()
    {
        const char* init_name = "opencv_videoio_writer_plugin_init_v1";
        FN_opencv_videoio_writer_plugin_init_t fn_init = reinterpret_cast<FN_opencv_videoio_writer_plugin_init_t>(lib_->getSymbol(init_name));
        if (fn_init)
        {
            CV_LOG_INFO(NULL, "Found entry: '" << init_name << "'");
            for (int supported_api_version = WRITER_API_VERSION; supported_api_version >= 0; supported_api_version--)
            {
                writer_api_ = fn_init(WRITER_ABI_VERSION, supported_api_version, NULL);
                if (writer_api_)
                    break;
            }
            if (!writer_api_)
            {
                CV_LOG_INFO(NULL, "Video I/O: plugin is incompatible (can't be initialized): " << lib_->getName());
                return;
            }
            if (!checkCompatibility(
                    writer_api_->api_header, WRITER_ABI_VERSION, WRITER_API_VERSION,
                    writer_api_->v0.id != CAP_FFMPEG))
            {
                writer_api_ = NULL;
                return;
            }
            CV_LOG_INFO(NULL, "Video I/O: plugin is ready to use '" << writer_api_->api_header.api_description << "'");
        }
        else
        {
            CV_LOG_INFO(NULL, "Video I/O: missing plugin init function: '" << init_name << "', file: " << lib_->getName());
        }
    }

    // Fallback: probe the deprecated combined (capture+writer) plugin API,
    // used only when neither v1 entry point is exported.
    void initPluginLegacyAPI()
    {
        const char* init_name = "opencv_videoio_plugin_init_v0";
        FN_opencv_videoio_plugin_init_t fn_init = reinterpret_cast<FN_opencv_videoio_plugin_init_t>(lib_->getSymbol(init_name));
        if (fn_init)
        {
            CV_LOG_INFO(NULL, "Found entry: '" << init_name << "'");
            for (int supported_api_version = API_VERSION; supported_api_version >= 0; supported_api_version--)
            {
                plugin_api_ = fn_init(ABI_VERSION, supported_api_version, NULL);
                if (plugin_api_)
                    break;
            }
            if (!plugin_api_)
            {
                CV_LOG_INFO(NULL, "Video I/O: plugin is incompatible (can't be initialized): " << lib_->getName());
                return;
            }
            if (!checkCompatibility(
                    plugin_api_->api_header, ABI_VERSION, API_VERSION,
                    plugin_api_->v0.captureAPI != CAP_FFMPEG))
            {
                plugin_api_ = NULL;
                return;
            }
            CV_LOG_INFO(NULL, "Video I/O: plugin is ready to use '" << plugin_api_->api_header.api_description << "'");
        }
        else
        {
            CV_LOG_INFO(NULL, "Video I/O: plugin is incompatible, missing init function: '" << init_name << "', file: " << lib_->getName());
        }
    }

    // Validate a plugin's API header against this build of OpenCV:
    // - major version must match; minor must match too unless
    //   checkMinorOpenCVVersion is false (FFMPEG exemption),
    // - plugin ABI level must equal ours exactly,
    // - API level mismatch is tolerated (logged; lower plugin API may mean
    //   reduced functionality).
    bool checkCompatibility(const OpenCV_API_Header& api_header, unsigned int abi_version, unsigned int api_version, bool checkMinorOpenCVVersion)
    {
        if (api_header.opencv_version_major != CV_VERSION_MAJOR)
        {
            CV_LOG_ERROR(NULL, "Video I/O: wrong OpenCV major version used by plugin '" << api_header.api_description << "': " <<
                cv::format("%d.%d, OpenCV version is '" CV_VERSION "'", api_header.opencv_version_major, api_header.opencv_version_minor))
            return false;
        }
        if (!checkMinorOpenCVVersion)
        {
            // no checks for OpenCV minor version
        }
        else if (api_header.opencv_version_minor != CV_VERSION_MINOR)
        {
            CV_LOG_ERROR(NULL, "Video I/O: wrong OpenCV minor version used by plugin '" << api_header.api_description << "': " <<
                cv::format("%d.%d, OpenCV version is '" CV_VERSION "'", api_header.opencv_version_major, api_header.opencv_version_minor))
            return false;
        }
        CV_LOG_INFO(NULL, "Video I/O: initialized '" << api_header.api_description << "': built with "
            << cv::format("OpenCV %d.%d (ABI/API = %d/%d)",
                api_header.opencv_version_major, api_header.opencv_version_minor,
                api_header.min_api_version, api_header.api_version)
            << ", current OpenCV version is '" CV_VERSION "' (ABI/API = " << abi_version << "/" << api_version << ")"
        );
        if (api_header.min_api_version != abi_version)  // future: range can be here
        {
            // actually this should never happen due to checks in plugin's init() function
            CV_LOG_ERROR(NULL, "Video I/O: plugin is not supported due to incompatible ABI = " << api_header.min_api_version);
            return false;
        }
        if (api_header.api_version != api_version)
        {
            CV_LOG_INFO(NULL, "Video I/O: NOTE: plugin is supported, but there is API version mismath: "
                << cv::format("plugin API level (%d) != OpenCV API level (%d)", api_header.api_version, api_version));
            if (api_header.api_version < api_version)
            {
                CV_LOG_INFO(NULL, "Video I/O: NOTE: some functionality may be unavailable due to lack of support by plugin implementation");
            }
        }
        return true;
    }

public:
    Ptr<cv::plugin::impl::DynamicLib> lib_;            // keeps the shared library loaded
    const OpenCV_VideoIO_Capture_Plugin_API* capture_api_;
    const OpenCV_VideoIO_Writer_Plugin_API* writer_api_;
    const OpenCV_VideoIO_Plugin_API_preview* plugin_api_;  //!< deprecated

    PluginBackend(const Ptr<cv::plugin::impl::DynamicLib>& lib)
        : lib_(lib)
        , capture_api_(NULL), writer_api_(NULL)
        , plugin_api_(NULL)
    {
        initCaptureAPI();
        initWriterAPI();
        // legacy API is probed only if neither modern API is available
        if (capture_api_ == NULL && writer_api_ == NULL)
        {
            initPluginLegacyAPI();
        }
    }

    Ptr<IVideoCapture> createCapture(int camera) const;
    Ptr<IVideoCapture> createCapture(int camera, const VideoCaptureParameters& params) const CV_OVERRIDE;
    Ptr<IVideoCapture> createCapture(const std::string &filename) const;
    Ptr<IVideoCapture> createCapture(const std::string &filename, const VideoCaptureParameters& params) const CV_OVERRIDE;
    Ptr<IVideoWriter> createWriter(const std::string& filename, int fourcc, double fps,
                                   const cv::Size& sz, const VideoWriterParameters& params) const CV_OVERRIDE;

    // Report description/ABI/API of whichever capture API was negotiated
    // (modern or legacy). Asserts that at least one is available.
    std::string getCapturePluginVersion(CV_OUT int& version_ABI, CV_OUT int& version_API)
    {
        CV_Assert(capture_api_ || plugin_api_);
        const OpenCV_API_Header& api_header = capture_api_ ? capture_api_->api_header : plugin_api_->api_header;
        version_ABI = api_header.min_api_version;
        version_API = api_header.api_version;
        return api_header.api_description;
    }

    // Same as above for the writer API.
    std::string getWriterPluginVersion(CV_OUT int& version_ABI, CV_OUT int& version_API)
    {
        CV_Assert(writer_api_ || plugin_api_);
        const OpenCV_API_Header& api_header = writer_api_ ? writer_api_->api_header : plugin_api_->api_header;
        version_ABI = api_header.min_api_version;
        version_API = api_header.api_version;
        return api_header.api_description;
    }
};
|
||||
|
||||
// IBackendFactory that locates and loads its backend from a plugin library
// on first use. Loading happens at most once (success or not) under the
// global initialization mutex; 'backend' stays NULL when loading fails.
class PluginBackendFactory : public IBackendFactory
{
public:
    VideoCaptureAPIs id_;        // expected backend ID (validated against plugin)
    const char* baseName_;       // plugin base name, e.g. "FFMPEG"
    Ptr<PluginBackend> backend;  // set by loadPlugin() on success
    bool initialized;            // one-shot flag guarded by getInitializationMutex()
public:
    PluginBackendFactory(VideoCaptureAPIs id, const char* baseName) :
        id_(id), baseName_(baseName),
        initialized(false)
    {
        // nothing, plugins are loaded on demand
    }

    Ptr<IBackend> getBackend() const CV_OVERRIDE
    {
        initBackend();
        return backend.staticCast<IBackend>();  // may be NULL if loading failed
    }

    bool isBuiltIn() const CV_OVERRIDE { return false; }

    // Forward version query to the loaded backend; raises StsNotImplemented
    // when the plugin could not be loaded.
    std::string getCapturePluginVersion(
            CV_OUT int& version_ABI,
            CV_OUT int& version_API) const
    {
        initBackend();
        if (!backend)
            CV_Error_(Error::StsNotImplemented, ("Backend '%s' is not available", baseName_));
        return backend->getCapturePluginVersion(version_ABI, version_API);
    }

    std::string getWriterPluginVersion(
            CV_OUT int& version_ABI,
            CV_OUT int& version_API) const
    {
        initBackend();
        if (!backend)
            CV_Error_(Error::StsNotImplemented, ("Backend '%s' is not available", baseName_));
        return backend->getWriterPluginVersion(version_ABI, version_API);
    }

protected:
    // Fast-path check; const_cast is needed because lazy loading mutates
    // state from logically-const accessors.
    inline void initBackend() const
    {
        if (!initialized)
        {
            const_cast<PluginBackendFactory*>(this)->initBackend_();
        }
    }
    void initBackend_()
    {
        AutoLock lock(getInitializationMutex());
        try {
            if (!initialized)  // re-check under the lock
                loadPlugin();
        }
        catch (...)
        {
            // best-effort: a failed plugin load must not break backend enumeration
            CV_LOG_INFO(NULL, "Video I/O: exception during plugin loading: " << baseName_ << ". SKIP");
        }
        initialized = true;  // never retried, even on failure
    }
    void loadPlugin();
};
|
||||
|
||||
// Build the ordered list of candidate plugin library paths for 'baseName'.
// Search locations come from OPENCV_VIDEOIO_PLUGIN_PATH (if set) or the
// directory of the OpenCV binary; the file-name pattern can be overridden
// via OPENCV_VIDEOIO_PLUGIN_<NAME>. Platform-specific: exact names on
// Windows (plus FFMPEG env-var and debug-postfix compatibility entries),
// glob expansion elsewhere.
static
std::vector<FileSystemPath_t> getPluginCandidates(const std::string& baseName)
{
    using namespace cv::utils;
    using namespace cv::utils::fs;
    const std::string baseName_l = toLowerCase(baseName);
    const std::string baseName_u = toUpperCase(baseName);
    const FileSystemPath_t baseName_l_fs = toFileSystemPath(baseName_l);
    std::vector<FileSystemPath_t> paths;
    // explicit search-path override takes precedence over binary location
    const std::vector<std::string> paths_ = getConfigurationParameterPaths("OPENCV_VIDEOIO_PLUGIN_PATH", std::vector<std::string>());
    if (paths_.size() != 0)
    {
        for (size_t i = 0; i < paths_.size(); i++)
        {
            paths.push_back(toFileSystemPath(paths_[i]));
        }
    }
    else
    {
        FileSystemPath_t binaryLocation;
        if (getBinLocation(binaryLocation))
        {
            binaryLocation = getParent(binaryLocation);
#ifndef CV_VIDEOIO_PLUGIN_SUBDIRECTORY
            paths.push_back(binaryLocation);
#else
            paths.push_back(binaryLocation + toFileSystemPath("/") + toFileSystemPath(CV_VIDEOIO_PLUGIN_SUBDIRECTORY_STR));
#endif
        }
    }
    const std::string default_expr = libraryPrefix() + "opencv_videoio_" + baseName_l + "*" + librarySuffix();
    const std::string plugin_expr = getConfigurationParameterString((std::string("OPENCV_VIDEOIO_PLUGIN_") + baseName_u).c_str(), default_expr.c_str());
    std::vector<FileSystemPath_t> results;
#ifdef _WIN32
    FileSystemPath_t moduleName = toFileSystemPath(libraryPrefix() + "opencv_videoio_" + baseName_l + librarySuffix());
#ifndef WINRT
    if (baseName_u == "FFMPEG")  // backward compatibility
    {
        const wchar_t* ffmpeg_env_path = _wgetenv(L"OPENCV_FFMPEG_DLL_DIR");
        if (ffmpeg_env_path)
        {
            results.push_back(FileSystemPath_t(ffmpeg_env_path) + L"\\" + moduleName);
        }
    }
#endif
    if (plugin_expr != default_expr)
    {
        // user-provided exact name overrides the default module name
        moduleName = toFileSystemPath(plugin_expr);
        results.push_back(moduleName);
    }
    for (const FileSystemPath_t& path : paths)
    {
        results.push_back(path + L"\\" + moduleName);
    }
    results.push_back(moduleName);  // finally, rely on the OS loader search path
#if defined(_DEBUG) && defined(DEBUG_POSTFIX)
    if (baseName_u == "FFMPEG")  // backward compatibility
    {
        // also try the release (no debug postfix) FFMPEG DLL name
        // NOTE(review): replace(pos, count, ...) is called with
        // 'suf + templ.size()' as count; harmless here only because the
        // postfix is at the end of the string (count is clamped).
        const FileSystemPath_t templ = toFileSystemPath(CVAUX_STR(DEBUG_POSTFIX) ".dll");
        FileSystemPath_t nonDebugName(moduleName);
        size_t suf = nonDebugName.rfind(templ);
        if (suf != FileSystemPath_t::npos)
        {
            nonDebugName.replace(suf, suf + templ.size(), L".dll");
            results.push_back(nonDebugName);
        }
    }
#endif  // _DEBUG && DEBUG_POSTFIX
#else
    CV_LOG_INFO(NULL, "VideoIO plugin (" << baseName << "): glob is '" << plugin_expr << "', " << paths.size() << " location(s)");
    for (const std::string& path : paths)
    {
        if (path.empty())
            continue;
        std::vector<std::string> candidates;
        cv::glob(utils::fs::join(path, plugin_expr), candidates);
        // Prefer candidates with higher versions
        // TODO: implement accurate versions-based comparator
        std::sort(candidates.begin(), candidates.end(), std::greater<std::string>());
        CV_LOG_INFO(NULL, "    - " << path << ": " << candidates.size());
        copy(candidates.begin(), candidates.end(), back_inserter(results));
    }
#endif
    CV_LOG_INFO(NULL, "Found " << results.size() << " plugin(s) for " << baseName);
    return results;
}
|
||||
|
||||
// Try each candidate library in order; accept the first one that loads,
// initializes, and reports the expected backend ID on every API it exposes.
// On acceptance 'backend' is set and the search stops. A candidate that
// loads but is invalid aborts the search (return), while one that throws
// during initialization is skipped.
void PluginBackendFactory::loadPlugin()
{
    for (const FileSystemPath_t& plugin : getPluginCandidates(baseName_))
    {
        auto lib = makePtr<cv::plugin::impl::DynamicLib>(plugin);
        if (!lib->isLoaded())
            continue;
        try
        {
            Ptr<PluginBackend> pluginBackend = makePtr<PluginBackend>(lib);
            if (!pluginBackend)
                return;
            // each negotiated API must belong to the backend we asked for
            if (pluginBackend->capture_api_)
            {
                if (pluginBackend->capture_api_->v0.id != id_)
                {
                    CV_LOG_ERROR(NULL, "Video I/O: plugin '" << pluginBackend->capture_api_->api_header.api_description <<
                                       "': unexpected backend ID: " <<
                                       pluginBackend->capture_api_->v0.id << " vs " << (int)id_ << " (expected)");
                    return;
                }
            }
            if (pluginBackend->writer_api_)
            {
                if (pluginBackend->writer_api_->v0.id != id_)
                {
                    CV_LOG_ERROR(NULL, "Video I/O: plugin '" << pluginBackend->writer_api_->api_header.api_description <<
                                       "': unexpected backend ID: " <<
                                       pluginBackend->writer_api_->v0.id << " vs " << (int)id_ << " (expected)");
                    return;
                }
            }
            if (pluginBackend->plugin_api_)
            {
                if (pluginBackend->plugin_api_->v0.captureAPI != id_)
                {
                    CV_LOG_ERROR(NULL, "Video I/O: plugin '" << pluginBackend->plugin_api_->api_header.api_description <<
                                       "': unexpected backend ID: " <<
                                       pluginBackend->plugin_api_->v0.captureAPI << " vs " << (int)id_ << " (expected)");
                    return;
                }
            }
            // plugin must expose at least one usable API
            if (pluginBackend->capture_api_ == NULL && pluginBackend->writer_api_ == NULL
                && pluginBackend->plugin_api_ == NULL)
            {
                CV_LOG_ERROR(NULL, "Video I/O: no compatible plugin API for backend ID: " << (int)id_);
                return;
            }
            backend = pluginBackend;
            return;
        }
        catch (...)
        {
            CV_LOG_WARNING(NULL, "Video I/O: exception during plugin initialization: " << toPrintablePath(plugin) << ". SKIP");
        }
    }
}
|
||||
|
||||
|
||||
//==================================================================================================
|
||||
|
||||
//==================================================================================================

// IVideoCapture adapter over the modern (v1) capture plugin C API.
// Owns a CvPluginCapture handle and forwards each virtual call to the
// corresponding plugin function pointer, tolerating missing optional
// entry points.
class PluginCapture : public cv::IVideoCapture
{
    const OpenCV_VideoIO_Capture_Plugin_API* plugin_api_;
    CvPluginCapture capture_;  // opaque plugin-owned handle

public:
    // Open a capture via the plugin. Prefers the v1 "open with params"
    // entry point; falls back to the v0 open + per-property fallback
    // application of 'params'. Returns NULL on failure.
    static
    Ptr<PluginCapture> create(const OpenCV_VideoIO_Capture_Plugin_API* plugin_api,
            const std::string &filename, int camera, const VideoCaptureParameters& params)
    {
        CV_Assert(plugin_api);
        CV_Assert(plugin_api->v0.Capture_release);

        CvPluginCapture capture = NULL;

        if (plugin_api->api_header.api_version >= 1 && plugin_api->v1.Capture_open_with_params)
        {
            // parameters are marshalled as a flat (key, value) int array
            std::vector<int> vint_params = params.getIntVector();
            int* c_params = vint_params.data();
            unsigned n_params = (unsigned)(vint_params.size() / 2);

            if (CV_ERROR_OK == plugin_api->v1.Capture_open_with_params(
                    filename.empty() ? 0 : filename.c_str(), camera, c_params, n_params, &capture))
            {
                CV_Assert(capture);
                return makePtr<PluginCapture>(plugin_api, capture);
            }
        }
        else if (plugin_api->v0.Capture_open)
        {
            if (CV_ERROR_OK == plugin_api->v0.Capture_open(filename.empty() ? 0 : filename.c_str(), camera, &capture))
            {
                CV_Assert(capture);
                Ptr<PluginCapture> cap = makePtr<PluginCapture>(plugin_api, capture);
                if (cap && !params.empty())
                {
                    // v0 API cannot take open-time params; set them one-by-one
                    applyParametersFallback(cap, params);
                }
                return cap;
            }
        }

        return Ptr<PluginCapture>();
    }

    PluginCapture(const OpenCV_VideoIO_Capture_Plugin_API* plugin_api, CvPluginCapture capture)
        : plugin_api_(plugin_api), capture_(capture)
    {
        CV_Assert(plugin_api_); CV_Assert(capture_);
    }

    ~PluginCapture()
    {
        CV_DbgAssert(plugin_api_->v0.Capture_release);
        if (CV_ERROR_OK != plugin_api_->v0.Capture_release(capture_))
            CV_LOG_ERROR(NULL, "Video I/O: Can't release capture by plugin '" << plugin_api_->api_header.api_description << "'");
        capture_ = NULL;
    }
    double getProperty(int prop) const CV_OVERRIDE
    {
        double val = -1;  // -1 when unsupported or the plugin call fails
        if (plugin_api_->v0.Capture_getProperty)
            if (CV_ERROR_OK != plugin_api_->v0.Capture_getProperty(capture_, prop, &val))
                val = -1;
        return val;
    }
    bool setProperty(int prop, double val) CV_OVERRIDE
    {
        if (plugin_api_->v0.Capture_setProperty)
            if (CV_ERROR_OK == plugin_api_->v0.Capture_setProperty(capture_, prop, val))
                return true;
        return false;
    }
    bool grabFrame() CV_OVERRIDE
    {
        if (plugin_api_->v0.Capture_grab)
            if (CV_ERROR_OK == plugin_api_->v0.Capture_grab(capture_))
                return true;
        return false;
    }
    // Callback the plugin invokes with raw frame data; copies it into the
    // cv::OutputArray passed through 'userdata'.
    static CvResult CV_API_CALL retrieve_callback(int stream_idx, const unsigned char* data, int step, int width, int height, int type, void* userdata)
    {
        CV_UNUSED(stream_idx);
        cv::_OutputArray* dst = static_cast<cv::_OutputArray*>(userdata);
        if (!dst)
            return CV_ERROR_FAIL;
        cv::Mat(cv::Size(width, height), type, (void*)data, step).copyTo(*dst);
        return CV_ERROR_OK;
    }
    bool retrieveFrame(int idx, cv::OutputArray img) CV_OVERRIDE
    {
        bool res = false;
        // NOTE: 'Capture_retreive' (sic) is the name fixed by the plugin C API
        if (plugin_api_->v0.Capture_retreive)
            if (CV_ERROR_OK == plugin_api_->v0.Capture_retreive(capture_, idx, retrieve_callback, (cv::_OutputArray*)&img))
                res = true;
        return res;
    }
    bool isOpened() const CV_OVERRIDE
    {
        return capture_ != NULL;  // TODO always true
    }
    int getCaptureDomain() CV_OVERRIDE
    {
        return plugin_api_->v0.id;
    }
};
|
||||
|
||||
|
||||
//==================================================================================================
|
||||
|
||||
class PluginWriter : public cv::IVideoWriter
|
||||
{
|
||||
const OpenCV_VideoIO_Writer_Plugin_API* plugin_api_;
|
||||
CvPluginWriter writer_;
|
||||
|
||||
public:
|
||||
static
|
||||
Ptr<PluginWriter> create(const OpenCV_VideoIO_Writer_Plugin_API* plugin_api,
|
||||
const std::string& filename, int fourcc, double fps, const cv::Size& sz,
|
||||
const VideoWriterParameters& params)
|
||||
{
|
||||
CV_Assert(plugin_api);
|
||||
CV_Assert(plugin_api->v0.Writer_release);
|
||||
CV_Assert(!filename.empty());
|
||||
|
||||
CvPluginWriter writer = NULL;
|
||||
|
||||
if (plugin_api->api_header.api_version >= 1 && plugin_api->v1.Writer_open_with_params)
|
||||
{
|
||||
std::vector<int> vint_params = params.getIntVector();
|
||||
int* c_params = &vint_params[0];
|
||||
unsigned n_params = (unsigned)(vint_params.size() / 2);
|
||||
|
||||
if (CV_ERROR_OK == plugin_api->v1.Writer_open_with_params(filename.c_str(), fourcc, fps, sz.width, sz.height, c_params, n_params, &writer))
|
||||
{
|
||||
CV_Assert(writer);
|
||||
return makePtr<PluginWriter>(plugin_api, writer);
|
||||
}
|
||||
}
|
||||
else if (plugin_api->v0.Writer_open)
|
||||
{
|
||||
const bool isColor = params.get(VIDEOWRITER_PROP_IS_COLOR, true);
|
||||
const int depth = params.get(VIDEOWRITER_PROP_DEPTH, CV_8U);
|
||||
if (depth != CV_8U)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "Video I/O plugin doesn't support (due to lower API level) creation of VideoWriter with depth != CV_8U");
|
||||
return Ptr<PluginWriter>();
|
||||
}
|
||||
if (params.warnUnusedParameters())
|
||||
{
|
||||
CV_LOG_ERROR(NULL, "VIDEOIO: unsupported parameters in VideoWriter, see logger INFO channel for details");
|
||||
return Ptr<PluginWriter>();
|
||||
}
|
||||
if (CV_ERROR_OK == plugin_api->v0.Writer_open(filename.c_str(), fourcc, fps, sz.width, sz.height, isColor, &writer))
|
||||
{
|
||||
CV_Assert(writer);
|
||||
return makePtr<PluginWriter>(plugin_api, writer);
|
||||
}
|
||||
}
|
||||
|
||||
return Ptr<PluginWriter>();
|
||||
}
|
||||
|
||||
PluginWriter(const OpenCV_VideoIO_Writer_Plugin_API* plugin_api, CvPluginWriter writer)
|
||||
: plugin_api_(plugin_api), writer_(writer)
|
||||
{
|
||||
CV_Assert(plugin_api_); CV_Assert(writer_);
|
||||
}
|
||||
|
||||
~PluginWriter()
|
||||
{
|
||||
CV_DbgAssert(plugin_api_->v0.Writer_release);
|
||||
if (CV_ERROR_OK != plugin_api_->v0.Writer_release(writer_))
|
||||
CV_LOG_ERROR(NULL, "Video I/O: Can't release writer by plugin '" << plugin_api_->api_header.api_description << "'");
|
||||
writer_ = NULL;
|
||||
}
|
||||
double getProperty(int prop) const CV_OVERRIDE
|
||||
{
|
||||
double val = -1;
|
||||
if (plugin_api_->v0.Writer_getProperty)
|
||||
if (CV_ERROR_OK != plugin_api_->v0.Writer_getProperty(writer_, prop, &val))
|
||||
val = -1;
|
||||
return val;
|
||||
}
|
||||
bool setProperty(int prop, double val) CV_OVERRIDE
|
||||
{
|
||||
if (plugin_api_->v0.Writer_setProperty)
|
||||
if (CV_ERROR_OK == plugin_api_->v0.Writer_setProperty(writer_, prop, val))
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
bool isOpened() const CV_OVERRIDE
|
||||
{
|
||||
return writer_ != NULL; // TODO always true
|
||||
}
|
||||
void write(cv::InputArray arr) CV_OVERRIDE
|
||||
{
|
||||
cv::Mat img = arr.getMat();
|
||||
CV_DbgAssert(writer_);
|
||||
CV_Assert(plugin_api_->v0.Writer_write);
|
||||
if (CV_ERROR_OK != plugin_api_->v0.Writer_write(writer_, img.data, (int)img.step[0], img.cols, img.rows, img.channels()))
|
||||
{
|
||||
CV_LOG_DEBUG(NULL, "Video I/O: Can't write frame by plugin '" << plugin_api_->api_header.api_description << "'");
|
||||
}
|
||||
// TODO return bool result?
|
||||
}
|
||||
int getCaptureDomain() const CV_OVERRIDE
|
||||
{
|
||||
return plugin_api_->v0.id;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
// Open a camera through the plugin: modern capture API first, then the
// legacy API with per-property parameter fallback. Exceptions are logged
// and rethrown to the caller.
Ptr<IVideoCapture> PluginBackend::createCapture(int camera, const VideoCaptureParameters& params) const
{
    try
    {
        if (capture_api_)
            return PluginCapture::create(capture_api_, std::string(), camera, params); //.staticCast<IVideoCapture>();
        if (plugin_api_)
        {
            Ptr<IVideoCapture> cap = legacy::PluginCapture::create(plugin_api_, std::string(), camera); //.staticCast<IVideoCapture>();
            if (cap && !params.empty())
            {
                applyParametersFallback(cap, params);
            }
            return cap;
        }
    }
    catch (...)
    {
        CV_LOG_DEBUG(NULL, "Video I/O: can't create camera capture: " << camera);
        throw;
    }
    return Ptr<IVideoCapture>();  // no usable capture API in this plugin
}
|
||||
|
||||
// Open a file/URL through the plugin: modern capture API first, then the
// legacy API with per-property parameter fallback. Exceptions are logged
// and rethrown to the caller.
Ptr<IVideoCapture> PluginBackend::createCapture(const std::string &filename, const VideoCaptureParameters& params) const
{
    try
    {
        if (capture_api_)
            return PluginCapture::create(capture_api_, filename, 0, params); //.staticCast<IVideoCapture>();
        if (plugin_api_)
        {
            Ptr<IVideoCapture> cap = legacy::PluginCapture::create(plugin_api_, filename, 0); //.staticCast<IVideoCapture>();
            if (cap && !params.empty())
            {
                applyParametersFallback(cap, params);
            }
            return cap;
        }
    }
    catch (...)
    {
        CV_LOG_DEBUG(NULL, "Video I/O: can't open file capture: " << filename);
        throw;
    }
    return Ptr<IVideoCapture>();  // no usable capture API in this plugin
}
|
||||
|
||||
// Create a writer through the plugin: modern writer API first, then the
// legacy API. NOTE: unlike createCapture(), exceptions are swallowed here
// and NULL is returned instead of rethrowing.
Ptr<IVideoWriter> PluginBackend::createWriter(const std::string& filename, int fourcc, double fps,
                                              const cv::Size& sz, const VideoWriterParameters& params) const
{
    try
    {
        if (writer_api_)
            return PluginWriter::create(writer_api_, filename, fourcc, fps, sz, params); //.staticCast<IVideoWriter>();
        if (plugin_api_)
            return legacy::PluginWriter::create(plugin_api_, filename, fourcc, fps, sz, params); //.staticCast<IVideoWriter>();
    }
    catch (...)
    {
        CV_LOG_DEBUG(NULL, "Video I/O: can't open writer: " << filename);
    }
    return Ptr<IVideoWriter>();
}
|
||||
|
||||
#endif // OPENCV_HAVE_FILESYSTEM_SUPPORT && defined(ENABLE_PLUGINS)
|
||||
|
||||
} // namespace
|
||||
|
||||
// Public factory entry point: returns a lazy plugin-backed factory, or a
// NULL factory in builds without filesystem/plugin support.
Ptr<IBackendFactory> createPluginBackendFactory(VideoCaptureAPIs id, const char* baseName)
{
#if OPENCV_HAVE_FILESYSTEM_SUPPORT && defined(ENABLE_PLUGINS)
    return makePtr<impl::PluginBackendFactory>(id, baseName); //.staticCast<IBackendFactory>();
#else
    CV_UNUSED(id);
    CV_UNUSED(baseName);
    return Ptr<IBackendFactory>();
#endif
}
|
||||
|
||||
|
||||
// Resolve the capture plugin version for a plugin-based factory.
// Asserts that the factory is a PluginBackendFactory; raises StsBadFunc in
// builds without plugin support.
std::string getCapturePluginVersion(
    const Ptr<IBackendFactory>& backend_factory,
    CV_OUT int& version_ABI,
    CV_OUT int& version_API
)
{
#if OPENCV_HAVE_FILESYSTEM_SUPPORT && defined(ENABLE_PLUGINS)
    using namespace impl;
    CV_Assert(backend_factory);
    PluginBackendFactory* plugin_backend_factory = dynamic_cast<PluginBackendFactory*>(backend_factory.get());
    CV_Assert(plugin_backend_factory);
    return plugin_backend_factory->getCapturePluginVersion(version_ABI, version_API);
#else
    CV_UNUSED(backend_factory);
    CV_UNUSED(version_ABI);
    CV_UNUSED(version_API);
    CV_Error(Error::StsBadFunc, "Plugins are not available in this build");
#endif
}
|
||||
|
||||
// Resolve the writer plugin version for a plugin-based factory.
// Asserts that the factory is a PluginBackendFactory; raises StsBadFunc in
// builds without plugin support.
std::string getWriterPluginVersion(
    const Ptr<IBackendFactory>& backend_factory,
    CV_OUT int& version_ABI,
    CV_OUT int& version_API
)
{
#if OPENCV_HAVE_FILESYSTEM_SUPPORT && defined(ENABLE_PLUGINS)
    using namespace impl;
    CV_Assert(backend_factory);
    PluginBackendFactory* plugin_backend_factory = dynamic_cast<PluginBackendFactory*>(backend_factory.get());
    CV_Assert(plugin_backend_factory);
    return plugin_backend_factory->getWriterPluginVersion(version_ABI, version_API);
#else
    CV_UNUSED(backend_factory);
    CV_UNUSED(version_ABI);
    CV_UNUSED(version_API);
    CV_Error(Error::StsBadFunc, "Plugins are not available in this build");
#endif
}
|
||||
|
||||
} // namespace
|
||||
199
3rdparty/opencv-4.5.4/modules/videoio/src/backend_plugin_legacy.impl.hpp
vendored
Normal file
199
3rdparty/opencv-4.5.4/modules/videoio/src/backend_plugin_legacy.impl.hpp
vendored
Normal file
@@ -0,0 +1,199 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
|
||||
//
|
||||
// Not a standalone header.
|
||||
//
|
||||
|
||||
namespace cv { namespace impl { namespace legacy {
|
||||
|
||||
//==================================================================================================
|
||||
|
||||
// Adapter that exposes a plugin-provided capture through cv::IVideoCapture.
// Every operation is forwarded through the plugin's v0 function table; optional
// entry points are NULL-checked before each call. The instance owns the opaque
// plugin handle and releases it in the destructor (RAII).
class PluginCapture : public cv::IVideoCapture
{
    const OpenCV_VideoIO_Plugin_API_preview* plugin_api_;  // plugin function table (not owned)
    CvPluginCapture capture_;                              // opaque plugin handle (owned)

public:
    // Opens a capture via the plugin. An empty filename selects camera mode
    // (a NULL filename pointer is passed to the plugin). Returns an empty Ptr
    // when the plugin lacks Capture_open or the open call fails.
    static
    Ptr<PluginCapture> create(const OpenCV_VideoIO_Plugin_API_preview* plugin_api,
            const std::string &filename, int camera)
    {
        CV_Assert(plugin_api);
        CvPluginCapture capture = NULL;
        if (plugin_api->v0.Capture_open)
        {
            CV_Assert(plugin_api->v0.Capture_release);  // must be able to free what we open
            if (CV_ERROR_OK == plugin_api->v0.Capture_open(filename.empty() ? 0 : filename.c_str(), camera, &capture))
            {
                CV_Assert(capture);
                return makePtr<PluginCapture>(plugin_api, capture);
            }
        }
        return Ptr<PluginCapture>();
    }

    PluginCapture(const OpenCV_VideoIO_Plugin_API_preview* plugin_api, CvPluginCapture capture)
        : plugin_api_(plugin_api), capture_(capture)
    {
        CV_Assert(plugin_api_); CV_Assert(capture_);
    }

    ~PluginCapture()
    {
        CV_DbgAssert(plugin_api_->v0.Capture_release);
        if (CV_ERROR_OK != plugin_api_->v0.Capture_release(capture_))
            CV_LOG_ERROR(NULL, "Video I/O: Can't release capture by plugin '" << plugin_api_->api_header.api_description << "'");
        capture_ = NULL;
    }
    // Returns -1 when the property is unsupported or the plugin call fails.
    double getProperty(int prop) const CV_OVERRIDE
    {
        double val = -1;
        if (plugin_api_->v0.Capture_getProperty)
            if (CV_ERROR_OK != plugin_api_->v0.Capture_getProperty(capture_, prop, &val))
                val = -1;
        return val;
    }
    bool setProperty(int prop, double val) CV_OVERRIDE
    {
        if (plugin_api_->v0.Capture_setProperty)
            if (CV_ERROR_OK == plugin_api_->v0.Capture_setProperty(capture_, prop, val))
                return true;
        return false;
    }
    bool grabFrame() CV_OVERRIDE
    {
        if (plugin_api_->v0.Capture_grab)
            if (CV_ERROR_OK == plugin_api_->v0.Capture_grab(capture_))
                return true;
        return false;
    }
    // Plugin-side callback: copies the plugin's frame buffer (8-bit, 'cn'
    // channels, row stride 'step') into the caller's OutputArray passed via
    // 'userdata'.
    static CvResult CV_API_CALL retrieve_callback(int stream_idx, const unsigned char* data, int step, int width, int height, int cn, void* userdata)
    {
        CV_UNUSED(stream_idx);
        cv::_OutputArray* dst = static_cast<cv::_OutputArray*>(userdata);
        if (!dst)
            return CV_ERROR_FAIL;
        cv::Mat(cv::Size(width, height), CV_MAKETYPE(CV_8U, cn), (void*)data, step).copyTo(*dst);
        return CV_ERROR_OK;
    }
    bool retrieveFrame(int idx, cv::OutputArray img) CV_OVERRIDE
    {
        bool res = false;
        // NOTE: 'Capture_retreive' is the field name as declared in the plugin
        // API header (historical spelling) — do not "fix" it here.
        if (plugin_api_->v0.Capture_retreive)
            if (CV_ERROR_OK == plugin_api_->v0.Capture_retreive(capture_, idx, retrieve_callback, (cv::_OutputArray*)&img))
                res = true;
        return res;
    }
    bool isOpened() const CV_OVERRIDE
    {
        return capture_ != NULL; // TODO always true
    }
    int getCaptureDomain() CV_OVERRIDE
    {
        return plugin_api_->v0.captureAPI;
    }
};
|
||||
|
||||
|
||||
//==================================================================================================
|
||||
|
||||
class PluginWriter : public cv::IVideoWriter
|
||||
{
|
||||
const OpenCV_VideoIO_Plugin_API_preview* plugin_api_;
|
||||
CvPluginWriter writer_;
|
||||
|
||||
public:
|
||||
static
|
||||
Ptr<PluginWriter> create(const OpenCV_VideoIO_Plugin_API_preview* plugin_api,
|
||||
const std::string& filename, int fourcc, double fps, const cv::Size& sz,
|
||||
const VideoWriterParameters& params)
|
||||
{
|
||||
CV_Assert(plugin_api);
|
||||
CvPluginWriter writer = NULL;
|
||||
if (plugin_api->api_header.api_version >= 1 && plugin_api->v1.Writer_open_with_params)
|
||||
{
|
||||
CV_Assert(plugin_api->v0.Writer_release);
|
||||
CV_Assert(!filename.empty());
|
||||
std::vector<int> vint_params = params.getIntVector();
|
||||
int* c_params = &vint_params[0];
|
||||
unsigned n_params = (unsigned)(vint_params.size() / 2);
|
||||
|
||||
if (CV_ERROR_OK == plugin_api->v1.Writer_open_with_params(filename.c_str(), fourcc, fps, sz.width, sz.height, c_params, n_params, &writer))
|
||||
{
|
||||
CV_Assert(writer);
|
||||
return makePtr<PluginWriter>(plugin_api, writer);
|
||||
}
|
||||
}
|
||||
else if (plugin_api->v0.Writer_open)
|
||||
{
|
||||
CV_Assert(plugin_api->v0.Writer_release);
|
||||
CV_Assert(!filename.empty());
|
||||
const bool isColor = params.get(VIDEOWRITER_PROP_IS_COLOR, true);
|
||||
const int depth = params.get(VIDEOWRITER_PROP_DEPTH, CV_8U);
|
||||
if (depth != CV_8U)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "Video I/O plugin doesn't support (due to lower API level) creation of VideoWriter with depth != CV_8U");
|
||||
return Ptr<PluginWriter>();
|
||||
}
|
||||
if (CV_ERROR_OK == plugin_api->v0.Writer_open(filename.c_str(), fourcc, fps, sz.width, sz.height, isColor, &writer))
|
||||
{
|
||||
CV_Assert(writer);
|
||||
return makePtr<PluginWriter>(plugin_api, writer);
|
||||
}
|
||||
}
|
||||
return Ptr<PluginWriter>();
|
||||
}
|
||||
|
||||
PluginWriter(const OpenCV_VideoIO_Plugin_API_preview* plugin_api, CvPluginWriter writer)
|
||||
: plugin_api_(plugin_api), writer_(writer)
|
||||
{
|
||||
CV_Assert(plugin_api_); CV_Assert(writer_);
|
||||
}
|
||||
|
||||
~PluginWriter()
|
||||
{
|
||||
CV_DbgAssert(plugin_api_->v0.Writer_release);
|
||||
if (CV_ERROR_OK != plugin_api_->v0.Writer_release(writer_))
|
||||
CV_LOG_ERROR(NULL, "Video I/O: Can't release writer by plugin '" << plugin_api_->api_header.api_description << "'");
|
||||
writer_ = NULL;
|
||||
}
|
||||
double getProperty(int prop) const CV_OVERRIDE
|
||||
{
|
||||
double val = -1;
|
||||
if (plugin_api_->v0.Writer_getProperty)
|
||||
if (CV_ERROR_OK != plugin_api_->v0.Writer_getProperty(writer_, prop, &val))
|
||||
val = -1;
|
||||
return val;
|
||||
}
|
||||
bool setProperty(int prop, double val) CV_OVERRIDE
|
||||
{
|
||||
if (plugin_api_->v0.Writer_setProperty)
|
||||
if (CV_ERROR_OK == plugin_api_->v0.Writer_setProperty(writer_, prop, val))
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
bool isOpened() const CV_OVERRIDE
|
||||
{
|
||||
return writer_ != NULL; // TODO always true
|
||||
}
|
||||
void write(cv::InputArray arr) CV_OVERRIDE
|
||||
{
|
||||
cv::Mat img = arr.getMat();
|
||||
CV_DbgAssert(writer_);
|
||||
CV_Assert(plugin_api_->v0.Writer_write);
|
||||
if (CV_ERROR_OK != plugin_api_->v0.Writer_write(writer_, img.data, (int)img.step[0], img.cols, img.rows, img.channels()))
|
||||
{
|
||||
CV_LOG_DEBUG(NULL, "Video I/O: Can't write frame by plugin '" << plugin_api_->api_header.api_description << "'");
|
||||
}
|
||||
// TODO return bool result?
|
||||
}
|
||||
int getCaptureDomain() const CV_OVERRIDE
|
||||
{
|
||||
return plugin_api_->v0.captureAPI;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
}}} // namespace
|
||||
183
3rdparty/opencv-4.5.4/modules/videoio/src/backend_static.cpp
vendored
Normal file
183
3rdparty/opencv-4.5.4/modules/videoio/src/backend_static.cpp
vendored
Normal file
@@ -0,0 +1,183 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
#include "backend.hpp"
|
||||
|
||||
namespace cv {
|
||||
|
||||
|
||||
// Fallback for backends whose open() cannot accept parameters directly:
// applies each still-unused parameter one-by-one through cap->setProperty().
// Raises StsNotImplemented when a mandatory property is rejected; the HW
// acceleration hints (CAP_PROP_HW_ACCELERATION / CAP_PROP_HW_DEVICE) are
// treated as optional and only logged.
void applyParametersFallback(const Ptr<IVideoCapture>& cap, const VideoCaptureParameters& params)
{
    std::vector<int> props = params.getUnused();
    CV_LOG_INFO(NULL, "VIDEOIO: Backend '" << videoio_registry::getBackendName((VideoCaptureAPIs)cap->getCaptureDomain()) <<
                      "' implementation doesn't support parameters in .open(). Applying " <<
                      props.size() << " properties through .setProperty()");
    for (int prop : props)
    {
        double value = params.get<double>(prop, -1);
        CV_LOG_INFO(NULL, "VIDEOIO: apply parameter: [" << prop << "]=" <<
                          cv::format("%g / %lld / 0x%016llx", value, (long long)value, (long long)value));
        if (!cap->setProperty(prop, value))
        {
            if (prop != CAP_PROP_HW_ACCELERATION && prop != CAP_PROP_HW_DEVICE) { // optional parameters
                CV_Error_(cv::Error::StsNotImplemented, ("VIDEOIO: Failed to apply invalid or unsupported parameter: [%d]=%g / %lld / 0x%08llx", prop, value, (long long)value, (long long)value));
            }
        }
    }
    // NB: there is no dedicated "commit" parameters event, implementations should commit after each property automatically
}
|
||||
|
||||
// Legacy API. Modern API with parameters is below
|
||||
// Backend backed by statically linked factory functions (legacy signatures
// without parameter support). Parameters passed to createCapture() are applied
// after the fact via applyParametersFallback().
class StaticBackend: public IBackend
{
public:
    FN_createCaptureFile fn_createCaptureFile_;      // may be NULL if unsupported
    FN_createCaptureCamera fn_createCaptureCamera_;  // may be NULL if unsupported
    FN_createWriter fn_createWriter_;                // may be NULL if unsupported

    StaticBackend(FN_createCaptureFile fn_createCaptureFile, FN_createCaptureCamera fn_createCaptureCamera, FN_createWriter fn_createWriter)
        : fn_createCaptureFile_(fn_createCaptureFile), fn_createCaptureCamera_(fn_createCaptureCamera), fn_createWriter_(fn_createWriter)
    {
        // nothing
    }
    ~StaticBackend() CV_OVERRIDE {}

    Ptr<IVideoCapture> createCapture(int camera, const VideoCaptureParameters& params) const CV_OVERRIDE
    {
        if (fn_createCaptureCamera_)
        {
            Ptr<IVideoCapture> cap = fn_createCaptureCamera_(camera);
            if (cap && !params.empty())
            {
                // legacy factory can't take params — apply via setProperty()
                applyParametersFallback(cap, params);
            }
            return cap;
        }
        return Ptr<IVideoCapture>();
    }
    Ptr<IVideoCapture> createCapture(const std::string &filename, const VideoCaptureParameters& params) const CV_OVERRIDE
    {
        if (fn_createCaptureFile_)
        {
            Ptr<IVideoCapture> cap = fn_createCaptureFile_(filename);
            if (cap && !params.empty())
            {
                // legacy factory can't take params — apply via setProperty()
                applyParametersFallback(cap, params);
            }
            return cap;
        }
        return Ptr<IVideoCapture>();
    }
    Ptr<IVideoWriter> createWriter(const std::string& filename, int fourcc, double fps,
                                   const cv::Size& sz, const VideoWriterParameters& params) const CV_OVERRIDE
    {
        if (fn_createWriter_)
            return fn_createWriter_(filename, fourcc, fps, sz, params);
        return Ptr<IVideoWriter>();
    }
}; // StaticBackend
|
||||
|
||||
// Factory wrapping a StaticBackend instance; always reports itself as
// built-in (no plugin loading involved).
class StaticBackendFactory : public IBackendFactory
{
protected:
    Ptr<StaticBackend> backend;  // created eagerly in the constructor

public:
    StaticBackendFactory(FN_createCaptureFile createCaptureFile, FN_createCaptureCamera createCaptureCamera, FN_createWriter createWriter)
        : backend(makePtr<StaticBackend>(createCaptureFile, createCaptureCamera, createWriter))
    {
        // nothing
    }

    ~StaticBackendFactory() CV_OVERRIDE {}

    Ptr<IBackend> getBackend() const CV_OVERRIDE
    {
        return backend.staticCast<IBackend>();
    }

    bool isBuiltIn() const CV_OVERRIDE { return true; }
};
|
||||
|
||||
|
||||
// Builds a factory around statically linked creation functions using the
// legacy (parameter-less) signatures.
Ptr<IBackendFactory> createBackendFactory(FN_createCaptureFile createCaptureFile,
                                          FN_createCaptureCamera createCaptureCamera,
                                          FN_createWriter createWriter)
{
    return makePtr<StaticBackendFactory>(createCaptureFile, createCaptureCamera, createWriter).staticCast<IBackendFactory>();
}
|
||||
|
||||
|
||||
|
||||
// Backend backed by statically linked factory functions using the modern
// signatures that accept VideoCaptureParameters directly (no fallback needed).
class StaticBackendWithParams: public IBackend
{
public:
    FN_createCaptureFileWithParams fn_createCaptureFile_;      // may be NULL if unsupported
    FN_createCaptureCameraWithParams fn_createCaptureCamera_;  // may be NULL if unsupported
    FN_createWriter fn_createWriter_;                          // may be NULL if unsupported

    StaticBackendWithParams(FN_createCaptureFileWithParams fn_createCaptureFile, FN_createCaptureCameraWithParams fn_createCaptureCamera, FN_createWriter fn_createWriter)
        : fn_createCaptureFile_(fn_createCaptureFile), fn_createCaptureCamera_(fn_createCaptureCamera), fn_createWriter_(fn_createWriter)
    {
        // nothing
    }

    ~StaticBackendWithParams() CV_OVERRIDE {}

    Ptr<IVideoCapture> createCapture(int camera, const VideoCaptureParameters& params) const CV_OVERRIDE
    {
        if (fn_createCaptureCamera_)
            return fn_createCaptureCamera_(camera, params);
        return Ptr<IVideoCapture>();
    }
    Ptr<IVideoCapture> createCapture(const std::string &filename, const VideoCaptureParameters& params) const CV_OVERRIDE
    {
        if (fn_createCaptureFile_)
            return fn_createCaptureFile_(filename, params);
        return Ptr<IVideoCapture>();
    }
    Ptr<IVideoWriter> createWriter(const std::string& filename, int fourcc, double fps,
                                   const cv::Size& sz, const VideoWriterParameters& params) const CV_OVERRIDE
    {
        if (fn_createWriter_)
            return fn_createWriter_(filename, fourcc, fps, sz, params);
        return Ptr<IVideoWriter>();
    }
}; // StaticBackendWithParams
|
||||
|
||||
// Factory wrapping a StaticBackendWithParams instance; always reports itself
// as built-in (no plugin loading involved).
class StaticBackendWithParamsFactory : public IBackendFactory
{
protected:
    Ptr<StaticBackendWithParams> backend;  // created eagerly in the constructor

public:
    StaticBackendWithParamsFactory(FN_createCaptureFileWithParams createCaptureFile, FN_createCaptureCameraWithParams createCaptureCamera, FN_createWriter createWriter)
        : backend(makePtr<StaticBackendWithParams>(createCaptureFile, createCaptureCamera, createWriter))
    {
        // nothing
    }

    ~StaticBackendWithParamsFactory() CV_OVERRIDE {}

    Ptr<IBackend> getBackend() const CV_OVERRIDE
    {
        return backend.staticCast<IBackend>();
    }

    bool isBuiltIn() const CV_OVERRIDE { return true; }
};
|
||||
|
||||
|
||||
// Builds a factory around statically linked creation functions using the
// modern signatures that take VideoCaptureParameters.
Ptr<IBackendFactory> createBackendFactory(FN_createCaptureFileWithParams createCaptureFile,
                                          FN_createCaptureCameraWithParams createCaptureCamera,
                                          FN_createWriter createWriter)
{
    return makePtr<StaticBackendWithParamsFactory>(createCaptureFile, createCaptureCamera, createWriter).staticCast<IBackendFactory>();
}
|
||||
|
||||
|
||||
} // namespace
|
||||
701
3rdparty/opencv-4.5.4/modules/videoio/src/cap.cpp
vendored
Normal file
701
3rdparty/opencv-4.5.4/modules/videoio/src/cap.cpp
vendored
Normal file
@@ -0,0 +1,701 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// Intel License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000, Intel Corporation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
#include "opencv2/videoio/registry.hpp"
|
||||
#include "videoio_registry.hpp"
|
||||
|
||||
namespace cv {
|
||||
|
||||
// Environment switches enabling verbose Video I/O logging (read once at load).
static bool param_VIDEOIO_DEBUG = utils::getConfigurationParameterBool("OPENCV_VIDEOIO_DEBUG", false);
static bool param_VIDEOCAPTURE_DEBUG = utils::getConfigurationParameterBool("OPENCV_VIDEOCAPTURE_DEBUG", false);
static bool param_VIDEOWRITER_DEBUG = utils::getConfigurationParameterBool("OPENCV_VIDEOWRITER_DEBUG", false);

// Conditional debug-log macros. Wrapped in do { } while (0) so each expands to
// a single statement and is safe inside unbraced if/else at call sites
// (the original bare `if { }` form has a dangling-else hazard). Also makes the
// two macros consistent: the writer variant was missing the trailing semicolon
// after CV_LOG_WARNING.
#define CV_CAPTURE_LOG_DEBUG(tag, ...) \
    do { \
        if (param_VIDEOIO_DEBUG || param_VIDEOCAPTURE_DEBUG) \
        { \
            CV_LOG_WARNING(nullptr, __VA_ARGS__); \
        } \
    } while (0)

#define CV_WRITER_LOG_DEBUG(tag, ...) \
    do { \
        if (param_VIDEOIO_DEBUG || param_VIDEOWRITER_DEBUG) \
        { \
            CV_LOG_WARNING(nullptr, __VA_ARGS__); \
        } \
    } while (0)
|
||||
|
||||
// Smart-pointer deleters for the legacy C capture/writer handles.
void DefaultDeleter<CvCapture>::operator ()(CvCapture* obj) const { cvReleaseCapture(&obj); }
void DefaultDeleter<CvVideoWriter>::operator ()(CvVideoWriter* obj) const { cvReleaseVideoWriter(&obj); }
|
||||
|
||||
|
||||
// Constructors delegate to the matching open() overload; errors are reported
// through isOpened() unless throwOnFail is later enabled.
VideoCapture::VideoCapture() : throwOnFail(false)
{}

VideoCapture::VideoCapture(const String& filename, int apiPreference) : throwOnFail(false)
{
    CV_TRACE_FUNCTION();
    open(filename, apiPreference);
}

VideoCapture::VideoCapture(const String& filename, int apiPreference, const std::vector<int>& params)
    : throwOnFail(false)
{
    CV_TRACE_FUNCTION();
    open(filename, apiPreference, params);
}

VideoCapture::VideoCapture(int index, int apiPreference) : throwOnFail(false)
{
    CV_TRACE_FUNCTION();
    open(index, apiPreference);
}

VideoCapture::VideoCapture(int index, int apiPreference, const std::vector<int>& params)
    : throwOnFail(false)
{
    CV_TRACE_FUNCTION();
    open(index, apiPreference, params);
}

VideoCapture::~VideoCapture()
{
    CV_TRACE_FUNCTION();
    icap.release();  // closes the backend capture via its destructor
}
|
||||
|
||||
// Convenience overload: open a file/URL without extra parameters.
bool VideoCapture::open(const String& filename, int apiPreference)
{
    return open(filename, apiPreference, std::vector<int>());
}
|
||||
|
||||
// Opens a file/URL by iterating the registered filename-capable backends
// (in registry priority order) until one produces an opened capture.
// With CAP_ANY, per-backend exceptions are logged and the next backend is
// tried; with an explicit apiPreference and throwOnFail, they propagate.
// Returns true on success; throws StsError when throwOnFail is set and
// every backend failed.
bool VideoCapture::open(const String& filename, int apiPreference, const std::vector<int>& params)
{
    CV_INSTRUMENT_REGION();

    if (isOpened())
    {
        release();  // drop any previous stream first
    }

    const VideoCaptureParameters parameters(params);
    const std::vector<VideoBackendInfo> backends = cv::videoio_registry::getAvailableBackends_CaptureByFilename();
    for (size_t i = 0; i < backends.size(); i++)
    {
        const VideoBackendInfo& info = backends[i];
        if (apiPreference == CAP_ANY || apiPreference == info.id)
        {
            if (!info.backendFactory)
            {
                CV_LOG_DEBUG(NULL, "VIDEOIO(" << info.name << "): factory is not available (plugins require filesystem support)");
                continue;
            }
            CV_CAPTURE_LOG_DEBUG(NULL,
                                 cv::format("VIDEOIO(%s): trying capture filename='%s' ...",
                                            info.name, filename.c_str()));
            CV_Assert(!info.backendFactory.empty());
            const Ptr<IBackend> backend = info.backendFactory->getBackend();
            if (!backend.empty())
            {
                try
                {
                    icap = backend->createCapture(filename, parameters);
                    if (!icap.empty())
                    {
                        CV_CAPTURE_LOG_DEBUG(NULL,
                                             cv::format("VIDEOIO(%s): created, isOpened=%d",
                                                        info.name, icap->isOpened()));
                        if (icap->isOpened())
                        {
                            return true;
                        }
                        icap.release();  // created but not opened — try the next backend
                    }
                    else
                    {
                        CV_CAPTURE_LOG_DEBUG(NULL,
                                             cv::format("VIDEOIO(%s): can't create capture",
                                                        info.name));
                    }
                }
                catch (const cv::Exception& e)
                {
                    // rethrow only when the caller pinned a specific backend
                    if (throwOnFail && apiPreference != CAP_ANY)
                    {
                        throw;
                    }
                    CV_LOG_ERROR(NULL,
                                 cv::format("VIDEOIO(%s): raised OpenCV exception:\n\n%s\n",
                                            info.name, e.what()));
                }
                catch (const std::exception& e)
                {
                    if (throwOnFail && apiPreference != CAP_ANY)
                    {
                        throw;
                    }
                    CV_LOG_ERROR(NULL, cv::format("VIDEOIO(%s): raised C++ exception:\n\n%s\n",
                                                  info.name, e.what()));
                }
                catch (...)
                {
                    if (throwOnFail && apiPreference != CAP_ANY)
                    {
                        throw;
                    }
                    CV_LOG_ERROR(NULL,
                                 cv::format("VIDEOIO(%s): raised unknown C++ exception!\n\n",
                                            info.name));
                }
            }
            else
            {
                CV_CAPTURE_LOG_DEBUG(NULL,
                                     cv::format("VIDEOIO(%s): backend is not available "
                                                "(plugin is missing, or can't be loaded due "
                                                "dependencies or it is not compatible)",
                                                info.name));
            }
        }
    }

    if (throwOnFail)
    {
        CV_Error_(Error::StsError, ("could not open '%s'", filename.c_str()));
    }

    return false;
}
|
||||
|
||||
// Convenience overload: open a camera without extra parameters.
bool VideoCapture::open(int cameraNum, int apiPreference)
{
    return open(cameraNum, apiPreference, std::vector<int>());
}
|
||||
|
||||
// Opens a camera by iterating the registered index-capable backends
// (in registry priority order) until one produces an opened capture.
// Supports the legacy encoding cameraNum = index + API*100 when
// apiPreference is CAP_ANY. Error-propagation rules match the filename
// overload: exceptions escape only for a pinned backend with throwOnFail.
bool VideoCapture::open(int cameraNum, int apiPreference, const std::vector<int>& params)
{
    CV_TRACE_FUNCTION();

    if (isOpened())
    {
        release();  // drop any previous stream first
    }

    if (apiPreference == CAP_ANY)
    {
        // interpret preferred interface (0 = autodetect)
        int backendID = (cameraNum / 100) * 100;
        if (backendID)
        {
            cameraNum %= 100;
            apiPreference = backendID;
        }
    }

    const VideoCaptureParameters parameters(params);
    const std::vector<VideoBackendInfo> backends = cv::videoio_registry::getAvailableBackends_CaptureByIndex();
    for (size_t i = 0; i < backends.size(); i++)
    {
        const VideoBackendInfo& info = backends[i];
        if (apiPreference == CAP_ANY || apiPreference == info.id)
        {
            if (!info.backendFactory)
            {
                CV_LOG_DEBUG(NULL, "VIDEOIO(" << info.name << "): factory is not available (plugins require filesystem support)");
                continue;
            }
            CV_CAPTURE_LOG_DEBUG(NULL,
                                 cv::format("VIDEOIO(%s): trying capture cameraNum=%d ...",
                                            info.name, cameraNum));
            CV_Assert(!info.backendFactory.empty());
            const Ptr<IBackend> backend = info.backendFactory->getBackend();
            if (!backend.empty())
            {
                try
                {
                    icap = backend->createCapture(cameraNum, parameters);
                    if (!icap.empty())
                    {
                        CV_CAPTURE_LOG_DEBUG(NULL,
                                             cv::format("VIDEOIO(%s): created, isOpened=%d",
                                                        info.name, icap->isOpened()));
                        if (icap->isOpened())
                        {
                            return true;
                        }
                        icap.release();  // created but not opened — try the next backend
                    }
                    else
                    {
                        CV_CAPTURE_LOG_DEBUG(NULL,
                                             cv::format("VIDEOIO(%s): can't create capture",
                                                        info.name));
                    }
                }
                catch (const cv::Exception& e)
                {
                    // rethrow only when the caller pinned a specific backend
                    if (throwOnFail && apiPreference != CAP_ANY)
                    {
                        throw;
                    }
                    CV_LOG_ERROR(NULL,
                                 cv::format("VIDEOIO(%s): raised OpenCV exception:\n\n%s\n",
                                            info.name, e.what()));
                }
                catch (const std::exception& e)
                {
                    if (throwOnFail && apiPreference != CAP_ANY)
                    {
                        throw;
                    }
                    CV_LOG_ERROR(NULL, cv::format("VIDEOIO(%s): raised C++ exception:\n\n%s\n",
                                                  info.name, e.what()));
                }
                catch (...)
                {
                    if (throwOnFail && apiPreference != CAP_ANY)
                    {
                        throw;
                    }
                    CV_LOG_ERROR(NULL,
                                 cv::format("VIDEOIO(%s): raised unknown C++ exception!\n\n",
                                            info.name));
                }
            }
            else
            {
                CV_CAPTURE_LOG_DEBUG(NULL,
                                     cv::format("VIDEOIO(%s): backend is not available "
                                                "(plugin is missing, or can't be loaded due "
                                                "dependencies or it is not compatible)",
                                                info.name));
            }
        }
    }

    if (throwOnFail)
    {
        CV_Error_(Error::StsError, ("could not open camera %d", cameraNum));
    }

    return false;
}
|
||||
|
||||
bool VideoCapture::isOpened() const
|
||||
{
|
||||
return !icap.empty() ? icap->isOpened() : false;
|
||||
}
|
||||
|
||||
// Returns the registry name of the backend serving this capture.
// Asserts (api != 0) when no opened backend is attached.
String VideoCapture::getBackendName() const
{
    int api = 0;
    if (icap)
    {
        api = icap->isOpened() ? icap->getCaptureDomain() : 0;
    }
    CV_Assert(api != 0);
    return cv::videoio_registry::getBackendName(static_cast<VideoCaptureAPIs>(api));
}

// Closes the stream by dropping the backend implementation.
void VideoCapture::release()
{
    CV_TRACE_FUNCTION();
    icap.release();
}

// Grabs the next frame without decoding it. Returns false (or throws when
// throwOnFail is set) when no capture is attached or grabbing failed.
bool VideoCapture::grab()
{
    CV_INSTRUMENT_REGION();
    bool ret = !icap.empty() ? icap->grabFrame() : false;
    if (!ret && throwOnFail)
    {
        CV_Error(Error::StsError, "");
    }
    return ret;
}
|
||||
|
||||
// Decodes the most recently grabbed frame into 'image'.
// 'channel' selects the stream for multi-stream backends.
// Returns false (or throws when throwOnFail is set) on failure.
bool VideoCapture::retrieve(OutputArray image, int channel)
{
    CV_INSTRUMENT_REGION();

    bool ret = false;
    if (!icap.empty())
    {
        ret = icap->retrieveFrame(channel, image);
    }
    if (!ret && throwOnFail)
    {
        CV_Error_(Error::StsError, ("could not retrieve channel %d", channel));
    }
    return ret;
}

// grab() + retrieve() in one call; 'image' is emptied when grabbing fails,
// so the return value reflects whether a frame was produced.
bool VideoCapture::read(OutputArray image)
{
    CV_INSTRUMENT_REGION();

    if (grab())
    {
        retrieve(image);
    } else {
        image.release();
    }
    return !image.empty();
}
|
||||
|
||||
// Stream-style frame extraction into a Mat. On WINRT the frame is pulled from
// the platform bridge's double buffer instead of the regular read() path.
VideoCapture& VideoCapture::operator >> (Mat& image)
{
#ifdef WINRT_VIDEO
    // FIXIT grab/retrieve methods() should work too
    if (grab())
    {
        if (retrieve(image))
        {
            std::lock_guard<std::mutex> lock(VideoioBridge::getInstance().inputBufferMutex);
            VideoioBridge& bridge = VideoioBridge::getInstance();

            // double buffering
            bridge.swapInputBuffers();
            auto p = bridge.frontInputPtr;

            bridge.bIsFrameNew = false;

            // needed here because setting Mat 'image' is not allowed by OutputArray in read()
            Mat m(bridge.getHeight(), bridge.getWidth(), CV_8UC3, p);
            image = m;
        }
    }
#else
    read(image);
#endif
    return *this;
}

// Stream-style frame extraction into a UMat.
VideoCapture& VideoCapture::operator >> (UMat& image)
{
    CV_INSTRUMENT_REGION();

    read(image);
    return *this;
}
|
||||
|
||||
// Sets a capture property on the attached backend. CAP_PROP_BACKEND is
// read-only and rejected up front. Returns false (or throws when throwOnFail
// is set) when the backend refuses the property.
bool VideoCapture::set(int propId, double value)
{
    CV_CheckNE(propId, (int)CAP_PROP_BACKEND, "Can't set read-only property");
    bool ret = !icap.empty() ? icap->setProperty(propId, value) : false;
    if (!ret && throwOnFail)
    {
        CV_Error_(Error::StsError, ("could not set prop %d = %f", propId, value));
    }
    return ret;
}

// Queries a capture property. CAP_PROP_BACKEND is answered locally with the
// backend id (or -1 when nothing is open); everything else is forwarded to
// the backend (0 when no capture is attached).
double VideoCapture::get(int propId) const
{
    if (propId == CAP_PROP_BACKEND)
    {
        int api = 0;
        if (icap && icap->isOpened())
        {
            api = icap->getCaptureDomain();
        }
        if (api <= 0)
        {
            return -1.0;
        }
        return static_cast<double>(api);
    }
    return !icap.empty() ? icap->getProperty(propId) : 0;
}
|
||||
|
||||
|
||||
// Waits until any of 'streams' has a frame ready, filling 'readyIndex'.
// All streams must share the same backend; currently only implemented for
// V4L2 — any other backend raises StsNotImplemented.
bool VideoCapture::waitAny(const std::vector<VideoCapture>& streams,
                           CV_OUT std::vector<int>& readyIndex, int64 timeoutNs)
{
    CV_Assert(!streams.empty());

    VideoCaptureAPIs backend = (VideoCaptureAPIs)streams[0].icap->getCaptureDomain();

    for (size_t i = 1; i < streams.size(); ++i)
    {
        VideoCaptureAPIs backend_i = (VideoCaptureAPIs)streams[i].icap->getCaptureDomain();
        CV_CheckEQ((int)backend, (int)backend_i, "All captures must have the same backend");
    }

#if (defined HAVE_CAMV4L2 || defined HAVE_VIDEOIO) // see cap_v4l.cpp guard
    if (backend == CAP_V4L2)
    {
        return VideoCapture_V4L_waitAny(streams, readyIndex, timeoutNs);
    }
#else
    CV_UNUSED(readyIndex);
    CV_UNUSED(timeoutNs);
#endif
    CV_Error(Error::StsNotImplemented, "VideoCapture::waitAny() is supported by V4L backend only");
}
|
||||
|
||||
|
||||
//=================================================================================================
|
||||
|
||||
|
||||
|
||||
// Constructors delegate to the matching open() overload; failures are
// reported through isOpened().
VideoWriter::VideoWriter()
{}

VideoWriter::VideoWriter(const String& filename, int _fourcc, double fps, Size frameSize,
                         bool isColor)
{
    open(filename, _fourcc, fps, frameSize, isColor);
}


VideoWriter::VideoWriter(const String& filename, int apiPreference, int _fourcc, double fps,
                         Size frameSize, bool isColor)
{
    open(filename, apiPreference, _fourcc, fps, frameSize, isColor);
}

VideoWriter::VideoWriter(const cv::String& filename, int fourcc, double fps,
                         const cv::Size& frameSize, const std::vector<int>& params)
{
    open(filename, fourcc, fps, frameSize, params);
}

VideoWriter::VideoWriter(const cv::String& filename, int apiPreference, int fourcc, double fps,
                         const cv::Size& frameSize, const std::vector<int>& params)
{
    open(filename, apiPreference, fourcc, fps, frameSize, params);
}

// Finalizes and closes the output file by dropping the backend writer.
void VideoWriter::release()
{
    iwriter.release();
}

VideoWriter::~VideoWriter()
{
    release();
}
|
||||
|
||||
bool VideoWriter::open(const String& filename, int _fourcc, double fps, Size frameSize,
|
||||
bool isColor)
|
||||
{
|
||||
return open(filename, CAP_ANY, _fourcc, fps, frameSize,
|
||||
std::vector<int> { VIDEOWRITER_PROP_IS_COLOR, static_cast<int>(isColor) });
|
||||
}
|
||||
|
||||
bool VideoWriter::open(const String& filename, int apiPreference, int _fourcc, double fps,
|
||||
Size frameSize, bool isColor)
|
||||
{
|
||||
return open(filename, apiPreference, _fourcc, fps, frameSize,
|
||||
std::vector<int> { VIDEOWRITER_PROP_IS_COLOR, static_cast<int>(isColor) });
|
||||
}
|
||||
|
||||
|
||||
// Open with automatic backend selection (CAP_ANY) and a flat key/value
// property vector; delegates to the full overload below.
bool VideoWriter::open(const String& filename, int fourcc, double fps, const Size& frameSize,
                       const std::vector<int>& params)
{
    return open(filename, CAP_ANY, fourcc, fps, frameSize, params);
}
|
||||
|
||||
// Core open routine: iterates over the registered writer backends (in registry
// priority order) and returns on the first backend that successfully creates
// an opened writer. Any previously opened writer is released first.
// Returns true on success; false if no backend could open the file.
bool VideoWriter::open(const String& filename, int apiPreference, int fourcc, double fps,
                       const Size& frameSize, const std::vector<int>& params)
{
    CV_INSTRUMENT_REGION();

    if (isOpened())
    {
        release();
    }

    const VideoWriterParameters parameters(params);
    for (const auto& info : videoio_registry::getAvailableBackends_Writer())
    {
        // Honor the caller's backend choice; CAP_ANY tries every backend in order.
        if (apiPreference == CAP_ANY || apiPreference == info.id)
        {
            CV_WRITER_LOG_DEBUG(NULL,
                                cv::format("VIDEOIO(%s): trying writer with filename='%s' "
                                           "fourcc=0x%08x fps=%g sz=%dx%d isColor=%d...",
                                           info.name, filename.c_str(), (unsigned)fourcc, fps,
                                           frameSize.width, frameSize.height,
                                           parameters.get(VIDEOWRITER_PROP_IS_COLOR, true)));
            CV_Assert(!info.backendFactory.empty());
            const Ptr<IBackend> backend = info.backendFactory->getBackend();
            if (!backend.empty())
            {
                // Backend exceptions are logged and swallowed so that the
                // remaining backends still get a chance to open the file.
                try
                {
                    iwriter = backend->createWriter(filename, fourcc, fps, frameSize, parameters);
                    if (!iwriter.empty())
                    {

                        CV_WRITER_LOG_DEBUG(NULL,
                                            cv::format("VIDEOIO(%s): created, isOpened=%d",
                                                       info.name, iwriter->isOpened()));
                        // In debug mode, warn about properties the backend ignored.
                        if (param_VIDEOIO_DEBUG || param_VIDEOWRITER_DEBUG)
                        {
                            for (int key: parameters.getUnused())
                            {
                                CV_LOG_WARNING(NULL,
                                               cv::format("VIDEOIO(%s): parameter with key '%d' was unused",
                                                          info.name, key));
                            }
                        }
                        if (iwriter->isOpened())
                        {
                            return true;
                        }
                        // Created but not opened: drop it and try the next backend.
                        iwriter.release();
                    }
                    else
                    {
                        CV_WRITER_LOG_DEBUG(NULL, cv::format("VIDEOIO(%s): can't create writer",
                                                             info.name));
                    }
                }
                catch (const cv::Exception& e)
                {
                    CV_LOG_ERROR(NULL,
                                 cv::format("VIDEOIO(%s): raised OpenCV exception:\n\n%s\n",
                                            info.name, e.what()));
                }
                catch (const std::exception& e)
                {
                    CV_LOG_ERROR(NULL, cv::format("VIDEOIO(%s): raised C++ exception:\n\n%s\n",
                                                  info.name, e.what()));
                }
                catch (...)
                {
                    CV_LOG_ERROR(NULL,
                                 cv::format("VIDEOIO(%s): raised unknown C++ exception!\n\n",
                                            info.name));
                }
            }
            else
            {
                CV_WRITER_LOG_DEBUG(NULL,
                                    cv::format("VIDEOIO(%s): backend is not available "
                                               "(plugin is missing, or can't be loaded due "
                                               "dependencies or it is not compatible)",
                                               info.name));
            }
        }
    }
    return false;
}
|
||||
|
||||
// Returns true if a backend writer is currently attached (i.e. open() succeeded
// and release() has not been called since).
bool VideoWriter::isOpened() const
{
    return !iwriter.empty();
}
|
||||
|
||||
|
||||
bool VideoWriter::set(int propId, double value)
|
||||
{
|
||||
CV_CheckNE(propId, (int)CAP_PROP_BACKEND, "Can't set read-only property");
|
||||
|
||||
if (!iwriter.empty())
|
||||
{
|
||||
return iwriter->setProperty(propId, value);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
double VideoWriter::get(int propId) const
|
||||
{
|
||||
if (propId == CAP_PROP_BACKEND)
|
||||
{
|
||||
int api = 0;
|
||||
if (iwriter)
|
||||
{
|
||||
api = iwriter->getCaptureDomain();
|
||||
}
|
||||
return (api <= 0) ? -1. : static_cast<double>(api);
|
||||
}
|
||||
if (!iwriter.empty())
|
||||
{
|
||||
return iwriter->getProperty(propId);
|
||||
}
|
||||
return 0.;
|
||||
}
|
||||
|
||||
// Returns the human-readable name of the backend in use.
// Asserts (throws) when the writer is not opened, since no backend id exists.
String VideoWriter::getBackendName() const
{
    const int api = iwriter ? iwriter->getCaptureDomain() : 0;
    CV_Assert(api != 0);
    return cv::videoio_registry::getBackendName(static_cast<VideoCaptureAPIs>(api));
}
|
||||
|
||||
// Encodes one frame. Silently does nothing when the writer is not opened,
// matching the historical VideoWriter contract.
void VideoWriter::write(InputArray image)
{
    CV_INSTRUMENT_REGION();

    if (!iwriter.empty())
        iwriter->write(image);
}
|
||||
|
||||
// Stream-style frame insertion for Mat; equivalent to write() and returns
// *this so writes can be chained.
VideoWriter& VideoWriter::operator << (const Mat& image)
{
    CV_INSTRUMENT_REGION();

    write(image);
    return *this;
}
|
||||
|
||||
// Stream-style frame insertion for UMat; equivalent to write() and returns
// *this so writes can be chained.
VideoWriter& VideoWriter::operator << (const UMat& image)
{
    CV_INSTRUMENT_REGION();
    write(image);
    return *this;
}
|
||||
|
||||
// FIXIT OpenCV 4.0: make inline
|
||||
// Packs four characters into a little-endian FOURCC code:
// c1 occupies the lowest byte, c4 the highest.
int VideoWriter::fourcc(char c1, char c2, char c3, char c4)
{
    const int b0 = c1 & 255;
    const int b1 = c2 & 255;
    const int b2 = c3 & 255;
    const int b3 = c4 & 255;
    return b0 + (b1 << 8) + (b2 << 16) + (b3 << 24);
}
|
||||
|
||||
} // namespace cv
|
||||
770
3rdparty/opencv-4.5.4/modules/videoio/src/cap_android_camera.cpp
vendored
Normal file
770
3rdparty/opencv-4.5.4/modules/videoio/src/cap_android_camera.cpp
vendored
Normal file
@@ -0,0 +1,770 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html
|
||||
// Contributed by Giles Payne
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
#include <memory>
|
||||
#include <condition_variable>
|
||||
#include <mutex>
|
||||
#include <thread>
|
||||
#include <chrono>
|
||||
#include <android/log.h>
|
||||
#include <camera/NdkCameraManager.h>
|
||||
#include <camera/NdkCameraError.h>
|
||||
#include <camera/NdkCameraDevice.h>
|
||||
#include <camera/NdkCameraMetadataTags.h>
|
||||
#include <media/NdkImageReader.h>
|
||||
|
||||
using namespace cv;
|
||||
|
||||
#define TAG "NativeCamera"
|
||||
#define LOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
|
||||
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
|
||||
#define LOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
|
||||
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
|
||||
|
||||
#define MAX_BUF_COUNT 4
|
||||
|
||||
#define COLOR_FormatUnknown -1
|
||||
#define COLOR_FormatYUV420Planar 19
|
||||
#define COLOR_FormatYUV420SemiPlanar 21
|
||||
|
||||
#define FOURCC_BGR CV_FOURCC_MACRO('B','G','R','3')
|
||||
#define FOURCC_RGB CV_FOURCC_MACRO('R','G','B','3')
|
||||
#define FOURCC_GRAY CV_FOURCC_MACRO('G','R','E','Y')
|
||||
#define FOURCC_NV21 CV_FOURCC_MACRO('N','V','2','1')
|
||||
#define FOURCC_YV12 CV_FOURCC_MACRO('Y','V','1','2')
|
||||
#define FOURCC_UNKNOWN 0xFFFFFFFF
|
||||
|
||||
// Small helper describing a camera parameter range [min, max].
template <typename T> class RangeValue {
public:
    T min, max;
    /**
     * Convert a relative value into an absolute one within [min, max].
     * * percent: relative position in percent (50 for 50%)
     * */
    T value(int percent) {
        const T span = max - min;
        return static_cast<T>(min + span * percent / 100);
    }
    RangeValue() { min = max = static_cast<T>(0); }
    // A degenerate range (min == max) means the parameter is unsupported.
    bool Supported(void) const { return (min != max); }
};
|
||||
|
||||
// Custom deleters passed to std::shared_ptr so that Android NDK camera/media
// objects are released through their dedicated C destruction functions
// (these opaque types cannot be destroyed with plain `delete`).

static inline void deleter_ACameraManager(ACameraManager *cameraManager) {
    ACameraManager_delete(cameraManager);
}

static inline void deleter_ACameraIdList(ACameraIdList *cameraIdList) {
    ACameraManager_deleteCameraIdList(cameraIdList);
}

static inline void deleter_ACameraDevice(ACameraDevice *cameraDevice) {
    ACameraDevice_close(cameraDevice);
}

static inline void deleter_ACameraMetadata(ACameraMetadata *cameraMetadata) {
    ACameraMetadata_free(cameraMetadata);
}

static inline void deleter_AImageReader(AImageReader *imageReader) {
    AImageReader_delete(imageReader);
}

static inline void deleter_ACaptureSessionOutputContainer(ACaptureSessionOutputContainer *outputContainer) {
    ACaptureSessionOutputContainer_free(outputContainer);
}

static inline void deleter_ACameraCaptureSession(ACameraCaptureSession *captureSession) {
    ACameraCaptureSession_close(captureSession);
}

static inline void deleter_AImage(AImage *image) {
    AImage_delete(image);
}

static inline void deleter_ANativeWindow(ANativeWindow *nativeWindow) {
    ANativeWindow_release(nativeWindow);
}

static inline void deleter_ACaptureSessionOutput(ACaptureSessionOutput *sessionOutput) {
    ACaptureSessionOutput_free(sessionOutput);
}

static inline void deleter_ACameraOutputTarget(ACameraOutputTarget *outputTarget) {
    ACameraOutputTarget_free(outputTarget);
}

static inline void deleter_ACaptureRequest(ACaptureRequest *captureRequest) {
    ACaptureRequest_free(captureRequest);
}
|
||||
|
||||
/*
|
||||
* CameraDevice callbacks
|
||||
*/
|
||||
/*
 * CameraDevice callbacks
 */
// Invoked by the NDK when the camera device is disconnected; only logs,
// no recovery is attempted here.
static void OnDeviceDisconnect(void* /* ctx */, ACameraDevice* dev) {
    std::string id(ACameraDevice_getId(dev));
    LOGW("Device %s disconnected", id.c_str());
}
|
||||
|
||||
static void OnDeviceError(void* /* ctx */, ACameraDevice* dev, int err) {
|
||||
std::string id(ACameraDevice_getId(dev));
|
||||
LOGI("Camera Device Error: %#x, Device %s", err, id.c_str());
|
||||
|
||||
switch (err) {
|
||||
case ERROR_CAMERA_IN_USE:
|
||||
LOGI("Camera in use");
|
||||
break;
|
||||
case ERROR_CAMERA_SERVICE:
|
||||
LOGI("Fatal Error occured in Camera Service");
|
||||
break;
|
||||
case ERROR_CAMERA_DEVICE:
|
||||
LOGI("Fatal Error occured in Camera Device");
|
||||
break;
|
||||
case ERROR_CAMERA_DISABLED:
|
||||
LOGI("Camera disabled");
|
||||
break;
|
||||
case ERROR_MAX_CAMERAS_IN_USE:
|
||||
LOGI("System limit for maximum concurrent cameras used was exceeded");
|
||||
break;
|
||||
default:
|
||||
LOGI("Unknown Camera Device Error: %#x", err);
|
||||
}
|
||||
}
|
||||
|
||||
// Lifecycle states of the NDK capture session, driven by the
// OnSessionClosed/Ready/Active callbacks below.
enum class CaptureSessionState {
    INITIALIZING,  // session is being set up
    READY,         // session is ready
    ACTIVE,        // session is busy
    CLOSED         // session was closed
};
|
||||
|
||||
void OnSessionClosed(void* context, ACameraCaptureSession* session);
|
||||
|
||||
void OnSessionReady(void* context, ACameraCaptureSession* session);
|
||||
|
||||
void OnSessionActive(void* context, ACameraCaptureSession* session);
|
||||
|
||||
void OnCaptureCompleted(void* context,
|
||||
ACameraCaptureSession* session,
|
||||
ACaptureRequest* request,
|
||||
const ACameraMetadata* result);
|
||||
|
||||
void OnCaptureFailed(void* context,
|
||||
ACameraCaptureSession* session,
|
||||
ACaptureRequest* request,
|
||||
ACameraCaptureFailure* failure);
|
||||
|
||||
#define CAPTURE_TIMEOUT_SECONDS 2
|
||||
|
||||
/**
|
||||
* Range of Camera Exposure Time:
|
||||
* Camera's capability range have a very long range which may be disturbing
|
||||
* on camera. For this sample purpose, clamp to a range showing visible
|
||||
* video on preview: 100000ns ~ 250000000ns
|
||||
*/
|
||||
static const long kMinExposureTime = 1000000L;
|
||||
static const long kMaxExposureTime = 250000000L;
|
||||
|
||||
class AndroidCameraCapture : public IVideoCapture
|
||||
{
|
||||
int cachedIndex;
|
||||
std::shared_ptr<ACameraManager> cameraManager;
|
||||
std::shared_ptr<ACameraDevice> cameraDevice;
|
||||
std::shared_ptr<AImageReader> imageReader;
|
||||
std::shared_ptr<ACaptureSessionOutputContainer> outputContainer;
|
||||
std::shared_ptr<ACaptureSessionOutput> sessionOutput;
|
||||
std::shared_ptr<ANativeWindow> nativeWindow;
|
||||
std::shared_ptr<ACameraOutputTarget> outputTarget;
|
||||
std::shared_ptr<ACaptureRequest> captureRequest;
|
||||
std::shared_ptr<ACameraCaptureSession> captureSession;
|
||||
CaptureSessionState sessionState = CaptureSessionState::INITIALIZING;
|
||||
int32_t frameWidth = 0;
|
||||
int32_t frameHeight = 0;
|
||||
int32_t colorFormat;
|
||||
std::vector<uint8_t> buffer;
|
||||
bool sessionOutputAdded = false;
|
||||
bool targetAdded = false;
|
||||
// properties
|
||||
uint32_t fourCC = FOURCC_UNKNOWN;
|
||||
bool settingWidth = false;
|
||||
bool settingHeight = false;
|
||||
int desiredWidth = 640;
|
||||
int desiredHeight = 480;
|
||||
bool autoExposure = true;
|
||||
int64_t exposureTime = 0L;
|
||||
RangeValue<int64_t> exposureRange;
|
||||
int32_t sensitivity = 0;
|
||||
RangeValue<int32_t> sensitivityRange;
|
||||
|
||||
public:
|
||||
// for synchronization with NDK capture callback
|
||||
bool waitingCapture = false;
|
||||
bool captureSuccess = false;
|
||||
std::mutex mtx;
|
||||
std::condition_variable condition;
|
||||
|
||||
public:
|
||||
AndroidCameraCapture() {}
|
||||
|
||||
~AndroidCameraCapture() { cleanUp(); }
|
||||
|
||||
ACameraDevice_stateCallbacks* GetDeviceListener() {
|
||||
static ACameraDevice_stateCallbacks cameraDeviceListener = {
|
||||
.onDisconnected = ::OnDeviceDisconnect,
|
||||
.onError = ::OnDeviceError,
|
||||
};
|
||||
return &cameraDeviceListener;
|
||||
}
|
||||
|
||||
ACameraCaptureSession_stateCallbacks sessionListener;
|
||||
|
||||
ACameraCaptureSession_stateCallbacks* GetSessionListener() {
|
||||
sessionListener = {
|
||||
.context = this,
|
||||
.onClosed = ::OnSessionClosed,
|
||||
.onReady = ::OnSessionReady,
|
||||
.onActive = ::OnSessionActive,
|
||||
};
|
||||
return &sessionListener;
|
||||
}
|
||||
|
||||
ACameraCaptureSession_captureCallbacks captureListener;
|
||||
|
||||
ACameraCaptureSession_captureCallbacks* GetCaptureCallback() {
|
||||
captureListener = {
|
||||
.context = this,
|
||||
.onCaptureStarted = nullptr,
|
||||
.onCaptureProgressed = nullptr,
|
||||
.onCaptureCompleted = ::OnCaptureCompleted,
|
||||
.onCaptureFailed = ::OnCaptureFailed,
|
||||
.onCaptureSequenceCompleted = nullptr,
|
||||
.onCaptureSequenceAborted = nullptr,
|
||||
.onCaptureBufferLost = nullptr,
|
||||
};
|
||||
return &captureListener;
|
||||
}
|
||||
|
||||
void setSessionState(CaptureSessionState newSessionState) {
|
||||
this->sessionState = newSessionState;
|
||||
}
|
||||
|
||||
bool isOpened() const CV_OVERRIDE { return imageReader.get() != nullptr && captureSession.get() != nullptr; }
|
||||
|
||||
int getCaptureDomain() CV_OVERRIDE { return CAP_ANDROID; }
|
||||
|
||||
bool grabFrame() CV_OVERRIDE
|
||||
{
|
||||
AImage* img;
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(mtx);
|
||||
media_status_t mStatus = AImageReader_acquireLatestImage(imageReader.get(), &img);
|
||||
if (mStatus != AMEDIA_OK) {
|
||||
if (mStatus == AMEDIA_IMGREADER_NO_BUFFER_AVAILABLE) {
|
||||
// this error is not fatal - we just need to wait for a buffer to become available
|
||||
LOGW("No Buffer Available error occured - waiting for callback");
|
||||
waitingCapture = true;
|
||||
captureSuccess = false;
|
||||
bool captured = condition.wait_for(lock, std::chrono::seconds(CAPTURE_TIMEOUT_SECONDS), [this]{ return captureSuccess; });
|
||||
waitingCapture = false;
|
||||
if (captured) {
|
||||
mStatus = AImageReader_acquireLatestImage(imageReader.get(), &img);
|
||||
if (mStatus != AMEDIA_OK) {
|
||||
LOGE("Acquire image failed with error code: %d", mStatus);
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
LOGE("Capture failed or callback timed out");
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
LOGE("Acquire image failed with error code: %d", mStatus);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
std::shared_ptr<AImage> image = std::shared_ptr<AImage>(img, deleter_AImage);
|
||||
int32_t srcFormat = -1;
|
||||
AImage_getFormat(image.get(), &srcFormat);
|
||||
if (srcFormat != AIMAGE_FORMAT_YUV_420_888) {
|
||||
LOGE("Incorrect image format");
|
||||
return false;
|
||||
}
|
||||
int32_t srcPlanes = 0;
|
||||
AImage_getNumberOfPlanes(image.get(), &srcPlanes);
|
||||
if (srcPlanes != 3) {
|
||||
LOGE("Incorrect number of planes in image data");
|
||||
return false;
|
||||
}
|
||||
int32_t yStride, uvStride;
|
||||
uint8_t *yPixel, *uPixel, *vPixel;
|
||||
int32_t yLen, uLen, vLen;
|
||||
int32_t uvPixelStride;
|
||||
AImage_getPlaneRowStride(image.get(), 0, &yStride);
|
||||
AImage_getPlaneRowStride(image.get(), 1, &uvStride);
|
||||
AImage_getPlaneData(image.get(), 0, &yPixel, &yLen);
|
||||
AImage_getPlaneData(image.get(), 1, &uPixel, &uLen);
|
||||
AImage_getPlaneData(image.get(), 2, &vPixel, &vLen);
|
||||
AImage_getPlanePixelStride(image.get(), 1, &uvPixelStride);
|
||||
|
||||
if ( (uvPixelStride == 2) && (vPixel == uPixel + 1) && (yLen == frameWidth * frameHeight) && (uLen == ((yLen / 2) - 1)) && (vLen == uLen) ) {
|
||||
colorFormat = COLOR_FormatYUV420SemiPlanar;
|
||||
if (fourCC == FOURCC_UNKNOWN) {
|
||||
fourCC = FOURCC_NV21;
|
||||
}
|
||||
} else if ( (uvPixelStride == 1) && (vPixel == uPixel + uLen) && (yLen == frameWidth * frameHeight) && (uLen == yLen / 4) && (vLen == uLen) ) {
|
||||
colorFormat = COLOR_FormatYUV420Planar;
|
||||
if (fourCC == FOURCC_UNKNOWN) {
|
||||
fourCC = FOURCC_YV12;
|
||||
}
|
||||
} else {
|
||||
colorFormat = COLOR_FormatUnknown;
|
||||
fourCC = FOURCC_UNKNOWN;
|
||||
LOGE("Unsupported format");
|
||||
return false;
|
||||
}
|
||||
|
||||
buffer.clear();
|
||||
buffer.insert(buffer.end(), yPixel, yPixel + yLen);
|
||||
buffer.insert(buffer.end(), uPixel, uPixel + yLen / 2);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool retrieveFrame(int, OutputArray out) CV_OVERRIDE
|
||||
{
|
||||
if (buffer.empty()) {
|
||||
return false;
|
||||
}
|
||||
Mat yuv(frameHeight + frameHeight/2, frameWidth, CV_8UC1, buffer.data());
|
||||
if (colorFormat == COLOR_FormatYUV420Planar) {
|
||||
switch (fourCC) {
|
||||
case FOURCC_BGR:
|
||||
cv::cvtColor(yuv, out, cv::COLOR_YUV2BGR_YV12);
|
||||
break;
|
||||
case FOURCC_RGB:
|
||||
cv::cvtColor(yuv, out, cv::COLOR_YUV2RGB_YV12);
|
||||
break;
|
||||
case FOURCC_GRAY:
|
||||
cv::cvtColor(yuv, out, cv::COLOR_YUV2GRAY_YV12);
|
||||
break;
|
||||
case FOURCC_YV12:
|
||||
yuv.copyTo(out);
|
||||
break;
|
||||
default:
|
||||
LOGE("Unexpected FOURCC value: %d", fourCC);
|
||||
break;
|
||||
}
|
||||
} else if (colorFormat == COLOR_FormatYUV420SemiPlanar) {
|
||||
switch (fourCC) {
|
||||
case FOURCC_BGR:
|
||||
cv::cvtColor(yuv, out, cv::COLOR_YUV2BGR_NV21);
|
||||
break;
|
||||
case FOURCC_RGB:
|
||||
cv::cvtColor(yuv, out, cv::COLOR_YUV2RGB_NV21);
|
||||
break;
|
||||
case FOURCC_GRAY:
|
||||
cv::cvtColor(yuv, out, cv::COLOR_YUV2GRAY_NV21);
|
||||
break;
|
||||
case FOURCC_NV21:
|
||||
yuv.copyTo(out);
|
||||
break;
|
||||
default:
|
||||
LOGE("Unexpected FOURCC value: %d", fourCC);
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
LOGE("Unsupported video format: %d", colorFormat);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
double getProperty(int property_id) const CV_OVERRIDE
|
||||
{
|
||||
switch (property_id) {
|
||||
case CV_CAP_PROP_FRAME_WIDTH:
|
||||
return isOpened() ? frameWidth : desiredWidth;
|
||||
case CV_CAP_PROP_FRAME_HEIGHT:
|
||||
return isOpened() ? frameHeight : desiredHeight;
|
||||
case CAP_PROP_AUTO_EXPOSURE:
|
||||
return autoExposure ? 1 : 0;
|
||||
case CV_CAP_PROP_EXPOSURE:
|
||||
return exposureTime;
|
||||
case CV_CAP_PROP_ISO_SPEED:
|
||||
return sensitivity;
|
||||
case CV_CAP_PROP_FOURCC:
|
||||
return fourCC;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
// unknown parameter or value not available
|
||||
return -1;
|
||||
}
|
||||
|
||||
bool setProperty(int property_id, double value) CV_OVERRIDE
|
||||
{
|
||||
switch (property_id) {
|
||||
case CV_CAP_PROP_FRAME_WIDTH:
|
||||
desiredWidth = value;
|
||||
settingWidth = true;
|
||||
if (settingWidth && settingHeight) {
|
||||
setWidthHeight();
|
||||
settingWidth = false;
|
||||
settingHeight = false;
|
||||
}
|
||||
return true;
|
||||
case CV_CAP_PROP_FRAME_HEIGHT:
|
||||
desiredHeight = value;
|
||||
settingHeight = true;
|
||||
if (settingWidth && settingHeight) {
|
||||
setWidthHeight();
|
||||
settingWidth = false;
|
||||
settingHeight = false;
|
||||
}
|
||||
return true;
|
||||
case CV_CAP_PROP_FOURCC:
|
||||
{
|
||||
uint32_t newFourCC = cvRound(value);
|
||||
if (fourCC == newFourCC) {
|
||||
return true;
|
||||
} else {
|
||||
switch (newFourCC) {
|
||||
case FOURCC_BGR:
|
||||
case FOURCC_RGB:
|
||||
case FOURCC_GRAY:
|
||||
fourCC = newFourCC;
|
||||
return true;
|
||||
case FOURCC_YV12:
|
||||
if (colorFormat == COLOR_FormatYUV420Planar) {
|
||||
fourCC = newFourCC;
|
||||
return true;
|
||||
} else {
|
||||
LOGE("Unsupported FOURCC conversion COLOR_FormatYUV420SemiPlanar -> COLOR_FormatYUV420Planar");
|
||||
return false;
|
||||
}
|
||||
case FOURCC_NV21:
|
||||
if (colorFormat == COLOR_FormatYUV420SemiPlanar) {
|
||||
fourCC = newFourCC;
|
||||
return true;
|
||||
} else {
|
||||
LOGE("Unsupported FOURCC conversion COLOR_FormatYUV420Planar -> COLOR_FormatYUV420SemiPlanar");
|
||||
return false;
|
||||
}
|
||||
default:
|
||||
LOGE("Unsupported FOURCC value: %d\n", fourCC);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
case CAP_PROP_AUTO_EXPOSURE:
|
||||
autoExposure = (value != 0);
|
||||
if (isOpened()) {
|
||||
uint8_t aeMode = autoExposure ? ACAMERA_CONTROL_AE_MODE_ON : ACAMERA_CONTROL_AE_MODE_OFF;
|
||||
camera_status_t status = ACaptureRequest_setEntry_u8(captureRequest.get(), ACAMERA_CONTROL_AE_MODE, 1, &aeMode);
|
||||
return status == ACAMERA_OK;
|
||||
}
|
||||
return true;
|
||||
case CV_CAP_PROP_EXPOSURE:
|
||||
if (isOpened() && exposureRange.Supported()) {
|
||||
exposureTime = (int64_t)value;
|
||||
LOGI("Setting CV_CAP_PROP_EXPOSURE will have no effect unless CAP_PROP_AUTO_EXPOSURE is off");
|
||||
camera_status_t status = ACaptureRequest_setEntry_i64(captureRequest.get(), ACAMERA_SENSOR_EXPOSURE_TIME, 1, &exposureTime);
|
||||
return status == ACAMERA_OK;
|
||||
}
|
||||
return false;
|
||||
case CV_CAP_PROP_ISO_SPEED:
|
||||
if (isOpened() && sensitivityRange.Supported()) {
|
||||
sensitivity = (int32_t)value;
|
||||
LOGI("Setting CV_CAP_PROP_ISO_SPEED will have no effect unless CAP_PROP_AUTO_EXPOSURE is off");
|
||||
camera_status_t status = ACaptureRequest_setEntry_i32(captureRequest.get(), ACAMERA_SENSOR_SENSITIVITY, 1, &sensitivity);
|
||||
return status == ACAMERA_OK;
|
||||
}
|
||||
return false;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void setWidthHeight() {
|
||||
cleanUp();
|
||||
initCapture(cachedIndex);
|
||||
}
|
||||
|
||||
// calculate a score based on how well the width and height match the desired width and height
|
||||
// basically draw the 2 rectangle on top of each other and take the ratio of the non-overlapping
|
||||
// area to the overlapping area
|
||||
double getScore(int32_t width, int32_t height) {
|
||||
double area1 = width * height;
|
||||
double area2 = desiredWidth * desiredHeight;
|
||||
if ((width < desiredWidth) == (height < desiredHeight)) {
|
||||
return (width < desiredWidth) ? (area2 - area1)/area1 : (area1 - area2)/area2;
|
||||
} else {
|
||||
int32_t overlappedWidth = std::min(width, desiredWidth);
|
||||
int32_t overlappedHeight = std::min(height, desiredHeight);
|
||||
double overlappedArea = overlappedWidth * overlappedHeight;
|
||||
return (area1 + area2 - overlappedArea)/overlappedArea;
|
||||
}
|
||||
}
|
||||
|
||||
bool initCapture(int index)
|
||||
{
|
||||
cachedIndex = index;
|
||||
cameraManager = std::shared_ptr<ACameraManager>(ACameraManager_create(), deleter_ACameraManager);
|
||||
if (!cameraManager) {
|
||||
return false;
|
||||
}
|
||||
ACameraIdList* cameraIds = nullptr;
|
||||
camera_status_t cStatus = ACameraManager_getCameraIdList(cameraManager.get(), &cameraIds);
|
||||
if (cStatus != ACAMERA_OK) {
|
||||
LOGE("Get camera list failed with error code: %d", cStatus);
|
||||
return false;
|
||||
}
|
||||
std::shared_ptr<ACameraIdList> cameraIdList = std::shared_ptr<ACameraIdList>(cameraIds, deleter_ACameraIdList);
|
||||
if (index < 0 || index >= cameraIds->numCameras) {
|
||||
LOGE("Camera index out of range %d (Number of cameras: %d)", index, cameraIds->numCameras);
|
||||
return false;
|
||||
}
|
||||
ACameraDevice* camera = nullptr;
|
||||
cStatus = ACameraManager_openCamera(cameraManager.get(), cameraIdList.get()->cameraIds[index], GetDeviceListener(), &camera);
|
||||
if (cStatus != ACAMERA_OK) {
|
||||
LOGE("Open camera failed with error code: %d", cStatus);
|
||||
return false;
|
||||
}
|
||||
cameraDevice = std::shared_ptr<ACameraDevice>(camera, deleter_ACameraDevice);
|
||||
ACameraMetadata* metadata;
|
||||
cStatus = ACameraManager_getCameraCharacteristics(cameraManager.get(), cameraIdList.get()->cameraIds[index], &metadata);
|
||||
if (cStatus != ACAMERA_OK) {
|
||||
LOGE("Get camera characteristics failed with error code: %d", cStatus);
|
||||
return false;
|
||||
}
|
||||
std::shared_ptr<ACameraMetadata> cameraMetadata = std::shared_ptr<ACameraMetadata>(metadata, deleter_ACameraMetadata);
|
||||
ACameraMetadata_const_entry entry;
|
||||
ACameraMetadata_getConstEntry(cameraMetadata.get(), ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, &entry);
|
||||
|
||||
double bestScore = std::numeric_limits<double>::max();
|
||||
int32_t bestMatchWidth = 0;
|
||||
int32_t bestMatchHeight = 0;
|
||||
|
||||
for (uint32_t i = 0; i < entry.count; i += 4) {
|
||||
int32_t input = entry.data.i32[i + 3];
|
||||
int32_t format = entry.data.i32[i + 0];
|
||||
if (input) {
|
||||
continue;
|
||||
}
|
||||
if (format == AIMAGE_FORMAT_YUV_420_888) {
|
||||
int32_t width = entry.data.i32[i + 1];
|
||||
int32_t height = entry.data.i32[i + 2];
|
||||
if (width == desiredWidth && height == desiredHeight) {
|
||||
bestMatchWidth = width;
|
||||
bestMatchHeight = height;
|
||||
bestScore = 0;
|
||||
break;
|
||||
} else {
|
||||
double score = getScore(width, height);
|
||||
if (score < bestScore) {
|
||||
bestMatchWidth = width;
|
||||
bestMatchHeight = height;
|
||||
bestScore = score;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ACameraMetadata_const_entry val = { 0, };
|
||||
camera_status_t status = ACameraMetadata_getConstEntry(cameraMetadata.get(), ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE, &val);
|
||||
if (status == ACAMERA_OK) {
|
||||
exposureRange.min = val.data.i64[0];
|
||||
if (exposureRange.min < kMinExposureTime) {
|
||||
exposureRange.min = kMinExposureTime;
|
||||
}
|
||||
exposureRange.max = val.data.i64[1];
|
||||
if (exposureRange.max > kMaxExposureTime) {
|
||||
exposureRange.max = kMaxExposureTime;
|
||||
}
|
||||
exposureTime = exposureRange.value(2);
|
||||
} else {
|
||||
LOGW("Unsupported ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE");
|
||||
exposureRange.min = exposureRange.max = 0l;
|
||||
exposureTime = 0l;
|
||||
}
|
||||
status = ACameraMetadata_getConstEntry(cameraMetadata.get(), ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE, &val);
|
||||
if (status == ACAMERA_OK){
|
||||
sensitivityRange.min = val.data.i32[0];
|
||||
sensitivityRange.max = val.data.i32[1];
|
||||
sensitivity = sensitivityRange.value(2);
|
||||
} else {
|
||||
LOGW("Unsupported ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE");
|
||||
sensitivityRange.min = sensitivityRange.max = 0;
|
||||
sensitivity = 0;
|
||||
}
|
||||
|
||||
AImageReader* reader;
|
||||
media_status_t mStatus = AImageReader_new(bestMatchWidth, bestMatchHeight, AIMAGE_FORMAT_YUV_420_888, MAX_BUF_COUNT, &reader);
|
||||
if (mStatus != AMEDIA_OK) {
|
||||
LOGE("ImageReader creation failed with error code: %d", mStatus);
|
||||
return false;
|
||||
}
|
||||
frameWidth = bestMatchWidth;
|
||||
frameHeight = bestMatchHeight;
|
||||
imageReader = std::shared_ptr<AImageReader>(reader, deleter_AImageReader);
|
||||
|
||||
ANativeWindow *window;
|
||||
mStatus = AImageReader_getWindow(imageReader.get(), &window);
|
||||
if (mStatus != AMEDIA_OK) {
|
||||
LOGE("Could not get ANativeWindow: %d", mStatus);
|
||||
return false;
|
||||
}
|
||||
nativeWindow = std::shared_ptr<ANativeWindow>(window, deleter_ANativeWindow);
|
||||
|
||||
ACaptureSessionOutputContainer* container;
|
||||
cStatus = ACaptureSessionOutputContainer_create(&container);
|
||||
if (cStatus != ACAMERA_OK) {
|
||||
LOGE("CaptureSessionOutputContainer creation failed with error code: %d", cStatus);
|
||||
return false;
|
||||
}
|
||||
outputContainer = std::shared_ptr<ACaptureSessionOutputContainer>(container, deleter_ACaptureSessionOutputContainer);
|
||||
|
||||
ANativeWindow_acquire(nativeWindow.get());
|
||||
ACaptureSessionOutput* output;
|
||||
cStatus = ACaptureSessionOutput_create(nativeWindow.get(), &output);
|
||||
if (cStatus != ACAMERA_OK) {
|
||||
LOGE("CaptureSessionOutput creation failed with error code: %d", cStatus);
|
||||
return false;
|
||||
}
|
||||
sessionOutput = std::shared_ptr<ACaptureSessionOutput>(output, deleter_ACaptureSessionOutput);
|
||||
ACaptureSessionOutputContainer_add(outputContainer.get(), sessionOutput.get());
|
||||
sessionOutputAdded = true;
|
||||
|
||||
ACameraOutputTarget* target;
|
||||
cStatus = ACameraOutputTarget_create(nativeWindow.get(), &target);
|
||||
if (cStatus != ACAMERA_OK) {
|
||||
LOGE("CameraOutputTarget creation failed with error code: %d", cStatus);
|
||||
return false;
|
||||
}
|
||||
outputTarget = std::shared_ptr<ACameraOutputTarget>(target, deleter_ACameraOutputTarget);
|
||||
|
||||
ACaptureRequest * request;
|
||||
cStatus = ACameraDevice_createCaptureRequest(cameraDevice.get(), TEMPLATE_PREVIEW, &request);
|
||||
if (cStatus != ACAMERA_OK) {
|
||||
LOGE("CaptureRequest creation failed with error code: %d", cStatus);
|
||||
return false;
|
||||
}
|
||||
captureRequest = std::shared_ptr<ACaptureRequest>(request, deleter_ACaptureRequest);
|
||||
|
||||
cStatus = ACaptureRequest_addTarget(captureRequest.get(), outputTarget.get());
|
||||
if (cStatus != ACAMERA_OK) {
|
||||
LOGE("Add target to CaptureRequest failed with error code: %d", cStatus);
|
||||
return false;
|
||||
}
|
||||
targetAdded = true;
|
||||
|
||||
ACameraCaptureSession *session;
|
||||
cStatus = ACameraDevice_createCaptureSession(cameraDevice.get(), outputContainer.get(), GetSessionListener(), &session);
|
||||
if (cStatus != ACAMERA_OK) {
|
||||
LOGE("CaptureSession creation failed with error code: %d", cStatus);
|
||||
return false;
|
||||
}
|
||||
captureSession = std::shared_ptr<ACameraCaptureSession>(session, deleter_ACameraCaptureSession);
|
||||
uint8_t aeMode = autoExposure ? ACAMERA_CONTROL_AE_MODE_ON : ACAMERA_CONTROL_AE_MODE_OFF;
|
||||
ACaptureRequest_setEntry_u8(captureRequest.get(), ACAMERA_CONTROL_AE_MODE, 1, &aeMode);
|
||||
ACaptureRequest_setEntry_i32(captureRequest.get(), ACAMERA_SENSOR_SENSITIVITY, 1, &sensitivity);
|
||||
if (!autoExposure) {
|
||||
ACaptureRequest_setEntry_i64(captureRequest.get(), ACAMERA_SENSOR_EXPOSURE_TIME, 1, &exposureTime);
|
||||
}
|
||||
|
||||
cStatus = ACameraCaptureSession_setRepeatingRequest(captureSession.get(), GetCaptureCallback(), 1, &request, nullptr);
|
||||
if (cStatus != ACAMERA_OK) {
|
||||
LOGE("CameraCaptureSession set repeating request failed with error code: %d", cStatus);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void cleanUp() {
|
||||
captureListener.context = nullptr;
|
||||
sessionListener.context = nullptr;
|
||||
if (sessionState == CaptureSessionState::ACTIVE) {
|
||||
ACameraCaptureSession_stopRepeating(captureSession.get());
|
||||
}
|
||||
captureSession = nullptr;
|
||||
if (targetAdded) {
|
||||
ACaptureRequest_removeTarget(captureRequest.get(), outputTarget.get());
|
||||
targetAdded = false;
|
||||
}
|
||||
captureRequest = nullptr;
|
||||
outputTarget = nullptr;
|
||||
if (sessionOutputAdded) {
|
||||
ACaptureSessionOutputContainer_remove(outputContainer.get(), sessionOutput.get());
|
||||
sessionOutputAdded = false;
|
||||
}
|
||||
sessionOutput = nullptr;
|
||||
nativeWindow = nullptr;
|
||||
outputContainer = nullptr;
|
||||
cameraDevice = nullptr;
|
||||
cameraManager = nullptr;
|
||||
imageReader = nullptr;
|
||||
}
|
||||
};
|
||||
|
||||
/******************************** Session management *******************************/
|
||||
|
||||
/******************************** Session management *******************************/

// Session-state callback: marks the owning capture object CLOSED.
// A null context means cleanUp() already detached this listener.
void OnSessionClosed(void* context, ACameraCaptureSession* session) {
    if (context == nullptr) return;
    LOGW("session %p closed", session);
    reinterpret_cast<AndroidCameraCapture*>(context)->setSessionState(CaptureSessionState::CLOSED);
}
|
||||
|
||||
void OnSessionReady(void* context, ACameraCaptureSession* session) {
|
||||
if (context == nullptr) return;
|
||||
LOGW("session %p ready", session);
|
||||
reinterpret_cast<AndroidCameraCapture*>(context)->setSessionState(CaptureSessionState::READY);
|
||||
}
|
||||
|
||||
void OnSessionActive(void* context, ACameraCaptureSession* session) {
|
||||
if (context == nullptr) return;
|
||||
LOGW("session %p active", session);
|
||||
reinterpret_cast<AndroidCameraCapture*>(context)->setSessionState(CaptureSessionState::ACTIVE);
|
||||
}
|
||||
|
||||
void OnCaptureCompleted(void* context,
|
||||
ACameraCaptureSession* session,
|
||||
ACaptureRequest* /* request */,
|
||||
const ACameraMetadata* /* result */) {
|
||||
if (context == nullptr) return;
|
||||
LOGV("session %p capture completed", session);
|
||||
AndroidCameraCapture* cameraCapture = reinterpret_cast<AndroidCameraCapture*>(context);
|
||||
std::unique_lock<std::mutex> lock(cameraCapture->mtx);
|
||||
|
||||
if (cameraCapture->waitingCapture) {
|
||||
cameraCapture->waitingCapture = false;
|
||||
cameraCapture->captureSuccess = true;
|
||||
cameraCapture->condition.notify_one();
|
||||
}
|
||||
}
|
||||
|
||||
void OnCaptureFailed(void* context,
|
||||
ACameraCaptureSession* session,
|
||||
ACaptureRequest* /* request */,
|
||||
ACameraCaptureFailure* /* failure */) {
|
||||
if (context == nullptr) return;
|
||||
LOGV("session %p capture failed", session);
|
||||
AndroidCameraCapture* cameraCapture = reinterpret_cast<AndroidCameraCapture*>(context);
|
||||
std::unique_lock<std::mutex> lock(cameraCapture->mtx);
|
||||
|
||||
if (cameraCapture->waitingCapture) {
|
||||
cameraCapture->waitingCapture = false;
|
||||
cameraCapture->captureSuccess = false;
|
||||
cameraCapture->condition.notify_one();
|
||||
}
|
||||
}
|
||||
|
||||
/****************** Implementation of interface functions ********************/
|
||||
|
||||
Ptr<IVideoCapture> cv::createAndroidCapture_cam( int index ) {
|
||||
Ptr<AndroidCameraCapture> res = makePtr<AndroidCameraCapture>();
|
||||
if (res && res->initCapture(index))
|
||||
return res;
|
||||
return Ptr<IVideoCapture>();
|
||||
}
|
||||
248
3rdparty/opencv-4.5.4/modules/videoio/src/cap_android_mediandk.cpp
vendored
Normal file
248
3rdparty/opencv-4.5.4/modules/videoio/src/cap_android_mediandk.cpp
vendored
Normal file
@@ -0,0 +1,248 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <unistd.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <fcntl.h>
|
||||
#include <android/log.h>
|
||||
|
||||
#include "media/NdkMediaCodec.h"
|
||||
#include "media/NdkMediaExtractor.h"
|
||||
|
||||
#define INPUT_TIMEOUT_MS 2000
|
||||
|
||||
#define COLOR_FormatYUV420Planar 19
|
||||
#define COLOR_FormatYUV420SemiPlanar 21
|
||||
|
||||
using namespace cv;
|
||||
|
||||
#define TAG "NativeCodec"
|
||||
#define LOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
|
||||
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
|
||||
|
||||
|
||||
static inline void deleter_AMediaExtractor(AMediaExtractor *extractor) {
|
||||
AMediaExtractor_delete(extractor);
|
||||
}
|
||||
|
||||
static inline void deleter_AMediaCodec(AMediaCodec *codec) {
|
||||
AMediaCodec_stop(codec);
|
||||
AMediaCodec_delete(codec);
|
||||
}
|
||||
|
||||
static inline void deleter_AMediaFormat(AMediaFormat *format) {
|
||||
AMediaFormat_delete(format);
|
||||
}
|
||||
|
||||
class AndroidMediaNdkCapture : public IVideoCapture
|
||||
{
|
||||
|
||||
public:
|
||||
AndroidMediaNdkCapture():
|
||||
sawInputEOS(false), sawOutputEOS(false),
|
||||
frameWidth(0), frameHeight(0), colorFormat(0) {}
|
||||
std::shared_ptr<AMediaExtractor> mediaExtractor;
|
||||
std::shared_ptr<AMediaCodec> mediaCodec;
|
||||
bool sawInputEOS;
|
||||
bool sawOutputEOS;
|
||||
int32_t frameWidth;
|
||||
int32_t frameHeight;
|
||||
int32_t colorFormat;
|
||||
std::vector<uint8_t> buffer;
|
||||
|
||||
~AndroidMediaNdkCapture() { cleanUp(); }
|
||||
|
||||
bool decodeFrame() {
|
||||
while (!sawInputEOS || !sawOutputEOS) {
|
||||
if (!sawInputEOS) {
|
||||
auto bufferIndex = AMediaCodec_dequeueInputBuffer(mediaCodec.get(), INPUT_TIMEOUT_MS);
|
||||
LOGV("input buffer %zd", bufferIndex);
|
||||
if (bufferIndex >= 0) {
|
||||
size_t bufferSize;
|
||||
auto inputBuffer = AMediaCodec_getInputBuffer(mediaCodec.get(), bufferIndex, &bufferSize);
|
||||
auto sampleSize = AMediaExtractor_readSampleData(mediaExtractor.get(), inputBuffer, bufferSize);
|
||||
if (sampleSize < 0) {
|
||||
sampleSize = 0;
|
||||
sawInputEOS = true;
|
||||
LOGV("EOS");
|
||||
}
|
||||
auto presentationTimeUs = AMediaExtractor_getSampleTime(mediaExtractor.get());
|
||||
|
||||
AMediaCodec_queueInputBuffer(mediaCodec.get(), bufferIndex, 0, sampleSize,
|
||||
presentationTimeUs, sawInputEOS ? AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM : 0);
|
||||
AMediaExtractor_advance(mediaExtractor.get());
|
||||
}
|
||||
}
|
||||
|
||||
if (!sawOutputEOS) {
|
||||
AMediaCodecBufferInfo info;
|
||||
auto bufferIndex = AMediaCodec_dequeueOutputBuffer(mediaCodec.get(), &info, 0);
|
||||
if (bufferIndex >= 0) {
|
||||
size_t bufferSize = 0;
|
||||
auto mediaFormat = std::shared_ptr<AMediaFormat>(AMediaCodec_getOutputFormat(mediaCodec.get()), deleter_AMediaFormat);
|
||||
AMediaFormat_getInt32(mediaFormat.get(), AMEDIAFORMAT_KEY_WIDTH, &frameWidth);
|
||||
AMediaFormat_getInt32(mediaFormat.get(), AMEDIAFORMAT_KEY_HEIGHT, &frameHeight);
|
||||
AMediaFormat_getInt32(mediaFormat.get(), AMEDIAFORMAT_KEY_COLOR_FORMAT, &colorFormat);
|
||||
uint8_t* codecBuffer = AMediaCodec_getOutputBuffer(mediaCodec.get(), bufferIndex, &bufferSize);
|
||||
buffer = std::vector<uint8_t>(codecBuffer + info.offset, codecBuffer + bufferSize);
|
||||
LOGV("colorFormat: %d", colorFormat);
|
||||
LOGV("buffer size: %zu", bufferSize);
|
||||
LOGV("width (frame): %d", frameWidth);
|
||||
LOGV("height (frame): %d", frameHeight);
|
||||
if (info.flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) {
|
||||
LOGV("output EOS");
|
||||
sawOutputEOS = true;
|
||||
}
|
||||
AMediaCodec_releaseOutputBuffer(mediaCodec.get(), bufferIndex, info.size != 0);
|
||||
return true;
|
||||
} else if (bufferIndex == AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED) {
|
||||
LOGV("output buffers changed");
|
||||
} else if (bufferIndex == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
|
||||
auto format = AMediaCodec_getOutputFormat(mediaCodec.get());
|
||||
LOGV("format changed to: %s", AMediaFormat_toString(format));
|
||||
AMediaFormat_delete(format);
|
||||
} else if (bufferIndex == AMEDIACODEC_INFO_TRY_AGAIN_LATER) {
|
||||
LOGV("no output buffer right now");
|
||||
} else {
|
||||
LOGV("unexpected info code: %zd", bufferIndex);
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool isOpened() const CV_OVERRIDE { return mediaCodec.get() != nullptr; }
|
||||
|
||||
int getCaptureDomain() CV_OVERRIDE { return CAP_ANDROID; }
|
||||
|
||||
bool grabFrame() CV_OVERRIDE
|
||||
{
|
||||
// clear the previous frame
|
||||
buffer.clear();
|
||||
return decodeFrame();
|
||||
}
|
||||
|
||||
bool retrieveFrame(int, OutputArray out) CV_OVERRIDE
|
||||
{
|
||||
if (buffer.empty()) {
|
||||
return false;
|
||||
}
|
||||
Mat yuv(frameHeight + frameHeight/2, frameWidth, CV_8UC1, buffer.data());
|
||||
if (colorFormat == COLOR_FormatYUV420Planar) {
|
||||
cv::cvtColor(yuv, out, cv::COLOR_YUV2BGR_YV12);
|
||||
} else if (colorFormat == COLOR_FormatYUV420SemiPlanar) {
|
||||
cv::cvtColor(yuv, out, cv::COLOR_YUV2BGR_NV21);
|
||||
} else {
|
||||
LOGE("Unsupported video format: %d", colorFormat);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
double getProperty(int property_id) const CV_OVERRIDE
|
||||
{
|
||||
switch (property_id)
|
||||
{
|
||||
case CV_CAP_PROP_FRAME_WIDTH: return frameWidth;
|
||||
case CV_CAP_PROP_FRAME_HEIGHT: return frameHeight;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool setProperty(int /* property_id */, double /* value */) CV_OVERRIDE
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
bool initCapture(const char * filename)
|
||||
{
|
||||
struct stat statBuffer;
|
||||
if (stat(filename, &statBuffer) != 0) {
|
||||
LOGE("failed to stat file: %s (%s)", filename, strerror(errno));
|
||||
return false;
|
||||
}
|
||||
|
||||
int fd = open(filename, O_RDONLY);
|
||||
|
||||
if (fd < 0) {
|
||||
LOGE("failed to open file: %s %d (%s)", filename, fd, strerror(errno));
|
||||
return false;
|
||||
}
|
||||
|
||||
mediaExtractor = std::shared_ptr<AMediaExtractor>(AMediaExtractor_new(), deleter_AMediaExtractor);
|
||||
if (!mediaExtractor) {
|
||||
return false;
|
||||
}
|
||||
media_status_t err = AMediaExtractor_setDataSourceFd(mediaExtractor.get(), fd, 0, statBuffer.st_size);
|
||||
close(fd);
|
||||
if (err != AMEDIA_OK) {
|
||||
LOGV("setDataSource error: %d", err);
|
||||
return false;
|
||||
}
|
||||
|
||||
int numtracks = AMediaExtractor_getTrackCount(mediaExtractor.get());
|
||||
|
||||
LOGV("input has %d tracks", numtracks);
|
||||
for (int i = 0; i < numtracks; i++) {
|
||||
auto format = std::shared_ptr<AMediaFormat>(AMediaExtractor_getTrackFormat(mediaExtractor.get(), i), deleter_AMediaFormat);
|
||||
if (!format) {
|
||||
continue;
|
||||
}
|
||||
const char *s = AMediaFormat_toString(format.get());
|
||||
LOGV("track %d format: %s", i, s);
|
||||
const char *mime;
|
||||
if (!AMediaFormat_getString(format.get(), AMEDIAFORMAT_KEY_MIME, &mime)) {
|
||||
LOGV("no mime type");
|
||||
} else if (!strncmp(mime, "video/", 6)) {
|
||||
int32_t trackWidth, trackHeight;
|
||||
AMediaFormat_getInt32(format.get(), AMEDIAFORMAT_KEY_WIDTH, &trackWidth);
|
||||
AMediaFormat_getInt32(format.get(), AMEDIAFORMAT_KEY_HEIGHT, &trackHeight);
|
||||
LOGV("width (track): %d", trackWidth);
|
||||
LOGV("height (track): %d", trackHeight);
|
||||
if (AMediaExtractor_selectTrack(mediaExtractor.get(), i) != AMEDIA_OK) {
|
||||
continue;
|
||||
}
|
||||
mediaCodec = std::shared_ptr<AMediaCodec>(AMediaCodec_createDecoderByType(mime), deleter_AMediaCodec);
|
||||
if (!mediaCodec) {
|
||||
continue;
|
||||
}
|
||||
if (AMediaCodec_configure(mediaCodec.get(), format.get(), NULL, NULL, 0) != AMEDIA_OK) {
|
||||
continue;
|
||||
}
|
||||
sawInputEOS = false;
|
||||
sawOutputEOS = false;
|
||||
if (AMediaCodec_start(mediaCodec.get()) != AMEDIA_OK) {
|
||||
continue;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void cleanUp() {
|
||||
sawInputEOS = true;
|
||||
sawOutputEOS = true;
|
||||
frameWidth = 0;
|
||||
frameHeight = 0;
|
||||
colorFormat = 0;
|
||||
}
|
||||
};
|
||||
|
||||
/****************** Implementation of interface functions ********************/
|
||||
|
||||
Ptr<IVideoCapture> cv::createAndroidCapture_file(const std::string &filename) {
|
||||
Ptr<AndroidMediaNdkCapture> res = makePtr<AndroidMediaNdkCapture>();
|
||||
if (res && res->initCapture(filename.c_str()))
|
||||
return res;
|
||||
return Ptr<IVideoCapture>();
|
||||
}
|
||||
639
3rdparty/opencv-4.5.4/modules/videoio/src/cap_aravis.cpp
vendored
Normal file
639
3rdparty/opencv-4.5.4/modules/videoio/src/cap_aravis.cpp
vendored
Normal file
@@ -0,0 +1,639 @@
|
||||
////////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// Intel License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000, Intel Corporation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//
|
||||
|
||||
//
|
||||
// The code has been contributed by Arkadiusz Raj on 2016 Oct
|
||||
//
|
||||
|
||||
#include "precomp.hpp"
|
||||
#include "cap_interface.hpp"
|
||||
|
||||
#ifdef HAVE_ARAVIS_API
|
||||
|
||||
#include <arv.h>
|
||||
|
||||
//
|
||||
// This file provides wrapper for using Aravis SDK library to access GigE Vision cameras.
|
||||
// Aravis library (version 0.4 or 0.6) shall be installed else this code will not be included in build.
|
||||
//
|
||||
// To include this module invoke cmake with -DWITH_ARAVIS=ON
|
||||
//
|
||||
// Please obvserve, that jumbo frames are required when high fps & 16bit data is selected.
|
||||
// (camera, switches/routers and the computer this software is running on)
|
||||
//
|
||||
// Basic usage: VideoCapture cap(<camera id>, CAP_ARAVIS);
|
||||
//
|
||||
// Supported properties:
|
||||
// read/write
|
||||
// CAP_PROP_AUTO_EXPOSURE(0|1)
|
||||
// CAP_PROP_EXPOSURE(t), t in seconds
|
||||
// CAP_PROP_BRIGHTNESS (ev), exposure compensation in EV for auto exposure algorithm
|
||||
// CAP_PROP_GAIN(g), g >=0 or -1 for automatic control if CAP_PROP_AUTO_EXPOSURE is true
|
||||
// CAP_PROP_FPS(f)
|
||||
// CAP_PROP_FOURCC(type)
|
||||
// CAP_PROP_BUFFERSIZE(n)
|
||||
// read only:
|
||||
// CAP_PROP_POS_MSEC
|
||||
// CAP_PROP_FRAME_WIDTH
|
||||
// CAP_PROP_FRAME_HEIGHT
|
||||
//
|
||||
// Supported types of data:
|
||||
// video/x-raw, fourcc:'GREY' -> 8bit, 1 channel
|
||||
// video/x-raw, fourcc:'Y800' -> 8bit, 1 channel
|
||||
// video/x-raw, fourcc:'Y12 ' -> 12bit, 1 channel
|
||||
// video/x-raw, fourcc:'Y16 ' -> 16bit, 1 channel
|
||||
// video/x-raw, fourcc:'GRBG' -> 8bit, 1 channel
|
||||
//
|
||||
|
||||
#define MODE_GREY CV_FOURCC_MACRO('G','R','E','Y')
|
||||
#define MODE_Y800 CV_FOURCC_MACRO('Y','8','0','0')
|
||||
#define MODE_Y12 CV_FOURCC_MACRO('Y','1','2',' ')
|
||||
#define MODE_Y16 CV_FOURCC_MACRO('Y','1','6',' ')
|
||||
#define MODE_GRBG CV_FOURCC_MACRO('G','R','B','G')
|
||||
|
||||
#define CLIP(a,b,c) (cv::max(cv::min((a),(c)),(b)))
|
||||
|
||||
/********************* Capturing video from camera via Aravis *********************/
|
||||
|
||||
class CvCaptureCAM_Aravis : public CvCapture
|
||||
{
|
||||
public:
|
||||
CvCaptureCAM_Aravis();
|
||||
virtual ~CvCaptureCAM_Aravis()
|
||||
{
|
||||
close();
|
||||
}
|
||||
|
||||
virtual bool open(int);
|
||||
virtual void close();
|
||||
virtual double getProperty(int) const CV_OVERRIDE;
|
||||
virtual bool setProperty(int, double) CV_OVERRIDE;
|
||||
virtual bool grabFrame() CV_OVERRIDE;
|
||||
virtual IplImage* retrieveFrame(int) CV_OVERRIDE;
|
||||
virtual int getCaptureDomain() CV_OVERRIDE
|
||||
{
|
||||
return cv::CAP_ARAVIS;
|
||||
}
|
||||
|
||||
protected:
|
||||
bool create(int);
|
||||
bool init_buffers();
|
||||
|
||||
void stopCapture();
|
||||
bool startCapture();
|
||||
|
||||
bool getDeviceNameById(int id, std::string &device);
|
||||
|
||||
void autoExposureControl(IplImage*);
|
||||
|
||||
ArvCamera *camera; // Camera to control.
|
||||
ArvStream *stream; // Object for video stream reception.
|
||||
void *framebuffer; //
|
||||
|
||||
unsigned int payload; // Width x height x Pixel width.
|
||||
|
||||
int widthMin; // Camera sensor minimum width.
|
||||
int widthMax; // Camera sensor maximum width.
|
||||
int heightMin; // Camera sensor minimum height.
|
||||
int heightMax; // Camera sensor maximum height.
|
||||
bool fpsAvailable;
|
||||
double fpsMin; // Camera minimum fps.
|
||||
double fpsMax; // Camera maximum fps.
|
||||
bool gainAvailable;
|
||||
double gainMin; // Camera minimum gain.
|
||||
double gainMax; // Camera maximum gain.
|
||||
bool exposureAvailable;
|
||||
double exposureMin; // Camera's minimum exposure time.
|
||||
double exposureMax; // Camera's maximum exposure time.
|
||||
|
||||
bool controlExposure; // Flag if automatic exposure shall be done by this SW
|
||||
double exposureCompensation;
|
||||
bool autoGain;
|
||||
double targetGrey; // Target grey value (mid grey))
|
||||
bool softwareTriggered; // Flag if the camera is software triggered
|
||||
bool allowAutoTrigger; // Flag that user allowed to trigger software triggered cameras automatically
|
||||
|
||||
gint64 *pixelFormats;
|
||||
guint pixelFormatsCnt;
|
||||
|
||||
|
||||
int num_buffers; // number of payload transmission buffers
|
||||
|
||||
ArvPixelFormat pixelFormat; // pixel format
|
||||
|
||||
int xoffset; // current frame region x offset
|
||||
int yoffset; // current frame region y offset
|
||||
int width; // current frame width of frame
|
||||
int height; // current frame height of image
|
||||
|
||||
double fps; // current value of fps
|
||||
double exposure; // current value of exposure time
|
||||
double gain; // current value of gain
|
||||
double midGrey; // current value of mid grey (brightness)
|
||||
|
||||
unsigned frameID; // current frame id
|
||||
unsigned prevFrameID;
|
||||
|
||||
IplImage *frame; // local frame copy
|
||||
};
|
||||
|
||||
|
||||
CvCaptureCAM_Aravis::CvCaptureCAM_Aravis()
|
||||
{
|
||||
camera = NULL;
|
||||
stream = NULL;
|
||||
framebuffer = NULL;
|
||||
|
||||
payload = 0;
|
||||
|
||||
widthMin = widthMax = heightMin = heightMax = 0;
|
||||
xoffset = yoffset = width = height = 0;
|
||||
fpsMin = fpsMax = gainMin = gainMax = exposureMin = exposureMax = 0;
|
||||
controlExposure = false;
|
||||
exposureCompensation = 0;
|
||||
targetGrey = 0;
|
||||
frameID = prevFrameID = 0;
|
||||
allowAutoTrigger = false;
|
||||
|
||||
num_buffers = 10;
|
||||
frame = NULL;
|
||||
}
|
||||
|
||||
void CvCaptureCAM_Aravis::close()
|
||||
{
|
||||
if(camera) {
|
||||
stopCapture();
|
||||
|
||||
g_object_unref(camera);
|
||||
camera = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
bool CvCaptureCAM_Aravis::getDeviceNameById(int id, std::string &device)
|
||||
{
|
||||
arv_update_device_list();
|
||||
|
||||
if((id >= 0) && (id < (int)arv_get_n_devices())) {
|
||||
device = arv_get_device_id(id);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool CvCaptureCAM_Aravis::create( int index )
|
||||
{
|
||||
std::string deviceName;
|
||||
if(!getDeviceNameById(index, deviceName))
|
||||
return false;
|
||||
|
||||
return NULL != (camera = arv_camera_new(deviceName.c_str()));
|
||||
}
|
||||
|
||||
bool CvCaptureCAM_Aravis::init_buffers()
|
||||
{
|
||||
if(stream) {
|
||||
g_object_unref(stream);
|
||||
stream = NULL;
|
||||
}
|
||||
if( (stream = arv_camera_create_stream(camera, NULL, NULL)) ) {
|
||||
if( arv_camera_is_gv_device(camera) ) {
|
||||
g_object_set(stream,
|
||||
"socket-buffer", ARV_GV_STREAM_SOCKET_BUFFER_AUTO,
|
||||
"socket-buffer-size", 0, NULL);
|
||||
g_object_set(stream,
|
||||
"packet-resend", ARV_GV_STREAM_PACKET_RESEND_NEVER, NULL);
|
||||
g_object_set(stream,
|
||||
"packet-timeout", (unsigned) 40000,
|
||||
"frame-retention", (unsigned) 200000, NULL);
|
||||
}
|
||||
payload = arv_camera_get_payload (camera);
|
||||
|
||||
for (int i = 0; i < num_buffers; i++)
|
||||
arv_stream_push_buffer(stream, arv_buffer_new(payload, NULL));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool CvCaptureCAM_Aravis::open( int index )
|
||||
{
|
||||
if(create(index)) {
|
||||
// fetch properties bounds
|
||||
pixelFormats = arv_camera_get_available_pixel_formats(camera, &pixelFormatsCnt);
|
||||
|
||||
arv_camera_get_width_bounds(camera, &widthMin, &widthMax);
|
||||
arv_camera_get_height_bounds(camera, &heightMin, &heightMax);
|
||||
arv_camera_set_region(camera, 0, 0, widthMax, heightMax);
|
||||
|
||||
if( (fpsAvailable = arv_camera_is_frame_rate_available(camera)) )
|
||||
arv_camera_get_frame_rate_bounds(camera, &fpsMin, &fpsMax);
|
||||
if( (gainAvailable = arv_camera_is_gain_available(camera)) )
|
||||
arv_camera_get_gain_bounds (camera, &gainMin, &gainMax);
|
||||
if( (exposureAvailable = arv_camera_is_exposure_time_available(camera)) )
|
||||
arv_camera_get_exposure_time_bounds (camera, &exposureMin, &exposureMax);
|
||||
|
||||
// get initial values
|
||||
pixelFormat = arv_camera_get_pixel_format(camera);
|
||||
exposure = exposureAvailable ? arv_camera_get_exposure_time(camera) : 0;
|
||||
gain = gainAvailable ? arv_camera_get_gain(camera) : 0;
|
||||
fps = arv_camera_get_frame_rate(camera);
|
||||
softwareTriggered = (strcmp(arv_camera_get_trigger_source(camera), "Software") == 0);
|
||||
|
||||
return startCapture();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool CvCaptureCAM_Aravis::grabFrame()
|
||||
{
|
||||
// remove content of previous frame
|
||||
framebuffer = NULL;
|
||||
|
||||
if(stream) {
|
||||
ArvBuffer *arv_buffer = NULL;
|
||||
int max_tries = 10;
|
||||
int tries = 0;
|
||||
if (softwareTriggered && allowAutoTrigger) {
|
||||
arv_camera_software_trigger (camera);
|
||||
}
|
||||
for(; tries < max_tries; tries ++) {
|
||||
arv_buffer = arv_stream_timeout_pop_buffer (stream, 200000);
|
||||
if (arv_buffer != NULL && arv_buffer_get_status (arv_buffer) != ARV_BUFFER_STATUS_SUCCESS) {
|
||||
arv_stream_push_buffer (stream, arv_buffer);
|
||||
} else break;
|
||||
}
|
||||
if(arv_buffer != NULL && tries < max_tries) {
|
||||
size_t buffer_size;
|
||||
framebuffer = (void*)arv_buffer_get_data (arv_buffer, &buffer_size);
|
||||
|
||||
// retrieve image size properties
|
||||
arv_buffer_get_image_region (arv_buffer, &xoffset, &yoffset, &width, &height);
|
||||
|
||||
// retrieve image ID set by camera
|
||||
frameID = arv_buffer_get_frame_id(arv_buffer);
|
||||
|
||||
arv_stream_push_buffer(stream, arv_buffer);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
IplImage* CvCaptureCAM_Aravis::retrieveFrame(int)
|
||||
{
|
||||
if(framebuffer) {
|
||||
int depth = 0, channels = 0;
|
||||
switch(pixelFormat) {
|
||||
case ARV_PIXEL_FORMAT_MONO_8:
|
||||
case ARV_PIXEL_FORMAT_BAYER_GR_8:
|
||||
depth = IPL_DEPTH_8U;
|
||||
channels = 1;
|
||||
break;
|
||||
case ARV_PIXEL_FORMAT_MONO_12:
|
||||
case ARV_PIXEL_FORMAT_MONO_16:
|
||||
depth = IPL_DEPTH_16U;
|
||||
channels = 1;
|
||||
break;
|
||||
}
|
||||
if(depth && channels) {
|
||||
IplImage src;
|
||||
cvInitImageHeader( &src, cvSize( width, height ), depth, channels, IPL_ORIGIN_TL, 4 );
|
||||
|
||||
cvSetData( &src, framebuffer, src.widthStep );
|
||||
if( !frame ||
|
||||
frame->width != src.width ||
|
||||
frame->height != src.height ||
|
||||
frame->depth != src.depth ||
|
||||
frame->nChannels != src.nChannels) {
|
||||
|
||||
cvReleaseImage( &frame );
|
||||
frame = cvCreateImage( cvGetSize(&src), src.depth, channels );
|
||||
}
|
||||
cvCopy(&src, frame);
|
||||
|
||||
if(controlExposure && ((frameID - prevFrameID) >= 3)) {
|
||||
// control exposure every third frame
|
||||
// i.e. skip frame taken with previous exposure setup
|
||||
autoExposureControl(frame);
|
||||
}
|
||||
|
||||
return frame;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void CvCaptureCAM_Aravis::autoExposureControl(IplImage* image)
|
||||
{
|
||||
// Software control of exposure parameters utilizing
|
||||
// automatic change of exposure time & gain
|
||||
|
||||
// Priority is set as follows:
|
||||
// - to increase brightness, first increase time then gain
|
||||
// - to decrease brightness, first decrease gain then time
|
||||
|
||||
cv::Mat m = cv::cvarrToMat(image);
|
||||
|
||||
// calc mean value for luminance or green channel
|
||||
double brightness = cv::mean(m)[image->nChannels > 1 ? 1 : 0];
|
||||
if(brightness < 1) brightness = 1;
|
||||
|
||||
// mid point - 100 % means no change
|
||||
static const double dmid = 100;
|
||||
|
||||
// distance from optimal value as a percentage
|
||||
double d = (targetGrey * dmid) / brightness;
|
||||
if(d >= dmid) d = ( d + (dmid * 2) ) / 3;
|
||||
|
||||
prevFrameID = frameID;
|
||||
midGrey = brightness;
|
||||
|
||||
double maxe = 1e6 / fps;
|
||||
double ne = CLIP( ( exposure * d ) / ( dmid * pow(sqrt(2), -2 * exposureCompensation) ), exposureMin, maxe);
|
||||
|
||||
// if change of value requires intervention
|
||||
if(std::fabs(d-dmid) > 5) {
|
||||
double ev, ng = 0;
|
||||
|
||||
if(gainAvailable && autoGain) {
|
||||
ev = log( d / dmid ) / log(2);
|
||||
ng = CLIP( gain + ev + exposureCompensation, gainMin, gainMax);
|
||||
|
||||
if( ng < gain ) {
|
||||
// priority 1 - reduce gain
|
||||
arv_camera_set_gain(camera, (gain = ng));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if(exposureAvailable) {
|
||||
// priority 2 - control of exposure time
|
||||
if(std::fabs(exposure - ne) > 2) {
|
||||
// we have not yet reach the max-e level
|
||||
arv_camera_set_exposure_time(camera, (exposure = ne) );
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if(gainAvailable && autoGain) {
|
||||
if(exposureAvailable) {
|
||||
// exposure at maximum - increase gain if possible
|
||||
if(ng > gain && ng < gainMax && ne >= maxe) {
|
||||
arv_camera_set_gain(camera, (gain = ng));
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
// priority 3 - increase gain
|
||||
arv_camera_set_gain(camera, (gain = ng));
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if gain can be reduced - do it
|
||||
if(gainAvailable && autoGain && exposureAvailable) {
|
||||
if(gain > gainMin && exposure < maxe) {
|
||||
exposure = CLIP( ne * 1.05, exposureMin, maxe);
|
||||
arv_camera_set_exposure_time(camera, exposure );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
double CvCaptureCAM_Aravis::getProperty( int property_id ) const
|
||||
{
|
||||
switch(property_id) {
|
||||
case CV_CAP_PROP_POS_MSEC:
|
||||
return (double)frameID/fps;
|
||||
|
||||
case CV_CAP_PROP_FRAME_WIDTH:
|
||||
return width;
|
||||
|
||||
case CV_CAP_PROP_FRAME_HEIGHT:
|
||||
return height;
|
||||
|
||||
case CV_CAP_PROP_AUTO_EXPOSURE:
|
||||
return (controlExposure ? 1 : 0);
|
||||
|
||||
case CV_CAP_PROP_BRIGHTNESS:
|
||||
return exposureCompensation;
|
||||
|
||||
case CV_CAP_PROP_EXPOSURE:
|
||||
if(exposureAvailable) {
|
||||
/* exposure time in seconds, like 1/100 s */
|
||||
return arv_camera_get_exposure_time(camera) / 1e6;
|
||||
}
|
||||
break;
|
||||
|
||||
case CV_CAP_PROP_FPS:
|
||||
if(fpsAvailable) {
|
||||
return arv_camera_get_frame_rate(camera);
|
||||
}
|
||||
break;
|
||||
|
||||
case CV_CAP_PROP_GAIN:
|
||||
if(gainAvailable) {
|
||||
return arv_camera_get_gain(camera);
|
||||
}
|
||||
break;
|
||||
|
||||
case CV_CAP_PROP_FOURCC:
|
||||
{
|
||||
ArvPixelFormat currFormat = arv_camera_get_pixel_format(camera);
|
||||
switch( currFormat ) {
|
||||
case ARV_PIXEL_FORMAT_MONO_8:
|
||||
return MODE_Y800;
|
||||
case ARV_PIXEL_FORMAT_MONO_12:
|
||||
return MODE_Y12;
|
||||
case ARV_PIXEL_FORMAT_MONO_16:
|
||||
return MODE_Y16;
|
||||
case ARV_PIXEL_FORMAT_BAYER_GR_8:
|
||||
return MODE_GRBG;
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case CV_CAP_PROP_BUFFERSIZE:
|
||||
if(stream) {
|
||||
int in, out;
|
||||
arv_stream_get_n_buffers(stream, &in, &out);
|
||||
// return number of available buffers in Aravis output queue
|
||||
return out;
|
||||
}
|
||||
break;
|
||||
|
||||
case cv::CAP_PROP_ARAVIS_AUTOTRIGGER:
|
||||
{
|
||||
return allowAutoTrigger ? 1. : 0.;
|
||||
}
|
||||
break;
|
||||
}
|
||||
return -1.0;
|
||||
}
|
||||
|
||||
bool CvCaptureCAM_Aravis::setProperty( int property_id, double value )
|
||||
{
|
||||
switch(property_id) {
|
||||
case CV_CAP_PROP_AUTO_EXPOSURE:
|
||||
if(exposureAvailable || gainAvailable) {
|
||||
if( (controlExposure = (bool)(int)value) ) {
|
||||
exposure = exposureAvailable ? arv_camera_get_exposure_time(camera) : 0;
|
||||
gain = gainAvailable ? arv_camera_get_gain(camera) : 0;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case CV_CAP_PROP_BRIGHTNESS:
|
||||
exposureCompensation = CLIP(value, -3., 3.);
|
||||
break;
|
||||
|
||||
case CV_CAP_PROP_EXPOSURE:
|
||||
if(exposureAvailable) {
|
||||
/* exposure time in seconds, like 1/100 s */
|
||||
value *= 1e6; // -> from s to us
|
||||
|
||||
arv_camera_set_exposure_time(camera, exposure = CLIP(value, exposureMin, exposureMax));
|
||||
break;
|
||||
} else return false;
|
||||
|
||||
case CV_CAP_PROP_FPS:
|
||||
if(fpsAvailable) {
|
||||
arv_camera_set_frame_rate(camera, fps = CLIP(value, fpsMin, fpsMax));
|
||||
break;
|
||||
} else return false;
|
||||
|
||||
case CV_CAP_PROP_GAIN:
|
||||
if(gainAvailable) {
|
||||
if ( (autoGain = (-1 == value) ) )
|
||||
break;
|
||||
|
||||
arv_camera_set_gain(camera, gain = CLIP(value, gainMin, gainMax));
|
||||
break;
|
||||
} else return false;
|
||||
|
||||
case CV_CAP_PROP_FOURCC:
|
||||
{
|
||||
ArvPixelFormat newFormat = pixelFormat;
|
||||
switch((int)value) {
|
||||
case MODE_GREY:
|
||||
case MODE_Y800:
|
||||
newFormat = ARV_PIXEL_FORMAT_MONO_8;
|
||||
targetGrey = 128;
|
||||
break;
|
||||
case MODE_Y12:
|
||||
newFormat = ARV_PIXEL_FORMAT_MONO_12;
|
||||
targetGrey = 2048;
|
||||
break;
|
||||
case MODE_Y16:
|
||||
newFormat = ARV_PIXEL_FORMAT_MONO_16;
|
||||
targetGrey = 32768;
|
||||
break;
|
||||
case MODE_GRBG:
|
||||
newFormat = ARV_PIXEL_FORMAT_BAYER_GR_8;
|
||||
targetGrey = 128;
|
||||
break;
|
||||
}
|
||||
if(newFormat != pixelFormat) {
|
||||
stopCapture();
|
||||
arv_camera_set_pixel_format(camera, pixelFormat = newFormat);
|
||||
startCapture();
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case CV_CAP_PROP_BUFFERSIZE:
|
||||
{
|
||||
int x = (int)value;
|
||||
if((x > 0) && (x != num_buffers)) {
|
||||
stopCapture();
|
||||
num_buffers = x;
|
||||
startCapture();
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case cv::CAP_PROP_ARAVIS_AUTOTRIGGER:
|
||||
{
|
||||
allowAutoTrigger = (bool) value;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void CvCaptureCAM_Aravis::stopCapture()
|
||||
{
|
||||
arv_camera_stop_acquisition(camera);
|
||||
|
||||
if(stream) {
|
||||
g_object_unref(stream);
|
||||
stream = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
bool CvCaptureCAM_Aravis::startCapture()
|
||||
{
|
||||
if(init_buffers() ) {
|
||||
arv_camera_set_acquisition_mode(camera, ARV_ACQUISITION_MODE_CONTINUOUS);
|
||||
arv_camera_start_acquisition(camera);
|
||||
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
cv::Ptr<cv::IVideoCapture> cv::create_Aravis_capture( int index )
|
||||
{
|
||||
CvCaptureCAM_Aravis* capture = new CvCaptureCAM_Aravis;
|
||||
|
||||
if(capture->open(index)) {
|
||||
return cv::makePtr<cv::LegacyCapture>(capture);
|
||||
}
|
||||
|
||||
delete capture;
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
1428
3rdparty/opencv-4.5.4/modules/videoio/src/cap_avfoundation.mm
vendored
Normal file
1428
3rdparty/opencv-4.5.4/modules/videoio/src/cap_avfoundation.mm
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1387
3rdparty/opencv-4.5.4/modules/videoio/src/cap_avfoundation_mac.mm
vendored
Normal file
1387
3rdparty/opencv-4.5.4/modules/videoio/src/cap_avfoundation_mac.mm
vendored
Normal file
File diff suppressed because it is too large
Load Diff
702
3rdparty/opencv-4.5.4/modules/videoio/src/cap_dc1394_v2.cpp
vendored
Normal file
702
3rdparty/opencv-4.5.4/modules/videoio/src/cap_dc1394_v2.cpp
vendored
Normal file
@@ -0,0 +1,702 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// Intel License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000, Intel Corporation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
#include "cap_interface.hpp"
|
||||
|
||||
#ifdef HAVE_DC1394_2
|
||||
|
||||
#include <unistd.h>
|
||||
#include <stdint.h>
|
||||
#ifdef _WIN32
|
||||
// On Windows, we have no sys/select.h, but we need to pick up
|
||||
// select() which is in winsock2.
|
||||
#ifndef __SYS_SELECT_H__
|
||||
#define __SYS_SELECT_H__ 1
|
||||
#include <winsock2.h>
|
||||
#endif
|
||||
#else
|
||||
#include <sys/select.h>
|
||||
#endif /*_WIN32*/
|
||||
#include <dc1394/dc1394.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
// Process-wide libdc1394 state: the library context plus the set of file
// descriptors of cameras currently capturing (maintained with FD_SET/FD_CLR
// in startCapture()/close()).
struct CvDC1394
{
    CvDC1394();
    ~CvDC1394();

    dc1394_t* dc;    // libdc1394 context; NULL if dc1394_new() failed
    fd_set camFds;   // fds of active captures
};
|
||||
|
||||
// Acquire the libdc1394 context and start with an empty fd set.
CvDC1394::CvDC1394()
{
    dc = dc1394_new();
    FD_ZERO(&camFds);
}
|
||||
|
||||
// Release the libdc1394 context, if one was created.
CvDC1394::~CvDC1394()
{
    if (dc)
        dc1394_free(dc);
    dc = 0;
}
|
||||
|
||||
// Accessor for the shared libdc1394 state; the function-local static is
// created on first use and destroyed at program exit.
static CvDC1394& getDC1394()
{
    static CvDC1394 dc1394;
    return dc1394;
}
|
||||
|
||||
// Legacy CvCapture backend for IIDC/DC1394 (FireWire) cameras via libdc1394 v2.
// Lifecycle: open() binds a camera by bus index; the first grabFrame() lazily
// calls startCapture(); close() releases everything.
class CvCaptureCAM_DC1394_v2_CPP : public CvCapture
{
public:
    // Maps CV_CAP_PROP_* ids to DC1394_FEATURE_* ids (-1 = no mapping).
    static int dc1394properties[CV_CAP_PROP_MAX_DC1394];
    CvCaptureCAM_DC1394_v2_CPP();
    virtual ~CvCaptureCAM_DC1394_v2_CPP()
    {
        close();
    }

    virtual bool open(int index);
    virtual void close();

    virtual double getProperty(int) const CV_OVERRIDE;
    virtual bool setProperty(int, double) CV_OVERRIDE;
    virtual bool grabFrame() CV_OVERRIDE;
    virtual IplImage* retrieveFrame(int) CV_OVERRIDE;
    virtual int getCaptureDomain() CV_OVERRIDE { return CV_CAP_DC1394; }


protected:
    // Configures mode/fps/ISO speed and starts DMA capture + transmission.
    virtual bool startCapture();

    uint64_t guid;                   // unique 64-bit id of the opened camera
    dc1394camera_t* dcCam;           // libdc1394 camera handle (0 when closed)
    int isoSpeed;                    // requested ISO bus speed in Mbps
    int videoMode;                   // NOTE(review): never referenced in this file — confirm use
    int frameWidth, frameHeight;     // requested/actual frame size
    double fps;                      // requested frame rate
    int nDMABufs;                    // DMA ring size passed to dc1394_capture_setup
    bool started;                    // true once startCapture() succeeded
    int userMode;                    // explicit video mode request (-1 = auto-select)

    enum { VIDERE = 0x5505 };        // vendor id of Videre stereo heads

    int cameraId;                    // vendor id of the opened camera
    bool colorStereo;                // Videre only: stereo halves carry Bayer data
    dc1394bayer_method_t bayer;      // debayering algorithm
    dc1394color_filter_t bayerFilter;// Bayer pattern of the sensor

    enum { NIMG = 2 };               // max images per grab (2 for stereo)
    IplImage *img[NIMG];             // cached output images, lazily allocated
    dc1394video_frame_t* frameC;     // scratch frame for color conversion
    int nimages;                     // images per grab: 1, or 2 for Videre stereo

    dc1394featureset_t feature_set;  // cached camera feature set from open()
};
|
||||
// Mapping from CV_CAP_PROP_* indices to DC1394_FEATUREs; -1 marks
// properties with no corresponding libdc1394 feature (those are handled
// explicitly in get/setProperty).
int CvCaptureCAM_DC1394_v2_CPP::dc1394properties[CV_CAP_PROP_MAX_DC1394] = {
    -1, //no corresponding feature for CV_CAP_PROP_POS_MSEC
    -1,-1,-1,-1,
    DC1394_FEATURE_FRAME_RATE, //CV_CAP_PROP_FPS - fps can be set for format 7 only!
    -1,-1,-1,-1,
    DC1394_FEATURE_BRIGHTNESS, //CV_CAP_PROP_BRIGHTNESS 10
    -1,
    DC1394_FEATURE_SATURATION, //CV_CAP_PROP_SATURATION
    DC1394_FEATURE_HUE,
    DC1394_FEATURE_GAIN,
    DC1394_FEATURE_SHUTTER, //CV_CAP_PROP_EXPOSURE
    -1, //CV_CAP_PROP_CONVERT_RGB
    DC1394_FEATURE_WHITE_BALANCE, //corresponds to CV_CAP_PROP_WHITE_BALANCE_BLUE_U and CV_CAP_PROP_WHITE_BALANCE_RED_V, see set function to check these props are set
    -1,-1,
    DC1394_FEATURE_SHARPNESS, //20
    DC1394_FEATURE_EXPOSURE, //CV_CAP_PROP_AUTO_EXPOSURE - this is auto exposure according to the IIDC standard
    DC1394_FEATURE_GAMMA, //CV_CAP_PROP_GAMMA
    DC1394_FEATURE_TEMPERATURE, //CV_CAP_PROP_TEMPERATURE
    DC1394_FEATURE_TRIGGER, //CV_CAP_PROP_TRIGGER
    DC1394_FEATURE_TRIGGER_DELAY, //CV_CAP_PROP_TRIGGER_DELAY
    DC1394_FEATURE_WHITE_BALANCE, //CV_CAP_PROP_WHITE_BALANCE_RED_V
    DC1394_FEATURE_ZOOM, //CV_CAP_PROP_ZOOM
    DC1394_FEATURE_FOCUS, //CV_CAP_PROP_FOCUS
    -1 //CV_CAP_PROP_GUID
};
|
||||
// Initialize all members to their defaults; no hardware is touched here
// (device access happens in open()/startCapture()).
// Fix: the member `videoMode` was previously left uninitialized — it is now
// set to -1 ("not selected"), matching the convention used by `userMode`.
CvCaptureCAM_DC1394_v2_CPP::CvCaptureCAM_DC1394_v2_CPP()
{
    guid = 0;
    dcCam = 0;
    isoSpeed = 400;
    videoMode = -1;  // no explicit video mode selected yet
    fps = 15;
    // NOTE(review): an earlier comment claimed this is reset to 1 so that only
    // a single frame is buffered, but the default actually used is 8 DMA
    // buffers — confirm the intended buffering depth.
    nDMABufs = 8;
    started = false;
    cameraId = 0;
    colorStereo = false;
    bayer = DC1394_BAYER_METHOD_BILINEAR;
    bayerFilter = DC1394_COLOR_FILTER_GRBG;
    frameWidth = 640;
    frameHeight = 480;

    for (int i = 0; i < NIMG; i++)
        img[i] = 0;
    frameC = 0;
    nimages = 1;
    userMode = -1;
}
|
||||
|
||||
|
||||
// Configure the camera and start capture:
//   1. set bus operation mode / ISO speed,
//   2. select a video mode (explicit userMode, or best match for the
//      requested frame size),
//   3. pick the supported frame rate closest to `fps`,
//   4. apply Videre stereo quirks,
//   5. set up the DMA ring and enable ISO transmission.
// Returns true when dc1394_capture_setup succeeded.
bool CvCaptureCAM_DC1394_v2_CPP::startCapture()
{
    int i;
    int code = 0;
    if (!dcCam)
        return false;
    if (isoSpeed > 0)
    {
        // if capable set operation mode to 1394b for iso speeds above 400
        if (isoSpeed > 400 && dcCam->bmode_capable == DC1394_TRUE)
        {
            dc1394_video_set_operation_mode(dcCam, DC1394_OPERATION_MODE_1394B);
        }
        // map the requested Mbps to the nearest ISO speed constant
        code = dc1394_video_set_iso_speed(dcCam,
                                          isoSpeed <= 100 ? DC1394_ISO_SPEED_100 :
                                          isoSpeed <= 200 ? DC1394_ISO_SPEED_200 :
                                          isoSpeed <= 400 ? DC1394_ISO_SPEED_400 :
                                          isoSpeed <= 800 ? DC1394_ISO_SPEED_800 :
                                          isoSpeed == 1600 ? DC1394_ISO_SPEED_1600 :
                                          DC1394_ISO_SPEED_3200);
    }

    dc1394video_modes_t videoModes;
    dc1394_video_get_supported_modes(dcCam, &videoModes);

    // should a specific mode be used
    while (userMode >= 0) // 'if' semantic, no real loop here
    {
        dc1394video_mode_t wantedMode;

        if (userMode < (int)videoModes.num)
        {
            // set mode from number, for example the second supported mode, i.e userMode = 1
            wantedMode = videoModes.modes[userMode];
        }
        else if ((userMode >= DC1394_VIDEO_MODE_MIN) && (userMode <= DC1394_VIDEO_MODE_MAX))
        {
            // set modes directly from DC134 constants (from dc1394video_mode_t)

            //search for wanted mode, to check if camera supports it
            int j = 0;
            while ((j < (int)videoModes.num) && videoModes.modes[j] != userMode)
            {
                j++;
            }
            if (!(j < (int)videoModes.num))
            {
                userMode = -1; // wanted mode not supported, search for best mode
                break;
            }

            wantedMode = videoModes.modes[j];
        }
        else
        {
            userMode = -1; // wanted mode not supported, search for best mode
            break;
        }

        //if userMode is available: set it and update size
        {
            code = dc1394_video_set_mode(dcCam, wantedMode);
            uint32_t width = 0, height = 0;
            dc1394_get_image_size_from_video_mode(dcCam, wantedMode, &width, &height);
            frameWidth = (int)width;
            frameHeight = (int)height;
        }
        break;
    }

    // No explicit mode: scan non-Format7 modes matching the requested size,
    // preferring RGB8/RAW8, then YUV, with MONO8 as a fallback.
    if (userMode == -1 && (frameWidth > 0 || frameHeight > 0))
    {
        dc1394video_mode_t bestMode = (dc1394video_mode_t)(-1);
        for (i = 0; i < (int)videoModes.num; i++)
        {
            dc1394video_mode_t mode = videoModes.modes[i];
            if (mode >= DC1394_VIDEO_MODE_FORMAT7_MIN && mode <= DC1394_VIDEO_MODE_FORMAT7_MAX)
                continue;
            int pref = -1;
            dc1394color_coding_t colorCoding;
            dc1394_get_color_coding_from_video_mode(dcCam, mode, &colorCoding);

            uint32_t width, height;
            dc1394_get_image_size_from_video_mode(dcCam, mode, &width, &height);
            if ((int)width == frameWidth || (int)height == frameHeight)
            {
                if (colorCoding == DC1394_COLOR_CODING_RGB8 ||
                    colorCoding == DC1394_COLOR_CODING_RAW8)
                {
                    bestMode = mode;
                    break;
                }

                if (colorCoding == DC1394_COLOR_CODING_YUV411 ||
                    colorCoding == DC1394_COLOR_CODING_YUV422 ||
                    (colorCoding == DC1394_COLOR_CODING_YUV444 &&
                    pref < 1))
                {
                    bestMode = mode;
                    pref = 1;
                    break;
                }

                if (colorCoding == DC1394_COLOR_CODING_MONO8)
                {
                    bestMode = mode;
                    pref = 0;
                }
            }
        }
        if ((int)bestMode >= 0)
            code = dc1394_video_set_mode(dcCam, bestMode);
    }

    // Choose the supported frame rate closest to the requested fps.
    if (fps > 0)
    {
        dc1394video_mode_t mode;
        dc1394framerates_t framerates;
        double minDiff = DBL_MAX;
        dc1394framerate_t bestFps = (dc1394framerate_t) - 1;

        dc1394_video_get_mode(dcCam, &mode);
        dc1394_video_get_supported_framerates(dcCam, mode, &framerates);

        for (i = 0; i < (int)framerates.num; i++)
        {
            dc1394framerate_t ifps = framerates.framerates[i];
            // each successive enum value doubles the base rate of 1.875 fps
            double fps1 = (1 << (ifps - DC1394_FRAMERATE_1_875)) * 1.875;
            double diff = fabs(fps1 - fps);
            if (diff < minDiff)
            {
                minDiff = diff;
                bestFps = ifps;
            }
        }
        if ((int)bestFps >= 0)
            code = dc1394_video_set_framerate(dcCam, bestFps);
    }

    // Videre stereo heads interleave two images per frame; register 0x50c's
    // top bit indicates whether the stereo halves carry color (Bayer) data.
    if (cameraId == VIDERE)
    {
        bayerFilter = DC1394_COLOR_FILTER_GBRG;
        nimages = 2;
        uint32_t value = 0;
        dc1394_get_control_register(dcCam, 0x50c, &value);
        colorStereo = (value & 0x80000000) != 0;
    }

    code = dc1394_capture_setup(dcCam, nDMABufs, DC1394_CAPTURE_FLAGS_DEFAULT);
    if (code >= 0)
    {
        // remember the capture fd so close() can clear it from the shared set
        FD_SET(dc1394_capture_get_fileno(dcCam), &getDC1394().camFds);
        dc1394_video_set_transmission(dcCam, DC1394_ON);
        started = true;
    }

    return code >= 0;
}
|
||||
|
||||
// Open the index-th camera on the bus: enumerate cameras via the shared
// libdc1394 context, create a camera object for the matching GUID and cache
// its full feature set. Any previously opened camera is closed first.
// Returns true on success.
bool CvCaptureCAM_DC1394_v2_CPP::open(int index)
{
    bool result = false;
    dc1394camera_list_t* cameraList = 0;
    dc1394error_t err;

    close();

    if (!getDC1394().dc)
        goto _exit_;

    err = dc1394_camera_enumerate(getDC1394().dc, &cameraList);
    if (err < 0 || !cameraList || (unsigned)index >= (unsigned)cameraList->num)
        goto _exit_;

    guid = cameraList->ids[index].guid;
    dcCam = dc1394_camera_new(getDC1394().dc, guid);
    if (!dcCam)
        goto _exit_;

    // vendor id drives Videre-specific handling in startCapture()
    cameraId = dcCam->vendor_id;
    //get all features
    if (dc1394_feature_get_all(dcCam,&feature_set) == DC1394_SUCCESS)
        result = true;
    else
        result = false;

_exit_:
    // the enumeration list is freed on every path; dcCam (if created) is kept
    if (cameraList)
        dc1394_camera_free_list(cameraList);

    return result;
}
|
||||
|
||||
// Stop transmission and capture, free the camera handle and release all
// cached images/conversion buffers. Safe to call when nothing is open.
void CvCaptureCAM_DC1394_v2_CPP::close()
{
    if (dcCam)
    {
        // check for fileno valid before using
        int fileno=dc1394_capture_get_fileno(dcCam);

        // remove this capture's fd from the shared select() set
        if (fileno>=0 && FD_ISSET(fileno, &getDC1394().camFds))
            FD_CLR(fileno, &getDC1394().camFds);
        dc1394_video_set_transmission(dcCam, DC1394_OFF);
        dc1394_capture_stop(dcCam);
        dc1394_camera_free(dcCam);
        dcCam = 0;
        started = false;
    }

    // release cached output images
    for (int i = 0; i < NIMG; i++)
    {
        cvReleaseImage(&img[i]);
    }
    // release the color-conversion scratch frame (image buffer owned by us)
    if (frameC)
    {
        if (frameC->image)
            free(frameC->image);
        free(frameC);
        frameC = 0;
    }
}
|
||||
|
||||
|
||||
// Dequeue one frame (starting capture lazily on first call), optionally
// deinterlace Videre stereo pairs, convert/debayer color data to RGB8,
// and store the result(s) in img[0..nimages-1] with R/B swapped to BGR.
// Returns false for missing or corrupt frames. Every DMA frame taken from
// the ring is re-enqueued on all exit paths.
bool CvCaptureCAM_DC1394_v2_CPP::grabFrame()
{
    dc1394capture_policy_t policy = DC1394_CAPTURE_POLICY_WAIT;
    bool code = false, isColor;
    dc1394video_frame_t *dcFrame = 0, *fs = 0;
    int i, nch;

    if (!dcCam || (!started && !startCapture()))
        return false;

    // blocks until a frame is available (POLICY_WAIT)
    dc1394_capture_dequeue(dcCam, policy, &dcFrame);

    if (!dcFrame)
        return false;

    if (/*dcFrame->frames_behind > 1 ||*/ dc1394_capture_is_frame_corrupt(dcCam, dcFrame) == DC1394_TRUE)
    {
        goto _exit_;
    }

    // anything other than the mono codings is treated as color
    isColor = dcFrame->color_coding != DC1394_COLOR_CODING_MONO8 &&
              dcFrame->color_coding != DC1394_COLOR_CODING_MONO16 &&
              dcFrame->color_coding != DC1394_COLOR_CODING_MONO16S;

    if (nimages == 2)
    {
        // stereo: split the interlaced frame into two vertically stacked images
        fs = (dc1394video_frame_t*)calloc(1, sizeof(*fs));
        dc1394_deinterlace_stereo_frames(dcFrame, fs, DC1394_STEREO_METHOD_INTERLACED);
        dc1394_capture_enqueue(dcCam, dcFrame); // release the captured frame as soon as possible
        dcFrame = 0;
        if (!fs->image)
            goto _exit_;
        isColor = colorStereo;
    }
    nch = isColor ? 3 : 1;

    for (i = 0; i < nimages; i++)
    {
        IplImage fhdr;
        // work on a local copy so the image pointer/size can be adjusted
        dc1394video_frame_t f = fs ? *fs : *dcFrame, *fc = &f;
        f.size[1] /= nimages;
        f.image += f.size[0] * f.size[1] * i; // TODO: make it more universal
        if (isColor)
        {
            if (!frameC)
                frameC = (dc1394video_frame_t*)calloc(1, sizeof(*frameC));
            frameC->color_coding = nch == 3 ? DC1394_COLOR_CODING_RGB8 : DC1394_COLOR_CODING_MONO8;
            if (nimages == 1)
            {
                dc1394_convert_frames(&f, frameC);
                dc1394_capture_enqueue(dcCam, dcFrame);
                dcFrame = 0;
            }
            else
            {
                // stereo halves are raw Bayer data; debayer them instead
                f.color_filter = bayerFilter;
                dc1394_debayer_frames(&f, frameC, bayer);
            }
            fc = frameC;
        }
        if (!img[i])
            img[i] = cvCreateImage(cvSize(fc->size[0], fc->size[1]), 8, nch);
        // wrap the libdc1394 buffer in an IplImage header without copying
        cvInitImageHeader(&fhdr, cvSize(fc->size[0], fc->size[1]), 8, nch);
        cvSetData(&fhdr, fc->image, fc->size[0]*nch);

        // Swap R&B channels:
        if (nch==3)
        {
            cv::Mat tmp = cv::cvarrToMat(&fhdr);
            cv::cvtColor(tmp, tmp, cv::COLOR_RGB2BGR, tmp.channels());
        }

        cvCopy(&fhdr, img[i]);
    }

    code = true;

_exit_:
    // return any still-held frame to the DMA ring and free the stereo buffer
    if (dcFrame)
        dc1394_capture_enqueue(dcCam, dcFrame);
    if (fs)
    {
        if (fs->image)
            free(fs->image);
        free(fs);
    }

    return code;
}
|
||||
|
||||
// Return the idx-th image produced by the last grabFrame(), or 0 when the
// index is out of range (idx 1 is only valid for stereo captures).
IplImage* CvCaptureCAM_DC1394_v2_CPP::retrieveFrame(int idx)
{
    if (idx < 0 || idx >= nimages)
        return 0;
    return img[idx];
}
|
||||
|
||||
// Query a capture property. Feature reads refresh the cached feature_set
// through a const_cast (a pre-C++11 stand-in for a `mutable` member).
// Returns -1 when the property is unsupported or the read fails — 0 can be
// a legitimate feature value, so it cannot serve as the error code.
double CvCaptureCAM_DC1394_v2_CPP::getProperty(int propId) const
{
    // Simulate mutable (C++11-like) member variable
    dc1394featureset_t& fs = const_cast<dc1394featureset_t&>(feature_set);

    switch (propId)
    {
    case CV_CAP_PROP_FRAME_WIDTH:
        // fall back to a 4:3 estimate when only the other dimension is known
        return frameWidth ? frameWidth : frameHeight*4 / 3;
    case CV_CAP_PROP_FRAME_HEIGHT:
        return frameHeight ? frameHeight : frameWidth*3 / 4;
    case CV_CAP_PROP_FPS:
        return fps;
    case CV_CAP_PROP_RECTIFICATION:
        CV_LOG_WARNING(NULL, "cap_dc1394: rectification support has been removed from videoio module");
        return 0;
    case CV_CAP_PROP_WHITE_BALANCE_BLUE_U:
        // a single call reads both BU and RV; return the blue/U component
        if (dc1394_feature_whitebalance_get_value(dcCam,
                                                  &fs.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].BU_value,
                                                  &fs.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].RV_value) == DC1394_SUCCESS)
            return feature_set.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].BU_value;
        break;
    case CV_CAP_PROP_WHITE_BALANCE_RED_V:
        if (dc1394_feature_whitebalance_get_value(dcCam,
                                                  &fs.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].BU_value,
                                                  &fs.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].RV_value) == DC1394_SUCCESS)
            return feature_set.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].RV_value;
        break;
    case CV_CAP_PROP_GUID:
        //the least 32 bits are enough to identify the camera
        return (double) (guid & 0x00000000FFFFFFFF);
        break;
    case CV_CAP_PROP_MODE:
        return (double) userMode;
        break;
    case CV_CAP_PROP_ISO_SPEED:
        return (double) isoSpeed;
    case CV_CAP_PROP_BUFFERSIZE:
        return (double) nDMABufs;
    default:
        // generic path: map the property to a dc1394 feature and read it
        if (propId<CV_CAP_PROP_MAX_DC1394 && dc1394properties[propId]!=-1
            && dcCam)
            //&& feature_set.feature[dc1394properties[propId]-DC1394_FEATURE_MIN].on_off_capable)
            if (dc1394_feature_get_value(dcCam,(dc1394feature_t)dc1394properties[propId],
                                         &fs.feature[dc1394properties[propId]-DC1394_FEATURE_MIN].value) == DC1394_SUCCESS)
                return feature_set.feature[dc1394properties[propId]-DC1394_FEATURE_MIN].value;
    }
    return -1; // the value of the feature can be 0, so returning 0 as an error is wrong
}
|
||||
|
||||
bool CvCaptureCAM_DC1394_v2_CPP::setProperty(int propId, double value)
|
||||
{
|
||||
switch (propId)
|
||||
{
|
||||
case CV_CAP_PROP_FRAME_WIDTH:
|
||||
if(started)
|
||||
return false;
|
||||
frameWidth = cvRound(value);
|
||||
frameHeight = 0;
|
||||
break;
|
||||
case CV_CAP_PROP_FRAME_HEIGHT:
|
||||
if(started)
|
||||
return false;
|
||||
frameWidth = 0;
|
||||
frameHeight = cvRound(value);
|
||||
break;
|
||||
case CV_CAP_PROP_FPS:
|
||||
if(started)
|
||||
return false;
|
||||
fps = value;
|
||||
break;
|
||||
case CV_CAP_PROP_RECTIFICATION:
|
||||
CV_LOG_WARNING(NULL, "cap_dc1394: rectification support has been removed from videoio module");
|
||||
return false;
|
||||
case CV_CAP_PROP_MODE:
|
||||
if(started)
|
||||
return false;
|
||||
userMode = cvRound(value);
|
||||
break;
|
||||
case CV_CAP_PROP_ISO_SPEED:
|
||||
if(started)
|
||||
return false;
|
||||
isoSpeed = cvRound(value);
|
||||
break;
|
||||
case CV_CAP_PROP_BUFFERSIZE:
|
||||
if(started)
|
||||
return false;
|
||||
nDMABufs = value;
|
||||
break;
|
||||
//The code below is based on coriander, callbacks.c:795, refer to case RANGE_MENU_MAN :
|
||||
default:
|
||||
if (propId<CV_CAP_PROP_MAX_DC1394 && dc1394properties[propId]!=-1
|
||||
&& dcCam)
|
||||
{
|
||||
//get the corresponding feature from property-id
|
||||
dc1394feature_info_t *act_feature = &feature_set.feature[dc1394properties[propId]-DC1394_FEATURE_MIN];
|
||||
|
||||
if (cvRound(value) == CV_CAP_PROP_DC1394_OFF)
|
||||
{
|
||||
if ( (act_feature->on_off_capable)
|
||||
&& (dc1394_feature_set_power(dcCam, act_feature->id, DC1394_OFF) == DC1394_SUCCESS))
|
||||
{
|
||||
act_feature->is_on=DC1394_OFF;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
//try to turn the feature ON, feature can be ON and at the same time it can be not capable to change state to OFF
|
||||
if ( (act_feature->is_on == DC1394_OFF) && (act_feature->on_off_capable == DC1394_TRUE))
|
||||
{
|
||||
if (dc1394_feature_set_power(dcCam, act_feature->id, DC1394_ON) == DC1394_SUCCESS)
|
||||
feature_set.feature[dc1394properties[propId]-DC1394_FEATURE_MIN].is_on=DC1394_ON;
|
||||
}
|
||||
//turn off absolute mode - the actual value will be stored in the value field,
|
||||
//otherwise it would be stored into CSR (control and status register) absolute value
|
||||
if (act_feature->absolute_capable
|
||||
&& dc1394_feature_set_absolute_control(dcCam, act_feature->id, DC1394_OFF) !=DC1394_SUCCESS)
|
||||
return false;
|
||||
else
|
||||
act_feature->abs_control=DC1394_OFF;
|
||||
//set AUTO
|
||||
if (cvRound(value) == CV_CAP_PROP_DC1394_MODE_AUTO)
|
||||
{
|
||||
if (dc1394_feature_set_mode(dcCam, act_feature->id, DC1394_FEATURE_MODE_AUTO)!=DC1394_SUCCESS)
|
||||
return false;
|
||||
act_feature->current_mode=DC1394_FEATURE_MODE_AUTO;
|
||||
return true;
|
||||
}
|
||||
//set ONE PUSH
|
||||
if (cvRound(value) == CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO)
|
||||
{
|
||||
//have to set to manual first, otherwise one push will be ignored (AVT manual 4.3.0 p. 115)
|
||||
if (dc1394_feature_set_mode(dcCam, act_feature->id, DC1394_FEATURE_MODE_ONE_PUSH_AUTO)!=DC1394_SUCCESS)
|
||||
return false;
|
||||
//will change to
|
||||
act_feature->current_mode=DC1394_FEATURE_MODE_ONE_PUSH_AUTO;
|
||||
return true;
|
||||
}
|
||||
//set the feature to MANUAL mode,
|
||||
if (dc1394_feature_set_mode(dcCam, act_feature->id, DC1394_FEATURE_MODE_MANUAL)!=DC1394_SUCCESS)
|
||||
return false;
|
||||
else
|
||||
act_feature->current_mode=DC1394_FEATURE_MODE_MANUAL;
|
||||
// if property is one of the white balance features treat it in different way
|
||||
if (propId == CV_CAP_PROP_WHITE_BALANCE_BLUE_U)
|
||||
{
|
||||
if (dc1394_feature_whitebalance_set_value(dcCam,cvRound(value), act_feature->RV_value)!=DC1394_SUCCESS)
|
||||
return false;
|
||||
else
|
||||
{
|
||||
act_feature->BU_value = cvRound(value);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
if (propId == CV_CAP_PROP_WHITE_BALANCE_RED_V)
|
||||
{
|
||||
if (dc1394_feature_whitebalance_set_value(dcCam, act_feature->BU_value, cvRound(value))!=DC1394_SUCCESS)
|
||||
return false;
|
||||
else
|
||||
{
|
||||
act_feature->RV_value = cvRound(value);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
//first: check boundaries
|
||||
if (value < act_feature->min)
|
||||
{
|
||||
value = act_feature->min;
|
||||
}
|
||||
else if (value > act_feature->max)
|
||||
{
|
||||
value = act_feature->max;
|
||||
}
|
||||
|
||||
if (dc1394_feature_set_value(dcCam, act_feature->id, cvRound(value)) == DC1394_SUCCESS)
|
||||
{
|
||||
act_feature->value = value;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
// Factory: open the index-th DC1394 camera and wrap the legacy capture
// object in an IVideoCapture adapter; returns 0 when opening fails.
cv::Ptr<cv::IVideoCapture> cv::create_DC1394_capture(int index)
{
    CvCaptureCAM_DC1394_v2_CPP* cap = new CvCaptureCAM_DC1394_v2_CPP;

    if (!cap->open(index))
    {
        delete cap;
        return 0;
    }

    return cv::makePtr<cv::LegacyCapture>(cap);
}
|
||||
|
||||
#endif
|
||||
3629
3rdparty/opencv-4.5.4/modules/videoio/src/cap_dshow.cpp
vendored
Normal file
3629
3rdparty/opencv-4.5.4/modules/videoio/src/cap_dshow.cpp
vendored
Normal file
File diff suppressed because it is too large
Load Diff
47
3rdparty/opencv-4.5.4/modules/videoio/src/cap_dshow.hpp
vendored
Normal file
47
3rdparty/opencv-4.5.4/modules/videoio/src/cap_dshow.hpp
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2014, Itseez, Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifndef _CAP_DSHOW_HPP_
|
||||
#define _CAP_DSHOW_HPP_
|
||||
|
||||
#ifdef HAVE_DSHOW
|
||||
|
||||
class videoInput;
|
||||
namespace cv
|
||||
{
|
||||
|
||||
// DirectShow-based camera capture. All instances share a single static
// videoInput helper (g_VI) that manages the underlying DirectShow graphs.
class VideoCapture_DShow : public IVideoCapture
{
public:
    // Opens device `index` immediately (see open()).
    VideoCapture_DShow(int index);
    virtual ~VideoCapture_DShow();

    virtual double getProperty(int propIdx) const CV_OVERRIDE;
    virtual bool setProperty(int propIdx, double propVal) CV_OVERRIDE;

    virtual bool grabFrame() CV_OVERRIDE;
    virtual bool retrieveFrame(int outputType, OutputArray frame) CV_OVERRIDE;
    virtual int getCaptureDomain() CV_OVERRIDE;
    virtual bool isOpened() const;
protected:
    void open(int index);
    void close();

    int m_index, m_width, m_height, m_fourcc;  // device index and requested format
    int m_widthSet, m_heightSet;               // NOTE(review): presumably the size actually applied — confirm in cap_dshow.cpp
    bool m_convertRGBSet;                      // NOTE(review): presumably tracks CAP_PROP_CONVERT_RGB — confirm in cap_dshow.cpp
    static videoInput g_VI;                    // shared DirectShow helper for all instances
};
|
||||
|
||||
}
|
||||
|
||||
#endif //HAVE_DSHOW
|
||||
#endif //_CAP_DSHOW_HPP_
|
||||
656
3rdparty/opencv-4.5.4/modules/videoio/src/cap_ffmpeg.cpp
vendored
Normal file
656
3rdparty/opencv-4.5.4/modules/videoio/src/cap_ffmpeg.cpp
vendored
Normal file
@@ -0,0 +1,656 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// Intel License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000, Intel Corporation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
#if !defined(HAVE_FFMPEG)
|
||||
#error "Build configuration error"
|
||||
#endif
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "cap_ffmpeg_impl.hpp"
|
||||
|
||||
// TODO drop legacy code
|
||||
//#define icvCreateFileCapture_FFMPEG_p cvCreateFileCapture_FFMPEG
|
||||
#define icvReleaseCapture_FFMPEG_p cvReleaseCapture_FFMPEG
|
||||
#define icvGrabFrame_FFMPEG_p cvGrabFrame_FFMPEG
|
||||
#define icvRetrieveFrame_FFMPEG_p cvRetrieveFrame_FFMPEG
|
||||
#define icvSetCaptureProperty_FFMPEG_p cvSetCaptureProperty_FFMPEG
|
||||
#define icvGetCaptureProperty_FFMPEG_p cvGetCaptureProperty_FFMPEG
|
||||
#define icvCreateVideoWriter_FFMPEG_p cvCreateVideoWriter_FFMPEG
|
||||
#define icvReleaseVideoWriter_FFMPEG_p cvReleaseVideoWriter_FFMPEG
|
||||
#define icvWriteFrame_FFMPEG_p cvWriteFrame_FFMPEG
|
||||
|
||||
|
||||
namespace cv {
|
||||
namespace {
|
||||
|
||||
// Thin IVideoCapture adapter over the plain-C CvCapture_FFMPEG backend
// (cap_ffmpeg_impl.hpp). Forwards all calls through the icv*_FFMPEG_p
// function-pointer macros and applies orientation metadata on retrieve.
class CvCapture_FFMPEG_proxy CV_FINAL : public cv::IVideoCapture
{
public:
    CvCapture_FFMPEG_proxy() { ffmpegCapture = 0; }
    // Opens `filename` immediately; check isOpened() afterwards.
    CvCapture_FFMPEG_proxy(const cv::String& filename, const cv::VideoCaptureParameters& params)
        : ffmpegCapture(NULL)
    {
        open(filename, params);
    }
    virtual ~CvCapture_FFMPEG_proxy() { close(); }

    virtual double getProperty(int propId) const CV_OVERRIDE
    {
        return ffmpegCapture ? icvGetCaptureProperty_FFMPEG_p(ffmpegCapture, propId) : 0;
    }
    virtual bool setProperty(int propId, double value) CV_OVERRIDE
    {
        return ffmpegCapture ? icvSetCaptureProperty_FFMPEG_p(ffmpegCapture, propId, value)!=0 : false;
    }
    virtual bool grabFrame() CV_OVERRIDE
    {
        return ffmpegCapture ? icvGrabFrame_FFMPEG_p(ffmpegCapture)!=0 : false;
    }
    // Copies the decoded frame into `frame`, applying auto-rotation when the
    // stream carries orientation metadata and CAP_PROP_ORIENTATION_AUTO is set.
    virtual bool retrieveFrame(int, cv::OutputArray frame) CV_OVERRIDE
    {
        unsigned char* data = 0;
        int step=0, width=0, height=0, cn=0;

        if (!ffmpegCapture)
            return false;

        // if UMat, try GPU to GPU copy using OpenCL extensions
        if (frame.isUMat()) {
            if (ffmpegCapture->retrieveHWFrame(frame)) {
                return true;
            }
        }

        if (!icvRetrieveFrame_FFMPEG_p(ffmpegCapture, &data, &step, &width, &height, &cn))
            return false;

        // wrap the decoder's buffer without copying, then rotate and copy out
        cv::Mat tmp(height, width, CV_MAKETYPE(CV_8U, cn), data, step);
        this->rotateFrame(tmp);
        tmp.copyTo(frame);

        return true;
    }
    bool open(const cv::String& filename, const cv::VideoCaptureParameters& params)
    {
        close();

        ffmpegCapture = cvCreateFileCaptureWithParams_FFMPEG(filename.c_str(), params);
        return ffmpegCapture != 0;
    }
    void close()
    {
        if (ffmpegCapture)
            icvReleaseCapture_FFMPEG_p( &ffmpegCapture );
        CV_Assert(ffmpegCapture == 0);
        ffmpegCapture = 0;
    }

    virtual bool isOpened() const CV_OVERRIDE { return ffmpegCapture != 0; }
    virtual int getCaptureDomain() CV_OVERRIDE { return CV_CAP_FFMPEG; }

protected:
    CvCapture_FFMPEG* ffmpegCapture;  // owned C-backend handle; 0 when closed

    // Rotate `mat` in place according to the stream's orientation metadata.
    // Only multiples of 90 degrees are handled; other angles are ignored.
    void rotateFrame(cv::Mat &mat) const
    {
        bool rotation_auto = 0 != getProperty(CAP_PROP_ORIENTATION_AUTO);
        int rotation_angle = static_cast<int>(getProperty(CAP_PROP_ORIENTATION_META));

        if(!rotation_auto || rotation_angle%360 == 0)
        {
            return;
        }

        cv::RotateFlags flag;
        if(rotation_angle == 90 || rotation_angle == -270) { // Rotate clockwise 90 degrees
            flag = cv::ROTATE_90_CLOCKWISE;
        } else if(rotation_angle == 270 || rotation_angle == -90) { // Rotate clockwise 270 degrees
            flag = cv::ROTATE_90_COUNTERCLOCKWISE;
        } else if(rotation_angle == 180 || rotation_angle == -180) { // Rotate clockwise 180 degrees
            flag = cv::ROTATE_180;
        } else { // Unsupported rotation
            return;
        }

        cv::rotate(mat, mat, flag);
    }
};
|
||||
|
||||
} // namespace
|
||||
|
||||
// Factory: create an FFmpeg-backed capture for `filename`.
// Returns an empty pointer when the file cannot be opened.
cv::Ptr<cv::IVideoCapture> cvCreateFileCapture_FFMPEG_proxy(const std::string &filename, const cv::VideoCaptureParameters& params)
{
    cv::Ptr<CvCapture_FFMPEG_proxy> result = cv::makePtr<CvCapture_FFMPEG_proxy>(filename, params);
    if (!result || !result->isOpened())
        return cv::Ptr<cv::IVideoCapture>();
    return result;
}
|
||||
|
||||
namespace {
|
||||
|
||||
// Proxy adapting the plain-C CvVideoWriter_FFMPEG handle to the
// cv::IVideoWriter interface used by the videoio dispatch layer.
class CvVideoWriter_FFMPEG_proxy CV_FINAL :
    public cv::IVideoWriter
{
public:
    // Default-construct in the closed state.
    CvVideoWriter_FFMPEG_proxy() { ffmpegWriter = 0; }
    // Construct and immediately try to open the output file.
    CvVideoWriter_FFMPEG_proxy(const cv::String& filename, int fourcc, double fps, cv::Size frameSize, const VideoWriterParameters& params) { ffmpegWriter = 0; open(filename, fourcc, fps, frameSize, params); }
    virtual ~CvVideoWriter_FFMPEG_proxy() { close(); }

    int getCaptureDomain() const CV_OVERRIDE { return cv::CAP_FFMPEG; }

    // Encode one 8-bit frame. UMat input is offered to the hardware
    // (GPU-to-GPU) path first; otherwise the CPU buffer is handed to FFmpeg.
    virtual void write(cv::InputArray image ) CV_OVERRIDE
    {
        if (!ffmpegWriter)
            return;
        CV_Assert(image.depth() == CV_8U);

        // if UMat, try GPU to GPU copy using OpenCL extensions
        if (image.isUMat() && ffmpegWriter->writeHWFrame(image))
            return;

        icvWriteFrame_FFMPEG_p(ffmpegWriter, (const uchar*)image.getMat().ptr(), (int)image.step(), image.cols(), image.rows(), image.channels(), 0);
    }

    // (Re)open the output file, releasing any writer opened earlier.
    virtual bool open( const cv::String& filename, int fourcc, double fps, cv::Size frameSize, const VideoWriterParameters& params )
    {
        close();
        ffmpegWriter = cvCreateVideoWriterWithParams_FFMPEG( filename.c_str(), fourcc, fps, frameSize.width, frameSize.height, params );
        return ffmpegWriter != 0;
    }

    // Release the underlying writer; safe to call when nothing is open.
    virtual void close()
    {
        if (ffmpegWriter)
            icvReleaseVideoWriter_FFMPEG_p( &ffmpegWriter );
        CV_Assert(ffmpegWriter == 0);  // the release shim is expected to null the pointer
        ffmpegWriter = 0;
    }

    // Query a writer property; 0 when no writer is open.
    virtual double getProperty(int propId) const CV_OVERRIDE {
        if (!ffmpegWriter)
            return 0;
        return ffmpegWriter->getProperty(propId);
    }

    // Property writes are not supported by the FFmpeg writer.
    virtual bool setProperty(int, double) CV_OVERRIDE { return false; }
    virtual bool isOpened() const CV_OVERRIDE { return ffmpegWriter != 0; }

protected:
    CvVideoWriter_FFMPEG* ffmpegWriter;  // C handle owned by this proxy
};
|
||||
|
||||
} // namespace
|
||||
|
||||
// Factory: create an FFmpeg-backed writer for `filename`.
// Returns an empty pointer when the output cannot be opened.
cv::Ptr<cv::IVideoWriter> cvCreateVideoWriter_FFMPEG_proxy(const std::string& filename, int fourcc,
                                                           double fps, const cv::Size& frameSize,
                                                           const VideoWriterParameters& params)
{
    cv::Ptr<CvVideoWriter_FFMPEG_proxy> result = cv::makePtr<CvVideoWriter_FFMPEG_proxy>(filename, fourcc, fps, frameSize, params);
    if (!result || !result->isOpened())
        return cv::Ptr<cv::IVideoWriter>();
    return result;
}
|
||||
|
||||
} // namespace
|
||||
|
||||
|
||||
|
||||
//==================================================================================================
|
||||
|
||||
#if defined(BUILD_PLUGIN)
|
||||
|
||||
#define NEW_PLUGIN
|
||||
|
||||
#ifndef NEW_PLUGIN
|
||||
#define ABI_VERSION 0
|
||||
#define API_VERSION 0
|
||||
#include "plugin_api.hpp"
|
||||
#else
|
||||
#define CAPTURE_ABI_VERSION 1
|
||||
#define CAPTURE_API_VERSION 1
|
||||
#include "plugin_capture_api.hpp"
|
||||
#define WRITER_ABI_VERSION 1
|
||||
#define WRITER_API_VERSION 1
|
||||
#include "plugin_writer_api.hpp"
|
||||
#endif
|
||||
|
||||
namespace cv {
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_capture_open(const char* filename, int camera_index, CV_OUT CvPluginCapture* handle)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
*handle = NULL;
|
||||
if (!filename)
|
||||
return CV_ERROR_FAIL;
|
||||
CV_UNUSED(camera_index);
|
||||
CvCapture_FFMPEG_proxy *cap = 0;
|
||||
try
|
||||
{
|
||||
cap = new CvCapture_FFMPEG_proxy(filename, cv::VideoCaptureParameters());
|
||||
if (cap->isOpened())
|
||||
{
|
||||
*handle = (CvPluginCapture)cap;
|
||||
return CV_ERROR_OK;
|
||||
}
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
|
||||
}
|
||||
if (cap)
|
||||
delete cap;
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_capture_open_with_params(
|
||||
const char* filename, int camera_index,
|
||||
int* params, unsigned n_params,
|
||||
CV_OUT CvPluginCapture* handle
|
||||
)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
*handle = NULL;
|
||||
if (!filename)
|
||||
return CV_ERROR_FAIL;
|
||||
CV_UNUSED(camera_index);
|
||||
CvCapture_FFMPEG_proxy *cap = 0;
|
||||
try
|
||||
{
|
||||
cv::VideoCaptureParameters parameters(params, n_params);
|
||||
cap = new CvCapture_FFMPEG_proxy(filename, parameters);
|
||||
if (cap->isOpened())
|
||||
{
|
||||
*handle = (CvPluginCapture)cap;
|
||||
return CV_ERROR_OK;
|
||||
}
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
|
||||
}
|
||||
if (cap)
|
||||
delete cap;
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_capture_release(CvPluginCapture handle)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
CvCapture_FFMPEG_proxy* instance = (CvCapture_FFMPEG_proxy*)handle;
|
||||
delete instance;
|
||||
return CV_ERROR_OK;
|
||||
}
|
||||
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_capture_get_prop(CvPluginCapture handle, int prop, CV_OUT double* val)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
if (!val)
|
||||
return CV_ERROR_FAIL;
|
||||
try
|
||||
{
|
||||
CvCapture_FFMPEG_proxy* instance = (CvCapture_FFMPEG_proxy*)handle;
|
||||
*val = instance->getProperty(prop);
|
||||
return CV_ERROR_OK;
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
}
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_capture_set_prop(CvPluginCapture handle, int prop, double val)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
try
|
||||
{
|
||||
CvCapture_FFMPEG_proxy* instance = (CvCapture_FFMPEG_proxy*)handle;
|
||||
return instance->setProperty(prop, val) ? CV_ERROR_OK : CV_ERROR_FAIL;
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
}
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_capture_grab(CvPluginCapture handle)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
try
|
||||
{
|
||||
CvCapture_FFMPEG_proxy* instance = (CvCapture_FFMPEG_proxy*)handle;
|
||||
return instance->grabFrame() ? CV_ERROR_OK : CV_ERROR_FAIL;
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef NEW_PLUGIN
|
||||
static
|
||||
CvResult CV_API_CALL cv_capture_retrieve(CvPluginCapture handle, int stream_idx, cv_videoio_retrieve_cb_t callback, void* userdata)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
try
|
||||
{
|
||||
CvCapture_FFMPEG_proxy* instance = (CvCapture_FFMPEG_proxy*)handle;
|
||||
Mat img;
|
||||
// TODO: avoid unnecessary copying
|
||||
if (instance->retrieveFrame(stream_idx, img))
|
||||
return callback(stream_idx, img.data, (int)img.step, img.cols, img.rows, img.channels(), userdata);
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
}
|
||||
#else
|
||||
static
|
||||
CvResult CV_API_CALL cv_capture_retrieve(CvPluginCapture handle, int stream_idx, cv_videoio_capture_retrieve_cb_t callback, void* userdata)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
try
|
||||
{
|
||||
CvCapture_FFMPEG_proxy* instance = (CvCapture_FFMPEG_proxy*)handle;
|
||||
Mat img;
|
||||
// TODO: avoid unnecessary copying
|
||||
if (instance->retrieveFrame(stream_idx, img))
|
||||
return callback(stream_idx, img.data, (int)img.step, img.cols, img.rows, img.type(), userdata);
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_writer_open_with_params(
|
||||
const char* filename, int fourcc, double fps, int width, int height,
|
||||
int* params, unsigned n_params,
|
||||
CV_OUT CvPluginWriter* handle)
|
||||
{
|
||||
Size sz(width, height);
|
||||
CvVideoWriter_FFMPEG_proxy* wrt = 0;
|
||||
try
|
||||
{
|
||||
VideoWriterParameters parameters(params, n_params);
|
||||
wrt = new CvVideoWriter_FFMPEG_proxy(filename, fourcc, fps, sz, parameters);
|
||||
if(wrt && wrt->isOpened())
|
||||
{
|
||||
*handle = (CvPluginWriter)wrt;
|
||||
return CV_ERROR_OK;
|
||||
}
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
|
||||
}
|
||||
if (wrt)
|
||||
delete wrt;
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
|
||||
// Plugin entry (legacy signature): open a writer with only the is-color flag
// as an extra option, forwarded as one (key, value) pair to the full variant.
static
CvResult CV_API_CALL cv_writer_open(const char* filename, int fourcc, double fps, int width, int height, int isColor,
                                    CV_OUT CvPluginWriter* handle)
{
    // n_params counts pairs, so {VIDEOWRITER_PROP_IS_COLOR, isColor} is 1 parameter.
    int params[2] = { VIDEOWRITER_PROP_IS_COLOR, isColor };
    return cv_writer_open_with_params(filename, fourcc, fps, width, height, params, 1, handle);
}
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_writer_release(CvPluginWriter handle)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
CvVideoWriter_FFMPEG_proxy* instance = (CvVideoWriter_FFMPEG_proxy*)handle;
|
||||
delete instance;
|
||||
return CV_ERROR_OK;
|
||||
}
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_writer_get_prop(CvPluginWriter handle, int prop, CV_OUT double* val)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
if (!val)
|
||||
return CV_ERROR_FAIL;
|
||||
try
|
||||
{
|
||||
CvVideoWriter_FFMPEG_proxy* instance = (CvVideoWriter_FFMPEG_proxy*)handle;
|
||||
*val = instance->getProperty(prop);
|
||||
return CV_ERROR_OK;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
}
|
||||
|
||||
// Plugin entry: property writes are not supported by the FFmpeg writer,
// so this unconditionally reports failure.
static
CvResult CV_API_CALL cv_writer_set_prop(CvPluginWriter /*handle*/, int /*prop*/, double /*val*/)
{
    return CV_ERROR_FAIL;
}
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_writer_write(CvPluginWriter handle, const unsigned char *data, int step, int width, int height, int cn)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
try
|
||||
{
|
||||
CvVideoWriter_FFMPEG_proxy* instance = (CvVideoWriter_FFMPEG_proxy*)handle;
|
||||
Mat img(Size(width, height), CV_MAKETYPE(CV_8U, cn), const_cast<uchar*>(data), step);
|
||||
instance->write(img);
|
||||
return CV_ERROR_OK;
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
#ifndef NEW_PLUGIN
|
||||
|
||||
// Legacy single-table plugin API: version header followed by the capture and
// writer entry points. Slot order is fixed by the plugin ABI — do not reorder.
static const OpenCV_VideoIO_Plugin_API_preview plugin_api =
{
    {
        sizeof(OpenCV_VideoIO_Plugin_API_preview), ABI_VERSION, API_VERSION,
        CV_VERSION_MAJOR, CV_VERSION_MINOR, CV_VERSION_REVISION, CV_VERSION_STATUS,
        "FFmpeg OpenCV Video I/O plugin"
    },
    {
        /* 1*/CAP_FFMPEG,
        /* 2*/cv_capture_open,
        /* 3*/cv_capture_release,
        /* 4*/cv_capture_get_prop,
        /* 5*/cv_capture_set_prop,
        /* 6*/cv_capture_grab,
        /* 7*/cv_capture_retrieve,
        /* 8*/cv_writer_open,
        /* 9*/cv_writer_release,
        /* 10*/cv_writer_get_prop,
        /* 11*/cv_writer_set_prop,
        /* 12*/cv_writer_write
    }
};
|
||||
|
||||
// Plugin bootstrap (legacy): hand out the API table when the loader's
// requested ABI matches exactly and its API version is not newer than ours.
const OpenCV_VideoIO_Plugin_API_preview* opencv_videoio_plugin_init_v0(int requested_abi_version, int requested_api_version, void* /*reserved=NULL*/) CV_NOEXCEPT
{
    const bool compatible = (requested_abi_version == ABI_VERSION)
                         && (requested_api_version <= API_VERSION);
    return compatible ? &plugin_api : NULL;
}
|
||||
|
||||
#else // NEW_PLUGIN
|
||||
|
||||
// New-style capture plugin API: version header, v0 capture entry points,
// then the v1 extension (parameterized open). Slot order is ABI — do not reorder.
static const OpenCV_VideoIO_Capture_Plugin_API capture_plugin_api =
{
    {
        sizeof(OpenCV_VideoIO_Capture_Plugin_API), CAPTURE_ABI_VERSION, CAPTURE_API_VERSION,
        CV_VERSION_MAJOR, CV_VERSION_MINOR, CV_VERSION_REVISION, CV_VERSION_STATUS,
        "FFmpeg OpenCV Video I/O Capture plugin"
    },
    {
        /* 1*/CAP_FFMPEG,
        /* 2*/cv_capture_open,
        /* 3*/cv_capture_release,
        /* 4*/cv_capture_get_prop,
        /* 5*/cv_capture_set_prop,
        /* 6*/cv_capture_grab,
        /* 7*/cv_capture_retrieve,
    },
    {
        /* 8*/cv_capture_open_with_params,
    }
};
|
||||
|
||||
// Plugin bootstrap: hand out the capture API table when the loader's
// requested ABI matches exactly and its API version is not newer than ours.
const OpenCV_VideoIO_Capture_Plugin_API* opencv_videoio_capture_plugin_init_v1(int requested_abi_version, int requested_api_version, void* /*reserved=NULL*/) CV_NOEXCEPT
{
    const bool compatible = (requested_abi_version == CAPTURE_ABI_VERSION)
                         && (requested_api_version <= CAPTURE_API_VERSION);
    return compatible ? &capture_plugin_api : NULL;
}
|
||||
|
||||
// New-style writer plugin API: version header, v0 writer entry points,
// then the v1 extension (parameterized open). Slot order is ABI — do not reorder.
static const OpenCV_VideoIO_Writer_Plugin_API writer_plugin_api =
{
    {
        sizeof(OpenCV_VideoIO_Writer_Plugin_API), WRITER_ABI_VERSION, WRITER_API_VERSION,
        CV_VERSION_MAJOR, CV_VERSION_MINOR, CV_VERSION_REVISION, CV_VERSION_STATUS,
        "FFmpeg OpenCV Video I/O Writer plugin"
    },
    {
        /* 1*/CAP_FFMPEG,
        /* 2*/cv_writer_open,
        /* 3*/cv_writer_release,
        /* 4*/cv_writer_get_prop,
        /* 5*/cv_writer_set_prop,
        /* 6*/cv_writer_write
    },
    {
        /* 7*/cv_writer_open_with_params
    }
};
|
||||
|
||||
// Plugin bootstrap: hand out the writer API table when the loader's
// requested ABI matches exactly and its API version is not newer than ours.
const OpenCV_VideoIO_Writer_Plugin_API* opencv_videoio_writer_plugin_init_v1(int requested_abi_version, int requested_api_version, void* /*reserved=NULL*/) CV_NOEXCEPT
{
    const bool compatible = (requested_abi_version == WRITER_ABI_VERSION)
                         && (requested_api_version <= WRITER_API_VERSION);
    return compatible ? &writer_plugin_api : NULL;
}
|
||||
|
||||
#endif // NEW_PLUGIN
|
||||
|
||||
#endif // BUILD_PLUGIN
|
||||
993
3rdparty/opencv-4.5.4/modules/videoio/src/cap_ffmpeg_hw.hpp
vendored
Normal file
993
3rdparty/opencv-4.5.4/modules/videoio/src/cap_ffmpeg_hw.hpp
vendored
Normal file
@@ -0,0 +1,993 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2020-2021 Intel Corporation
|
||||
|
||||
#include "opencv2/videoio.hpp"
|
||||
#ifdef HAVE_OPENCL
|
||||
#include "opencv2/core/ocl.hpp"
|
||||
#endif
|
||||
#if defined(__OPENCV_BUILD) && !defined(BUILD_PLUGIN) // TODO Properly detect and add D3D11 / LIBVA dependencies for standalone plugins
|
||||
#include "cvconfig.h"
|
||||
#endif
|
||||
#include <sstream>
|
||||
|
||||
#ifdef HAVE_D3D11
|
||||
#define D3D11_NO_HELPERS
|
||||
#include <d3d11.h>
|
||||
#include <codecvt>
|
||||
#include "opencv2/core/directx.hpp"
|
||||
#ifdef HAVE_OPENCL
|
||||
#include <CL/cl_d3d11.h>
|
||||
#endif
|
||||
#endif // HAVE_D3D11
|
||||
|
||||
#ifdef HAVE_VA
|
||||
#include <va/va_backend.h>
|
||||
#ifdef HAVE_VA_INTEL
|
||||
#include "opencv2/core/va_intel.hpp"
|
||||
#ifndef CL_TARGET_OPENCL_VERSION
|
||||
#define CL_TARGET_OPENCL_VERSION 120
|
||||
#endif
|
||||
#ifdef HAVE_VA_INTEL_OLD_HEADER
|
||||
#include <CL/va_ext.h>
|
||||
#else
|
||||
#include <CL/cl_va_api_media_sharing_intel.h>
|
||||
#endif
|
||||
#endif
|
||||
#endif // HAVE_VA
|
||||
|
||||
// FFMPEG "C" headers
|
||||
extern "C" {
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavutil/avutil.h>
|
||||
#include <libavutil/hwcontext.h>
|
||||
#ifdef HAVE_D3D11
|
||||
#include <libavutil/hwcontext_d3d11va.h>
|
||||
#endif
|
||||
#ifdef HAVE_VA
|
||||
#include <libavutil/hwcontext_vaapi.h>
|
||||
#endif
|
||||
#ifdef HAVE_MFX // dependency only on MFX header files, no linkage dependency
|
||||
#include <libavutil/hwcontext_qsv.h>
|
||||
#endif
|
||||
}
|
||||
|
||||
#define HW_DEFAULT_POOL_SIZE 32
|
||||
#define HW_DEFAULT_SW_FORMAT AV_PIX_FMT_NV12
|
||||
|
||||
using namespace cv;
|
||||
|
||||
static AVCodec *hw_find_codec(AVCodecID id, AVHWDeviceType hw_type, int (*check_category)(const AVCodec *),
|
||||
const char *disabled_codecs, AVPixelFormat *hw_pix_fmt);
|
||||
static AVBufferRef* hw_create_device(AVHWDeviceType hw_type, int hw_device, const std::string& device_subname, bool use_opencl);
|
||||
static AVBufferRef* hw_create_frames(struct AVCodecContext* ctx, AVBufferRef *hw_device_ctx, int width, int height, AVPixelFormat hw_format);
|
||||
static AVPixelFormat hw_get_format_callback(struct AVCodecContext *ctx, const enum AVPixelFormat * fmt);
|
||||
static VideoAccelerationType hw_type_to_va_type(AVHWDeviceType hw_type);
|
||||
|
||||
// Human-readable name for a VideoAccelerationType; used for logging and to
// build option-dictionary key names ("hw_decoders_<name>", "hw_encoders_<name>").
static
const char* getVideoAccelerationName(VideoAccelerationType va_type)
{
    switch (va_type)
    {
    case VIDEO_ACCELERATION_NONE: return "none";
    case VIDEO_ACCELERATION_ANY: return "any";
    case VIDEO_ACCELERATION_D3D11: return "d3d11";
    case VIDEO_ACCELERATION_VAAPI: return "vaapi";
    case VIDEO_ACCELERATION_MFX: return "mfx";
    }
    // Fallback for values outside the enum (defensive).
    return "unknown";
}
|
||||
|
||||
static
|
||||
std::string getDecoderConfiguration(VideoAccelerationType va_type, AVDictionary *dict)
|
||||
{
|
||||
std::string va_name = getVideoAccelerationName(va_type);
|
||||
std::string key_name = std::string("hw_decoders_") + va_name;
|
||||
const char *hw_acceleration = NULL;
|
||||
if (dict)
|
||||
{
|
||||
AVDictionaryEntry* entry = av_dict_get(dict, key_name.c_str(), NULL, 0);
|
||||
if (entry)
|
||||
hw_acceleration = entry->value;
|
||||
}
|
||||
if (hw_acceleration)
|
||||
return hw_acceleration;
|
||||
|
||||
// some default values (FFMPEG_DECODE_ACCELERATION_TYPES)
|
||||
#ifdef _WIN32
|
||||
switch (va_type)
|
||||
{
|
||||
case VIDEO_ACCELERATION_NONE: return "";
|
||||
case VIDEO_ACCELERATION_ANY: return "d3d11va";
|
||||
case VIDEO_ACCELERATION_D3D11: return "d3d11va";
|
||||
case VIDEO_ACCELERATION_VAAPI: return "";
|
||||
case VIDEO_ACCELERATION_MFX: return ""; // "qsv" fails if non-Intel D3D11 device
|
||||
}
|
||||
return "";
|
||||
#else
|
||||
switch (va_type)
|
||||
{
|
||||
case VIDEO_ACCELERATION_NONE: return "";
|
||||
case VIDEO_ACCELERATION_ANY: return "vaapi.iHD";
|
||||
case VIDEO_ACCELERATION_D3D11: return "";
|
||||
case VIDEO_ACCELERATION_VAAPI: return "vaapi.iHD";
|
||||
case VIDEO_ACCELERATION_MFX: return "qsv.iHD";
|
||||
}
|
||||
return "";
|
||||
#endif
|
||||
}
|
||||
|
||||
static
|
||||
std::string getEncoderConfiguration(VideoAccelerationType va_type, AVDictionary *dict)
|
||||
{
|
||||
std::string va_name = getVideoAccelerationName(va_type);
|
||||
std::string key_name = std::string("hw_encoders_") + va_name;
|
||||
const char *hw_acceleration = NULL;
|
||||
if (dict)
|
||||
{
|
||||
AVDictionaryEntry* entry = av_dict_get(dict, key_name.c_str(), NULL, 0);
|
||||
if (entry)
|
||||
hw_acceleration = entry->value;
|
||||
}
|
||||
if (hw_acceleration)
|
||||
return hw_acceleration;
|
||||
|
||||
// some default values (FFMPEG_ENCODE_ACCELERATION_TYPES)
|
||||
#ifdef _WIN32
|
||||
switch (va_type)
|
||||
{
|
||||
case VIDEO_ACCELERATION_NONE: return "";
|
||||
case VIDEO_ACCELERATION_ANY: return "qsv";
|
||||
case VIDEO_ACCELERATION_D3D11: return "";
|
||||
case VIDEO_ACCELERATION_VAAPI: return "";
|
||||
case VIDEO_ACCELERATION_MFX: return "qsv";
|
||||
}
|
||||
return "";
|
||||
#else
|
||||
switch (va_type)
|
||||
{
|
||||
case VIDEO_ACCELERATION_NONE: return "";
|
||||
case VIDEO_ACCELERATION_ANY: return "qsv.iHD,vaapi.iHD";
|
||||
case VIDEO_ACCELERATION_D3D11: return "";
|
||||
case VIDEO_ACCELERATION_VAAPI: return "vaapi.iHD";
|
||||
case VIDEO_ACCELERATION_MFX: return "qsv.iHD";
|
||||
}
|
||||
return "unknown";
|
||||
#endif
|
||||
}
|
||||
|
||||
static
|
||||
std::string getDecoderDisabledCodecs(AVDictionary *dict)
|
||||
{
|
||||
std::string key_name = std::string("hw_disable_decoders");
|
||||
const char *disabled_codecs = NULL;
|
||||
if (dict)
|
||||
{
|
||||
AVDictionaryEntry* entry = av_dict_get(dict, key_name.c_str(), NULL, 0);
|
||||
if (entry)
|
||||
disabled_codecs = entry->value;
|
||||
}
|
||||
if (disabled_codecs)
|
||||
return disabled_codecs;
|
||||
|
||||
// some default values (FFMPEG_DECODE_DISABLE_CODECS)
|
||||
#ifdef _WIN32
|
||||
return "none";
|
||||
#else
|
||||
return "av1.vaapi,av1_qsv,vp8.vaapi,vp8_qsv"; // "vp9_qsv"
|
||||
#endif
|
||||
}
|
||||
|
||||
static
|
||||
std::string getEncoderDisabledCodecs(AVDictionary *dict)
|
||||
{
|
||||
std::string key_name = std::string("hw_disabled_encoders");
|
||||
const char *disabled_codecs = NULL;
|
||||
if (dict)
|
||||
{
|
||||
AVDictionaryEntry* entry = av_dict_get(dict, key_name.c_str(), NULL, 0);
|
||||
if (entry)
|
||||
disabled_codecs = entry->value;
|
||||
}
|
||||
if (disabled_codecs)
|
||||
return disabled_codecs;
|
||||
|
||||
// some default values (FFMPEG_ENCODE_DISABLE_CODECS)
|
||||
#ifdef _WIN32
|
||||
return "mjpeg_qsv";
|
||||
#else
|
||||
return "mjpeg_vaapi,mjpeg_qsv,vp8_vaapi";
|
||||
#endif
|
||||
}
|
||||
|
||||
// Validate an FFmpeg hardware device context before use and log the decision.
// Returns true when the device is usable for `hw_type`; also enforces the
// optional `device_subname` filter against the adapter/driver name.
// Platform specifics:
//  - D3D11: queries the DXGI adapter description for the device name.
//  - VAAPI: reads the driver vendor string; for QSV additionally verifies the
//    driver exposes the VideoProc entrypoint that MediaSDK requires.
static
bool hw_check_device(AVBufferRef* ctx, AVHWDeviceType hw_type, const std::string& device_subname) {
    if (!ctx)
        return false;
    AVHWDeviceContext* hw_device_ctx = (AVHWDeviceContext*)ctx->data;
    if (!hw_device_ctx->hwctx)
        return false;
    // Friendly name for log messages; QSV is reported as "MFX".
    const char *hw_name = av_hwdevice_get_type_name(hw_type);
    if (hw_type == AV_HWDEVICE_TYPE_QSV)
        hw_name = "MFX";
    bool ret = true;
    std::string device_name;
#if defined(HAVE_D3D11)
    if (hw_device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
        // Walk ID3D11Device -> IDXGIDevice -> IDXGIAdapter to read the
        // adapter description; each COM interface is released after use.
        ID3D11Device* device = ((AVD3D11VADeviceContext*)hw_device_ctx->hwctx)->device;
        IDXGIDevice* dxgiDevice = nullptr;
        if (device && SUCCEEDED(device->QueryInterface(__uuidof(IDXGIDevice), reinterpret_cast<void**>(&dxgiDevice)))) {
            IDXGIAdapter* adapter = nullptr;
            if (SUCCEEDED(dxgiDevice->GetAdapter(&adapter))) {
                DXGI_ADAPTER_DESC desc;
                if (SUCCEEDED(adapter->GetDesc(&desc))) {
                    // Adapter description is UTF-16; convert to UTF-8.
                    std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> conv;
                    device_name = conv.to_bytes(desc.Description);
                }
                adapter->Release();
            }
            dxgiDevice->Release();
        }
    }
#endif
    if (hw_device_ctx->type == AV_HWDEVICE_TYPE_VAAPI) {
#if defined(HAVE_VA) && (VA_MAJOR_VERSION >= 1)
        VADisplay display = ((AVVAAPIDeviceContext *) hw_device_ctx->hwctx)->display;
        if (display) {
            VADriverContext *va_ctx = ((VADisplayContext *) display)->pDriverContext;
            device_name = va_ctx->str_vendor;
            if (hw_type == AV_HWDEVICE_TYPE_QSV) {
                // Workaround for issue fixed in MediaSDK 21.x https://github.com/Intel-Media-SDK/MediaSDK/issues/2595
                // Checks VAAPI driver for support of VideoProc operation required by MediaSDK
                ret = false;
                int n_entrypoints = va_ctx->max_entrypoints;
                std::vector<VAEntrypoint> entrypoints(n_entrypoints);
                if (va_ctx->vtable->vaQueryConfigEntrypoints(va_ctx, VAProfileNone, entrypoints.data(), &n_entrypoints) == VA_STATUS_SUCCESS) {
                    for (int i = 0; i < n_entrypoints; i++) {
                        if (entrypoints[i] == VAEntrypointVideoProc) {
                            ret = true;
                            break;
                        }
                    }
                }
                if (!ret)
                    CV_LOG_INFO(NULL, "FFMPEG: Skipping MFX video acceleration as entrypoint VideoProc not found in: " << device_name);
            }
        }
#else
        ret = (hw_type != AV_HWDEVICE_TYPE_QSV); // disable MFX if we can't check VAAPI for VideoProc entrypoint
#endif
    }
    // Apply the user's device-name substring filter, if any.
    if (ret && !device_subname.empty() && device_name.find(device_subname) == std::string::npos)
    {
        CV_LOG_INFO(NULL, "FFMPEG: Skipping '" << hw_name <<
                    "' video acceleration on the following device name as not matching substring '" << device_subname << "': " << device_name);
        ret = false; // reject configuration
    }
    if (ret)
    {
        if (!device_name.empty()) {
            CV_LOG_INFO(NULL, "FFMPEG: Using " << hw_name << " video acceleration on device: " << device_name);
        } else {
            CV_LOG_INFO(NULL, "FFMPEG: Using " << hw_name << " video acceleration");
        }
    }
    return ret;
}
|
||||
|
||||
// Derive a hardware device context of `hw_type` from an existing one (e.g. a
// QSV context derived from VAAPI/D3D11). On success the returned context owns
// an extra reference to the source context, stored in 'user_opaque' and
// released by the custom 'free' hook, so the child outlives the parent.
// Returns NULL (after logging) when derivation fails.
static
AVBufferRef* hw_create_derived_context(AVHWDeviceType hw_type, AVBufferRef* hw_device_ctx) {
    AVBufferRef* derived_ctx = NULL;
    const char* hw_name = av_hwdevice_get_type_name(hw_type);
    int err = av_hwdevice_ctx_create_derived(&derived_ctx, hw_type, hw_device_ctx, 0);
    if (!derived_ctx || err < 0)
    {
        // Drop any partially-created context before reporting failure.
        if (derived_ctx)
            av_buffer_unref(&derived_ctx);
        CV_LOG_INFO(NULL, "FFMPEG: Failed to create derived video acceleration (av_hwdevice_ctx_create_derived) for " << hw_name << ". Error=" << err);
        return NULL;
    }
    else
    {
        // Store child context in 'user_opaque' field of parent context.
        struct FreeChildContext {
            // Called by FFmpeg when the derived context is destroyed;
            // releases the reference taken on the source context below.
            static void free(struct AVHWDeviceContext* ctx) {
                AVBufferRef* child_ctx = (AVBufferRef*)ctx->user_opaque;
                if (child_ctx)
                    av_buffer_unref(&child_ctx);
            }
        };
        AVHWDeviceContext* ctx = (AVHWDeviceContext*)derived_ctx->data;
        ctx->user_opaque = av_buffer_ref(hw_device_ctx);  // keep the source alive
        ctx->free = FreeChildContext::free;
        CV_LOG_INFO(NULL, "FFMPEG: Created derived video acceleration context (av_hwdevice_ctx_create_derived) for " << hw_name);
        return derived_ctx;
    }
}
|
||||
|
||||
#ifdef HAVE_OPENCL // GPU buffer interop with cv::UMat
|
||||
|
||||
// FFmpeg context attached to OpenCL context.
// Holds a strong reference (av_buffer_ref) to an FFmpeg hardware device
// context for the lifetime of the OpenCL context it is registered with,
// so GPU interop code can recover the device later via GetAVHWDevice().
class OpenCL_FFMPEG_Context : public ocl::Context::UserContext {
public:
    // Takes its own reference on `ctx`; the caller keeps ownership of its ref.
    OpenCL_FFMPEG_Context(AVBufferRef* ctx) {
        ctx_ = av_buffer_ref(ctx);
    }
    virtual ~OpenCL_FFMPEG_Context() {
        av_buffer_unref(&ctx_);
    }
    // Non-owning accessor; valid while this object lives.
    AVBufferRef* GetAVHWDevice() {
        return ctx_;
    }
private:
    AVBufferRef* ctx_;  // owned reference, released in the destructor
};
|
||||
|
||||
#ifdef HAVE_MFX
|
||||
static
|
||||
int hw_find_qsv_surface_index(AVFrame* hw_frame)
|
||||
{
|
||||
if (AV_PIX_FMT_QSV != hw_frame->format)
|
||||
return -1;
|
||||
mfxFrameSurface1* surface = (mfxFrameSurface1*)hw_frame->data[3]; // As defined by AV_PIX_FMT_QSV
|
||||
AVHWFramesContext* frames_ctx = (AVHWFramesContext*)hw_frame->hw_frames_ctx->data;
|
||||
AVQSVFramesContext* qsv_ctx = (AVQSVFramesContext*)frames_ctx->hwctx;
|
||||
for (int i = 0; i < qsv_ctx->nb_surfaces; i++) {
|
||||
if (surface == qsv_ctx->surfaces + i) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_VA
|
||||
static
|
||||
VADisplay hw_get_va_display(AVHWDeviceContext* hw_device_ctx)
|
||||
{
|
||||
if (hw_device_ctx->type == AV_HWDEVICE_TYPE_QSV) { // we stored pointer to child context in 'user_opaque' field
|
||||
AVBufferRef* ctx = (AVBufferRef*)hw_device_ctx->user_opaque;
|
||||
hw_device_ctx = (AVHWDeviceContext*)ctx->data;
|
||||
}
|
||||
if (hw_device_ctx && hw_device_ctx->type == AV_HWDEVICE_TYPE_VAAPI) {
|
||||
return ((AVVAAPIDeviceContext*)hw_device_ctx->hwctx)->display;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
#endif // HAVE_VA
|
||||
|
||||
#ifdef HAVE_VA_INTEL
|
||||
// Extract the VASurfaceID backing `hw_frame`.
// Direct for VAAPI frames; for QSV frames the surface index is looked up in
// the QSV pool and mapped to the same index in the VAAPI child frames context
// (stored in 'user_opaque'). Returns VA_INVALID_SURFACE when not applicable.
static
VASurfaceID hw_get_va_surface(AVFrame* hw_frame) {
    if (AV_PIX_FMT_VAAPI == hw_frame->format) {
        return (VASurfaceID)(size_t)hw_frame->data[3]; // As defined by AV_PIX_FMT_VAAPI
    }
#ifdef HAVE_MFX
    else if (AV_PIX_FMT_QSV == hw_frame->format) {
        int frame_idx = hw_find_qsv_surface_index(hw_frame);
        if (frame_idx >= 0) { // frame index is same in parent (QSV) and child (VAAPI) frame context
            AVHWFramesContext *frames_ctx = (AVHWFramesContext *) hw_frame->hw_frames_ctx->data;
            AVHWFramesContext *child_ctx = (AVHWFramesContext *) frames_ctx->user_opaque;
            if (child_ctx && AV_HWDEVICE_TYPE_VAAPI == child_ctx->device_ctx->type) {
                AVVAAPIFramesContext *vaapi_ctx = (AVVAAPIFramesContext *) child_ctx->hwctx;
                CV_Assert(frame_idx < vaapi_ctx->nb_surfaces);
                return vaapi_ctx->surface_ids[frame_idx];
            }
        }
    }
#endif // HAVE_MFX
    return VA_INVALID_SURFACE;
}
|
||||
#endif // HAVE_VA_INTEL
|
||||
|
||||
#ifdef HAVE_D3D11
|
||||
static
|
||||
AVD3D11VADeviceContext* hw_get_d3d11_device_ctx(AVHWDeviceContext* hw_device_ctx) {
|
||||
if (AV_HWDEVICE_TYPE_QSV == hw_device_ctx->type) { // we stored pointer to child context in 'user_opaque' field
|
||||
AVBufferRef* ctx = (AVBufferRef*)hw_device_ctx->user_opaque;
|
||||
hw_device_ctx = (AVHWDeviceContext*)ctx->data;
|
||||
}
|
||||
if (AV_HWDEVICE_TYPE_D3D11VA == hw_device_ctx->type) {
|
||||
return (AVD3D11VADeviceContext*)hw_device_ctx->hwctx;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// Extract the D3D11 texture and its array slice (*subresource) backing
// `hw_frame`. Direct for AV_PIX_FMT_D3D11; for QSV frames the texture comes
// from the D3D11 child frames context and the slice from the QSV pool index.
// Returns NULL when the frame is not D3D11-backed.
ID3D11Texture2D* hw_get_d3d11_texture(AVFrame* hw_frame, int* subresource) {
    ID3D11Texture2D* texture = NULL;
    if (AV_PIX_FMT_D3D11 == hw_frame->format) {
        texture = (ID3D11Texture2D*)hw_frame->data[0]; // As defined by AV_PIX_FMT_D3D11
        *subresource = (intptr_t)hw_frame->data[1]; // As defined by AV_PIX_FMT_D3D11
    }
#ifdef HAVE_MFX
    else if (AV_PIX_FMT_QSV == hw_frame->format) {
        AVHWFramesContext *frames_ctx = (AVHWFramesContext *) hw_frame->hw_frames_ctx->data;
        AVHWFramesContext *child_ctx = (AVHWFramesContext *) frames_ctx->user_opaque;
        if (child_ctx && AV_HWDEVICE_TYPE_D3D11VA == child_ctx->device_ctx->type) {
            texture = ((AVD3D11VAFramesContext*)child_ctx->hwctx)->texture;
        }
        // QSV pool index doubles as the texture-array subresource index.
        *subresource = hw_find_qsv_surface_index(hw_frame);
        CV_Assert(*subresource >= 0);
    }
#endif
    return texture;
}
|
||||
|
||||
// In D3D11 case we allocate additional texture as single texture (not texture array) because
// OpenCL interop with D3D11 doesn't support/work with NV12 sub-texture of texture array.
// The texture is cached in the frames context's 'user_opaque' field; it is released by the
// free() callback installed on the frames context in hw_create_frames().
ID3D11Texture2D* hw_get_d3d11_single_texture(AVFrame* hw_frame, AVD3D11VADeviceContext* d3d11_device_ctx, ID3D11Texture2D* texture) {
    AVHWFramesContext* frames_ctx = (AVHWFramesContext*)hw_frame->hw_frames_ctx->data;
    if (AV_HWDEVICE_TYPE_QSV == frames_ctx->device_ctx->type) {
        frames_ctx = (AVHWFramesContext*)frames_ctx->user_opaque; // we stored pointer to child context in 'user_opaque' field
    }
    if (!frames_ctx || AV_HWDEVICE_TYPE_D3D11VA != frames_ctx->device_ctx->type) {
        return NULL;
    }
    ID3D11Texture2D* singleTexture = (ID3D11Texture2D*)frames_ctx->user_opaque;
    if (!singleTexture && d3d11_device_ctx && texture) {
        // Lazily create a 1-element texture with the same description as the
        // source sub-texture, shareable with OpenCL (SHADER_RESOURCE + SHARED).
        D3D11_TEXTURE2D_DESC desc = {};
        texture->GetDesc(&desc);
        desc.ArraySize = 1;
        desc.BindFlags |= D3D11_BIND_SHADER_RESOURCE;
        desc.MiscFlags |= D3D11_RESOURCE_MISC_SHARED;
        if (SUCCEEDED(d3d11_device_ctx->device->CreateTexture2D(&desc, NULL, &singleTexture))) {
            frames_ctx->user_opaque = singleTexture;
        }
    }
    return singleTexture;
}
#endif // HAVE_D3D11
||||
|
||||
// Check whether the current OpenCL execution context was created with interop
// binding to the same device as 'ctx' (same VAAPI display or same D3D11 device).
// Returns the matching child device type (VAAPI or D3D11VA), or
// AV_HWDEVICE_TYPE_NONE when there is no current context or no match.
static
AVHWDeviceType hw_check_opencl_context(AVHWDeviceContext* ctx) {
    ocl::OpenCLExecutionContext& ocl_context = ocl::OpenCLExecutionContext::getCurrentRef();
    if (!ctx || ocl_context.empty())
        return AV_HWDEVICE_TYPE_NONE;
#ifdef HAVE_VA_INTEL
    // Compare the VA display bound to the OpenCL context with the one in 'ctx'.
    VADisplay vadisplay_ocl = ocl_context.getContext().getOpenCLContextProperty(CL_CONTEXT_VA_API_DISPLAY_INTEL);
    VADisplay vadisplay_ctx = hw_get_va_display(ctx);
    if (vadisplay_ocl && vadisplay_ocl == vadisplay_ctx)
        return AV_HWDEVICE_TYPE_VAAPI;
#endif
#ifdef HAVE_D3D11
    // Compare the D3D11 device bound to the OpenCL context with the one in 'ctx'.
    ID3D11Device* d3d11device_ocl = (ID3D11Device*)ocl_context.getContext().getOpenCLContextProperty(CL_CONTEXT_D3D11_DEVICE_KHR);
    AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(ctx);
    if (d3d11_device_ctx && d3d11device_ocl && d3d11_device_ctx->device == d3d11device_ocl)
        return AV_HWDEVICE_TYPE_D3D11VA;
#endif
    return AV_HWDEVICE_TYPE_NONE;
}
||||
// Initialize an OpenCL context with interop binding to the FFmpeg HW device in
// 'ctx' (VAAPI display or D3D11 device), then, if the binding succeeded,
// attach 'ctx' to the OpenCL context so that later capture/writer instances
// can reuse the same acceleration context (see hw_create_context_from_opencl).
static
void hw_init_opencl(AVBufferRef* ctx) {
    if (!ctx)
        return;
    AVHWDeviceContext* hw_device_ctx = (AVHWDeviceContext*)ctx->data;
    if (!hw_device_ctx)
        return;
#ifdef HAVE_VA_INTEL
    VADisplay va_display = hw_get_va_display(hw_device_ctx);
    if (va_display) {
        va_intel::ocl::initializeContextFromVA(va_display);
    }
#endif
#ifdef HAVE_D3D11
    AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(hw_device_ctx);
    if (d3d11_device_ctx) {
        directx::ocl::initializeContextFromD3D11Device(d3d11_device_ctx->device);
    }
#endif
    // Verify the freshly created OpenCL context is actually bound to this device.
    if (hw_check_opencl_context(hw_device_ctx) != AV_HWDEVICE_TYPE_NONE) {
        // Attach AVHWDeviceContext to OpenCL context
        ocl::Context &ocl_context = ocl::OpenCLExecutionContext::getCurrent().getContext();
        ocl_context.setUserContext(std::make_shared<OpenCL_FFMPEG_Context>(ctx));
    }
}
|
||||
// Retrieve an FFmpeg HW device context of type 'hw_type' from the
// AVHWDeviceContext previously attached to the OpenCL context (hw_init_opencl).
// Returns a new reference (possibly a derived context of the requested type),
// or NULL when nothing suitable is attached.
static
AVBufferRef* hw_create_context_from_opencl(ocl::OpenCLExecutionContext& ocl_context, AVHWDeviceType hw_type) {
    if (ocl_context.empty())
        return NULL;
    auto ocl_ffmpeg_context = ocl_context.getContext().getUserContext<OpenCL_FFMPEG_Context>();
    if (!ocl_ffmpeg_context)
        return NULL;
    AVBufferRef* ctx = ocl_ffmpeg_context->GetAVHWDevice();
    if (hw_type != ((AVHWDeviceContext*)ctx->data)->type) {
        // Attached context has a different type — derive the requested one from it.
        ctx = hw_create_derived_context(hw_type, ctx);
    }
    else {
        ctx = av_buffer_ref(ctx);
    }
    if (ctx)
        CV_LOG_INFO(NULL, "FFMPEG: Using " << av_hwdevice_get_type_name(hw_type) << " video acceleration context attached to OpenCL context");
    return ctx;
}

#endif // HAVE_OPENCL
|
||||
// Create an FFmpeg HW device context of type 'hw_type'.
// Order of attempts:
//   1) reuse a context already attached to the current OpenCL context;
//   2) create a new one via av_hwdevice_ctx_create(); in the QSV case, a child
//      context is created first (D3D11VA/DXVA2 on Windows, VAAPI elsewhere)
//      and the QSV context is derived from it.
// 'hw_device' (>= 0) selects a specific adapter/render node; 'device_subname'
// is a device-name filter passed to hw_check_device(); 'use_opencl' forces
// creating an OpenCL interop context even if one already exists.
// Returns NULL when no usable device could be created.
static
AVBufferRef* hw_create_device(AVHWDeviceType hw_type, int hw_device, const std::string& device_subname, bool use_opencl) {
    AVBufferRef* hw_device_ctx = NULL;
    if (AV_HWDEVICE_TYPE_NONE == hw_type)
        return NULL;

#ifdef HAVE_OPENCL
    // Check if OpenCL context has AVHWDeviceContext attached to it
    ocl::OpenCLExecutionContext& ocl_context = ocl::OpenCLExecutionContext::getCurrentRef();
    try {
        hw_device_ctx = hw_create_context_from_opencl(ocl_context, hw_type);
        if (hw_device_ctx) {
            if (hw_device >= 0)
                CV_LOG_ERROR(NULL, "VIDEOIO/FFMPEG: ignoring property HW_DEVICE as device context already created and attached to OpenCL context");
            return hw_device_ctx;
        }
    }
    catch (...) {
        CV_LOG_INFO(NULL, "FFMPEG: Exception creating Video Acceleration context using current OpenCL context");
    }
#endif

    // Create new media context. In QSV case, first create 'child' context.
    std::vector<AVHWDeviceType> child_types = { hw_type };
    if (hw_type == AV_HWDEVICE_TYPE_QSV) {
#ifdef _WIN32
        child_types = { AV_HWDEVICE_TYPE_D3D11VA, AV_HWDEVICE_TYPE_DXVA2 };
#else
        child_types = { AV_HWDEVICE_TYPE_VAAPI };
#endif
    }
    // Try each candidate child type until one produces a usable device.
    for (AVHWDeviceType child_type : child_types) {
        char device[128] = "";
        char* pdevice = NULL;
        if (hw_device >= 0 && hw_device < 100000) {
            if (child_type == AV_HWDEVICE_TYPE_VAAPI) {
                // VAAPI addresses devices by DRM render node (renderD128 + index).
                snprintf(device, sizeof(device), "/dev/dri/renderD%d", 128 + hw_device);
            }
            else {
                snprintf(device, sizeof(device), "%d", hw_device);
            }
            pdevice = device;
        }
        const char* hw_child_name = av_hwdevice_get_type_name(child_type);
        const char* device_name = pdevice ? pdevice : "'default'";
        int err = av_hwdevice_ctx_create(&hw_device_ctx, child_type, pdevice, NULL, 0);
        if (hw_device_ctx && err >= 0)
        {
            // Reject devices that fail the subname/capability check.
            if (!hw_check_device(hw_device_ctx, hw_type, device_subname)) {
                av_buffer_unref(&hw_device_ctx);
                continue;
            }
            CV_LOG_INFO(NULL, "FFMPEG: Created video acceleration context (av_hwdevice_ctx_create) for " << hw_child_name << " on device " << device_name);
#ifdef HAVE_OPENCL
            // if OpenCL context not created yet or property HW_ACCELERATION_USE_OPENCL set, create OpenCL context with binding to video acceleration context
            if (ocl::haveOpenCL()) {
                if (ocl_context.empty() || use_opencl) {
                    try {
                        hw_init_opencl(hw_device_ctx);
                        ocl_context = ocl::OpenCLExecutionContext::getCurrentRef();
                        if (!ocl_context.empty()) {
                            CV_LOG_INFO(NULL, "FFMPEG: Created OpenCL context with " << hw_child_name <<
                                    " video acceleration on OpenCL device: " << ocl_context.getDevice().name());
                        }
                    } catch (...) {
                        CV_LOG_INFO(NULL, "FFMPEG: Exception creating OpenCL context with " << hw_child_name << " video acceleration");
                    }
                }
                else {
                    CV_LOG_INFO(NULL, "FFMPEG: Can't bind " << hw_child_name << " video acceleration context to already created OpenCL context");
                }
            }
#else
            CV_UNUSED(use_opencl);
#endif
            if (hw_type != child_type) {
                // QSV path: derive the parent context from the child, then drop
                // our child reference (the derived context keeps its own).
                AVBufferRef* derived_ctx = hw_create_derived_context(hw_type, hw_device_ctx);
                av_buffer_unref(&hw_device_ctx);
                return derived_ctx;
            } else {
                return hw_device_ctx;
            }
        }
        else
        {
            const char* hw_name = hw_child_name;
            CV_LOG_INFO(NULL, "FFMPEG: Failed to create " << hw_name << " video acceleration (av_hwdevice_ctx_create) on device " << device_name);
        }
    }
    return NULL;
}
||||
|
||||
// Allocate an FFmpeg HW frames pool of 'width' x 'height' frames with HW pixel
// format 'hw_format' on device 'hw_device_ctx'. When 'codec_ctx' is given, the
// codec's recommended pool parameters are used as a starting point.
// Returns a new AVBufferRef to the frames context, or NULL on failure.
static
AVBufferRef* hw_create_frames(struct AVCodecContext* codec_ctx, AVBufferRef *hw_device_ctx, int width, int height, AVPixelFormat hw_format)
{
    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)hw_device_ctx->data;
    AVBufferRef* child_ctx = hw_device_ctx;
    // In QSV case we first allocate child D3D11/VAAPI frames (except DXVA2 as no OpenCL interop), then derive to parent QSV frames
    if (AV_HWDEVICE_TYPE_QSV == device_ctx->type) {
        AVBufferRef *ctx = (AVBufferRef *) device_ctx->user_opaque; // child context stored during creation of derived context
        if (ctx && AV_HWDEVICE_TYPE_DXVA2 != ((AVHWDeviceContext *) ctx->data)->type) {
            child_ctx = ctx;
        }
    }
    AVBufferRef *hw_frames_ref = nullptr;
    if (codec_ctx)
    {
        // Prefer codec-recommended pool parameters when available.
        int res = avcodec_get_hw_frames_parameters(codec_ctx, child_ctx, hw_format, &hw_frames_ref);
        if (res < 0)
        {
            CV_LOG_DEBUG(NULL, "FFMPEG: avcodec_get_hw_frames_parameters() call failed: " << res)
        }
    }
    if (!hw_frames_ref)
    {
        hw_frames_ref = av_hwframe_ctx_alloc(child_ctx);
    }
    if (!hw_frames_ref)
    {
        CV_LOG_INFO(NULL, "FFMPEG: Failed to create HW frame context (av_hwframe_ctx_alloc)");
        return NULL;
    }
    AVHWFramesContext *frames_ctx = (AVHWFramesContext *)(hw_frames_ref->data);
    frames_ctx->width = width;
    frames_ctx->height = height;
    if (frames_ctx->format == AV_PIX_FMT_NONE) {
        if (child_ctx == hw_device_ctx) {
            frames_ctx->format = hw_format;
        }
        else {
            // Child pool: pick the first HW format the child device supports.
            AVHWFramesConstraints* constraints = av_hwdevice_get_hwframe_constraints(child_ctx, NULL);
            if (constraints) {
                frames_ctx->format = constraints->valid_hw_formats[0];
                av_hwframe_constraints_free(&constraints);
            }
        }
    }
    if (frames_ctx->sw_format == AV_PIX_FMT_NONE)
        frames_ctx->sw_format = HW_DEFAULT_SW_FORMAT;
    if (frames_ctx->initial_pool_size == 0)
        frames_ctx->initial_pool_size = HW_DEFAULT_POOL_SIZE;

#ifdef HAVE_D3D11
    if (frames_ctx->device_ctx && AV_HWDEVICE_TYPE_D3D11VA == frames_ctx->device_ctx->type) {
        // BindFlags
        AVD3D11VAFramesContext* frames_hwctx = (AVD3D11VAFramesContext*)frames_ctx->hwctx;
        frames_hwctx->BindFlags |= D3D11_BIND_DECODER | D3D11_BIND_VIDEO_ENCODER;
        // See function hw_get_d3d11_single_texture(), it allocates additional ID3D11Texture2D texture and
        // attaches it as 'user_opaque' field. We have to set free() callback before av_hwframe_ctx_init() call.
        struct D3D11SingleTexture {
            static void free(struct AVHWFramesContext* ctx) {
                ID3D11Texture2D* singleTexture = (ID3D11Texture2D*)ctx->user_opaque;
                if (ctx->user_opaque)
                    singleTexture->Release();
            }
        };
        frames_ctx->free = D3D11SingleTexture::free;
    }
#endif

    int res = av_hwframe_ctx_init(hw_frames_ref);
    if (res < 0)
    {
        CV_LOG_INFO(NULL, "FFMPEG: Failed to initialize HW frame context (av_hwframe_ctx_init): " << res);
        av_buffer_unref(&hw_frames_ref);
        return NULL;
    }

    if (child_ctx != hw_device_ctx) {
        // QSV path: derive the parent frames context from the child pool.
        AVBufferRef* derived_frame_ctx = NULL;
        int flags = AV_HWFRAME_MAP_READ | AV_HWFRAME_MAP_WRITE;
        res = av_hwframe_ctx_create_derived(&derived_frame_ctx, hw_format, hw_device_ctx, hw_frames_ref, flags);
        av_buffer_unref(&hw_frames_ref);
        if (res < 0)
        {
            CV_LOG_INFO(NULL, "FFMPEG: Failed to create derived HW frame context (av_hwframe_ctx_create_derived): " << res);
            return NULL;
        }
        else {
            // Remember the child frames context so surface lookups
            // (hw_get_va_surface / hw_get_d3d11_texture) can reach it.
            ((AVHWFramesContext*)derived_frame_ctx->data)->user_opaque = frames_ctx;
            return derived_frame_ctx;
        }
    }
    else {
        return hw_frames_ref;
    }
}
|
||||
static
|
||||
bool hw_check_codec(AVCodec* codec, AVHWDeviceType hw_type, const char *disabled_codecs)
|
||||
{
|
||||
CV_Assert(disabled_codecs);
|
||||
std::string hw_name = std::string(".") + av_hwdevice_get_type_name(hw_type);
|
||||
std::stringstream s_stream(disabled_codecs);
|
||||
while (s_stream.good()) {
|
||||
std::string name;
|
||||
getline(s_stream, name, ',');
|
||||
if (name == codec->name || name == hw_name || name == codec->name + hw_name || name == "hw") {
|
||||
CV_LOG_INFO(NULL, "FFMPEG: skipping codec " << codec->name << hw_name);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Find an FFmpeg codec for 'id' in the category selected by 'check_category'
// (encoder or decoder predicate) that supports acceleration 'hw_type'.
// On success stores the codec's HW pixel format in '*hw_pix_fmt'.
// Codecs matched by 'disabled_codecs' are skipped (see hw_check_codec).
// With hw_type == AV_HWDEVICE_TYPE_NONE returns the first matching SW codec.
static
AVCodec *hw_find_codec(AVCodecID id, AVHWDeviceType hw_type, int (*check_category)(const AVCodec *), const char *disabled_codecs, AVPixelFormat *hw_pix_fmt) {
    AVCodec *c = 0;
    void *opaque = 0;

    while (NULL != (c = (AVCodec*)av_codec_iterate(&opaque)))
    {
        if (!check_category(c))
            continue;
        if (c->id != id)
            continue;
        if (c->capabilities & AV_CODEC_CAP_EXPERIMENTAL)
            continue;
        if (hw_type != AV_HWDEVICE_TYPE_NONE) {
            AVPixelFormat hw_native_fmt = AV_PIX_FMT_NONE;
#if LIBAVUTIL_BUILD < AV_VERSION_INT(56, 51, 100) // VAAPI encoders support avcodec_get_hw_config() starting ffmpeg 4.3
            if (hw_type == AV_HWDEVICE_TYPE_VAAPI)
                hw_native_fmt = AV_PIX_FMT_VAAPI_VLD;
#endif
            if (hw_type == AV_HWDEVICE_TYPE_CUDA) // CUDA encoders don't support avcodec_get_hw_config()
                hw_native_fmt = AV_PIX_FMT_CUDA;
            if (av_codec_is_encoder(c) && hw_native_fmt != AV_PIX_FMT_NONE && c->pix_fmts) {
                // Fallback path: scan the encoder's declared pixel formats directly.
                for (int i = 0; c->pix_fmts[i] != AV_PIX_FMT_NONE; i++) {
                    if (c->pix_fmts[i] == hw_native_fmt) {
                        *hw_pix_fmt = hw_native_fmt;
                        if (hw_check_codec(c, hw_type, disabled_codecs))
                            return c;
                    }
                }
            }
            // Standard path: enumerate the codec's HW configurations.
            for (int i = 0;; i++) {
                const AVCodecHWConfig *hw_config = avcodec_get_hw_config(c, i);
                if (!hw_config)
                    break;
                if (hw_config->device_type == hw_type) {
                    *hw_pix_fmt = hw_config->pix_fmt;
                    if (hw_check_codec(c, hw_type, disabled_codecs))
                        return c;
                }
            }
        } else {
            return c;
        }
    }

    return NULL;
}
||||
|
||||
// Callback to select hardware pixel format (not software format) and allocate frame pool (hw_frames_ctx)
// Installed as AVCodecContext::get_format; picks the first offered format that
// matches a HW config of the codec for the attached device type. Falls back to
// fmt[0] (the decoder's preferred software format) when no HW format works.
static
AVPixelFormat hw_get_format_callback(struct AVCodecContext *ctx, const enum AVPixelFormat * fmt) {
    if (!ctx->hw_device_ctx)
        return fmt[0];
    AVHWDeviceType hw_type = ((AVHWDeviceContext*)ctx->hw_device_ctx->data)->type;
    for (int j = 0;; j++) {
        const AVCodecHWConfig *hw_config = avcodec_get_hw_config(ctx->codec, j);
        if (!hw_config)
            break;
        if (hw_config->device_type == hw_type) {
            for (int i = 0; fmt[i] != AV_PIX_FMT_NONE; i++) {
                if (fmt[i] == hw_config->pix_fmt) {
                    if (hw_config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX) {
                        // Allocate the frame pool up-front; only accept the HW
                        // format if pool allocation succeeded.
                        ctx->sw_pix_fmt = HW_DEFAULT_SW_FORMAT;
                        ctx->hw_frames_ctx = hw_create_frames(ctx, ctx->hw_device_ctx, ctx->width, ctx->height, fmt[i]);
                        if (ctx->hw_frames_ctx) {
                            //ctx->sw_pix_fmt = ((AVHWFramesContext *)(ctx->hw_frames_ctx->data))->sw_format;
                            return fmt[i];
                        }
                    }
                }
            }
        }
    }
    CV_LOG_DEBUG(NULL, "FFMPEG: Can't select HW format in 'get_format()' callback, use default");
    return fmt[0];
}
||||
|
||||
// GPU color conversion NV12->BGRA via OpenCL extensions
// Convert the GPU surface behind 'hw_frame' directly into 'output' without a
// CPU round-trip. Returns false (so the caller can fall back to a CPU path)
// when OpenCL interop is unavailable, unbound, or any step throws.
static bool
hw_copy_frame_to_umat(AVBufferRef* ctx, AVFrame* hw_frame, cv::OutputArray output) {
    CV_UNUSED(hw_frame);
    CV_UNUSED(output);
    if (!ctx)
        return false;

#ifdef HAVE_OPENCL
    try {
        // check that current OpenCL context initialized with binding to same VAAPI/D3D11 context
        AVHWDeviceContext *hw_device_ctx = (AVHWDeviceContext *) ctx->data;
        AVHWDeviceType child_type = hw_check_opencl_context(hw_device_ctx);
        if (child_type == AV_HWDEVICE_TYPE_NONE)
            return false;

#ifdef HAVE_VA_INTEL
        if (child_type == AV_HWDEVICE_TYPE_VAAPI) {
            VADisplay va_display = hw_get_va_display(hw_device_ctx);
            VASurfaceID va_surface = hw_get_va_surface(hw_frame);
            if (va_display && va_surface != VA_INVALID_SURFACE) {
                va_intel::convertFromVASurface(va_display, va_surface, {hw_frame->width, hw_frame->height}, output);
                return true;
            }
        }
#endif

#ifdef HAVE_D3D11
        if (child_type == AV_HWDEVICE_TYPE_D3D11VA) {
            AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(hw_device_ctx);
            int subresource = 0;
            ID3D11Texture2D* texture = hw_get_d3d11_texture(hw_frame, &subresource);
            ID3D11Texture2D* singleTexture = hw_get_d3d11_single_texture(hw_frame, d3d11_device_ctx, texture);
            if (texture && singleTexture) {
                // Copy D3D11 sub-texture to D3D11 single texture
                d3d11_device_ctx->device_context->CopySubresourceRegion(singleTexture, 0, 0, 0, 0, texture, subresource, NULL);
                // Copy D3D11 single texture to cv::UMat
                directx::convertFromD3D11Texture2D(singleTexture, output);
                return true;
            }
        }
#endif
    }
    catch (...)
    {
        // Interop failed mid-way; report failure so the caller falls back.
        return false;
    }
#endif // HAVE_OPENCL

    return false;
}
|
||||
// GPU color conversion BGRA->NV12 via OpenCL extensions
// Upload 'input' directly into the GPU surface behind 'hw_frame' without a
// CPU round-trip. Returns false (so the caller can fall back to a CPU path)
// when OpenCL interop is unavailable, unbound, or any step throws.
static bool
hw_copy_umat_to_frame(AVBufferRef* ctx, cv::InputArray input, AVFrame* hw_frame) {
    CV_UNUSED(input);
    CV_UNUSED(hw_frame);
    if (!ctx)
        return false;

#ifdef HAVE_OPENCL
    try {
        // check that current OpenCL context initialized with binding to same VAAPI/D3D11 context
        AVHWDeviceContext *hw_device_ctx = (AVHWDeviceContext *) ctx->data;
        AVHWDeviceType child_type = hw_check_opencl_context(hw_device_ctx);
        if (child_type == AV_HWDEVICE_TYPE_NONE)
            return false;

#ifdef HAVE_VA_INTEL
        if (child_type == AV_HWDEVICE_TYPE_VAAPI) {
            VADisplay va_display = hw_get_va_display(hw_device_ctx);
            VASurfaceID va_surface = hw_get_va_surface(hw_frame);
            if (va_display != NULL && va_surface != VA_INVALID_SURFACE) {
                va_intel::convertToVASurface(va_display, input, va_surface, {hw_frame->width, hw_frame->height});
                return true;
            }
        }
#endif

#ifdef HAVE_D3D11
        if (child_type == AV_HWDEVICE_TYPE_D3D11VA) {
            AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(hw_device_ctx);
            int subresource = 0;
            ID3D11Texture2D* texture = hw_get_d3d11_texture(hw_frame, &subresource);
            ID3D11Texture2D* singleTexture = hw_get_d3d11_single_texture(hw_frame, d3d11_device_ctx, texture);
            if (texture && singleTexture) {
                // Copy cv::UMat to D3D11 single texture
                directx::convertToD3D11Texture2D(input, singleTexture);
                // Copy D3D11 single texture to D3D11 sub-texture
                d3d11_device_ctx->device_context->CopySubresourceRegion(texture, subresource, 0, 0, 0, singleTexture, 0, NULL);
                return true;
            }
        }
#endif
    }
    catch (...)
    {
        // Interop failed mid-way; report failure so the caller falls back.
        return false;
    }
#endif // HAVE_OPENCL

    return false;
}
|
||||
static
|
||||
VideoAccelerationType hw_type_to_va_type(AVHWDeviceType hw_type) {
|
||||
struct HWTypeFFMPEG {
|
||||
AVHWDeviceType hw_type;
|
||||
VideoAccelerationType va_type;
|
||||
} known_hw_types[] = {
|
||||
{ AV_HWDEVICE_TYPE_D3D11VA, VIDEO_ACCELERATION_D3D11 },
|
||||
{ AV_HWDEVICE_TYPE_VAAPI, VIDEO_ACCELERATION_VAAPI },
|
||||
{ AV_HWDEVICE_TYPE_QSV, VIDEO_ACCELERATION_MFX },
|
||||
{ AV_HWDEVICE_TYPE_CUDA, (VideoAccelerationType)(1 << 11) },
|
||||
};
|
||||
for (const HWTypeFFMPEG& hw : known_hw_types) {
|
||||
if (hw_type == hw.hw_type)
|
||||
return hw.va_type;
|
||||
}
|
||||
return VIDEO_ACCELERATION_NONE;
|
||||
}
|
||||
|
||||
// Iterates over the configured comma-separated list of allowed hardware
// acceleration backends for decode or encode (e.g. "d3d11va,vaapi").
// Each entry may carry a device subname filter after a '.':
// "<hw_type>.<subname>". Usage: while (it.good()) { it.parse_next(); ... }
class HWAccelIterator {
public:
    HWAccelIterator(VideoAccelerationType va_type, bool isEncoder, AVDictionary *dict)
        : hw_type_(AV_HWDEVICE_TYPE_NONE)
    {
        std::string accel_list;
        if (va_type != VIDEO_ACCELERATION_NONE)
        {
            updateAccelList_(accel_list, va_type, isEncoder, dict);
        }
        if (va_type == VIDEO_ACCELERATION_ANY)
        {
            if (!accel_list.empty())
                accel_list += ",";  // add no-acceleration case to the end of the list
        }
        CV_LOG_DEBUG(NULL, "FFMPEG: allowed acceleration types (" << getVideoAccelerationName(va_type) << "): '" << accel_list << "'");

        if (accel_list.empty() && va_type != VIDEO_ACCELERATION_NONE && va_type != VIDEO_ACCELERATION_ANY)
        {
            // broke stream
            // (reading from the default-constructed stream sets failbit, so
            // good() reports false and no candidates are iterated)
            std::string tmp;
            s_stream_ >> tmp;
        }
        else
        {
            s_stream_ = std::istringstream(accel_list);
        }

        if (va_type != VIDEO_ACCELERATION_NONE)
        {
            disabled_codecs_ = isEncoder
                    ? getEncoderDisabledCodecs(dict)
                    : getDecoderDisabledCodecs(dict);
            CV_LOG_DEBUG(NULL, "FFMPEG: disabled codecs: '" << disabled_codecs_ << "'");
        }
    }
    // True while there are more entries to parse.
    bool good() const
    {
        return s_stream_.good();
    }
    // Read the next entry, splitting an optional ".subname" suffix and
    // resolving the FFmpeg device type from the entry name.
    void parse_next()
    {
        getline(s_stream_, hw_type_device_string_, ',');
        size_t index = hw_type_device_string_.find('.');
        if (index != std::string::npos) {
            device_subname_ = hw_type_device_string_.substr(index + 1);
            hw_type_string_ = hw_type_device_string_.substr(0, index);
        } else {
            device_subname_.clear();
            hw_type_string_ = hw_type_device_string_;
        }
        hw_type_ = av_hwdevice_find_type_by_name(hw_type_string_.c_str());
    }
    const std::string& hw_type_device_string() const { return hw_type_device_string_; }
    const std::string& hw_type_string() const { return hw_type_string_; }
    AVHWDeviceType hw_type() const { return hw_type_; }
    const std::string& device_subname() const { return device_subname_; }
    const std::string& disabled_codecs() const { return disabled_codecs_; }
private:
    // Append the configured accelerations for this direction to 'accel_list'.
    // Returns false when the configuration contributes nothing.
    bool updateAccelList_(std::string& accel_list, VideoAccelerationType va_type, bool isEncoder, AVDictionary *dict)
    {
        std::string new_accels = isEncoder
                ? getEncoderConfiguration(va_type, dict)
                : getDecoderConfiguration(va_type, dict);
        if (new_accels.empty())
            return false;
        if (accel_list.empty())
            accel_list = new_accels;
        else
            accel_list = accel_list + "," + new_accels;
        return true;
    }
    std::istringstream s_stream_;          // remaining entries to parse
    std::string hw_type_device_string_;    // full current entry "<type>.<subname>"
    std::string hw_type_string_;           // type part of current entry
    AVHWDeviceType hw_type_;               // resolved FFmpeg device type
    std::string device_subname_;           // subname filter part (may be empty)
    std::string disabled_codecs_;          // comma-separated codec deny-list
};
||||
2966
3rdparty/opencv-4.5.4/modules/videoio/src/cap_ffmpeg_impl.hpp
vendored
Normal file
2966
3rdparty/opencv-4.5.4/modules/videoio/src/cap_ffmpeg_impl.hpp
vendored
Normal file
File diff suppressed because it is too large
Load Diff
46
3rdparty/opencv-4.5.4/modules/videoio/src/cap_ffmpeg_legacy_api.hpp
vendored
Normal file
46
3rdparty/opencv-4.5.4/modules/videoio/src/cap_ffmpeg_legacy_api.hpp
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
#ifndef __OPENCV_FFMPEG_LEGACY_API_H__
|
||||
#define __OPENCV_FFMPEG_LEGACY_API_H__
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
#ifndef OPENCV_FFMPEG_API
|
||||
#if defined(__OPENCV_BUILD)
|
||||
# define OPENCV_FFMPEG_API
|
||||
#elif defined _WIN32
|
||||
# define OPENCV_FFMPEG_API __declspec(dllexport)
|
||||
#elif defined __GNUC__ && __GNUC__ >= 4
|
||||
# define OPENCV_FFMPEG_API __attribute__ ((visibility ("default")))
|
||||
#else
|
||||
# define OPENCV_FFMPEG_API
|
||||
#endif
|
||||
#endif
|
||||
|
||||
typedef struct CvCapture_FFMPEG CvCapture_FFMPEG;
|
||||
typedef struct CvVideoWriter_FFMPEG CvVideoWriter_FFMPEG;
|
||||
|
||||
//OPENCV_FFMPEG_API struct CvCapture_FFMPEG* cvCreateFileCapture_FFMPEG(const char* filename);
|
||||
OPENCV_FFMPEG_API int cvSetCaptureProperty_FFMPEG(struct CvCapture_FFMPEG* cap,
|
||||
int prop, double value);
|
||||
OPENCV_FFMPEG_API double cvGetCaptureProperty_FFMPEG(struct CvCapture_FFMPEG* cap, int prop);
|
||||
OPENCV_FFMPEG_API int cvGrabFrame_FFMPEG(struct CvCapture_FFMPEG* cap);
|
||||
OPENCV_FFMPEG_API int cvRetrieveFrame_FFMPEG(struct CvCapture_FFMPEG* capture, unsigned char** data,
|
||||
int* step, int* width, int* height, int* cn);
|
||||
OPENCV_FFMPEG_API void cvReleaseCapture_FFMPEG(struct CvCapture_FFMPEG** cap);
|
||||
|
||||
OPENCV_FFMPEG_API struct CvVideoWriter_FFMPEG* cvCreateVideoWriter_FFMPEG(const char* filename,
|
||||
int fourcc, double fps, int width, int height, int isColor );
|
||||
OPENCV_FFMPEG_API int cvWriteFrame_FFMPEG(struct CvVideoWriter_FFMPEG* writer, const unsigned char* data,
|
||||
int step, int width, int height, int cn, int origin);
|
||||
OPENCV_FFMPEG_API void cvReleaseVideoWriter_FFMPEG(struct CvVideoWriter_FFMPEG** writer);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // __OPENCV_FFMPEG_LEGACY_API_H__
|
||||
1222
3rdparty/opencv-4.5.4/modules/videoio/src/cap_gphoto2.cpp
vendored
Normal file
1222
3rdparty/opencv-4.5.4/modules/videoio/src/cap_gphoto2.cpp
vendored
Normal file
File diff suppressed because it is too large
Load Diff
2420
3rdparty/opencv-4.5.4/modules/videoio/src/cap_gstreamer.cpp
vendored
Normal file
2420
3rdparty/opencv-4.5.4/modules/videoio/src/cap_gstreamer.cpp
vendored
Normal file
File diff suppressed because it is too large
Load Diff
452
3rdparty/opencv-4.5.4/modules/videoio/src/cap_images.cpp
vendored
Normal file
452
3rdparty/opencv-4.5.4/modules/videoio/src/cap_images.cpp
vendored
Normal file
@@ -0,0 +1,452 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// Intel License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2008, Nils Hasler, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
// Author: Nils Hasler <hasler@mpi-inf.mpg.de>
|
||||
//
|
||||
// Max-Planck-Institut Informatik
|
||||
|
||||
//
|
||||
// capture video from a sequence of images
|
||||
// the filename when opening can either be a printf pattern such as
|
||||
// video%04d.png or the first frame of the sequence i.e. video0001.png
|
||||
//
|
||||
|
||||
#include "precomp.hpp"
|
||||
#include "opencv2/imgcodecs.hpp"
|
||||
|
||||
#include "opencv2/core/utils/filesystem.hpp"
|
||||
|
||||
#if 0
|
||||
#define CV_WARN(message)
|
||||
#else
|
||||
#define CV_WARN(message) CV_LOG_INFO(NULL, "CAP_IMAGES warning: %s (%s:%d)" << message)
|
||||
#endif
|
||||
|
||||
namespace cv {
|
||||
|
||||
// Reads a video as a sequence of numbered image files.
// The filename passed to open() is either a printf-style pattern
// ("video%04d.png") or the first frame of the sequence ("video0001.png").
class CvCapture_Images: public IVideoCapture
{
public:
    // Reset all members to their "closed" defaults.
    void init()
    {
        filename_pattern.clear();
        frame.release();
        currentframe = firstframe = 0;
        length = 0;
        grabbedInOpen = false;
    }
    CvCapture_Images()
    {
        init();
    }
    CvCapture_Images(const String& _filename)
    {
        init();
        open(_filename);
    }

    virtual ~CvCapture_Images() CV_OVERRIDE
    {
        close();
    }
    virtual double getProperty(int) const CV_OVERRIDE;
    virtual bool setProperty(int, double) CV_OVERRIDE;
    virtual bool grabFrame() CV_OVERRIDE;
    virtual bool retrieveFrame(int, OutputArray) CV_OVERRIDE;
    virtual bool isOpened() const CV_OVERRIDE;
    virtual int getCaptureDomain() /*const*/ CV_OVERRIDE { return cv::CAP_IMAGES; }

    bool open(const String&);
    void close();
protected:
    std::string filename_pattern; // actually a printf-pattern
    unsigned currentframe;        // zero-based index of the next frame to grab
    unsigned firstframe;          // number of first frame
    unsigned length;              // length of sequence

    Mat frame;                    // last successfully grabbed frame
    bool grabbedInOpen;           // a frame was pre-grabbed during open(); see grabFrame()
};
||||
|
||||
// Release the current frame and reset to the closed state.
void CvCapture_Images::close()
{
    init();
}
||||
|
||||
// Read the image file for the current position into 'frame' and advance.
// If a frame was already grabbed as a side effect of open() (grabbedInOpen),
// consume that flag instead of re-reading the file.
// Returns true while a frame was successfully obtained.
bool CvCapture_Images::grabFrame()
{
    cv::String filename = cv::format(filename_pattern.c_str(), (int)(firstframe + currentframe));
    CV_Assert(!filename.empty());

    if (grabbedInOpen)
    {
        grabbedInOpen = false;
        ++currentframe;

        return !frame.empty();
    }

    frame = imread(filename, IMREAD_UNCHANGED);
    if( !frame.empty() )
        currentframe++;

    return !frame.empty();
}
||||
|
||||
bool CvCapture_Images::retrieveFrame(int, OutputArray out)
|
||||
{
|
||||
frame.copyTo(out);
|
||||
return grabbedInOpen ? false : !frame.empty();
|
||||
}
|
||||
|
||||
|
||||
// Query a capture property. Time/codec-related properties are meaningless
// for an image collection and return fixed values with a warning.
double CvCapture_Images::getProperty(int id) const
{
    switch(id)
    {
    case CV_CAP_PROP_POS_MSEC:
        CV_WARN("collections of images don't have framerates");
        return 0;
    case CV_CAP_PROP_POS_FRAMES:
        return currentframe;
    case CV_CAP_PROP_FRAME_COUNT:
        return length;
    case CV_CAP_PROP_POS_AVI_RATIO:
        // NOTE(review): for a one-image sequence (length == 1) this divides
        // by zero; with IEEE doubles that yields inf/NaN rather than a trap.
        return (double)currentframe / (double)(length - 1);
    case CV_CAP_PROP_FRAME_WIDTH:
        return frame.cols;
    case CV_CAP_PROP_FRAME_HEIGHT:
        return frame.rows;
    case CV_CAP_PROP_FPS:
        CV_WARN("collections of images don't have framerates");
        return 1;
    case CV_CAP_PROP_FOURCC:
        CV_WARN("collections of images don't have 4-character codes");
        return 0;
    }
    return 0; // unknown property id
}
|
||||
|
||||
// Seek within the sequence. Only position properties are supported;
// out-of-range positions are clamped with a warning.
bool CvCapture_Images::setProperty(int id, double value)
{
    switch(id)
    {
    case CV_CAP_PROP_POS_MSEC:
    case CV_CAP_PROP_POS_FRAMES:
        if(value < 0) {
            CV_WARN("seeking to negative positions does not work - clamping");
            value = 0;
        }
        if(value >= length) {
            CV_WARN("seeking beyond end of sequence - clamping");
            value = length - 1;
        }
        currentframe = cvRound(value);
        if (currentframe != 0)
            grabbedInOpen = false; // grabbed frame is not valid anymore
        return true;
    case CV_CAP_PROP_POS_AVI_RATIO:
        // Ratio seek: 0.0 = first frame, 1.0 = last frame.
        if(value > 1) {
            CV_WARN("seeking beyond end of sequence - clamping");
            value = 1;
        } else if(value < 0) {
            CV_WARN("seeking to negative positions does not work - clamping");
            value = 0;
        }
        currentframe = cvRound((length - 1) * value);
        if (currentframe != 0)
            grabbedInOpen = false; // grabbed frame is not valid anymore
        return true;
    }
    CV_WARN("unknown/unhandled property");
    return false;
}
|
||||
|
||||
// Derive a printf-style filename pattern from 'filename' and report the
// starting frame number in '*offset'.
// Two input forms are accepted:
//  1. An explicit pattern containing a single "%0?[1-9]?[du]" placeholder,
//     which is returned unchanged (offset stays 0). Multiple '%' patterns
//     or any other conversion raise StsBadArg.
//  2. A concrete sample name (e.g. "dir/img005.png"): the first digit run
//     after the last path separator becomes the pattern ("%03d" here) and
//     its numeric value becomes the offset.
static
std::string icvExtractPattern(const std::string& filename, unsigned *offset)
{
    size_t len = filename.size();
    CV_Assert(!filename.empty());
    CV_Assert(offset);

    *offset = 0;

    // check whether this is a valid image sequence filename
    std::string::size_type pos = filename.find('%');
    if (pos != std::string::npos)
    {
        pos++; CV_Assert(pos < len);
        if (filename[pos] == '0') // optional zero prefix
        {
            pos++; CV_Assert(pos < len);
        }
        if (filename[pos] >= '1' && filename[pos] <= '9') // optional numeric size (1..9) (one symbol only)
        {
            pos++; CV_Assert(pos < len);
        }
        if (filename[pos] == 'd' || filename[pos] == 'u')
        {
            pos++;
            if (pos == len)
                return filename; // end of string '...%5d'
            CV_Assert(pos < len);
            if (filename.find('%', pos) == std::string::npos)
                return filename; // no more patterns
            CV_Error_(Error::StsBadArg, ("CAP_IMAGES: invalid multiple patterns: %s", filename.c_str()));
        }
        CV_Error_(Error::StsBadArg, ("CAP_IMAGES: error, expected '0?[1-9][du]' pattern, got: %s", filename.c_str()));
    }
    else // no pattern filename was given - extract the pattern
    {
        // Skip past the directory part so digits in folder names are ignored.
        pos = filename.rfind('/');
#ifdef _WIN32
        if (pos == std::string::npos)
            pos = filename.rfind('\\');
#endif
        if (pos != std::string::npos)
            pos++;
        else
            pos = 0;

        // Find the first digit of the frame number within the base name.
        while (pos < len && !isdigit(filename[pos])) pos++;

        if (pos == len)
        {
            CV_Error_(Error::StsBadArg, ("CAP_IMAGES: can't find starting number (in the name of file): %s", filename.c_str()));
        }

        std::string::size_type pos0 = pos;

        const int64_t max_number = 1000000000;
        CV_Assert(max_number < INT_MAX); // offset is 'int'

        // Parse the digit run: its value becomes *offset, its width the
        // zero-padded field width of the generated pattern.
        int number_str_size = 0;
        uint64_t number = 0;
        while (pos < len && isdigit(filename[pos]))
        {
            char ch = filename[pos];
            number = (number * 10) + (uint64_t)((int)ch - (int)'0');
            CV_Assert(number < max_number);
            number_str_size++;
            CV_Assert(number_str_size <= 64); // don't allow huge zero prefixes
            pos++;
        }
        CV_Assert(number_str_size > 0);

        *offset = (int)number;

        // Rebuild: prefix + "%0<width>d" + suffix.
        std::string result;
        if (pos0 > 0)
            result += filename.substr(0, pos0);
        result += cv::format("%%0%dd", number_str_size);
        if (pos < len)
            result += filename.substr(pos);
        CV_LOG_INFO(NULL, "Pattern: " << result << " @ " << number);
        return result;
    }
}
|
||||
|
||||
|
||||
// Open an image sequence described by '_filename' (a pattern or a sample
// file name, see icvExtractPattern). Scans consecutive file names to count
// the sequence length, then pre-fetches the first frame so that size
// properties are available before the first grab.
// Returns false when no readable file matches the pattern.
bool CvCapture_Images::open(const std::string& _filename)
{
    unsigned offset = 0;
    close();

    CV_Assert(!_filename.empty());
    filename_pattern = icvExtractPattern(_filename, &offset);
    CV_Assert(!filename_pattern.empty());

    // determine the length of the sequence
    for (length = 0; ;)
    {
        cv::String filename = cv::format(filename_pattern.c_str(), (int)(offset + length));
        if (!utils::fs::exists(filename))
        {
            if (length == 0 && offset == 0) // allow starting with 0 or 1
            {
                offset++;
                continue;
            }
            break;
        }

        if(!haveImageReader(filename))
        {
            CV_LOG_INFO(NULL, "CAP_IMAGES: Stop scanning. Can't read image file: " << filename);
            break;
        }

        length++;
    }

    if (length == 0)
    {
        close();
        return false;
    }

    firstframe = offset;

    // grab frame to enable properties retrieval
    bool grabRes = grabFrame();
    grabbedInOpen = true;
    currentframe = 0;

    return grabRes;
}
|
||||
|
||||
// A non-empty pattern is the marker for a successfully opened sequence
// (close()/init() clears it).
bool CvCapture_Images::isOpened() const
{
    return !filename_pattern.empty();
}
|
||||
|
||||
// Factory for the CAP_IMAGES capture backend; a failed open is reported
// through the returned object's isOpened().
Ptr<IVideoCapture> create_Images_capture(const std::string &filename)
{
    return makePtr<CvCapture_Images>(filename);
}
|
||||
|
||||
//
|
||||
//
|
||||
// image sequence writer
|
||||
//
|
||||
//
|
||||
// Writes frames as individually numbered image files (CAP_IMAGES backend),
// using a printf-style filename pattern derived from the output name.
class CvVideoWriter_Images CV_FINAL : public CvVideoWriter
{
public:
    CvVideoWriter_Images()
    {
        filename_pattern.clear();
        currentframe = 0;
    }
    virtual ~CvVideoWriter_Images() { close(); }

    virtual bool open( const char* _filename );
    virtual void close();
    virtual bool setProperty( int, double ); // FIXIT doesn't work: IVideoWriter interface only!
    virtual bool writeFrame( const IplImage* ) CV_OVERRIDE;

    int getCaptureDomain() const CV_OVERRIDE { return cv::CAP_IMAGES; }
protected:
    std::string filename_pattern; // printf-style pattern for output names
    unsigned currentframe;        // index substituted into the pattern
    std::vector<int> params;      // flat imwrite parameter list {key, value, ...}
};
|
||||
|
||||
bool CvVideoWriter_Images::writeFrame( const IplImage* image )
|
||||
{
|
||||
CV_Assert(!filename_pattern.empty());
|
||||
cv::String filename = cv::format(filename_pattern.c_str(), (int)currentframe);
|
||||
CV_Assert(!filename.empty());
|
||||
|
||||
std::vector<int> image_params = params;
|
||||
image_params.push_back(0); // append parameters 'stop' mark
|
||||
image_params.push_back(0);
|
||||
|
||||
cv::Mat img = cv::cvarrToMat(image);
|
||||
bool ret = cv::imwrite(filename, img, image_params);
|
||||
|
||||
currentframe++;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void CvVideoWriter_Images::close()
|
||||
{
|
||||
filename_pattern.clear();
|
||||
currentframe = 0;
|
||||
params.clear();
|
||||
}
|
||||
|
||||
|
||||
// Prepare for writing: derive the printf pattern (and the starting number)
// from '_filename' and verify that an encoder exists for the resulting
// file extension. Returns false when no encoder is available.
bool CvVideoWriter_Images::open( const char* _filename )
{
    unsigned offset = 0;
    close();

    CV_Assert(_filename);
    filename_pattern = icvExtractPattern(_filename, &offset);
    CV_Assert(!filename_pattern.empty());

    // Probe with index 0 purely to validate the extension/encoder.
    cv::String filename = cv::format(filename_pattern.c_str(), (int)currentframe);
    if (!cv::haveImageWriter(filename))
    {
        close();
        return false;
    }

    currentframe = offset; // numbering starts at the number found in '_filename'
    params.clear();
    return true;
}
|
||||
|
||||
|
||||
bool CvVideoWriter_Images::setProperty( int id, double value )
|
||||
{
|
||||
if (id >= cv::CAP_PROP_IMAGES_BASE && id < cv::CAP_PROP_IMAGES_LAST)
|
||||
{
|
||||
params.push_back( id - cv::CAP_PROP_IMAGES_BASE );
|
||||
params.push_back( static_cast<int>( value ) );
|
||||
return true;
|
||||
}
|
||||
return false; // not supported
|
||||
}
|
||||
|
||||
// Factory for the CAP_IMAGES writer backend. fourcc/fps/size parameters do
// not apply to image sequences and are ignored.
// Ownership protocol: the raw CvVideoWriter_Images is handed to LegacyWriter
// on success; on failure or exception it is deleted here.
Ptr<IVideoWriter> create_Images_writer(const std::string &filename, int, double, const Size &,
                                       const cv::VideoWriterParameters&)
{
    CvVideoWriter_Images* iwriter = new CvVideoWriter_Images;
    try
    {
        if (!iwriter->open(filename.c_str()))
        {
            delete iwriter;
            return 0;
        }
        return makePtr<LegacyWriter>(iwriter); // takes ownership
    }
    catch (...)
    {
        delete iwriter;
        throw;
    }
}
|
||||
|
||||
} // cv::
|
||||
436
3rdparty/opencv-4.5.4/modules/videoio/src/cap_interface.hpp
vendored
Normal file
436
3rdparty/opencv-4.5.4/modules/videoio/src/cap_interface.hpp
vendored
Normal file
@@ -0,0 +1,436 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
|
||||
#ifndef CAP_INTERFACE_HPP
|
||||
#define CAP_INTERFACE_HPP
|
||||
|
||||
#include "opencv2/core.hpp"
|
||||
#include "opencv2/core/core_c.h"
|
||||
#include "opencv2/videoio.hpp"
|
||||
#include "opencv2/videoio/videoio_c.h"
|
||||
|
||||
//===================================================
|
||||
|
||||
// Legacy structs
|
||||
|
||||
struct CvCapture
|
||||
{
|
||||
virtual ~CvCapture() {}
|
||||
virtual double getProperty(int) const { return 0; }
|
||||
virtual bool setProperty(int, double) { return 0; }
|
||||
virtual bool grabFrame() { return true; }
|
||||
virtual IplImage* retrieveFrame(int) { return 0; }
|
||||
virtual int getCaptureDomain() { return cv::CAP_ANY; } // Return the type of the capture object: CAP_DSHOW, etc...
|
||||
};
|
||||
|
||||
// Legacy C-style writer interface, the counterpart of CvCapture for
// backends not yet ported to cv::IVideoWriter.
struct CvVideoWriter
{
    virtual ~CvVideoWriter() {}
    virtual bool writeFrame(const IplImage*) { return false; }
    virtual int getCaptureDomain() const { return cv::CAP_ANY; } // Return the type of the capture object: CAP_FFMPEG, etc...
    virtual double getProperty(int) const { return 0; }
};
|
||||
|
||||
//===================================================
|
||||
|
||||
// Modern classes
|
||||
|
||||
namespace cv
|
||||
{
|
||||
// NOTE(review): anonymous namespace in a header gives each including TU its
// own copy of these helpers; harmless here since they are tiny inline casts,
// but an 'inline' function at namespace scope would avoid the duplication.
namespace
{
// Convert a raw int parameter value to the requested type.
template <class T>
inline T castParameterTo(int paramValue)
{
    return static_cast<T>(paramValue);
}

// bool specialization: any non-zero value reads as 'true'.
template <>
inline bool castParameterTo(int paramValue)
{
    return paramValue != 0;
}
}
|
||||
|
||||
// Typed key/value list of backend parameters handed to VideoCapture /
// VideoWriter. Each entry is marked 'consumed' when a backend reads it, so
// that unsupported (never-read) parameters can be detected and reported
// afterwards via getUnused()/warnUnusedParameters().
class VideoParameters
{
public:
    struct VideoParameter {
        VideoParameter() = default;

        VideoParameter(int key_, int value_) : key(key_), value(value_) {}

        int key{-1};
        int value{-1};
        // mutable so consumption can be tracked through const accessors
        mutable bool isConsumed{false};
    };

    VideoParameters() = default;

    // Build from a flat {key, value, key, value, ...} vector; raises
    // StsVecLengthErr on an odd-length input.
    explicit VideoParameters(const std::vector<int>& params)
    {
        const auto count = params.size();
        if (count % 2 != 0)
        {
            CV_Error_(Error::StsVecLengthErr,
                      ("Vector of VideoWriter parameters should have even length"));
        }
        params_.reserve(count / 2);
        for (std::size_t i = 0; i < count; i += 2)
        {
            add(params[i], params[i + 1]);
        }
    }

    // Build from a C array holding n_params {key, value} pairs.
    // NOTE(review): 'params' is assumed non-null when n_params > 0 and to
    // hold 2*n_params ints — verify at call sites.
    VideoParameters(int* params, unsigned n_params)
    {
        params_.reserve(n_params);
        for (unsigned i = 0; i < n_params; ++i)
        {
            add(params[2*i], params[2*i + 1]);
        }
    }

    void add(int key, int value)
    {
        params_.emplace_back(key, value);
    }

    // True if 'key' is present; does NOT mark the parameter as consumed.
    bool has(int key) const
    {
        auto it = std::find_if(params_.begin(), params_.end(),
            [key](const VideoParameter &param)
            {
                return param.key == key;
            }
        );
        return it != params_.end();
    }

    // Return the value of 'key' cast to ValueType and mark it consumed;
    // raises StsBadArg when the key is missing.
    template <class ValueType>
    ValueType get(int key) const
    {
        auto it = std::find_if(params_.begin(), params_.end(),
            [key](const VideoParameter &param)
            {
                return param.key == key;
            }
        );
        if (it != params_.end())
        {
            it->isConsumed = true;
            return castParameterTo<ValueType>(it->value);
        }
        else
        {
            CV_Error_(Error::StsBadArg, ("Missing value for parameter: [%d]", key));
        }
    }

    // Like get(key), but returns 'defaultValue' instead of raising when
    // the key is absent.
    template <class ValueType>
    ValueType get(int key, ValueType defaultValue) const
    {
        auto it = std::find_if(params_.begin(), params_.end(),
            [key](const VideoParameter &param)
            {
                return param.key == key;
            }
        );
        if (it != params_.end())
        {
            it->isConsumed = true;
            return castParameterTo<ValueType>(it->value);
        }
        else
        {
            return defaultValue;
        }
    }

    // Keys that no backend has consumed (read) yet.
    std::vector<int> getUnused() const
    {
        std::vector<int> unusedParams;
        for (const auto &param : params_)
        {
            if (!param.isConsumed)
            {
                unusedParams.push_back(param.key);
            }
        }
        return unusedParams;
    }

    // Flatten back into {key, value, key, value, ...} form.
    std::vector<int> getIntVector() const
    {
        std::vector<int> vint_params;
        for (const auto& param : params_)
        {
            vint_params.push_back(param.key);
            vint_params.push_back(param.value);
        }
        return vint_params;
    }

    bool empty() const
    {
        return params_.empty();
    }

    // Log each unconsumed parameter; returns true if any were found.
    bool warnUnusedParameters() const
    {
        bool found = false;
        for (const auto &param : params_)
        {
            if (!param.isConsumed)
            {
                found = true;
                CV_LOG_INFO(NULL, "VIDEOIO: unused parameter: [" << param.key << "]=" <<
                    cv::format("%lld / 0x%016llx", (long long)param.value, (long long)param.value));
            }
        }
        return found;
    }


private:
    std::vector<VideoParameter> params_;
};
|
||||
|
||||
// Strongly-named alias of VideoParameters for writer-side parameters; the
// distinct type keeps capture and writer parameter lists from being mixed.
class VideoWriterParameters : public VideoParameters
{
public:
    using VideoParameters::VideoParameters; // reuse constructors
};
|
||||
|
||||
// Strongly-named alias of VideoParameters for capture-side parameters.
class VideoCaptureParameters : public VideoParameters
{
public:
    using VideoParameters::VideoParameters; // reuse constructors
};
|
||||
|
||||
// Modern capture backend interface. Property accessors default to benign
// no-ops; frame access and open-state queries must be implemented.
class IVideoCapture
{
public:
    virtual ~IVideoCapture() {}
    virtual double getProperty(int) const { return 0; }
    virtual bool setProperty(int, double) { return false; }
    virtual bool grabFrame() = 0;
    virtual bool retrieveFrame(int, OutputArray) = 0;
    virtual bool isOpened() const = 0;
    virtual int getCaptureDomain() { return CAP_ANY; } // Return the type of the capture object: CAP_DSHOW, etc...
};
|
||||
|
||||
// Modern writer backend interface; write() and isOpened() are mandatory,
// property accessors default to benign no-ops.
class IVideoWriter
{
public:
    virtual ~IVideoWriter() {}
    virtual double getProperty(int) const { return 0; }
    virtual bool setProperty(int, double) { return false; }
    virtual bool isOpened() const = 0;
    virtual void write(InputArray) = 0;
    virtual int getCaptureDomain() const { return cv::CAP_ANY; } // Return the type of the capture object: CAP_FFMPEG, etc...
};
|
||||
|
||||
namespace internal {
// Friend-style backdoor letting other videoio code/tests reach the
// IVideoCapture implementation hidden inside a public cv::VideoCapture.
class VideoCapturePrivateAccessor
{
public:
    static
    IVideoCapture* getIVideoCapture(const VideoCapture& cap) { return cap.icap.get(); }
};
} // namespace
|
||||
|
||||
//===================================================
|
||||
|
||||
// Wrapper
|
||||
|
||||
// Adapter exposing a legacy CvCapture backend through the modern
// IVideoCapture interface. Owns the CvCapture pointer and releases it in
// the destructor; non-copyable because of that ownership.
class LegacyCapture : public IVideoCapture
{
private:
    CvCapture * cap;
    LegacyCapture(const LegacyCapture &);
    LegacyCapture& operator=(const LegacyCapture &);
public:
    // Takes ownership of 'cap_'.
    LegacyCapture(CvCapture * cap_) : cap(cap_) {}
    ~LegacyCapture()
    {
        cvReleaseCapture(&cap);
    }
    double getProperty(int propId) const CV_OVERRIDE
    {
        return cap ? cap->getProperty(propId) : 0;
    }
    bool setProperty(int propId, double value) CV_OVERRIDE
    {
        // NOTE(review): unlike the other members this does not null-check
        // 'cap' here — it relies on cvSetCaptureProperty tolerating NULL;
        // confirm against the C-API implementation.
        return cvSetCaptureProperty(cap, propId, value) != 0;
    }
    bool grabFrame() CV_OVERRIDE
    {
        return cap ? cvGrabFrame(cap) != 0 : false;
    }
    bool retrieveFrame(int channel, OutputArray image) CV_OVERRIDE
    {
        IplImage* _img = cvRetrieveFrame(cap, channel);
        if( !_img )
        {
            image.release();
            return false;
        }
        if(_img->origin == IPL_ORIGIN_TL)
        {
            cv::cvarrToMat(_img).copyTo(image);
        }
        else
        {
            // bottom-left origin: flip vertically to deliver top-left order
            Mat temp = cv::cvarrToMat(_img);
            flip(temp, image, 0);
        }
        return true;
    }
    bool isOpened() const CV_OVERRIDE
    {
        return cap != 0;  // legacy interface doesn't support closed files
    }
    int getCaptureDomain() CV_OVERRIDE
    {
        return cap ? cap->getCaptureDomain() : 0;
    }

    // Non-owning access to the wrapped legacy object.
    CvCapture* getCvCapture() const { return cap; }
};
|
||||
|
||||
// Adapter exposing a legacy CvVideoWriter backend through the modern
// IVideoWriter interface. Owns the writer pointer (released in the
// destructor); non-copyable because of that ownership.
class LegacyWriter : public IVideoWriter
{
private:
    CvVideoWriter * writer;
    LegacyWriter(const LegacyWriter &);
    LegacyWriter& operator=(const LegacyWriter &);
public:
    // Takes ownership of 'wri_'.
    LegacyWriter(CvVideoWriter * wri_) : writer(wri_)
    {}
    ~LegacyWriter()
    {
        cvReleaseVideoWriter(&writer);
    }
    double getProperty(int propId) const CV_OVERRIDE
    {
        if (writer)
        {
            return writer->getProperty(propId);
        }
        return 0.;
    }
    // The legacy interface has no property setter.
    bool setProperty(int, double) CV_OVERRIDE
    {
        return false;
    }
    bool isOpened() const CV_OVERRIDE
    {
        return writer != NULL;
    }
    void write(InputArray image) CV_OVERRIDE
    {
        // Wrap the Mat in a temporary IplImage header (no pixel copy).
        IplImage _img = cvIplImage(image.getMat());
        cvWriteFrame(writer, &_img);
    }
    int getCaptureDomain() const CV_OVERRIDE
    {
        return writer ? writer->getCaptureDomain() : 0;
    }
};
|
||||
|
||||
//==================================================================================================
|
||||
|
||||
Ptr<IVideoCapture> cvCreateFileCapture_FFMPEG_proxy(const std::string &filename, const VideoCaptureParameters& params);
|
||||
Ptr<IVideoWriter> cvCreateVideoWriter_FFMPEG_proxy(const std::string& filename, int fourcc,
|
||||
double fps, const Size& frameSize,
|
||||
const VideoWriterParameters& params);
|
||||
|
||||
Ptr<IVideoCapture> createGStreamerCapture_file(const std::string& filename, const cv::VideoCaptureParameters& params);
|
||||
Ptr<IVideoCapture> createGStreamerCapture_cam(int index, const cv::VideoCaptureParameters& params);
|
||||
Ptr<IVideoWriter> create_GStreamer_writer(const std::string& filename, int fourcc,
|
||||
double fps, const Size& frameSize,
|
||||
const VideoWriterParameters& params);
|
||||
|
||||
Ptr<IVideoCapture> create_MFX_capture(const std::string &filename);
|
||||
Ptr<IVideoWriter> create_MFX_writer(const std::string& filename, int _fourcc,
|
||||
double fps, const Size& frameSize,
|
||||
const VideoWriterParameters& params);
|
||||
|
||||
Ptr<IVideoCapture> create_AVFoundation_capture_file(const std::string &filename);
|
||||
Ptr<IVideoCapture> create_AVFoundation_capture_cam(int index);
|
||||
Ptr<IVideoWriter> create_AVFoundation_writer(const std::string& filename, int fourcc,
|
||||
double fps, const Size& frameSize,
|
||||
const VideoWriterParameters& params);
|
||||
|
||||
Ptr<IVideoCapture> create_WRT_capture(int device);
|
||||
|
||||
Ptr<IVideoCapture> cvCreateCapture_MSMF(int index, const VideoCaptureParameters& params);
|
||||
Ptr<IVideoCapture> cvCreateCapture_MSMF(const std::string& filename, const VideoCaptureParameters& params);
|
||||
Ptr<IVideoWriter> cvCreateVideoWriter_MSMF(const std::string& filename, int fourcc,
|
||||
double fps, const Size& frameSize,
|
||||
const VideoWriterParameters& params);
|
||||
|
||||
Ptr<IVideoCapture> create_DShow_capture(int index);
|
||||
|
||||
Ptr<IVideoCapture> create_V4L_capture_cam(int index);
|
||||
Ptr<IVideoCapture> create_V4L_capture_file(const std::string &filename);
|
||||
|
||||
Ptr<IVideoCapture> create_OpenNI2_capture_cam( int index );
|
||||
Ptr<IVideoCapture> create_OpenNI2_capture_file( const std::string &filename );
|
||||
|
||||
Ptr<IVideoCapture> create_Images_capture(const std::string &filename);
|
||||
Ptr<IVideoWriter> create_Images_writer(const std::string& filename, int fourcc,
|
||||
double fps, const Size& frameSize,
|
||||
const VideoWriterParameters& params);
|
||||
|
||||
Ptr<IVideoCapture> create_DC1394_capture(int index);
|
||||
|
||||
Ptr<IVideoCapture> create_RealSense_capture(int index);
|
||||
|
||||
Ptr<IVideoCapture> create_PvAPI_capture( int index );
|
||||
|
||||
Ptr<IVideoCapture> create_XIMEA_capture_cam( int index );
|
||||
Ptr<IVideoCapture> create_XIMEA_capture_file( const std::string &serialNumber );
|
||||
|
||||
Ptr<IVideoCapture> create_ueye_camera(int camera);
|
||||
|
||||
Ptr<IVideoCapture> create_Aravis_capture( int index );
|
||||
|
||||
Ptr<IVideoCapture> createMotionJpegCapture(const std::string& filename);
|
||||
Ptr<IVideoWriter> createMotionJpegWriter(const std::string& filename, int fourcc,
|
||||
double fps, const Size& frameSize,
|
||||
const VideoWriterParameters& params);
|
||||
|
||||
Ptr<IVideoCapture> createGPhoto2Capture(int index);
|
||||
Ptr<IVideoCapture> createGPhoto2Capture(const std::string& deviceName);
|
||||
|
||||
Ptr<IVideoCapture> createXINECapture(const std::string &filename);
|
||||
|
||||
Ptr<IVideoCapture> createAndroidCapture_cam( int index );
|
||||
Ptr<IVideoCapture> createAndroidCapture_file(const std::string &filename);
|
||||
|
||||
bool VideoCapture_V4L_waitAny(
|
||||
const std::vector<VideoCapture>& streams,
|
||||
CV_OUT std::vector<int>& ready,
|
||||
int64 timeoutNs);
|
||||
|
||||
static inline
|
||||
std::ostream& operator<<(std::ostream& out, const VideoAccelerationType& va_type)
|
||||
{
|
||||
switch (va_type)
|
||||
{
|
||||
case VIDEO_ACCELERATION_NONE: out << "NONE"; return out;
|
||||
case VIDEO_ACCELERATION_ANY: out << "ANY"; return out;
|
||||
case VIDEO_ACCELERATION_D3D11: out << "D3D11"; return out;
|
||||
case VIDEO_ACCELERATION_VAAPI: out << "VAAPI"; return out;
|
||||
case VIDEO_ACCELERATION_MFX: out << "MFX"; return out;
|
||||
}
|
||||
out << cv::format("UNKNOWN(0x%ux)", static_cast<unsigned int>(va_type));
|
||||
return out;
|
||||
}
|
||||
|
||||
} // cv::
|
||||
|
||||
#endif // CAP_INTERFACE_HPP
|
||||
516
3rdparty/opencv-4.5.4/modules/videoio/src/cap_ios_abstract_camera.mm
vendored
Normal file
516
3rdparty/opencv-4.5.4/modules/videoio/src/cap_ios_abstract_camera.mm
vendored
Normal file
@@ -0,0 +1,516 @@
|
||||
/*
|
||||
* cap_ios_abstract_camera.mm
|
||||
* For iOS video I/O
|
||||
* by Eduard Feicho on 29/07/12
|
||||
* by Alexander Shishkov on 17/07/13
|
||||
* Copyright 2012. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
* 3. The name of the author may not be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
|
||||
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
|
||||
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
|
||||
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
||||
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
||||
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
|
||||
#import "opencv2/videoio/cap_ios.h"
|
||||
#include "precomp.hpp"
|
||||
|
||||
#pragma mark - Private Interface
|
||||
|
||||
// Private (class-extension) API of CvAbstractCamera, hidden from clients.
@interface CvAbstractCamera ()

@property (nonatomic, strong) AVCaptureVideoPreviewLayer* captureVideoPreviewLayer;

// Invoked by NSNotificationCenter whenever the device rotates.
- (void)deviceOrientationDidChange:(NSNotification*)notification;
- (void)startCaptureSession;

- (void)setDesiredCameraPosition:(AVCaptureDevicePosition)desiredPosition;

- (void)updateSize;

@end
|
||||
|
||||
|
||||
#pragma mark - Implementation
|
||||
|
||||
|
||||
@implementation CvAbstractCamera
|
||||
|
||||
|
||||
|
||||
#pragma mark Public
|
||||
|
||||
@synthesize imageWidth;
|
||||
@synthesize imageHeight;
|
||||
|
||||
|
||||
@synthesize defaultFPS;
|
||||
@synthesize defaultAVCaptureDevicePosition;
|
||||
@synthesize defaultAVCaptureVideoOrientation;
|
||||
@synthesize defaultAVCaptureSessionPreset;
|
||||
|
||||
|
||||
|
||||
@synthesize captureSession;
|
||||
@synthesize captureVideoPreviewLayer;
|
||||
@synthesize videoCaptureConnection;
|
||||
@synthesize running;
|
||||
@synthesize captureSessionLoaded;
|
||||
@synthesize useAVCaptureVideoPreviewLayer;
|
||||
|
||||
@synthesize parentView;
|
||||
|
||||
#pragma mark - Constructors
|
||||
|
||||
// Initializer used when no preview view is supplied: sets camera defaults
// (front camera, landscape-left, 15 FPS, 352x288 preset) and leaves the
// AVCaptureVideoPreviewLayer rendering disabled.
- (id)init;
{
    self = [super init];
    if (self) {
        // react to device orientation notifications
        [[NSNotificationCenter defaultCenter] addObserver:self
                                                 selector:@selector(deviceOrientationDidChange:)
                                                     name:UIDeviceOrientationDidChangeNotification
                                                   object:nil];
        [[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
        currentDeviceOrientation = [[UIDevice currentDevice] orientation];


        // check if camera available
        cameraAvailable = [UIImagePickerController isSourceTypeAvailable:UIImagePickerControllerSourceTypeCamera];
        NSLog(@"camera available: %@", (cameraAvailable == YES ? @"YES" : @"NO") );

        running = NO;

        // set camera default configuration
        self.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront;
        self.defaultAVCaptureVideoOrientation = AVCaptureVideoOrientationLandscapeLeft;
        self.defaultFPS = 15;
        self.defaultAVCaptureSessionPreset = AVCaptureSessionPreset352x288;

        self.parentView = nil;
        self.useAVCaptureVideoPreviewLayer = NO;
    }
    return self;
}
|
||||
|
||||
|
||||
|
||||
// Initializer used when a parent UIView is supplied: same defaults as
// -init but with a higher preset (640x480) and the preview layer enabled,
// to be attached to 'parent'.
- (id)initWithParentView:(UIView*)parent;
{
    self = [super init];
    if (self) {
        // react to device orientation notifications
        [[NSNotificationCenter defaultCenter] addObserver:self
                                                 selector:@selector(deviceOrientationDidChange:)
                                                     name:UIDeviceOrientationDidChangeNotification
                                                   object:nil];
        [[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
        currentDeviceOrientation = [[UIDevice currentDevice] orientation];


        // check if camera available
        cameraAvailable = [UIImagePickerController isSourceTypeAvailable:UIImagePickerControllerSourceTypeCamera];
        NSLog(@"camera available: %@", (cameraAvailable == YES ? @"YES" : @"NO") );

        running = NO;

        // set camera default configuration
        self.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront;
        self.defaultAVCaptureVideoOrientation = AVCaptureVideoOrientationLandscapeLeft;
        self.defaultFPS = 15;
        self.defaultAVCaptureSessionPreset = AVCaptureSessionPreset640x480;

        self.parentView = parent;
        self.useAVCaptureVideoPreviewLayer = YES;
    }
    return self;
}
|
||||
|
||||
|
||||
|
||||
// Manual-retain-release teardown (this file is built without ARC):
// unsubscribe from orientation notifications before releasing super.
- (void)dealloc;
{
    [[NSNotificationCenter defaultCenter] removeObserver:self];
    [[UIDevice currentDevice] endGeneratingDeviceOrientationNotifications];
    [super dealloc];
}
|
||||
|
||||
|
||||
#pragma mark - Public interface
|
||||
|
||||
|
||||
// Begin capturing. Must run on the main thread; when called from another
// thread it re-dispatches itself to main and returns. A no-op if capture
// is already running or no camera is available.
- (void)start;
{
    if (![NSThread isMainThread]) {
        NSLog(@"[Camera] Warning: Call start only from main thread");
        [self performSelectorOnMainThread:@selector(start) withObject:nil waitUntilDone:NO];
        return;
    }

    if (running == YES) {
        return;
    }
    running = YES;

    // TODO: update image size data before actually starting (needed for recording)
    [self updateSize];

    if (cameraAvailable) {
        [self startCaptureSession];
    }
}
|
||||
|
||||
|
||||
// Halt frame delivery but keep the session configuration intact so that
// -start can resume without rebuilding inputs/outputs.
- (void)pause;
{
    running = NO;
    [self.captureSession stopRunning];
}
|
||||
|
||||
|
||||
|
||||
// Fully tear down the capture pipeline: detach every input/output from the
// session, stop it, and release session/preview/connection objects (MRR).
// After this the session must be rebuilt before capturing again.
- (void)stop;
{
    running = NO;

    // Release any retained subviews of the main view.
    // e.g. self.myOutlet = nil;
    if (self.captureSession) {
        for (AVCaptureInput *input in self.captureSession.inputs) {
            [self.captureSession removeInput:input];
        }

        for (AVCaptureOutput *output in self.captureSession.outputs) {
            [self.captureSession removeOutput:output];
        }

        [self.captureSession stopRunning];
        [captureSession release];
    }

    [captureVideoPreviewLayer release];
    [videoCaptureConnection release];
    captureSessionLoaded = NO;
}
|
||||
|
||||
|
||||
|
||||
// use front/back camera
|
||||
// use front/back camera
// Toggles defaultAVCaptureDevicePosition; if capture was running, it is
// stopped first and restarted afterwards so the new device takes effect.
- (void)switchCameras;
{
    BOOL was_running = self.running;
    if (was_running) {
        [self stop];
    }
    if (self.defaultAVCaptureDevicePosition == AVCaptureDevicePositionFront) {
        self.defaultAVCaptureDevicePosition = AVCaptureDevicePositionBack;
    } else {
        self.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront;
    }
    if (was_running) {
        [self start];
    }
}
|
||||
|
||||
|
||||
|
||||
#pragma mark - Device Orientation Changes
|
||||
|
||||
|
||||
// Notification handler: track only the four real screen orientations
// (face-up/face-down and unknown are ignored), then let updateOrientation
// adjust the preview/connection.
- (void)deviceOrientationDidChange:(NSNotification*)notification
{
    (void)notification;
    UIDeviceOrientation orientation = [UIDevice currentDevice].orientation;

    switch (orientation)
    {
        case UIDeviceOrientationPortrait:
        case UIDeviceOrientationPortraitUpsideDown:
        case UIDeviceOrientationLandscapeLeft:
        case UIDeviceOrientationLandscapeRight:
            currentDeviceOrientation = orientation;
            break;

        case UIDeviceOrientationFaceUp:
        case UIDeviceOrientationFaceDown:
        default:
            break;
    }
    NSLog(@"deviceOrientationDidChange: %d", (int)orientation);

    [self updateOrientation];
}
|
||||
|
||||
|
||||
|
||||
#pragma mark - Private Interface
|
||||
|
||||
// Create the AVCaptureSession, preferring the configured preset and
// falling back to AVCaptureSessionPresetLow when it is unsupported.
- (void)createCaptureSession;
{
    // set a av capture session preset
    self.captureSession = [[AVCaptureSession alloc] init];
    if ([self.captureSession canSetSessionPreset:self.defaultAVCaptureSessionPreset]) {
        [self.captureSession setSessionPreset:self.defaultAVCaptureSessionPreset];
    } else if ([self.captureSession canSetSessionPreset:AVCaptureSessionPresetLow]) {
        [self.captureSession setSessionPreset:AVCaptureSessionPresetLow];
    } else {
        NSLog(@"[Camera] Error: could not set session preset");
    }
}
|
||||
|
||||
// Attaches the capture device at the configured position to the session
// (via setDesiredCameraPosition:) and logs the default device's state.
// NOTE(review): `device.connected` / `device.position` are queried on the
// *default* device, which may differ from the one actually installed by
// setDesiredCameraPosition: — the log lines are informational only.
- (void)createCaptureDevice;
{
    // setup the device
    AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    [self setDesiredCameraPosition:self.defaultAVCaptureDevicePosition];
    NSLog(@"[Camera] device connected? %@", device.connected ? @"YES" : @"NO");
    NSLog(@"[Camera] device position %@", (device.position == AVCaptureDevicePositionBack) ? @"back" : @"front");
}
|
||||
|
||||
|
||||
// Creates the AVCaptureVideoPreviewLayer for the session, applies the
// configured video orientation (using the modern `connection` API when
// available, the pre-iOS-6 layer API otherwise), and inserts the layer
// into parentView when one was supplied.
- (void)createVideoPreviewLayer;
{
    self.captureVideoPreviewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:self.captureSession];

    if ([self.captureVideoPreviewLayer respondsToSelector:@selector(connection)])
    {
        if ([self.captureVideoPreviewLayer.connection isVideoOrientationSupported])
        {
            [self.captureVideoPreviewLayer.connection setVideoOrientation:self.defaultAVCaptureVideoOrientation];
        }
    }
    else
    {
#if (!defined(TARGET_OS_MACCATALYST) || !TARGET_OS_MACCATALYST)
        // Deprecated in 6.0; here for backward compatibility
        if ([self.captureVideoPreviewLayer isOrientationSupported])
        {
            [self.captureVideoPreviewLayer setOrientation:self.defaultAVCaptureVideoOrientation];
        }
#endif
    }

    // Only attach the layer when the caller provided a host view; headless
    // use (no parentView) still gets a configured preview layer.
    if (parentView != nil) {
        self.captureVideoPreviewLayer.frame = self.parentView.bounds;
        self.captureVideoPreviewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
        [self.parentView.layer addSublayer:self.captureVideoPreviewLayer];
    }
    NSLog(@"[Camera] created AVCaptureVideoPreviewLayer");
}
|
||||
|
||||
// Finds the video capture device at the requested position (front/back),
// enables continuous autofocus on it when supported, and installs it as
// the session's sole input inside a begin/commitConfiguration pair.
//
// Fixes over the previous version:
//  * the input was added, then immediately removed by the remove-all loop,
//    then added again — the first addInput: was redundant and is dropped;
//  * the remove-all loop fast-enumerated self.captureSession.inputs while
//    mutating it; we now iterate a snapshot of the array;
//  * a nil input (creation failure) is no longer passed to addInput:,
//    which would raise an exception.
//
// @param desiredPosition AVCaptureDevicePositionFront or ...Back.
- (void)setDesiredCameraPosition:(AVCaptureDevicePosition)desiredPosition;
{
    for (AVCaptureDevice *device in [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
        if ([device position] == desiredPosition) {
            [self.captureSession beginConfiguration];

            NSError* error = nil;
            AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:device error:&error];
            if (!input) {
                NSLog(@"error creating input %@", [error description]);
            }

            // support for autofocus
            if ([device isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]) {
                error = nil;
                if ([device lockForConfiguration:&error]) {
                    device.focusMode = AVCaptureFocusModeContinuousAutoFocus;
                    [device unlockForConfiguration];
                } else {
                    NSLog(@"unable to lock device for autofocus configuration %@", [error description]);
                }
            }

            // Detach any previously attached inputs; iterate a snapshot so
            // the session's input array is not mutated during enumeration.
            NSArray *oldInputs = [NSArray arrayWithArray:self.captureSession.inputs];
            for (AVCaptureInput *oldInput in oldInputs) {
                [self.captureSession removeInput:oldInput];
            }
            if (input) {
                [self.captureSession addInput:input];
            }
            [self.captureSession commitConfiguration];

            break;
        }
    }
}
|
||||
|
||||
|
||||
|
||||
// Lazily builds the full capture pipeline (session, device, output, preview)
// on first use, then starts the session. No-op when the camera is not
// available. captureSessionLoaded guards against rebuilding the pipeline
// on subsequent start calls.
- (void)startCaptureSession
{
    if (!cameraAvailable) {
        return;
    }

    if (self.captureSessionLoaded == NO) {
        [self createCaptureSession];
        [self createCaptureDevice];
        [self createCaptureOutput];  // subclass-provided (abstract in this class)

        // setup preview layer
        if (self.useAVCaptureVideoPreviewLayer) {
            [self createVideoPreviewLayer];
        } else {
            [self createCustomVideoPreview];  // subclass-provided
        }

        captureSessionLoaded = YES;
    }

    [self.captureSession startRunning];
}
|
||||
|
||||
|
||||
// Abstract method: subclasses (photo/video camera) must provide the
// concrete AVCaptureOutput. Raises if called on the base class.
- (void)createCaptureOutput;
{
    [NSException raise:NSInternalInconsistencyException
                format:@"You must override %s in a subclass", __FUNCTION__];
}
|
||||
|
||||
// Abstract method: subclasses must create their own preview when
// useAVCaptureVideoPreviewLayer is NO. Raises if called on the base class.
- (void)createCustomVideoPreview;
{
    [NSException raise:NSInternalInconsistencyException
                format:@"You must override %s in a subclass", __FUNCTION__];
}
|
||||
|
||||
// Hook invoked after a device-orientation change; intentionally empty in
// the base class, subclasses override to re-layout their preview.
- (void)updateOrientation;
{
    // nothing to do here
}
|
||||
|
||||
|
||||
// Derives imageWidth/imageHeight from the configured session preset.
// Only the presets with an explicit pixel size map exactly; the named
// quality presets (Photo/High/Medium/Low) and any unrecognised preset
// fall back to 640x480, exactly as before.
- (void)updateSize;
{
    NSString* preset = self.defaultAVCaptureSessionPreset;

    if ([preset isEqualToString:AVCaptureSessionPreset352x288]) {
        self.imageWidth = 352;
        self.imageHeight = 288;
    } else if ([preset isEqualToString:AVCaptureSessionPreset1280x720]) {
        self.imageWidth = 1280;
        self.imageHeight = 720;
    } else {
        // Covers AVCaptureSessionPresetPhoto/High/Medium/Low,
        // AVCaptureSessionPreset640x480, and unknown presets.
        //TODO: find the correct resolution for the named quality presets
        self.imageWidth = 640;
        self.imageHeight = 480;
    }
}
|
||||
|
||||
// Locks the default video device's focus at its current setting
// (AVCaptureFocusModeLocked). Silently does nothing when the mode is
// unsupported; logs when the device cannot be locked for configuration.
- (void)lockFocus;
{
    AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    if (![device isFocusModeSupported:AVCaptureFocusModeLocked]) {
        return;
    }
    NSError *error = nil;
    if (![device lockForConfiguration:&error]) {
        NSLog(@"unable to lock device for locked focus configuration %@", [error description]);
        return;
    }
    device.focusMode = AVCaptureFocusModeLocked;
    [device unlockForConfiguration];
}
|
||||
|
||||
// Restores continuous autofocus on the default video device. Silently
// does nothing when the mode is unsupported; logs lock failures.
- (void) unlockFocus;
{
    AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    if (![device isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]) {
        return;
    }
    NSError *error = nil;
    if (![device lockForConfiguration:&error]) {
        NSLog(@"unable to lock device for autofocus configuration %@", [error description]);
        return;
    }
    device.focusMode = AVCaptureFocusModeContinuousAutoFocus;
    [device unlockForConfiguration];
}
|
||||
|
||||
// Locks the default video device's exposure at its current setting
// (AVCaptureExposureModeLocked). No-op when unsupported; logs lock failures.
- (void)lockExposure;
{
    AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    if (![device isExposureModeSupported:AVCaptureExposureModeLocked]) {
        return;
    }
    NSError *error = nil;
    if (![device lockForConfiguration:&error]) {
        NSLog(@"unable to lock device for locked exposure configuration %@", [error description]);
        return;
    }
    device.exposureMode = AVCaptureExposureModeLocked;
    [device unlockForConfiguration];
}
|
||||
|
||||
// Restores continuous auto-exposure on the default video device.
// No-op when unsupported; logs lock failures.
- (void) unlockExposure;
{
    AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    if (![device isExposureModeSupported:AVCaptureExposureModeContinuousAutoExposure]) {
        return;
    }
    NSError *error = nil;
    if (![device lockForConfiguration:&error]) {
        NSLog(@"unable to lock device for autoexposure configuration %@", [error description]);
        return;
    }
    device.exposureMode = AVCaptureExposureModeContinuousAutoExposure;
    [device unlockForConfiguration];
}
|
||||
|
||||
// Locks the default video device's white balance at its current setting
// (AVCaptureWhiteBalanceModeLocked). No-op when unsupported; logs lock failures.
- (void)lockBalance;
{
    AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    if (![device isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeLocked]) {
        return;
    }
    NSError *error = nil;
    if (![device lockForConfiguration:&error]) {
        NSLog(@"unable to lock device for locked white balance configuration %@", [error description]);
        return;
    }
    device.whiteBalanceMode = AVCaptureWhiteBalanceModeLocked;
    [device unlockForConfiguration];
}
|
||||
|
||||
// Restores continuous auto white balance on the default video device.
// No-op when unsupported; logs lock failures.
- (void) unlockBalance;
{
    AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    if (![device isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance]) {
        return;
    }
    NSError *error = nil;
    if (![device lockForConfiguration:&error]) {
        NSLog(@"unable to lock device for auto white balance configuration %@", [error description]);
        return;
    }
    device.whiteBalanceMode = AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance;
    [device unlockForConfiguration];
}
|
||||
|
||||
@end
|
||||
172
3rdparty/opencv-4.5.4/modules/videoio/src/cap_ios_photo_camera.mm
vendored
Normal file
172
3rdparty/opencv-4.5.4/modules/videoio/src/cap_ios_photo_camera.mm
vendored
Normal file
@@ -0,0 +1,172 @@
|
||||
/*
|
||||
* cap_ios_photo_camera.mm
|
||||
* For iOS video I/O
|
||||
* by Eduard Feicho on 29/07/12
|
||||
* Copyright 2012. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
* 3. The name of the author may not be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
|
||||
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
|
||||
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
|
||||
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
||||
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
||||
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
|
||||
#import "opencv2/videoio/cap_ios.h"
|
||||
#include "precomp.hpp"
|
||||
|
||||
#pragma mark - Private Interface
|
||||
|
||||
|
||||
// Class extension: private state for CvPhotoCamera.
@interface CvPhotoCamera ()
{
    id<CvPhotoCameraDelegate> _delegate;  // backing store for the delegate accessors below
}

// Still-image (JPEG) output attached to the capture session by
// createStillImageOutput.
@property (nonatomic, strong) AVCaptureStillImageOutput* stillImageOutput;

@end
|
||||
|
||||
|
||||
|
||||
#pragma mark - Implementation
|
||||
|
||||
|
||||
@implementation CvPhotoCamera



#pragma mark Public

@synthesize stillImageOutput;

// Delegate accessors use an explicit ivar so the reference is not retained
// by a synthesized strong property (delegate pattern).
- (void)setDelegate:(id<CvPhotoCameraDelegate>)newDelegate {
    _delegate = newDelegate;
}

- (id<CvPhotoCameraDelegate>)delegate {
    return _delegate;
}

#pragma mark - Public interface


// Asynchronously captures one still JPEG frame and delivers it to the
// delegate (photoCamera:capturedImage:) on the main queue. cameraAvailable
// is cleared while a capture is in flight so overlapping requests are
// dropped, and set again once the image has been handed to the delegate.
// NOTE(review): on capture error (error != nil or NULL buffer) the handler
// returns without resetting cameraAvailable — further takePicture calls
// are then permanently ignored; confirm whether that is intended.
- (void)takePicture
{
    if (cameraAvailable == NO) {
        return;
    }
    cameraAvailable = NO;


    [self.stillImageOutput captureStillImageAsynchronouslyFromConnection:self.videoCaptureConnection
                                                       completionHandler:
     ^(CMSampleBufferRef imageSampleBuffer, NSError *error)
     {
         if (error == nil && imageSampleBuffer != NULL)
         {
             // TODO check
             // NSNumber* imageOrientation = [UIImage cgImageOrientationForUIDeviceOrientation:currentDeviceOrientation];
             // CMSetAttachment(imageSampleBuffer, kCGImagePropertyOrientation, imageOrientation, 1);

             NSData *jpegData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageSampleBuffer];

             dispatch_async(dispatch_get_main_queue(), ^{
                 // The session is paused while the UIImage is built and
                 // delivered, then resumed below.
                 [self.captureSession stopRunning];

                 // Make sure we create objects on the main thread in the main context
                 UIImage* newImage = [UIImage imageWithData:jpegData];

                 //UIImageOrientation orientation = [newImage imageOrientation];

                 // TODO: only apply rotation, don't scale, since we can set this directly in the camera
                 /*
                  switch (orientation) {
                      case UIImageOrientationUp:
                      case UIImageOrientationDown:
                          newImage = [newImage imageWithAppliedRotationAndMaxSize:CGSizeMake(640.0, 480.0)];
                          break;
                      case UIImageOrientationLeft:
                      case UIImageOrientationRight:
                          newImage = [newImage imageWithMaxSize:CGSizeMake(640.0, 480.0)];
                      default:
                          break;
                  }
                  */

                 // We have captured the image, we can allow the user to take another picture
                 cameraAvailable = YES;

                 NSLog(@"CvPhotoCamera captured image");
                 [self.delegate photoCamera:self capturedImage:newImage];

                 [self.captureSession startRunning];
             });
         }
     }];


}

// Stops the session (via the superclass) and drops the still image output.
- (void)stop;
{
    [super stop];
    self.stillImageOutput = nil;
}


#pragma mark - Private Interface


// Builds the AVCaptureStillImageOutput (JPEG), attaches it to the session,
// and caches the first video connection found on the output in
// videoCaptureConnection for use by takePicture.
- (void)createStillImageOutput;
{
    // setup still image output with jpeg codec
    self.stillImageOutput = [[AVCaptureStillImageOutput alloc] init];
    NSDictionary *outputSettings = [NSDictionary dictionaryWithObjectsAndKeys:AVVideoCodecJPEG, AVVideoCodecKey, nil];
    [self.stillImageOutput setOutputSettings:outputSettings];
    [self.captureSession addOutput:self.stillImageOutput];

    // Find the (first) connection carrying video and remember it.
    for (AVCaptureConnection *connection in self.stillImageOutput.connections) {
        for (AVCaptureInputPort *port in [connection inputPorts]) {
            if ([port.mediaType isEqual:AVMediaTypeVideo]) {
                self.videoCaptureConnection = connection;
                break;
            }
        }
        if (self.videoCaptureConnection) {
            break;
        }
    }
    NSLog(@"[Camera] still image output created");
}


// Concrete implementation of the abstract base-class hook.
- (void)createCaptureOutput;
{
    [self createStillImageOutput];
}

// Photo camera never uses a custom preview.
- (void)createCustomVideoPreview;
{
    //do nothing, always use AVCaptureVideoPreviewLayer
}


@end
|
||||
653
3rdparty/opencv-4.5.4/modules/videoio/src/cap_ios_video_camera.mm
vendored
Normal file
653
3rdparty/opencv-4.5.4/modules/videoio/src/cap_ios_video_camera.mm
vendored
Normal file
@@ -0,0 +1,653 @@
|
||||
/*
|
||||
* cap_ios_video_camera.mm
|
||||
* For iOS video I/O
|
||||
* by Eduard Feicho on 29/07/12
|
||||
* by Alexander Shishkov on 17/07/13
|
||||
* Copyright 2012. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
* 3. The name of the author may not be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
|
||||
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
|
||||
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
|
||||
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
||||
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
||||
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
#import "opencv2/videoio/cap_ios.h"
|
||||
#include "precomp.hpp"
|
||||
#import <UIKit/UIKit.h>
|
||||
|
||||
|
||||
// Degrees -> radians, used for the CALayer rotation transforms below.
static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;}
|
||||
|
||||
#pragma mark - Private Interface
|
||||
|
||||
|
||||
|
||||
|
||||
// Class extension: private state and helpers for CvVideoCamera.
@interface CvVideoCamera () {
    int recordingCountDown;  // frames skipped before recording begins (set in -start)
}

// Pipeline construction helpers, defined in the implementation below.
- (void)createVideoDataOutput;
- (void)createVideoFileOutput;


// CALayer-based preview used when useAVCaptureVideoPreviewLayer is NO.
@property (nonatomic, strong) CALayer *customPreviewLayer;
// Frame output feeding the AVCaptureVideoDataOutputSampleBufferDelegate callback.
@property (nonatomic, strong) AVCaptureVideoDataOutput *videoDataOutput;

@end
|
||||
|
||||
|
||||
|
||||
#pragma mark - Implementation
|
||||
|
||||
|
||||
|
||||
@implementation CvVideoCamera
|
||||
{
|
||||
id<CvVideoCameraDelegate> _delegate;
|
||||
}
|
||||
|
||||
|
||||
|
||||
@synthesize grayscaleMode;
|
||||
|
||||
@synthesize customPreviewLayer;
|
||||
@synthesize videoDataOutput;
|
||||
|
||||
@synthesize recordVideo;
|
||||
@synthesize rotateVideo;
|
||||
//@synthesize videoFileOutput;
|
||||
@synthesize recordAssetWriterInput;
|
||||
@synthesize recordPixelBufferAdaptor;
|
||||
@synthesize recordAssetWriter;
|
||||
|
||||
// Delegate setter backed by an explicit ivar (delegate is not retained by
// a synthesized strong property).
- (void)setDelegate:(id<CvVideoCameraDelegate>)newDelegate {
    _delegate = newDelegate;
}
|
||||
|
||||
// Delegate getter; returns the ivar set by setDelegate:.
- (id<CvVideoCameraDelegate>)delegate {
    return _delegate;
}
|
||||
|
||||
#pragma mark - Constructors
|
||||
|
||||
// Designated initializer: defers to the abstract camera's initializer and
// defaults the video camera to a custom (CALayer) preview with recording
// and rotation disabled.
- (id)initWithParentView:(UIView*)parent;
{
    self = [super initWithParentView:parent];
    if (self) {
        self.useAVCaptureVideoPreviewLayer = NO;
        self.recordVideo = NO;
        self.rotateVideo = NO;
    }
    return self;
}
|
||||
|
||||
|
||||
|
||||
#pragma mark - Public interface
|
||||
|
||||
|
||||
// Starts capture (via the superclass) unless already running. When video
// recording is enabled, any stale recording file from a previous run is
// deleted first so the new AVAssetWriter can create it fresh.
//
// Fix: the "Delete file" log line previously sat outside the
// fileExistsAtPath: branch, so it was printed even when no file existed
// (error stays nil in that case). It now runs only after an actual,
// successful removal, and a failed removal is logged explicitly.
- (void)start;
{
    if (self.running == YES) {
        return;
    }

    recordingCountDown = 10;
    [super start];

    if (self.recordVideo == YES) {
        NSError* error = nil;
        if ([[NSFileManager defaultManager] fileExistsAtPath:[self videoFileString]]) {
            [[NSFileManager defaultManager] removeItemAtPath:[self videoFileString] error:&error];
            if (error == nil) {
                NSLog(@"[Camera] Delete file %@", [self videoFileString]);
            } else {
                NSLog(@"[Camera] Unable to delete file %@: %@", [self videoFileString], error);
            }
        }
    }
}
|
||||
|
||||
|
||||
|
||||
// Stops capture and tears down the video pipeline: releases the data
// output and its dispatch queue, finalizes and releases the recording
// objects when recording was enabled, and removes the custom preview layer.
// NOTE(review): the explicit release/dispatch_release calls indicate this
// file is built with manual reference counting — confirm before converting
// to ARC.
- (void)stop;
{
    if (self.running == YES) {
        [super stop];

        [videoDataOutput release];
        if (videoDataOutputQueue) {
            dispatch_release(videoDataOutputQueue);
        }

        if (self.recordVideo == YES) {
            if (self.recordAssetWriter) {
                // Only a writer in the Writing state can be finalized.
                if (self.recordAssetWriter.status == AVAssetWriterStatusWriting) {
                    [self.recordAssetWriter finishWriting];
                    NSLog(@"[Camera] recording stopped");
                } else {
                    NSLog(@"[Camera] Recording Error: asset writer status is not writing");
                }
                [recordAssetWriter release];
            }

            [recordAssetWriterInput release];
            [recordPixelBufferAdaptor release];
        }

        if (self.customPreviewLayer) {
            [self.customPreviewLayer removeFromSuperlayer];
            self.customPreviewLayer = nil;
        }
    }
}
|
||||
|
||||
// TODO fix
// Rotates and resizes the custom preview layer to match the given
// *interface* orientation, combined with the session's configured video
// orientation. The net rotation is computed in degrees, then applied as a
// CGAffineTransform; bounds are swapped for 90/270-degree rotations.
- (void)adjustLayoutToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation;
{

    NSLog(@"layout preview layer");
    if (self.parentView != nil) {

        CALayer* layer = self.customPreviewLayer;
        CGRect bounds = self.customPreviewLayer.bounds;
        int rotation_angle = 0;
        bool flip_bounds = false;

        // Base rotation from the UI orientation.
        switch (interfaceOrientation) {
            case UIInterfaceOrientationPortrait:
                NSLog(@"to Portrait");
                rotation_angle = 270;
                break;
            case UIInterfaceOrientationPortraitUpsideDown:
                rotation_angle = 90;
                NSLog(@"to UpsideDown");
                break;
            case UIInterfaceOrientationLandscapeLeft:
                rotation_angle = 0;
                NSLog(@"to LandscapeLeft");
                break;
            case UIInterfaceOrientationLandscapeRight:
                rotation_angle = 180;
                NSLog(@"to LandscapeRight");
                break;
            default:
                break; // leave the layer in its last known orientation
        }

        // Additional rotation from the configured capture orientation.
        // NOTE(review): the Portrait case has no break and falls through to
        // LandscapeLeft's break — harmless as written (LandscapeLeft adds
        // nothing), but confirm the fallthrough is intentional.
        switch (self.defaultAVCaptureVideoOrientation) {
            case AVCaptureVideoOrientationLandscapeRight:
                rotation_angle += 180;
                break;
            case AVCaptureVideoOrientationPortraitUpsideDown:
                rotation_angle += 270;
                break;
            case AVCaptureVideoOrientationPortrait:
                rotation_angle += 90;
            case AVCaptureVideoOrientationLandscapeLeft:
                break;
            default:
                break;
        }
        rotation_angle = rotation_angle % 360;

        if (rotation_angle == 90 || rotation_angle == 270) {
            flip_bounds = true;
        }

        if (flip_bounds) {
            NSLog(@"flip bounds");
            bounds = CGRectMake(0, 0, bounds.size.height, bounds.size.width);
        }

        layer.position = CGPointMake(self.parentView.frame.size.width/2., self.parentView.frame.size.height/2.);
        self.customPreviewLayer.bounds = CGRectMake(0, 0, self.parentView.frame.size.width, self.parentView.frame.size.height);

        layer.affineTransform = CGAffineTransformMakeRotation( DegreesToRadians(rotation_angle) );
        layer.bounds = bounds;
    }

}
|
||||
|
||||
// TODO fix
// Rotates the custom preview layer to match the current *device*
// orientation (currentDeviceOrientation, maintained by the base class),
// combined with the session's configured video orientation. Same rotation
// math as adjustLayoutToInterfaceOrientation:, but driven by the device
// rather than the interface orientation.
- (void)layoutPreviewLayer;
{
    NSLog(@"layout preview layer");
    if (self.parentView != nil) {

        CALayer* layer = self.customPreviewLayer;
        CGRect bounds = self.customPreviewLayer.bounds;
        int rotation_angle = 0;
        bool flip_bounds = false;

        // Base rotation from the physical device orientation; face up/down
        // keep the last known orientation.
        switch (currentDeviceOrientation) {
            case UIDeviceOrientationPortrait:
                rotation_angle = 270;
                break;
            case UIDeviceOrientationPortraitUpsideDown:
                rotation_angle = 90;
                break;
            case UIDeviceOrientationLandscapeLeft:
                NSLog(@"left");
                rotation_angle = 180;
                break;
            case UIDeviceOrientationLandscapeRight:
                NSLog(@"right");
                rotation_angle = 0;
                break;
            case UIDeviceOrientationFaceUp:
            case UIDeviceOrientationFaceDown:
            default:
                break; // leave the layer in its last known orientation
        }

        // Additional rotation from the configured capture orientation.
        // NOTE(review): Portrait falls through to LandscapeLeft's break
        // (no own break) — harmless as written; confirm it is intentional.
        switch (self.defaultAVCaptureVideoOrientation) {
            case AVCaptureVideoOrientationLandscapeRight:
                rotation_angle += 180;
                break;
            case AVCaptureVideoOrientationPortraitUpsideDown:
                rotation_angle += 270;
                break;
            case AVCaptureVideoOrientationPortrait:
                rotation_angle += 90;
            case AVCaptureVideoOrientationLandscapeLeft:
                break;
            default:
                break;
        }
        rotation_angle = rotation_angle % 360;

        if (rotation_angle == 90 || rotation_angle == 270) {
            flip_bounds = true;
        }

        if (flip_bounds) {
            NSLog(@"flip bounds");
            bounds = CGRectMake(0, 0, bounds.size.height, bounds.size.width);
        }

        layer.position = CGPointMake(self.parentView.frame.size.width/2., self.parentView.frame.size.height/2.);
        layer.affineTransform = CGAffineTransformMakeRotation( DegreesToRadians(rotation_angle) );
        layer.bounds = bounds;
    }

}
|
||||
|
||||
#pragma mark - Private Interface
|
||||
|
||||
// Builds the AVCaptureVideoDataOutput: pixel format (YUV for grayscale,
// BGRA for color), late-frame dropping, FPS, mirroring for the front
// camera, video orientation, the custom preview CALayer, and the serial
// queue on which sample buffers are delivered to this object.
- (void)createVideoDataOutput;
{
    // Make a video data output
    self.videoDataOutput = [AVCaptureVideoDataOutput new];

    // In grayscale mode we want YUV (YpCbCr 4:2:0) so we can directly access the graylevel intensity values (Y component)
    // In color mode we, BGRA format is used
    OSType format = self.grayscaleMode ? kCVPixelFormatType_420YpCbCr8BiPlanarFullRange : kCVPixelFormatType_32BGRA;

    self.videoDataOutput.videoSettings = [NSDictionary dictionaryWithObject:[NSNumber numberWithUnsignedInt:format]
                                                                     forKey:(id)kCVPixelBufferPixelFormatTypeKey];

    // discard if the data output queue is blocked (as we process the still image)
    [self.videoDataOutput setAlwaysDiscardsLateVideoFrames:YES];

    if ( [self.captureSession canAddOutput:self.videoDataOutput] ) {
        [self.captureSession addOutput:self.videoDataOutput];
    }
    [[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo] setEnabled:YES];


    // set default FPS
    // NOTE(review): objectAtIndex:0 raises if the session has no inputs or
    // the device reports no frame-rate ranges — assumes createCaptureDevice
    // already attached an input; confirm against the call order in
    // startCaptureSession.
    AVCaptureDeviceInput *currentInput = [self.captureSession.inputs objectAtIndex:0];
    AVCaptureDevice *device = currentInput.device;

    NSError *error = nil;
    [device lockForConfiguration:&error];

    float maxRate = ((AVFrameRateRange*) [device.activeFormat.videoSupportedFrameRateRanges objectAtIndex:0]).maxFrameRate;
    if (maxRate > self.defaultFPS - 1 && error == nil) {
        [device setActiveVideoMinFrameDuration:CMTimeMake(1, self.defaultFPS)];
        [device setActiveVideoMaxFrameDuration:CMTimeMake(1, self.defaultFPS)];
        NSLog(@"[Camera] FPS set to %d", self.defaultFPS);
    } else {
        NSLog(@"[Camera] unable to set defaultFPS at %d FPS, max is %f FPS", self.defaultFPS, maxRate);
    }

    if (error != nil) {
        NSLog(@"[Camera] unable to set defaultFPS: %@", error);
    }

    [device unlockForConfiguration];

    // set video mirroring for front camera (more intuitive)
    if ([self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].supportsVideoMirroring) {
        if (self.defaultAVCaptureDevicePosition == AVCaptureDevicePositionFront) {
            [self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoMirrored = YES;
        } else {
            [self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoMirrored = NO;
        }
    }

    // set default video orientation
    if ([self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].supportsVideoOrientation) {
        [self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoOrientation = self.defaultAVCaptureVideoOrientation;
    }


    // create a custom preview layer
    self.customPreviewLayer = [CALayer layer];
    self.customPreviewLayer.bounds = CGRectMake(0, 0, self.parentView.frame.size.width, self.parentView.frame.size.height);
    self.customPreviewLayer.position = CGPointMake(self.parentView.frame.size.width/2., self.parentView.frame.size.height/2.);
    [self updateOrientation];

    // create a serial dispatch queue used for the sample buffer delegate as well as when a still image is captured
    // a serial dispatch queue must be used to guarantee that video frames will be delivered in order
    // see the header doc for setSampleBufferDelegate:queue: for more information
    videoDataOutputQueue = dispatch_queue_create("VideoDataOutputQueue", DISPATCH_QUEUE_SERIAL);
    [self.videoDataOutput setSampleBufferDelegate:self queue:videoDataOutputQueue];


    NSLog(@"[Camera] created AVCaptureVideoDataOutput");
}
|
||||
|
||||
|
||||
|
||||
// Builds the H.264/MPEG-4 recording pipeline: an AVAssetWriterInput sized
// to the current imageWidth/imageHeight, a pixel-buffer adaptor whose
// format matches the capture format (YUV in grayscale mode, BGRA in
// color), and the AVAssetWriter targeting [self videoFileURL].
// NOTE(review): assumes updateSize has run so imageWidth/imageHeight are
// set — confirm against the caller.
- (void)createVideoFileOutput;
{
    /* Video File Output in H.264, via AVAsserWriter */
    NSLog(@"Create Video with dimensions %dx%d", self.imageWidth, self.imageHeight);

    NSDictionary *outputSettings
    = [NSDictionary dictionaryWithObjectsAndKeys:[NSNumber numberWithInt:self.imageWidth], AVVideoWidthKey,
       [NSNumber numberWithInt:self.imageHeight], AVVideoHeightKey,
       AVVideoCodecH264, AVVideoCodecKey,
       nil
       ];


    self.recordAssetWriterInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:outputSettings];


    int pixelBufferFormat = (self.grayscaleMode == YES) ? kCVPixelFormatType_420YpCbCr8BiPlanarFullRange : kCVPixelFormatType_32BGRA;

    self.recordPixelBufferAdaptor =
    [[AVAssetWriterInputPixelBufferAdaptor alloc]
     initWithAssetWriterInput:self.recordAssetWriterInput
     sourcePixelBufferAttributes:[NSDictionary dictionaryWithObjectsAndKeys:[NSNumber numberWithInt:pixelBufferFormat], kCVPixelBufferPixelFormatTypeKey, nil]];

    NSError* error = nil;
    NSLog(@"Create AVAssetWriter with url: %@", [self videoFileURL]);
    self.recordAssetWriter = [AVAssetWriter assetWriterWithURL:[self videoFileURL]
                                                      fileType:AVFileTypeMPEG4
                                                         error:&error];
    if (error != nil) {
        NSLog(@"[Camera] Unable to create AVAssetWriter: %@", error);
    }

    [self.recordAssetWriter addInput:self.recordAssetWriterInput];
    // Capture is live, so the writer must not stall waiting for media.
    self.recordAssetWriterInput.expectsMediaDataInRealTime = YES;

    NSLog(@"[Camera] created AVAssetWriter");
}
|
||||
|
||||
|
||||
// Concrete implementation of the abstract base-class hook: always creates
// the frame output, plus the file-recording pipeline when enabled.
- (void)createCaptureOutput;
{
    [self createVideoDataOutput];
    if (self.recordVideo == YES) {
        [self createVideoFileOutput];
    }
}
|
||||
|
||||
// Attaches the CALayer built in createVideoDataOutput to the host view.
- (void)createCustomVideoPreview;
{
    [self.parentView.layer addSublayer:self.customPreviewLayer];
}
|
||||
|
||||
// Renders a CGImage into a newly created 32ARGB CVPixelBuffer and returns
// it. The caller owns the returned buffer and must CVPixelBufferRelease it.
//
// Fix: the options dictionary was passed as
// `(CFDictionaryRef) CFBridgingRetain(options)` with no matching
// CFRelease, leaking one CFDictionary per call. The retained reference is
// now held in a local and released once CVPixelBufferCreate returns
// (Create copies what it needs from the attributes).
- (CVPixelBufferRef) pixelBufferFromCGImage: (CGImageRef) image
{

    CGSize frameSize = CGSizeMake(CGImageGetWidth(image), CGImageGetHeight(image));
    NSDictionary *options = [NSDictionary dictionaryWithObjectsAndKeys:
                             [NSNumber numberWithBool:NO], kCVPixelBufferCGImageCompatibilityKey,
                             [NSNumber numberWithBool:NO], kCVPixelBufferCGBitmapContextCompatibilityKey,
                             nil];
    CVPixelBufferRef pxbuffer = NULL;
    CFDictionaryRef optionsRef = (CFDictionaryRef) CFBridgingRetain(options);
    CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, frameSize.width,
                                          frameSize.height, kCVPixelFormatType_32ARGB, optionsRef,
                                          &pxbuffer);
    CFRelease(optionsRef);  // balance CFBridgingRetain — previously leaked
    NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);

    CVPixelBufferLockBaseAddress(pxbuffer, 0);
    void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);


    // Draw the image into the pixel buffer's backing memory.
    CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(pxdata, frameSize.width,
                                                 frameSize.height, 8, 4*frameSize.width, rgbColorSpace,
                                                 kCGImageAlphaPremultipliedFirst);

    CGContextDrawImage(context, CGRectMake(0, 0, CGImageGetWidth(image),
                                           CGImageGetHeight(image)), image);
    CGColorSpaceRelease(rgbColorSpace);
    CGContextRelease(context);

    CVPixelBufferUnlockBaseAddress(pxbuffer, 0);

    return pxbuffer;
}
|
||||
|
||||
#pragma mark - Protocol AVCaptureVideoDataOutputSampleBufferDelegate
|
||||
|
||||
|
||||
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
|
||||
{
|
||||
(void)captureOutput;
|
||||
(void)connection;
|
||||
auto strongDelegate = self.delegate;
|
||||
if (strongDelegate) {
|
||||
|
||||
// convert from Core Media to Core Video
|
||||
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
|
||||
CVPixelBufferLockBaseAddress(imageBuffer, 0);
|
||||
|
||||
void* bufferAddress;
|
||||
size_t width;
|
||||
size_t height;
|
||||
size_t bytesPerRow;
|
||||
|
||||
CGColorSpaceRef colorSpace;
|
||||
CGContextRef context;
|
||||
|
||||
int format_opencv;
|
||||
|
||||
OSType format = CVPixelBufferGetPixelFormatType(imageBuffer);
|
||||
if (format == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) {
|
||||
|
||||
format_opencv = CV_8UC1;
|
||||
|
||||
bufferAddress = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0);
|
||||
width = CVPixelBufferGetWidthOfPlane(imageBuffer, 0);
|
||||
height = CVPixelBufferGetHeightOfPlane(imageBuffer, 0);
|
||||
bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, 0);
|
||||
|
||||
} else { // expect kCVPixelFormatType_32BGRA
|
||||
|
||||
format_opencv = CV_8UC4;
|
||||
|
||||
bufferAddress = CVPixelBufferGetBaseAddress(imageBuffer);
|
||||
width = CVPixelBufferGetWidth(imageBuffer);
|
||||
height = CVPixelBufferGetHeight(imageBuffer);
|
||||
bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
|
||||
|
||||
}
|
||||
|
||||
// delegate image processing to the delegate
|
||||
cv::Mat image((int)height, (int)width, format_opencv, bufferAddress, bytesPerRow);
|
||||
|
||||
CGImage* dstImage;
|
||||
|
||||
if ([strongDelegate respondsToSelector:@selector(processImage:)]) {
|
||||
[strongDelegate processImage:image];
|
||||
}
|
||||
|
||||
// check if matrix data pointer or dimensions were changed by the delegate
|
||||
bool iOSimage = false;
|
||||
if (height == (size_t)image.rows && width == (size_t)image.cols && format_opencv == image.type() && bufferAddress == image.data && bytesPerRow == image.step) {
|
||||
iOSimage = true;
|
||||
}
|
||||
|
||||
|
||||
// (create color space, create graphics context, render buffer)
|
||||
CGBitmapInfo bitmapInfo;
|
||||
|
||||
// basically we decide if it's a grayscale, rgb or rgba image
|
||||
if (image.channels() == 1) {
|
||||
colorSpace = CGColorSpaceCreateDeviceGray();
|
||||
bitmapInfo = kCGImageAlphaNone;
|
||||
} else if (image.channels() == 3) {
|
||||
colorSpace = CGColorSpaceCreateDeviceRGB();
|
||||
bitmapInfo = kCGImageAlphaNone;
|
||||
if (iOSimage) {
|
||||
bitmapInfo |= kCGBitmapByteOrder32Little;
|
||||
} else {
|
||||
bitmapInfo |= kCGBitmapByteOrder32Big;
|
||||
}
|
||||
} else {
|
||||
colorSpace = CGColorSpaceCreateDeviceRGB();
|
||||
bitmapInfo = kCGImageAlphaPremultipliedFirst;
|
||||
if (iOSimage) {
|
||||
bitmapInfo |= kCGBitmapByteOrder32Little;
|
||||
} else {
|
||||
bitmapInfo |= kCGBitmapByteOrder32Big;
|
||||
}
|
||||
}
|
||||
|
||||
if (iOSimage) {
|
||||
context = CGBitmapContextCreate(bufferAddress, width, height, 8, bytesPerRow, colorSpace, bitmapInfo);
|
||||
dstImage = CGBitmapContextCreateImage(context);
|
||||
CGContextRelease(context);
|
||||
} else {
|
||||
|
||||
NSData *data = [NSData dataWithBytes:image.data length:image.elemSize()*image.total()];
|
||||
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
|
||||
|
||||
// Creating CGImage from cv::Mat
|
||||
dstImage = CGImageCreate(image.cols, // width
|
||||
image.rows, // height
|
||||
8, // bits per component
|
||||
8 * image.elemSize(), // bits per pixel
|
||||
image.step, // bytesPerRow
|
||||
colorSpace, // colorspace
|
||||
bitmapInfo, // bitmap info
|
||||
provider, // CGDataProviderRef
|
||||
NULL, // decode
|
||||
false, // should interpolate
|
||||
kCGRenderingIntentDefault // intent
|
||||
);
|
||||
|
||||
CGDataProviderRelease(provider);
|
||||
}
|
||||
|
||||
|
||||
// render buffer
|
||||
dispatch_sync(dispatch_get_main_queue(), ^{
|
||||
self.customPreviewLayer.contents = (__bridge id)dstImage;
|
||||
});
|
||||
|
||||
|
||||
recordingCountDown--;
|
||||
if (self.recordVideo == YES && recordingCountDown < 0) {
|
||||
lastSampleTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
|
||||
// CMTimeShow(lastSampleTime);
|
||||
if (self.recordAssetWriter.status != AVAssetWriterStatusWriting) {
|
||||
[self.recordAssetWriter startWriting];
|
||||
[self.recordAssetWriter startSessionAtSourceTime:lastSampleTime];
|
||||
if (self.recordAssetWriter.status != AVAssetWriterStatusWriting) {
|
||||
NSLog(@"[Camera] Recording Error: asset writer status is not writing: %@", self.recordAssetWriter.error);
|
||||
return;
|
||||
} else {
|
||||
NSLog(@"[Camera] Video recording started");
|
||||
}
|
||||
}
|
||||
|
||||
if (self.recordAssetWriterInput.readyForMoreMediaData) {
|
||||
CVImageBufferRef pixelBuffer = [self pixelBufferFromCGImage:dstImage];
|
||||
if (! [self.recordPixelBufferAdaptor appendPixelBuffer:pixelBuffer
|
||||
withPresentationTime:lastSampleTime] ) {
|
||||
NSLog(@"Video Writing Error");
|
||||
}
|
||||
if (pixelBuffer != nullptr)
|
||||
CVPixelBufferRelease(pixelBuffer);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
// cleanup
|
||||
CGImageRelease(dstImage);
|
||||
|
||||
CGColorSpaceRelease(colorSpace);
|
||||
|
||||
CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
- (void)updateOrientation;
|
||||
{
|
||||
if (self.rotateVideo == YES)
|
||||
{
|
||||
NSLog(@"rotate..");
|
||||
self.customPreviewLayer.bounds = CGRectMake(0, 0, self.parentView.frame.size.width, self.parentView.frame.size.height);
|
||||
[self layoutPreviewLayer];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
- (void)saveVideo;
|
||||
{
|
||||
if (self.recordVideo == NO) {
|
||||
return;
|
||||
}
|
||||
|
||||
UISaveVideoAtPathToSavedPhotosAlbum([self videoFileString], nil, nil, NULL);
|
||||
}
|
||||
|
||||
|
||||
- (NSURL *)videoFileURL;
|
||||
{
|
||||
NSString *outputPath = [[NSString alloc] initWithFormat:@"%@%@", NSTemporaryDirectory(), @"output.mov"];
|
||||
NSURL *outputURL = [NSURL fileURLWithPath:outputPath];
|
||||
NSFileManager *fileManager = [NSFileManager defaultManager];
|
||||
if ([fileManager fileExistsAtPath:outputPath]) {
|
||||
NSLog(@"file exists");
|
||||
}
|
||||
return outputURL;
|
||||
}
|
||||
|
||||
|
||||
|
||||
- (NSString *)videoFileString;
|
||||
{
|
||||
NSString *outputPath = [[NSString alloc] initWithFormat:@"%@%@", NSTemporaryDirectory(), @"output.mov"];
|
||||
return outputPath;
|
||||
}
|
||||
|
||||
@end
|
||||
249
3rdparty/opencv-4.5.4/modules/videoio/src/cap_librealsense.cpp
vendored
Normal file
249
3rdparty/opencv-4.5.4/modules/videoio/src/cap_librealsense.cpp
vendored
Normal file
@@ -0,0 +1,249 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
#ifdef HAVE_LIBREALSENSE
|
||||
#include "cap_librealsense.hpp"
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
VideoCapture_LibRealsense::VideoCapture_LibRealsense(int) : mAlign(RS2_STREAM_COLOR)
|
||||
{
|
||||
try
|
||||
{
|
||||
rs2::config config;
|
||||
// Configure all streams to run at VGA resolution at default fps
|
||||
config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16);
|
||||
config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_BGR8);
|
||||
config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8);
|
||||
mPipe.start(config);
|
||||
}
|
||||
catch (const rs2::error&)
|
||||
{
|
||||
}
|
||||
}
|
||||
VideoCapture_LibRealsense::~VideoCapture_LibRealsense(){}
|
||||
|
||||
double VideoCapture_LibRealsense::getProperty(int propIdx) const
|
||||
{
|
||||
double propValue = 0.0;
|
||||
|
||||
const int purePropIdx = propIdx & ~CAP_INTELPERC_GENERATORS_MASK;
|
||||
if((propIdx & CAP_INTELPERC_GENERATORS_MASK) == CAP_INTELPERC_IMAGE_GENERATOR)
|
||||
{
|
||||
propValue = getImageGeneratorProperty(purePropIdx);
|
||||
}
|
||||
else if((propIdx & CAP_INTELPERC_GENERATORS_MASK) == CAP_INTELPERC_DEPTH_GENERATOR)
|
||||
{
|
||||
propValue = getDepthGeneratorProperty(purePropIdx);
|
||||
}
|
||||
else if((propIdx & CAP_INTELPERC_GENERATORS_MASK) == CAP_INTELPERC_IR_GENERATOR)
|
||||
{
|
||||
propValue = getIrGeneratorProperty(purePropIdx);
|
||||
}
|
||||
else
|
||||
{
|
||||
propValue = getCommonProperty(purePropIdx);
|
||||
}
|
||||
|
||||
return propValue;
|
||||
}
|
||||
|
||||
double VideoCapture_LibRealsense::getImageGeneratorProperty(int propIdx) const
|
||||
{
|
||||
double propValue = 0.0;
|
||||
const rs2::video_stream_profile profile = mPipe.get_active_profile().get_stream(RS2_STREAM_COLOR).as<rs2::video_stream_profile>();
|
||||
if(!profile)
|
||||
{
|
||||
return propValue;
|
||||
}
|
||||
|
||||
switch(propIdx)
|
||||
{
|
||||
case CAP_PROP_FRAME_WIDTH:
|
||||
propValue = static_cast<double>(profile.width());
|
||||
break;
|
||||
case CAP_PROP_FRAME_HEIGHT:
|
||||
propValue = static_cast<double>(profile.height());
|
||||
break;
|
||||
case CAP_PROP_FPS:
|
||||
propValue = static_cast<double>(profile.fps());
|
||||
break;
|
||||
}
|
||||
|
||||
return propValue;
|
||||
}
|
||||
|
||||
double VideoCapture_LibRealsense::getDepthGeneratorProperty(int propIdx) const
|
||||
{
|
||||
double propValue = 0.0;
|
||||
const rs2::video_stream_profile profile = mPipe.get_active_profile().get_stream(RS2_STREAM_DEPTH).as<rs2::video_stream_profile>();
|
||||
const rs2::depth_sensor sensor = mPipe.get_active_profile().get_device().first<rs2::depth_sensor>();
|
||||
if(!profile || !sensor)
|
||||
{
|
||||
return propValue;
|
||||
}
|
||||
|
||||
switch(propIdx)
|
||||
{
|
||||
case CAP_PROP_FRAME_WIDTH:
|
||||
propValue = static_cast<double>(profile.width());
|
||||
break;
|
||||
case CAP_PROP_FRAME_HEIGHT:
|
||||
propValue = static_cast<double>(profile.height());
|
||||
break;
|
||||
case CAP_PROP_FPS:
|
||||
propValue = static_cast<double>(profile.fps());
|
||||
break;
|
||||
case CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE:
|
||||
propValue = static_cast<double>(sensor.get_depth_scale());
|
||||
break;
|
||||
case CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ:
|
||||
propValue = static_cast<double>(profile.get_intrinsics().fx);
|
||||
break;
|
||||
case CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT:
|
||||
propValue = static_cast<double>(profile.get_intrinsics().fy);
|
||||
break;
|
||||
}
|
||||
|
||||
return propValue;
|
||||
}
|
||||
|
||||
double VideoCapture_LibRealsense::getIrGeneratorProperty(int propIdx) const
|
||||
{
|
||||
double propValue = 0.0;
|
||||
const rs2::video_stream_profile profile = mPipe.get_active_profile().get_stream(RS2_STREAM_INFRARED).as<rs2::video_stream_profile>();
|
||||
if(!profile)
|
||||
{
|
||||
return propValue;
|
||||
}
|
||||
|
||||
switch(propIdx)
|
||||
{
|
||||
case CAP_PROP_FRAME_WIDTH:
|
||||
propValue = static_cast<double>(profile.width());
|
||||
break;
|
||||
case CAP_PROP_FRAME_HEIGHT:
|
||||
propValue = static_cast<double>(profile.height());
|
||||
break;
|
||||
case CAP_PROP_FPS:
|
||||
propValue = static_cast<double>(profile.fps());
|
||||
break;
|
||||
}
|
||||
|
||||
return propValue;
|
||||
}
|
||||
|
||||
double VideoCapture_LibRealsense::getCommonProperty(int propIdx) const
|
||||
{
|
||||
double propValue = 0.0;
|
||||
const rs2::video_stream_profile profile = mPipe.get_active_profile().get_stream(RS2_STREAM_DEPTH).as<rs2::video_stream_profile>();
|
||||
const rs2::depth_sensor sensor = mPipe.get_active_profile().get_device().first<rs2::depth_sensor>();
|
||||
if(!profile || !sensor)
|
||||
{
|
||||
return propValue;
|
||||
}
|
||||
|
||||
switch(propIdx)
|
||||
{
|
||||
case CAP_PROP_FRAME_WIDTH:
|
||||
case CAP_PROP_FRAME_HEIGHT:
|
||||
case CAP_PROP_FPS:
|
||||
propValue = getDepthGeneratorProperty(propIdx);
|
||||
break;
|
||||
case CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE:
|
||||
propValue = static_cast<double>(sensor.get_depth_scale());
|
||||
break;
|
||||
case CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ:
|
||||
propValue = static_cast<double>(profile.get_intrinsics().fx);
|
||||
break;
|
||||
case CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT:
|
||||
propValue = static_cast<double>(profile.get_intrinsics().fy);
|
||||
break;
|
||||
}
|
||||
|
||||
return propValue;
|
||||
}
|
||||
|
||||
bool VideoCapture_LibRealsense::setProperty(int, double)
|
||||
{
|
||||
bool isSet = false;
|
||||
return isSet;
|
||||
}
|
||||
|
||||
bool VideoCapture_LibRealsense::grabFrame()
|
||||
{
|
||||
if (!isOpened())
|
||||
return false;
|
||||
|
||||
try
|
||||
{
|
||||
mData = mAlign.process(mPipe.wait_for_frames());
|
||||
}
|
||||
catch (const rs2::error&)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
bool VideoCapture_LibRealsense::retrieveFrame(int outputType, cv::OutputArray frame)
|
||||
{
|
||||
rs2::video_frame _frame(nullptr);
|
||||
int type;
|
||||
switch (outputType)
|
||||
{
|
||||
case CAP_INTELPERC_DEPTH_MAP:
|
||||
_frame = mData.get_depth_frame().as<rs2::video_frame>();
|
||||
type = CV_16UC1;
|
||||
break;
|
||||
case CAP_INTELPERC_IR_MAP:
|
||||
_frame = mData.get_infrared_frame();
|
||||
type = CV_8UC1;
|
||||
break;
|
||||
case CAP_INTELPERC_IMAGE:
|
||||
_frame = mData.get_color_frame();
|
||||
type = CV_8UC3;
|
||||
break;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
|
||||
try
|
||||
{
|
||||
// we copy the data straight away, so const_cast should be fine
|
||||
void* data = const_cast<void*>(_frame.get_data());
|
||||
Mat(_frame.get_height(), _frame.get_width(), type, data, _frame.get_stride_in_bytes()).copyTo(frame);
|
||||
|
||||
if(_frame.get_profile().format() == RS2_FORMAT_RGB8)
|
||||
cvtColor(frame, frame, COLOR_RGB2BGR);
|
||||
}
|
||||
catch (const rs2::error&)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
int VideoCapture_LibRealsense::getCaptureDomain()
|
||||
{
|
||||
return CAP_INTELPERC;
|
||||
}
|
||||
|
||||
bool VideoCapture_LibRealsense::isOpened() const
|
||||
{
|
||||
return bool(std::shared_ptr<rs2_pipeline>(mPipe));
|
||||
}
|
||||
|
||||
Ptr<IVideoCapture> create_RealSense_capture(int index)
|
||||
{
|
||||
return makePtr<VideoCapture_LibRealsense>(index);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
43
3rdparty/opencv-4.5.4/modules/videoio/src/cap_librealsense.hpp
vendored
Normal file
43
3rdparty/opencv-4.5.4/modules/videoio/src/cap_librealsense.hpp
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
|
||||
#ifndef _CAP_LIBREALSENE_HPP_
|
||||
#define _CAP_LIBREALSENE_HPP_
|
||||
|
||||
#ifdef HAVE_LIBREALSENSE
|
||||
|
||||
#include <librealsense2/rs.hpp>
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
class VideoCapture_LibRealsense : public IVideoCapture
|
||||
{
|
||||
public:
|
||||
VideoCapture_LibRealsense(int index);
|
||||
virtual ~VideoCapture_LibRealsense();
|
||||
|
||||
virtual double getProperty(int propIdx) const CV_OVERRIDE;
|
||||
virtual bool setProperty(int propIdx, double propVal) CV_OVERRIDE;
|
||||
|
||||
virtual bool grabFrame() CV_OVERRIDE;
|
||||
virtual bool retrieveFrame(int outputType, OutputArray frame) CV_OVERRIDE;
|
||||
virtual int getCaptureDomain() CV_OVERRIDE;
|
||||
virtual bool isOpened() const CV_OVERRIDE;
|
||||
|
||||
protected:
|
||||
rs2::pipeline mPipe;
|
||||
rs2::frameset mData;
|
||||
rs2::align mAlign;
|
||||
|
||||
double getDepthGeneratorProperty(int propIdx) const;
|
||||
double getImageGeneratorProperty(int propIdx) const;
|
||||
double getIrGeneratorProperty(int propIdx) const;
|
||||
double getCommonProperty(int propIdx) const;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
#endif
|
||||
233
3rdparty/opencv-4.5.4/modules/videoio/src/cap_mfx_common.cpp
vendored
Normal file
233
3rdparty/opencv-4.5.4/modules/videoio/src/cap_mfx_common.cpp
vendored
Normal file
@@ -0,0 +1,233 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html
|
||||
|
||||
#include "cap_mfx_common.hpp"
|
||||
|
||||
// Linux specific
|
||||
#ifdef __linux__
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <fcntl.h>
|
||||
#endif
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
static mfxIMPL getImpl()
|
||||
{
|
||||
static const size_t res = utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_IMPL", MFX_IMPL_AUTO_ANY);
|
||||
return (mfxIMPL)res;
|
||||
}
|
||||
|
||||
static size_t getExtraSurfaceNum()
|
||||
{
|
||||
static const size_t res = cv::utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_EXTRA_SURFACE_NUM", 1);
|
||||
return res;
|
||||
}
|
||||
|
||||
static size_t getPoolTimeoutSec()
|
||||
{
|
||||
static const size_t res = utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_POOL_TIMEOUT", 1);
|
||||
return res;
|
||||
}
|
||||
|
||||
//==================================================================================================
|
||||
|
||||
bool DeviceHandler::init(MFXVideoSession &session)
|
||||
{
|
||||
mfxStatus res = MFX_ERR_NONE;
|
||||
mfxIMPL impl = getImpl();
|
||||
mfxVersion ver = { {19, 1} };
|
||||
|
||||
res = session.Init(impl, &ver);
|
||||
DBG(cout << "MFX SessionInit: " << res << endl);
|
||||
|
||||
res = session.QueryIMPL(&impl);
|
||||
DBG(cout << "MFX QueryIMPL: " << res << " => " << asHex(impl) << endl);
|
||||
|
||||
res = session.QueryVersion(&ver);
|
||||
DBG(cout << "MFX QueryVersion: " << res << " => " << ver.Major << "." << ver.Minor << endl);
|
||||
|
||||
if (res != MFX_ERR_NONE)
|
||||
return false;
|
||||
|
||||
return initDeviceSession(session);
|
||||
}
|
||||
|
||||
//==================================================================================================
|
||||
|
||||
#ifdef __linux__
|
||||
|
||||
VAHandle::VAHandle() {
|
||||
// TODO: provide a way of modifying this path
|
||||
const string filename = "/dev/dri/renderD128";
|
||||
file = open(filename.c_str(), O_RDWR);
|
||||
if (file < 0)
|
||||
CV_Error(Error::StsError, "Can't open file: " + filename);
|
||||
display = vaGetDisplayDRM(file);
|
||||
}
|
||||
|
||||
VAHandle::~VAHandle() {
|
||||
if (display) {
|
||||
vaTerminate(display);
|
||||
}
|
||||
if (file >= 0) {
|
||||
close(file);
|
||||
}
|
||||
}
|
||||
|
||||
bool VAHandle::initDeviceSession(MFXVideoSession &session) {
|
||||
int majorVer = 0, minorVer = 0;
|
||||
VAStatus va_res = vaInitialize(display, &majorVer, &minorVer);
|
||||
DBG(cout << "vaInitialize: " << va_res << endl << majorVer << '.' << minorVer << endl);
|
||||
if (va_res == VA_STATUS_SUCCESS) {
|
||||
mfxStatus mfx_res = session.SetHandle(static_cast<mfxHandleType>(MFX_HANDLE_VA_DISPLAY), display);
|
||||
DBG(cout << "MFX SetHandle: " << mfx_res << endl);
|
||||
if (mfx_res == MFX_ERR_NONE) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
#endif // __linux__
|
||||
|
||||
DeviceHandler * createDeviceHandler()
|
||||
{
|
||||
#if defined __linux__
|
||||
return new VAHandle();
|
||||
#elif defined _WIN32
|
||||
return new DXHandle();
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
//==================================================================================================
|
||||
|
||||
SurfacePool::SurfacePool(ushort width_, ushort height_, ushort count, const mfxFrameInfo &frameInfo, uchar bpp)
|
||||
: width(alignSize(width_, 32)),
|
||||
height(alignSize(height_, 32)),
|
||||
oneSize(width * height * bpp / 8),
|
||||
buffers(count * oneSize),
|
||||
surfaces(count)
|
||||
{
|
||||
for(int i = 0; i < count; ++i)
|
||||
{
|
||||
mfxFrameSurface1 &surface = surfaces[i];
|
||||
uint8_t * dataPtr = buffers.data() + oneSize * i;
|
||||
memset(&surface, 0, sizeof(mfxFrameSurface1));
|
||||
surface.Info = frameInfo;
|
||||
surface.Data.Y = dataPtr;
|
||||
surface.Data.UV = dataPtr + width * height;
|
||||
surface.Data.PitchLow = width & 0xFFFF;
|
||||
surface.Data.PitchHigh = (width >> 16) & 0xFFFF;
|
||||
DBG(cout << "allocate surface " << (void*)&surface << ", Y = " << (void*)dataPtr << " (" << width << "x" << height << ")" << endl);
|
||||
}
|
||||
DBG(cout << "Allocated: " << endl
|
||||
<< "- surface data: " << buffers.size() << " bytes" << endl
|
||||
<< "- surface headers: " << surfaces.size() * sizeof(mfxFrameSurface1) << " bytes" << endl);
|
||||
}
|
||||
|
||||
SurfacePool::~SurfacePool()
|
||||
{
|
||||
}
|
||||
|
||||
SurfacePool * SurfacePool::_create(const mfxFrameAllocRequest &request, const mfxVideoParam ¶ms)
|
||||
{
|
||||
return new SurfacePool(request.Info.Width,
|
||||
request.Info.Height,
|
||||
saturate_cast<ushort>((size_t)request.NumFrameSuggested + getExtraSurfaceNum()),
|
||||
params.mfx.FrameInfo);
|
||||
}
|
||||
|
||||
mfxFrameSurface1 *SurfacePool::getFreeSurface()
|
||||
{
|
||||
const int64 start = cv::getTickCount();
|
||||
do
|
||||
{
|
||||
for(std::vector<mfxFrameSurface1>::iterator i = surfaces.begin(); i != surfaces.end(); ++i)
|
||||
if (!i->Data.Locked)
|
||||
return &(*i);
|
||||
sleep_ms(10);
|
||||
}
|
||||
while((cv::getTickCount() - start) / cv::getTickFrequency() < getPoolTimeoutSec()); // seconds
|
||||
DBG(cout << "No free surface!" << std::endl);
|
||||
return 0;
|
||||
}
|
||||
|
||||
//==================================================================================================
|
||||
|
||||
ReadBitstream::ReadBitstream(const char *filename, size_t maxSize) : drain(false)
|
||||
{
|
||||
input.open(filename, std::ios::in | std::ios::binary);
|
||||
DBG(cout << "Open " << filename << " -> " << input.is_open() << std::endl);
|
||||
memset(&stream, 0, sizeof(stream));
|
||||
stream.MaxLength = (mfxU32)maxSize;
|
||||
stream.Data = new mfxU8[stream.MaxLength];
|
||||
CV_Assert(stream.Data);
|
||||
}
|
||||
|
||||
ReadBitstream::~ReadBitstream()
|
||||
{
|
||||
delete[] stream.Data;
|
||||
}
|
||||
|
||||
bool ReadBitstream::isOpened() const
|
||||
{
|
||||
return input.is_open();
|
||||
}
|
||||
|
||||
bool ReadBitstream::isDone() const
|
||||
{
|
||||
return input.eof();
|
||||
}
|
||||
|
||||
bool ReadBitstream::read()
|
||||
{
|
||||
memmove(stream.Data, stream.Data + stream.DataOffset, stream.DataLength);
|
||||
stream.DataOffset = 0;
|
||||
input.read((char*)(stream.Data + stream.DataLength), stream.MaxLength - stream.DataLength);
|
||||
if (input.eof() || input.good())
|
||||
{
|
||||
mfxU32 bytesRead = (mfxU32)input.gcount();
|
||||
if (bytesRead > 0)
|
||||
{
|
||||
stream.DataLength += bytesRead;
|
||||
DBG(cout << "read " << bytesRead << " bytes" << endl);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
//==================================================================================================
|
||||
|
||||
WriteBitstream::WriteBitstream(const char * filename, size_t maxSize)
|
||||
{
|
||||
output.open(filename, std::ios::out | std::ios::binary);
|
||||
DBG(cout << "BS Open " << filename << " -> " << output.is_open() << std::endl);
|
||||
memset(&stream, 0, sizeof(stream));
|
||||
stream.MaxLength = (mfxU32)maxSize;
|
||||
stream.Data = new mfxU8[stream.MaxLength];
|
||||
DBG(cout << "BS Allocate " << maxSize << " bytes (" << ((float)maxSize / (1 << 20)) << " Mb)" << endl);
|
||||
CV_Assert(stream.Data);
|
||||
}
|
||||
|
||||
WriteBitstream::~WriteBitstream()
|
||||
{
|
||||
delete[] stream.Data;
|
||||
}
|
||||
|
||||
bool WriteBitstream::write()
|
||||
{
|
||||
output.write((char*)(stream.Data + stream.DataOffset), stream.DataLength);
|
||||
stream.DataLength = 0;
|
||||
return output.good();
|
||||
}
|
||||
|
||||
bool WriteBitstream::isOpened() const
|
||||
{
|
||||
return output.is_open();
|
||||
}
|
||||
370
3rdparty/opencv-4.5.4/modules/videoio/src/cap_mfx_common.hpp
vendored
Normal file
370
3rdparty/opencv-4.5.4/modules/videoio/src/cap_mfx_common.hpp
vendored
Normal file
@@ -0,0 +1,370 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html
|
||||
|
||||
#ifndef MFXHELPER_H
|
||||
#define MFXHELPER_H
|
||||
|
||||
#include "opencv2/core.hpp"
|
||||
#include "opencv2/core/utils/configuration.private.hpp"
|
||||
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
#include <sstream>
|
||||
|
||||
#ifdef HAVE_ONEVPL
|
||||
# include <vpl/mfxcommon.h>
|
||||
# include <vpl/mfxstructures.h>
|
||||
# include <vpl/mfxvideo++.h>
|
||||
# include <vpl/mfxvp8.h>
|
||||
# include <vpl/mfxjpeg.h>
|
||||
#else
|
||||
# include <mfxcommon.h>
|
||||
# include <mfxstructures.h>
|
||||
# include <mfxvideo++.h>
|
||||
# include <mfxvp8.h>
|
||||
# include <mfxjpeg.h>
|
||||
# ifdef HAVE_MFX_PLUGIN
|
||||
# include <mfxplugin++.h>
|
||||
# endif
|
||||
#endif
|
||||
|
||||
// //
|
||||
// Debug helpers //
|
||||
// //
|
||||
|
||||
#if 0
|
||||
# define DBG(i) i
|
||||
#else
|
||||
# define DBG(i)
|
||||
#endif
|
||||
|
||||
#if 1
|
||||
# define MSG(i) i
|
||||
#else
|
||||
# define MSG(i)
|
||||
#endif
|
||||
|
||||
template <typename T>
|
||||
struct HexWrap {
|
||||
HexWrap(T val_) : val(val_) {}
|
||||
T val;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
inline std::ostream & operator<<(std::ostream &out, const HexWrap<T> &wrap) {
|
||||
std::ios_base::fmtflags flags = out.flags(std::ios::hex | std::ios::showbase);
|
||||
out << wrap.val;
|
||||
out.flags(flags);
|
||||
return out;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline ::HexWrap<T> asHex(const T & val) {
|
||||
return ::HexWrap<T>(val);
|
||||
}
|
||||
|
||||
struct FourCC
|
||||
{
|
||||
FourCC(uint val) : val32(val) {}
|
||||
FourCC(char a, char b, char c, char d) { val8[0] = a; val8[1] = b; val8[2] = c; val8[3] = d; }
|
||||
union {
|
||||
uint val32;
|
||||
int vali32;
|
||||
uchar val8[4];
|
||||
};
|
||||
};
|
||||
|
||||
inline std::ostream & operator<<(std::ostream &out, FourCC cc) {
|
||||
for (size_t i = 0; i < 4; out << cc.val8[i++]) {}
|
||||
out << " (" << asHex(cc.val32) << ")";
|
||||
return out;
|
||||
}
|
||||
|
||||
inline std::string mfxStatusToString(mfxStatus s) {
|
||||
switch (s)
|
||||
{
|
||||
case MFX_ERR_NONE: return "MFX_ERR_NONE";
|
||||
case MFX_ERR_UNKNOWN: return "MFX_ERR_UNKNOWN";
|
||||
case MFX_ERR_NULL_PTR: return "MFX_ERR_NULL_PTR";
|
||||
case MFX_ERR_UNSUPPORTED: return "MFX_ERR_UNSUPPORTED";
|
||||
case MFX_ERR_MEMORY_ALLOC: return "MFX_ERR_MEMORY_ALLOC";
|
||||
case MFX_ERR_NOT_ENOUGH_BUFFER: return "MFX_ERR_NOT_ENOUGH_BUFFER";
|
||||
case MFX_ERR_INVALID_HANDLE: return "MFX_ERR_INVALID_HANDLE";
|
||||
case MFX_ERR_LOCK_MEMORY: return "MFX_ERR_LOCK_MEMORY";
|
||||
case MFX_ERR_NOT_INITIALIZED: return "MFX_ERR_NOT_INITIALIZED";
|
||||
case MFX_ERR_NOT_FOUND: return "MFX_ERR_NOT_FOUND";
|
||||
case MFX_ERR_MORE_DATA: return "MFX_ERR_MORE_DATA";
|
||||
case MFX_ERR_MORE_SURFACE: return "MFX_ERR_MORE_SURFACE";
|
||||
case MFX_ERR_ABORTED: return "MFX_ERR_ABORTED";
|
||||
case MFX_ERR_DEVICE_LOST: return "MFX_ERR_DEVICE_LOST";
|
||||
case MFX_ERR_INCOMPATIBLE_VIDEO_PARAM: return "MFX_ERR_INCOMPATIBLE_VIDEO_PARAM";
|
||||
case MFX_ERR_INVALID_VIDEO_PARAM: return "MFX_ERR_INVALID_VIDEO_PARAM";
|
||||
case MFX_ERR_UNDEFINED_BEHAVIOR: return "MFX_ERR_UNDEFINED_BEHAVIOR";
|
||||
case MFX_ERR_DEVICE_FAILED: return "MFX_ERR_DEVICE_FAILED";
|
||||
case MFX_ERR_MORE_BITSTREAM: return "MFX_ERR_MORE_BITSTREAM";
|
||||
case MFX_ERR_GPU_HANG: return "MFX_ERR_GPU_HANG";
|
||||
case MFX_ERR_REALLOC_SURFACE: return "MFX_ERR_REALLOC_SURFACE";
|
||||
case MFX_WRN_IN_EXECUTION: return "MFX_WRN_IN_EXECUTION";
|
||||
case MFX_WRN_DEVICE_BUSY: return "MFX_WRN_DEVICE_BUSY";
|
||||
case MFX_WRN_VIDEO_PARAM_CHANGED: return "MFX_WRN_VIDEO_PARAM_CHANGED";
|
||||
case MFX_WRN_PARTIAL_ACCELERATION: return "MFX_WRN_PARTIAL_ACCELERATION";
|
||||
case MFX_WRN_INCOMPATIBLE_VIDEO_PARAM: return "MFX_WRN_INCOMPATIBLE_VIDEO_PARAM";
|
||||
case MFX_WRN_VALUE_NOT_CHANGED: return "MFX_WRN_VALUE_NOT_CHANGED";
|
||||
case MFX_WRN_OUT_OF_RANGE: return "MFX_WRN_OUT_OF_RANGE";
|
||||
case MFX_WRN_FILTER_SKIPPED: return "MFX_WRN_FILTER_SKIPPED";
|
||||
default: return "<Invalid or unknown mfxStatus>";
|
||||
}
|
||||
}
|
||||
|
||||
inline std::ostream & operator<<(std::ostream &out, mfxStatus s) {
|
||||
out << mfxStatusToString(s) << " (" << (int)s << ")"; return out;
|
||||
}
|
||||
|
||||
inline std::ostream & operator<<(std::ostream &out, const mfxInfoMFX &info) {
|
||||
out << "InfoMFX:" << std::endl
|
||||
<< "| Codec: " << FourCC(info.CodecId) << " / " << info.CodecProfile << " / " << info.CodecLevel << std::endl
|
||||
<< "| DecodedOrder: " << info.DecodedOrder << std::endl
|
||||
<< "| TimeStampCalc: " << info.TimeStampCalc << std::endl
|
||||
;
|
||||
return out;
|
||||
}
|
||||
|
||||
inline std::ostream & operator<<(std::ostream & out, const mfxFrameInfo & info) {
|
||||
out << "FrameInfo: " << std::endl
|
||||
<< "| FourCC: " << FourCC(info.FourCC) << std::endl
|
||||
<< "| Size: " << info.Width << "x" << info.Height << std::endl
|
||||
<< "| ROI: " << "(" << info.CropX << ";" << info.CropY << ") " << info.CropW << "x" << info.CropH << std::endl
|
||||
<< "| BitDepth(L/C): " << info.BitDepthLuma << " / " << info.BitDepthChroma << std::endl
|
||||
<< "| Shift: " << info.Shift << std::endl
|
||||
<< "| TemporalID: " << info.FrameId.TemporalId << std::endl
|
||||
<< "| FrameRate: " << info.FrameRateExtN << "/" << info.FrameRateExtD << std::endl
|
||||
<< "| AspectRatio: " << info.AspectRatioW << "x" << info.AspectRatioH << std::endl
|
||||
<< "| PicStruct: " << info.PicStruct << std::endl
|
||||
<< "| ChromaFormat: " << info.ChromaFormat << std::endl
|
||||
;
|
||||
return out;
|
||||
}
|
||||
|
||||
inline std::ostream & operator<<(std::ostream &out, const mfxFrameData &data) {
|
||||
out << "FrameData:" << std::endl
|
||||
<< "| NumExtParam: " << data.NumExtParam << std::endl
|
||||
<< "| MemType: " << data.MemType << std::endl
|
||||
<< "| PitchHigh: " << data.PitchHigh << std::endl
|
||||
<< "| TimeStamp: " << data.TimeStamp << std::endl
|
||||
<< "| FrameOrder: " << data.FrameOrder << std::endl
|
||||
<< "| Locked: " << data.Locked << std::endl
|
||||
<< "| Pitch: " << data.PitchHigh << ", " << data.PitchLow << std::endl
|
||||
<< "| Y: " << (void*)data.Y << std::endl
|
||||
<< "| U: " << (void*)data.U << std::endl
|
||||
<< "| V: " << (void*)data.V << std::endl
|
||||
;
|
||||
return out;
|
||||
}
|
||||
|
||||
//==================================================================================================
|
||||
|
||||
template <typename T>
|
||||
inline void cleanup(T * &ptr)
|
||||
{
|
||||
if (ptr)
|
||||
{
|
||||
delete ptr;
|
||||
ptr = 0;
|
||||
}
|
||||
}
|
||||
|
||||
//==================================================================================================
|
||||
|
||||
class Plugin
|
||||
{
|
||||
public:
|
||||
static Plugin * loadEncoderPlugin(MFXVideoSession &session, mfxU32 codecId)
|
||||
{
|
||||
#ifdef HAVE_MFX_PLUGIN
|
||||
static const mfxPluginUID hevc_enc_uid = { 0x6f, 0xad, 0xc7, 0x91, 0xa0, 0xc2, 0xeb, 0x47, 0x9a, 0xb6, 0xdc, 0xd5, 0xea, 0x9d, 0xa3, 0x47 };
|
||||
if (codecId == MFX_CODEC_HEVC)
|
||||
return new Plugin(session, hevc_enc_uid);
|
||||
#else
|
||||
CV_UNUSED(session); CV_UNUSED(codecId);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
static Plugin * loadDecoderPlugin(MFXVideoSession &session, mfxU32 codecId)
|
||||
{
|
||||
#ifdef HAVE_MFX_PLUGIN
|
||||
static const mfxPluginUID hevc_dec_uid = { 0x33, 0xa6, 0x1c, 0x0b, 0x4c, 0x27, 0x45, 0x4c, 0xa8, 0xd8, 0x5d, 0xde, 0x75, 0x7c, 0x6f, 0x8e };
|
||||
if (codecId == MFX_CODEC_HEVC)
|
||||
return new Plugin(session, hevc_dec_uid);
|
||||
#else
|
||||
CV_UNUSED(session); CV_UNUSED(codecId);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
~Plugin()
|
||||
{
|
||||
#ifdef HAVE_MFX_PLUGIN
|
||||
if (isGood())
|
||||
MFXVideoUSER_UnLoad(session, &uid);
|
||||
#endif
|
||||
}
|
||||
bool isGood() const { return res >= MFX_ERR_NONE; }
|
||||
private:
|
||||
mfxStatus res;
|
||||
private:
|
||||
#ifdef HAVE_MFX_PLUGIN
|
||||
MFXVideoSession &session;
|
||||
mfxPluginUID uid;
|
||||
Plugin(MFXVideoSession &_session, mfxPluginUID _uid) : session(_session), uid(_uid)
|
||||
{
|
||||
res = MFXVideoUSER_Load(session, &uid, 1);
|
||||
}
|
||||
#endif
|
||||
Plugin(const Plugin &);
|
||||
Plugin &operator=(const Plugin &);
|
||||
};
|
||||
|
||||
//==================================================================================================
|
||||
|
||||
class ReadBitstream
|
||||
{
|
||||
public:
|
||||
ReadBitstream(const char * filename, size_t maxSize = 10 * 1024 * 1024);
|
||||
~ReadBitstream();
|
||||
bool isOpened() const;
|
||||
bool isDone() const;
|
||||
bool read();
|
||||
private:
|
||||
ReadBitstream(const ReadBitstream &);
|
||||
ReadBitstream &operator=(const ReadBitstream &);
|
||||
public:
|
||||
std::fstream input;
|
||||
mfxBitstream stream;
|
||||
bool drain;
|
||||
};
|
||||
|
||||
//==================================================================================================
|
||||
|
||||
class WriteBitstream
|
||||
{
|
||||
public:
|
||||
WriteBitstream(const char * filename, size_t maxSize);
|
||||
~WriteBitstream();
|
||||
bool write();
|
||||
bool isOpened() const;
|
||||
private:
|
||||
WriteBitstream(const WriteBitstream &);
|
||||
WriteBitstream &operator=(const WriteBitstream &);
|
||||
public:
|
||||
std::fstream output;
|
||||
mfxBitstream stream;
|
||||
};
|
||||
|
||||
//==================================================================================================
|
||||
|
||||
class SurfacePool
|
||||
{
|
||||
public:
|
||||
SurfacePool(ushort width_, ushort height_, ushort count, const mfxFrameInfo & frameInfo, uchar bpp = 12);
|
||||
~SurfacePool();
|
||||
mfxFrameSurface1 *getFreeSurface();
|
||||
|
||||
template <typename T>
|
||||
static SurfacePool * create(T * instance, mfxVideoParam ¶ms)
|
||||
{
|
||||
CV_Assert(instance);
|
||||
mfxFrameAllocRequest request;
|
||||
memset(&request, 0, sizeof(request));
|
||||
mfxStatus res = instance->QueryIOSurf(¶ms, &request);
|
||||
DBG(std::cout << "MFX QueryIOSurf: " << res << std::endl);
|
||||
if (res < MFX_ERR_NONE)
|
||||
return 0;
|
||||
return _create(request, params);
|
||||
}
|
||||
private:
|
||||
static SurfacePool* _create(const mfxFrameAllocRequest& request, const mfxVideoParam& params);
|
||||
private:
|
||||
SurfacePool(const SurfacePool &);
|
||||
SurfacePool &operator=(const SurfacePool &);
|
||||
public:
|
||||
size_t width, height;
|
||||
size_t oneSize;
|
||||
cv::AutoBuffer<uchar, 0> buffers;
|
||||
std::vector<mfxFrameSurface1> surfaces;
|
||||
};
|
||||
|
||||
//==================================================================================================
|
||||
|
||||
class DeviceHandler {
|
||||
public:
|
||||
virtual ~DeviceHandler() {}
|
||||
bool init(MFXVideoSession &session);
|
||||
protected:
|
||||
virtual bool initDeviceSession(MFXVideoSession &session) = 0;
|
||||
};
|
||||
|
||||
|
||||
// TODO: move to core::util?
|
||||
#ifdef CV_CXX11
|
||||
#include <thread>
|
||||
static void sleep_ms(int64 ms)
|
||||
{
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(ms));
|
||||
}
|
||||
#elif defined(__linux__)
|
||||
#include <time.h>
|
||||
static void sleep_ms(int64 ms)
|
||||
{
|
||||
nanosleep(ms * 1000 * 1000);
|
||||
}
|
||||
#elif defined _WIN32
|
||||
static void sleep_ms(int64 ms)
|
||||
{
|
||||
Sleep(ms);
|
||||
}
|
||||
#else
|
||||
#error "Can not detect sleep_ms() implementation"
|
||||
#endif
|
||||
|
||||
|
||||
// Linux specific
#ifdef __linux__

#include <unistd.h>
#include <va/va_drm.h>

// Opens a VA-API display on a DRM device and hands it to the MediaSDK
// session; owns both the display and the device file descriptor.
class VAHandle : public DeviceHandler {
public:
    VAHandle();
    ~VAHandle();
private:
    VAHandle(const VAHandle &);            // non-copyable
    VAHandle &operator=(const VAHandle &); // non-assignable
    bool initDeviceSession(MFXVideoSession &session) CV_OVERRIDE;
private:
    VADisplay display;
    int file; // DRM device fd
};

#endif // __linux__
|
||||
|
||||
// Windows specific
#ifdef _WIN32

#include <Windows.h>

// On Windows no extra device binding is required: MediaSDK picks the
// adapter itself, so this handler is a no-op.
class DXHandle : public DeviceHandler {
public:
    DXHandle() {}
    ~DXHandle() {}
private:
    DXHandle(const DXHandle &);            // non-copyable
    DXHandle &operator=(const DXHandle &); // non-assignable
    bool initDeviceSession(MFXVideoSession &) CV_OVERRIDE { return true; }
};

#endif // _WIN32
|
||||
|
||||
// Factory returning the platform-appropriate handler (VAHandle on Linux,
// DXHandle on Windows); caller owns the result.
DeviceHandler * createDeviceHandler();

#endif // MFXHELPER_H
|
||||
263
3rdparty/opencv-4.5.4/modules/videoio/src/cap_mfx_plugin.cpp
vendored
Normal file
263
3rdparty/opencv-4.5.4/modules/videoio/src/cap_mfx_plugin.cpp
vendored
Normal file
@@ -0,0 +1,263 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html
|
||||
|
||||
#if defined(BUILD_PLUGIN)
|
||||
|
||||
#include <string>
|
||||
#include "cap_mfx_reader.hpp"
|
||||
#include "cap_mfx_writer.hpp"
|
||||
|
||||
#define ABI_VERSION 0
|
||||
#define API_VERSION 0
|
||||
#include "plugin_api.hpp"
|
||||
|
||||
using namespace std;
|
||||
|
||||
namespace cv {
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_capture_open(const char* filename, int, CV_OUT CvPluginCapture* handle)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
*handle = NULL;
|
||||
if (!filename)
|
||||
return CV_ERROR_FAIL;
|
||||
VideoCapture_IntelMFX *cap = 0;
|
||||
try
|
||||
{
|
||||
if (filename)
|
||||
{
|
||||
cap = new VideoCapture_IntelMFX(string(filename));
|
||||
if (cap->isOpened())
|
||||
{
|
||||
*handle = (CvPluginCapture)cap;
|
||||
return CV_ERROR_OK;
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "MFX: Exception is raised: " << e.what());
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "MFX: Unknown C++ exception is raised");
|
||||
}
|
||||
if (cap)
|
||||
delete cap;
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_capture_release(CvPluginCapture handle)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
VideoCapture_IntelMFX* instance = (VideoCapture_IntelMFX*)handle;
|
||||
delete instance;
|
||||
return CV_ERROR_OK;
|
||||
}
|
||||
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_capture_get_prop(CvPluginCapture handle, int prop, CV_OUT double* val)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
if (!val)
|
||||
return CV_ERROR_FAIL;
|
||||
try
|
||||
{
|
||||
VideoCapture_IntelMFX* instance = (VideoCapture_IntelMFX*)handle;
|
||||
*val = instance->getProperty(prop);
|
||||
return CV_ERROR_OK;
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "MFX: Exception is raised: " << e.what());
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "MFX: Unknown C++ exception is raised");
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
}
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_capture_set_prop(CvPluginCapture handle, int prop, double val)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
try
|
||||
{
|
||||
VideoCapture_IntelMFX* instance = (VideoCapture_IntelMFX*)handle;
|
||||
return instance->setProperty(prop, val) ? CV_ERROR_OK : CV_ERROR_FAIL;
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "MFX: Exception is raised: " << e.what());
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "MFX: Unknown C++ exception is raised");
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
}
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_capture_grab(CvPluginCapture handle)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
try
|
||||
{
|
||||
VideoCapture_IntelMFX* instance = (VideoCapture_IntelMFX*)handle;
|
||||
return instance->grabFrame() ? CV_ERROR_OK : CV_ERROR_FAIL;
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "MFX: Exception is raised: " << e.what());
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "MFX: Unknown C++ exception is raised");
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
}
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_capture_retrieve(CvPluginCapture handle, int stream_idx, cv_videoio_retrieve_cb_t callback, void* userdata)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
try
|
||||
{
|
||||
VideoCapture_IntelMFX* instance = (VideoCapture_IntelMFX*)handle;
|
||||
Mat img;
|
||||
if (instance->retrieveFrame(stream_idx, img))
|
||||
return callback(stream_idx, img.data, (int)img.step, img.cols, img.rows, img.channels(), userdata);
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "MFX: Exception is raised: " << e.what());
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "MFX: Unknown C++ exception is raised");
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
}
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_writer_open(const char* filename, int fourcc, double fps, int width, int height, int isColor,
|
||||
CV_OUT CvPluginWriter* handle)
|
||||
{
|
||||
VideoWriter_IntelMFX* wrt = 0;
|
||||
try
|
||||
{
|
||||
wrt = new VideoWriter_IntelMFX(filename, fourcc, fps, Size(width, height), isColor);
|
||||
if(wrt->isOpened())
|
||||
{
|
||||
*handle = (CvPluginWriter)wrt;
|
||||
return CV_ERROR_OK;
|
||||
}
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "MFX: Exception is raised: " << e.what());
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "MFX: Unknown C++ exception is raised");
|
||||
}
|
||||
if (wrt)
|
||||
delete wrt;
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_writer_release(CvPluginWriter handle)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
VideoWriter_IntelMFX* instance = (VideoWriter_IntelMFX*)handle;
|
||||
delete instance;
|
||||
return CV_ERROR_OK;
|
||||
}
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_writer_get_prop(CvPluginWriter /*handle*/, int /*prop*/, CV_OUT double* /*val*/)
|
||||
{
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_writer_set_prop(CvPluginWriter /*handle*/, int /*prop*/, double /*val*/)
|
||||
{
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
|
||||
static
|
||||
CvResult CV_API_CALL cv_writer_write(CvPluginWriter handle, const unsigned char *data, int step, int width, int height, int cn)
|
||||
{
|
||||
if (!handle)
|
||||
return CV_ERROR_FAIL;
|
||||
try
|
||||
{
|
||||
VideoWriter_IntelMFX* instance = (VideoWriter_IntelMFX*)handle;
|
||||
Mat img(Size(width, height), CV_MAKETYPE(CV_8U, cn), const_cast<uchar*>(data), step);
|
||||
instance->write(img);
|
||||
return CV_ERROR_OK;
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "MFX: Exception is raised: " << e.what());
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "MFX: Unknown C++ exception is raised");
|
||||
return CV_ERROR_FAIL;
|
||||
}
|
||||
}
|
||||
|
||||
// Function table handed to the videoio plugin loader. Slot order must match
// the OpenCV_VideoIO_Plugin_API_preview layout exactly.
static const OpenCV_VideoIO_Plugin_API_preview plugin_api =
{
    {
        sizeof(OpenCV_VideoIO_Plugin_API_preview), ABI_VERSION, API_VERSION,
        CV_VERSION_MAJOR, CV_VERSION_MINOR, CV_VERSION_REVISION, CV_VERSION_STATUS,
        "MediaSDK OpenCV Video I/O plugin"
    },
    {
        /*  1 */ CAP_INTEL_MFX,
        /*  2 */ cv_capture_open,
        /*  3 */ cv_capture_release,
        /*  4 */ cv_capture_get_prop,
        /*  5 */ cv_capture_set_prop,
        /*  6 */ cv_capture_grab,
        /*  7 */ cv_capture_retrieve,
        /*  8 */ cv_writer_open,
        /*  9 */ cv_writer_release,
        /* 10 */ cv_writer_get_prop,
        /* 11 */ cv_writer_set_prop,
        /* 12 */ cv_writer_write
    }
};
|
||||
|
||||
} // namespace
|
||||
|
||||
// Plugin handshake: the loader passes the ABI/API versions it was built
// against; we answer with our function table only when binary compatible.
const OpenCV_VideoIO_Plugin_API_preview* opencv_videoio_plugin_init_v0(int requested_abi_version, int requested_api_version, void* /*reserved=NULL*/) CV_NOEXCEPT
{
    if (requested_abi_version != ABI_VERSION)
        return NULL; // incompatible ABI
    if (requested_api_version > API_VERSION)
        return NULL; // caller wants a newer API than we provide
    return &cv::plugin_api;
}
|
||||
|
||||
#endif // BUILD_PLUGIN
|
||||
286
3rdparty/opencv-4.5.4/modules/videoio/src/cap_mfx_reader.cpp
vendored
Normal file
286
3rdparty/opencv-4.5.4/modules/videoio/src/cap_mfx_reader.cpp
vendored
Normal file
@@ -0,0 +1,286 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html
|
||||
|
||||
#include "cap_mfx_reader.hpp"
|
||||
#include "opencv2/core/base.hpp"
|
||||
#include "cap_mfx_common.hpp"
|
||||
#include "opencv2/imgproc/hal/hal.hpp"
|
||||
#include "cap_interface.hpp"
|
||||
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
|
||||
inline bool hasExtension(const String &filename, const String &ext)
|
||||
{
|
||||
if (filename.size() <= ext.size())
|
||||
return false;
|
||||
const size_t diff = filename.size() - ext.size();
|
||||
const size_t found_at = filename.rfind(ext);
|
||||
return found_at == diff;
|
||||
}
|
||||
|
||||
// Maps a raw-bitstream file extension to the MediaSDK codec id;
// returns (mfxU32)-1 for anything this backend cannot decode.
inline mfxU32 determineCodecId(const String &filename)
{
    if (hasExtension(filename, ".h264") || hasExtension(filename, ".264"))
        return MFX_CODEC_AVC;
    if (hasExtension(filename, ".mp2") || hasExtension(filename, ".mpeg2"))
        return MFX_CODEC_MPEG2;
    if (hasExtension(filename, ".265") || hasExtension(filename, ".hevc"))
        return MFX_CODEC_HEVC;
    return (mfxU32)-1;
}
|
||||
|
||||
//==========================================================================
|
||||
|
||||
// Opens a raw elementary stream for HW decoding. The pipeline is built in
// strict order: device/session -> codec plugin -> bitstream -> decoder
// (header parse + Query) -> surface pool -> decoder Init. On any failure
// the constructor returns early with good == false; the destructor cleans
// up whatever was created.
VideoCapture_IntelMFX::VideoCapture_IntelMFX(const cv::String &filename)
    : session(0), plugin(0), deviceHandler(0), bs(0), decoder(0), pool(0), outSurface(0), good(false)
{
    mfxStatus sts = MFX_ERR_NONE;

    // 1. Device and session
    deviceHandler = createDeviceHandler();
    session = new MFXVideoSession();
    if (!deviceHandler->init(*session))
    {
        MSG(cerr << "MFX: Can't initialize session" << endl);
        return;
    }

    // 2. Codec selection (by file extension) and optional plugin
    mfxU32 codecId = determineCodecId(filename);
    if (codecId == (mfxU32)-1)
    {
        MSG(cerr << "MFX: Unsupported extension: " << filename << endl);
        return;
    }
    plugin = Plugin::loadDecoderPlugin(*session, codecId);
    if (plugin && !plugin->isGood())
    {
        MSG(cerr << "MFX: LoadPlugin failed for codec: " << codecId << " (" << filename << ")" << endl);
        return;
    }

    // 3. Initial bitstream read
    bs = new ReadBitstream(filename.c_str());
    if (!bs->read())
    {
        MSG(cerr << "MFX: Failed to read bitstream" << endl);
        return;
    }

    // 4. Decoder creation + stream header parsing
    decoder = new MFXVideoDECODE(*session);
    mfxVideoParam params;
    memset(&params, 0, sizeof(params));
    params.mfx.CodecId = codecId;
    params.IOPattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
    sts = decoder->DecodeHeader(&bs->stream, &params);
    DBG(cout << "DecodeHeader: " << sts << endl << params.mfx << params.mfx.FrameInfo << endl);
    if (sts < MFX_ERR_NONE)
    {
        MSG(cerr << "MFX: Failed to decode stream header: " << sts << endl);
        return;
    }

    // 5. Let the SDK adjust the parameters to HW capabilities
    sts = decoder->Query(&params, &params);
    DBG(cout << "MFX Query: " << sts << endl << params.mfx << params.mfx.FrameInfo);
    CV_Assert(sts >= MFX_ERR_NONE);

    // 6. Surface pool sized by the decoder's own requirements
    pool = SurfacePool::create(decoder, params);
    if (!pool)
    {
        MSG(cerr << "MFX: Failed to create surface pool" << endl);
        return;
    }

    // 7. Decoder initialization
    sts = decoder->Init(&params);
    DBG(cout << "MFX Init: " << sts << endl << params.mfx.FrameInfo);
    if (sts < MFX_ERR_NONE)
    {
        MSG(cerr << "MFX: Failed to init decoder: " << sts << endl);
        return;
    }

    frameSize = Size(params.mfx.FrameInfo.CropW, params.mfx.FrameInfo.CropH);
    good = true;
}
|
||||
|
||||
|
||||
// Tears the pipeline down in reverse dependency order; cleanup() tolerates
// components that were never created (null pointers).
VideoCapture_IntelMFX::~VideoCapture_IntelMFX()
{
    cleanup(plugin);
    cleanup(bs);
    cleanup(decoder);
    cleanup(pool);
    session->Close();
    cleanup(session);
    cleanup(deviceHandler);
}
|
||||
|
||||
double VideoCapture_IntelMFX::getProperty(int prop) const
|
||||
{
|
||||
if (!good)
|
||||
{
|
||||
MSG(cerr << "MFX: can not call getProperty(), backend has not been initialized" << endl);
|
||||
return 0;
|
||||
}
|
||||
switch (prop)
|
||||
{
|
||||
case CAP_PROP_FRAME_WIDTH:
|
||||
return frameSize.width;
|
||||
case CAP_PROP_FRAME_HEIGHT:
|
||||
return frameSize.height;
|
||||
default:
|
||||
MSG(cerr << "MFX: unsupported property" << endl);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
bool VideoCapture_IntelMFX::setProperty(int, double)
|
||||
{
|
||||
MSG(cerr << "MFX: setProperty() is not implemented" << endl);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool VideoCapture_IntelMFX::grabFrame()
|
||||
{
|
||||
mfxStatus res;
|
||||
mfxFrameSurface1 *workSurface = 0;
|
||||
mfxSyncPoint sync;
|
||||
|
||||
workSurface = pool->getFreeSurface();
|
||||
|
||||
while (true)
|
||||
{
|
||||
if (!workSurface)
|
||||
{
|
||||
// not enough surfaces
|
||||
MSG(cerr << "MFX: Failed to get free surface" << endl);
|
||||
return false;
|
||||
}
|
||||
|
||||
outSurface = 0;
|
||||
res = decoder->DecodeFrameAsync(bs->drain ? 0 : &bs->stream, workSurface, (mfxFrameSurface1**)&outSurface, &sync);
|
||||
if (res == MFX_ERR_NONE)
|
||||
{
|
||||
res = session->SyncOperation(sync, 1000); // 1 sec, TODO: provide interface to modify timeout
|
||||
if (res == MFX_ERR_NONE)
|
||||
{
|
||||
// ready to retrieve
|
||||
DBG(cout << "Frame ready to retrieve" << endl);
|
||||
return true;
|
||||
}
|
||||
else
|
||||
{
|
||||
MSG(cerr << "MFX: Sync error: " << res << endl);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
else if (res == MFX_ERR_MORE_DATA)
|
||||
{
|
||||
if (bs->isDone())
|
||||
{
|
||||
if (bs->drain)
|
||||
{
|
||||
// finish
|
||||
DBG(cout << "Drain finished" << endl);
|
||||
return false;
|
||||
}
|
||||
else
|
||||
{
|
||||
DBG(cout << "Bitstream finished - Drain started" << endl);
|
||||
bs->drain = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
bool read_res = bs->read();
|
||||
if (!read_res)
|
||||
{
|
||||
// failed to read
|
||||
MSG(cerr << "MFX: Bitstream read failure" << endl);
|
||||
return false;
|
||||
}
|
||||
else
|
||||
{
|
||||
DBG(cout << "Bitstream read success" << endl);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (res == MFX_ERR_MORE_SURFACE)
|
||||
{
|
||||
DBG(cout << "Getting another surface" << endl);
|
||||
workSurface = pool->getFreeSurface();
|
||||
continue;
|
||||
}
|
||||
else if (res == MFX_WRN_DEVICE_BUSY)
|
||||
{
|
||||
DBG(cout << "Waiting for device" << endl);
|
||||
sleep_ms(1000);
|
||||
continue;
|
||||
}
|
||||
else if (res == MFX_WRN_VIDEO_PARAM_CHANGED)
|
||||
{
|
||||
DBG(cout << "Video param changed" << endl);
|
||||
continue;
|
||||
}
|
||||
else
|
||||
{
|
||||
MSG(cerr << "MFX: Bad status: " << res << endl);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool VideoCapture_IntelMFX::retrieveFrame(int, OutputArray out)
|
||||
{
|
||||
if (!outSurface)
|
||||
{
|
||||
MSG(cerr << "MFX: No frame ready to retrieve" << endl);
|
||||
return false;
|
||||
}
|
||||
mfxFrameSurface1 * s = (mfxFrameSurface1*)outSurface;
|
||||
mfxFrameInfo &info = s->Info;
|
||||
mfxFrameData &data = s->Data;
|
||||
|
||||
const int cols = info.CropW;
|
||||
const int rows = info.CropH;
|
||||
|
||||
out.create(rows, cols, CV_8UC3);
|
||||
Mat res = out.getMat();
|
||||
|
||||
hal::cvtTwoPlaneYUVtoBGR(data.Y, data.UV, data.Pitch, res.data, res.step, cols, rows, 3, false, 0);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool VideoCapture_IntelMFX::isOpened() const
|
||||
{
|
||||
return good;
|
||||
}
|
||||
|
||||
int VideoCapture_IntelMFX::getCaptureDomain()
|
||||
{
|
||||
return CAP_INTEL_MFX;
|
||||
}
|
||||
|
||||
//==================================================================================================
|
||||
|
||||
// Factory used by the videoio backend registry; the caller checks
// isOpened() on the returned capture.
cv::Ptr<IVideoCapture> cv::create_MFX_capture(const std::string &filename)
{
    return cv::makePtr<VideoCapture_IntelMFX>(filename);
}
|
||||
42
3rdparty/opencv-4.5.4/modules/videoio/src/cap_mfx_reader.hpp
vendored
Normal file
42
3rdparty/opencv-4.5.4/modules/videoio/src/cap_mfx_reader.hpp
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html
|
||||
|
||||
#ifndef CAP_MFX_HPP
|
||||
#define CAP_MFX_HPP
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
|
||||
class MFXVideoSession;
|
||||
class Plugin;
|
||||
class DeviceHandler;
|
||||
class ReadBitstream;
|
||||
class SurfacePool;
|
||||
class MFXVideoDECODE;
|
||||
|
||||
class VideoCapture_IntelMFX : public cv::IVideoCapture
|
||||
{
|
||||
public:
|
||||
VideoCapture_IntelMFX(const cv::String &filename);
|
||||
~VideoCapture_IntelMFX();
|
||||
double getProperty(int) const CV_OVERRIDE;
|
||||
bool setProperty(int, double) CV_OVERRIDE;
|
||||
bool grabFrame() CV_OVERRIDE;
|
||||
bool retrieveFrame(int, cv::OutputArray out) CV_OVERRIDE;
|
||||
bool isOpened() const CV_OVERRIDE;
|
||||
int getCaptureDomain() CV_OVERRIDE;
|
||||
private:
|
||||
MFXVideoSession *session;
|
||||
Plugin *plugin;
|
||||
DeviceHandler *deviceHandler;
|
||||
ReadBitstream *bs;
|
||||
MFXVideoDECODE *decoder;
|
||||
SurfacePool *pool;
|
||||
void *outSurface;
|
||||
cv::Size frameSize;
|
||||
bool good;
|
||||
};
|
||||
|
||||
|
||||
#endif
|
||||
277
3rdparty/opencv-4.5.4/modules/videoio/src/cap_mfx_writer.cpp
vendored
Normal file
277
3rdparty/opencv-4.5.4/modules/videoio/src/cap_mfx_writer.cpp
vendored
Normal file
@@ -0,0 +1,277 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html
|
||||
|
||||
#include "cap_mfx_writer.hpp"
|
||||
#include "opencv2/core/base.hpp"
|
||||
#include "cap_mfx_common.hpp"
|
||||
#include "opencv2/imgproc/hal/hal.hpp"
|
||||
#include "cap_interface.hpp"
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
// Bitrate divisor used in the target-bitrate heuristic; overridable via the
// OPENCV_VIDEOIO_MFX_BITRATE_DIVISOR environment option (default 300).
// Read once and cached for the process lifetime.
static size_t getBitrateDivisor()
{
    static const size_t divisor = utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_BITRATE_DIVISOR", 300);
    return divisor;
}
|
||||
|
||||
// Encoder sync timeout in milliseconds; the OPENCV_VIDEOIO_MFX_WRITER_TIMEOUT
// option is specified in seconds (default 1) and read once.
static mfxU32 getWriterTimeoutMS()
{
    static const size_t seconds = utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_WRITER_TIMEOUT", 1);
    return saturate_cast<mfxU32>(seconds * 1000); // convert from seconds
}
|
||||
|
||||
// Maps a FourCC code to a MediaSDK codec id; returns (mfxU32)-1 when the
// FourCC is not handled by this backend.
inline mfxU32 codecIdByFourCC(int fourcc)
{
    const int CC_MPG2 = FourCC('M', 'P', 'G', '2').vali32;
    const int CC_H264 = FourCC('H', '2', '6', '4').vali32;
    const int CC_X264 = FourCC('X', '2', '6', '4').vali32;
    const int CC_AVC = FourCC('A', 'V', 'C', ' ').vali32;
    const int CC_H265 = FourCC('H', '2', '6', '5').vali32;
    const int CC_HEVC = FourCC('H', 'E', 'V', 'C').vali32;

    if (fourcc == CC_X264 || fourcc == CC_H264 || fourcc == CC_AVC)
        return MFX_CODEC_AVC;
    if (fourcc == CC_H265 || fourcc == CC_HEVC)
        return MFX_CODEC_HEVC;
    if (fourcc == CC_MPG2)
        return MFX_CODEC_MPEG2;
    return (mfxU32)-1;
}
|
||||
|
||||
// Builds the HW encode pipeline: validate inputs -> device/session ->
// optional codec plugin -> encoder parameters (NV12 in, VBR) -> Query ->
// surface pool -> Init -> output bitstream. On any failure the constructor
// returns early with good == false; the destructor releases partial state.
VideoWriter_IntelMFX::VideoWriter_IntelMFX(const String &filename, int _fourcc, double fps, Size frameSize_, bool)
    : session(0), plugin(0), deviceHandler(0), bs(0), encoder(0), pool(0), outSurface(NULL), frameSize(frameSize_), good(false)
{
    mfxStatus sts = MFX_ERR_NONE;

    // NV12 requires even dimensions.
    if (frameSize.width % 2 || frameSize.height % 2)
    {
        MSG(cerr << "MFX: Invalid frame size passed to encoder" << endl);
        return;
    }

    if (fps <= 0)
    {
        MSG(cerr << "MFX: Invalid FPS passed to encoder" << endl);
        return;
    }

    // Device and session
    deviceHandler = createDeviceHandler();
    session = new MFXVideoSession();
    if (!deviceHandler->init(*session))
    {
        MSG(cerr << "MFX: Can't initialize session" << endl);
        return;
    }

    // Codec selection (by FourCC) and optional plugin
    mfxU32 codecId = codecIdByFourCC(_fourcc);
    if (codecId == (mfxU32)-1)
    {
        MSG(cerr << "MFX: Unsupported FourCC: " << FourCC(_fourcc) << endl);
        return;
    }
    plugin = Plugin::loadEncoderPlugin(*session, codecId);
    if (plugin && !plugin->isGood())
    {
        MSG(cerr << "MFX: LoadPlugin failed for codec: " << codecId << " (" << FourCC(_fourcc) << ")" << endl);
        return;
    }

    // Encoder parameters
    encoder = new MFXVideoENCODE(*session);
    mfxVideoParam params;
    memset(&params, 0, sizeof(params));
    params.mfx.CodecId = codecId;
    params.mfx.TargetUsage = MFX_TARGETUSAGE_BALANCED;
    params.mfx.TargetKbps = saturate_cast<mfxU16>((frameSize.area() * fps) / (42.6666 * getBitrateDivisor())); // TODO: set in options
    params.mfx.RateControlMethod = MFX_RATECONTROL_VBR;
    // Frame rate as a rational (N/D) with millihertz precision.
    params.mfx.FrameInfo.FrameRateExtN = cvRound(fps * 1000);
    params.mfx.FrameInfo.FrameRateExtD = 1000;
    params.mfx.FrameInfo.FourCC = MFX_FOURCC_NV12;
    params.mfx.FrameInfo.ChromaFormat = MFX_CHROMAFORMAT_YUV420;
    params.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;
    params.mfx.FrameInfo.CropX = 0;
    params.mfx.FrameInfo.CropY = 0;
    params.mfx.FrameInfo.CropW = (mfxU16)frameSize.width;
    params.mfx.FrameInfo.CropH = (mfxU16)frameSize.height;
    // Surface dimensions must be 32-aligned; the crop rect keeps the real size.
    params.mfx.FrameInfo.Width = (mfxU16)alignSize(frameSize.width, 32);
    params.mfx.FrameInfo.Height = (mfxU16)alignSize(frameSize.height, 32);
    params.IOPattern = MFX_IOPATTERN_IN_SYSTEM_MEMORY;
    sts = encoder->Query(&params, &params);
    DBG(cout << "MFX Query: " << sts << endl << params.mfx << params.mfx.FrameInfo);
    if (sts < MFX_ERR_NONE)
    {
        MSG(cerr << "MFX: Query failed: " << sts << endl);
        return;
    }

    // Surface pool sized by the encoder's own requirements
    pool = SurfacePool::create(encoder, params);
    if (!pool)
    {
        MSG(cerr << "MFX: Failed to create surface pool" << endl);
        return;
    }

    // Encoder initialization
    sts = encoder->Init(&params);
    DBG(cout << "MFX Init: " << sts << endl << params.mfx.FrameInfo);
    if (sts < MFX_ERR_NONE)
    {
        MSG(cerr << "MFX: Failed to init encoder: " << sts << endl);
        return;
    }

    // Output bitstream, sized from the encoder's own buffer estimate
    {
        mfxVideoParam par;
        memset(&par, 0, sizeof(par));
        sts = encoder->GetVideoParam(&par);
        DBG(cout << "MFX GetVideoParam: " << sts << endl << "requested " << par.mfx.BufferSizeInKB << " kB" << endl);
        CV_Assert(sts >= MFX_ERR_NONE);
        bs = new WriteBitstream(filename.c_str(), par.mfx.BufferSizeInKB * 1024 * 2);
        if (!bs->isOpened())
        {
            MSG(cerr << "MFX: Failed to open output file: " << filename << endl);
            return;
        }
    }

    good = true;
}
|
||||
|
||||
// Drains buffered frames out of the encoder (write_one with an empty Mat
// flushes one frame per call), then tears the pipeline down in reverse
// dependency order.
VideoWriter_IntelMFX::~VideoWriter_IntelMFX()
{
    if (isOpened())
    {
        DBG(cout << "====== Drain bitstream..." << endl);
        Mat dummy;
        while (write_one(dummy)) {}
        DBG(cout << "====== Drain Finished" << endl);
    }
    cleanup(bs);
    cleanup(pool);
    cleanup(encoder);
    cleanup(plugin);
    cleanup(session);
    cleanup(deviceHandler);
}
|
||||
|
||||
double VideoWriter_IntelMFX::getProperty(int) const
|
||||
{
|
||||
MSG(cerr << "MFX: getProperty() is not implemented" << endl);
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool VideoWriter_IntelMFX::setProperty(int, double)
|
||||
{
|
||||
MSG(cerr << "MFX: setProperty() is not implemented" << endl);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool VideoWriter_IntelMFX::isOpened() const
|
||||
{
|
||||
return good;
|
||||
}
|
||||
|
||||
// Public write entry point; encode failures are reported via stderr only
// (the IVideoWriter interface returns void).
void VideoWriter_IntelMFX::write(cv::InputArray input)
{
    write_one(input);
}
|
||||
|
||||
// Encodes a single BGR frame. An empty 'bgr' drains one buffered frame from
// the encoder instead (used by the destructor). Returns true when a frame
// was written to the output bitstream.
bool VideoWriter_IntelMFX::write_one(cv::InputArray bgr)
{
    mfxStatus sts;
    mfxSyncPoint sync;
    mfxFrameSurface1 *surface = 0;

    // Validate: must be a 2-D CV_8UC3 image of exactly the configured size.
    if (!bgr.empty() && (bgr.dims() != 2 || bgr.type() != CV_8UC3 || bgr.size() != frameSize))
    {
        MSG(cerr << "MFX: invalid frame passed to encoder: "
            << "dims/depth/cn=" << bgr.dims() << "/" << bgr.depth() << "/" << bgr.channels()
            << ", size=" << bgr.size() << endl);
        return false;
    }
    if (!bgr.empty())
    {
        surface = pool->getFreeSurface();
        if (!surface)
        {
            // Pool exhausted — encoder holds every surface.
            MSG(cerr << "MFX: Failed to get free surface" << endl);
            return false;
        }
        // Convert BGR input into the surface's NV12 planes.
        Mat src = bgr.getMat();
        hal::cvtBGRtoTwoPlaneYUV(src.data, src.step,
                                 surface->Data.Y, surface->Data.UV, surface->Data.Pitch,
                                 surface->Info.CropW, surface->Info.CropH,
                                 3, false, 1);
    }

    // A null surface tells EncodeFrameAsync to drain buffered frames.
    while (true)
    {
        outSurface = 0;
        DBG(cout << "Calling with surface: " << surface << endl);
        sts = encoder->EncodeFrameAsync(NULL, surface, &bs->stream, &sync);
        if (sts == MFX_ERR_NONE)
        {
            sts = session->SyncOperation(sync, getWriterTimeoutMS()); // TODO: provide interface to modify timeout
            if (sts != MFX_ERR_NONE)
            {
                MSG(cerr << "MFX: Sync error: " << sts << endl);
                return false;
            }
            if (!bs->write())
            {
                MSG(cerr << "MFX: Failed to write bitstream" << endl);
                return false;
            }
            DBG(cout << "Write bitstream" << endl);
            return true;
        }
        else if (sts == MFX_ERR_MORE_DATA)
        {
            // Encoder buffered the frame (or drain is complete).
            DBG(cout << "ERR_MORE_DATA" << endl);
            return false;
        }
        else if (sts == MFX_WRN_DEVICE_BUSY)
        {
            DBG(cout << "Waiting for device" << endl);
            sleep_ms(1000);
            continue;
        }
        else
        {
            MSG(cerr << "MFX: Bad status: " << sts << endl);
            return false;
        }
    }
}
|
||||
|
||||
// Factory used by the videoio backend registry. Returns an empty Ptr when
// the FourCC is unsupported or the writer failed to open.
Ptr<IVideoWriter> cv::create_MFX_writer(const std::string& filename, int _fourcc, double fps,
                                        const Size& frameSize, const VideoWriterParameters& params)
{
    // FIX: codecIdByFourCC() returns mfxU32 with (mfxU32)-1 as its error
    // value; the previous guard 'codecIdByFourCC(_fourcc) > 0' was always
    // true under unsigned comparison, so unsupported FourCC values were
    // never rejected here. Compare against the sentinel explicitly.
    if (codecIdByFourCC(_fourcc) != (mfxU32)-1)
    {
        const bool isColor = params.get(VIDEOWRITER_PROP_IS_COLOR, true);
        Ptr<VideoWriter_IntelMFX> a = makePtr<VideoWriter_IntelMFX>(filename, _fourcc, fps, frameSize, isColor);
        if (a->isOpened())
            return a;
    }
    return Ptr<VideoWriter_IntelMFX>();
}
|
||||
47
3rdparty/opencv-4.5.4/modules/videoio/src/cap_mfx_writer.hpp
vendored
Normal file
47
3rdparty/opencv-4.5.4/modules/videoio/src/cap_mfx_writer.hpp
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html
|
||||
|
||||
#ifndef CAP_MFX_WRITER_HPP
|
||||
#define CAP_MFX_WRITER_HPP
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
class MFXVideoSession;
|
||||
class Plugin;
|
||||
class DeviceHandler;
|
||||
class WriteBitstream;
|
||||
class SurfacePool;
|
||||
class MFXVideoDECODE;
|
||||
class MFXVideoENCODE;
|
||||
|
||||
class VideoWriter_IntelMFX : public cv::IVideoWriter
|
||||
{
|
||||
public:
|
||||
VideoWriter_IntelMFX(const cv::String &filename, int _fourcc, double fps, cv::Size frameSize, bool isColor);
|
||||
~VideoWriter_IntelMFX() CV_OVERRIDE;
|
||||
double getProperty(int) const CV_OVERRIDE;
|
||||
bool setProperty(int, double) CV_OVERRIDE;
|
||||
bool isOpened() const CV_OVERRIDE;
|
||||
void write(cv::InputArray input) CV_OVERRIDE;
|
||||
int getCaptureDomain() const CV_OVERRIDE { return cv::CAP_INTEL_MFX; }
|
||||
protected:
|
||||
bool write_one(cv::InputArray bgr);
|
||||
|
||||
private:
|
||||
VideoWriter_IntelMFX(const VideoWriter_IntelMFX &);
|
||||
VideoWriter_IntelMFX & operator=(const VideoWriter_IntelMFX &);
|
||||
|
||||
private:
|
||||
MFXVideoSession *session;
|
||||
Plugin *plugin;
|
||||
DeviceHandler *deviceHandler;
|
||||
WriteBitstream *bs;
|
||||
MFXVideoENCODE *encoder;
|
||||
SurfacePool *pool;
|
||||
void *outSurface;
|
||||
cv::Size frameSize;
|
||||
bool good;
|
||||
};
|
||||
|
||||
#endif // CAP_MFX_WRITER_HPP
|
||||
234
3rdparty/opencv-4.5.4/modules/videoio/src/cap_mjpeg_decoder.cpp
vendored
Normal file
234
3rdparty/opencv-4.5.4/modules/videoio/src/cap_mjpeg_decoder.cpp
vendored
Normal file
@@ -0,0 +1,234 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2015, OpenCV Foundation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
#include "opencv2/videoio/container_avi.private.hpp"
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
class MotionJpegCapture: public IVideoCapture
|
||||
{
|
||||
public:
|
||||
virtual ~MotionJpegCapture() CV_OVERRIDE;
|
||||
virtual double getProperty(int) const CV_OVERRIDE;
|
||||
virtual bool setProperty(int, double) CV_OVERRIDE;
|
||||
virtual bool grabFrame() CV_OVERRIDE;
|
||||
virtual bool retrieveFrame(int, OutputArray) CV_OVERRIDE;
|
||||
virtual bool isOpened() const CV_OVERRIDE;
|
||||
virtual int getCaptureDomain() CV_OVERRIDE { return CAP_OPENCV_MJPEG; }
|
||||
MotionJpegCapture(const String&);
|
||||
|
||||
bool open(const String&);
|
||||
void close();
|
||||
protected:
|
||||
|
||||
inline uint64_t getFramePos() const;
|
||||
|
||||
Ptr<AVIReadContainer> m_avi_container;
|
||||
bool m_is_first_frame;
|
||||
frame_list m_mjpeg_frames;
|
||||
|
||||
frame_iterator m_frame_iterator;
|
||||
Mat m_current_frame;
|
||||
|
||||
//frame width/height and fps could be different for
|
||||
//each frame/stream. At the moment we suppose that they
|
||||
//stays the same within single avi file.
|
||||
uint32_t m_frame_width;
|
||||
uint32_t m_frame_height;
|
||||
double m_fps;
|
||||
};
|
||||
|
||||
uint64_t MotionJpegCapture::getFramePos() const
|
||||
{
|
||||
if(m_is_first_frame)
|
||||
return 0;
|
||||
|
||||
if(m_frame_iterator == m_mjpeg_frames.end())
|
||||
return m_mjpeg_frames.size();
|
||||
|
||||
return m_frame_iterator - m_mjpeg_frames.begin() + 1;
|
||||
}
|
||||
|
||||
bool MotionJpegCapture::setProperty(int property, double value)
|
||||
{
|
||||
if(property == CAP_PROP_POS_FRAMES)
|
||||
{
|
||||
if(int(value) == 0)
|
||||
{
|
||||
m_is_first_frame = true;
|
||||
m_frame_iterator = m_mjpeg_frames.end();
|
||||
return true;
|
||||
}
|
||||
else if(m_mjpeg_frames.size() > value)
|
||||
{
|
||||
m_frame_iterator = m_mjpeg_frames.begin() + int(value - 1);
|
||||
m_is_first_frame = false;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
double MotionJpegCapture::getProperty(int property) const
|
||||
{
|
||||
switch(property)
|
||||
{
|
||||
case CAP_PROP_POS_FRAMES:
|
||||
return (double)getFramePos();
|
||||
case CAP_PROP_POS_MSEC:
|
||||
return (double)getFramePos() * (1000. / m_fps);
|
||||
case CAP_PROP_POS_AVI_RATIO:
|
||||
return double(getFramePos())/m_mjpeg_frames.size();
|
||||
case CAP_PROP_FRAME_WIDTH:
|
||||
return (double)m_frame_width;
|
||||
case CAP_PROP_FRAME_HEIGHT:
|
||||
return (double)m_frame_height;
|
||||
case CAP_PROP_FPS:
|
||||
return m_fps;
|
||||
case CAP_PROP_FOURCC:
|
||||
return (double)CV_FOURCC('M','J','P','G');
|
||||
case CAP_PROP_FRAME_COUNT:
|
||||
return (double)m_mjpeg_frames.size();
|
||||
case CAP_PROP_FORMAT:
|
||||
return 0;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
bool MotionJpegCapture::grabFrame()
|
||||
{
|
||||
if(isOpened())
|
||||
{
|
||||
if(m_is_first_frame)
|
||||
{
|
||||
m_is_first_frame = false;
|
||||
m_frame_iterator = m_mjpeg_frames.begin();
|
||||
}
|
||||
else
|
||||
{
|
||||
if (m_frame_iterator == m_mjpeg_frames.end())
|
||||
return false;
|
||||
|
||||
++m_frame_iterator;
|
||||
}
|
||||
}
|
||||
|
||||
return m_frame_iterator != m_mjpeg_frames.end();
|
||||
}
|
||||
|
||||
bool MotionJpegCapture::retrieveFrame(int, OutputArray output_frame)
|
||||
{
|
||||
if(m_frame_iterator != m_mjpeg_frames.end())
|
||||
{
|
||||
std::vector<char> data = m_avi_container->readFrame(m_frame_iterator);
|
||||
|
||||
if(data.size())
|
||||
{
|
||||
m_current_frame = imdecode(data, IMREAD_ANYDEPTH | IMREAD_COLOR | IMREAD_IGNORE_ORIENTATION);
|
||||
}
|
||||
|
||||
m_current_frame.copyTo(output_frame);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
MotionJpegCapture::~MotionJpegCapture()
|
||||
{
|
||||
close();
|
||||
}
|
||||
|
||||
MotionJpegCapture::MotionJpegCapture(const String& filename)
|
||||
{
|
||||
m_avi_container = makePtr<AVIReadContainer>();
|
||||
m_avi_container->initStream(filename);
|
||||
open(filename);
|
||||
}
|
||||
|
||||
bool MotionJpegCapture::isOpened() const
|
||||
{
|
||||
return m_mjpeg_frames.size() > 0;
|
||||
}
|
||||
|
||||
void MotionJpegCapture::close()
|
||||
{
|
||||
m_avi_container->close();
|
||||
m_frame_iterator = m_mjpeg_frames.end();
|
||||
}
|
||||
|
||||
bool MotionJpegCapture::open(const String& filename)
|
||||
{
|
||||
close();
|
||||
|
||||
m_avi_container = makePtr<AVIReadContainer>();
|
||||
m_avi_container->initStream(filename);
|
||||
|
||||
m_frame_iterator = m_mjpeg_frames.end();
|
||||
m_is_first_frame = true;
|
||||
|
||||
if(!m_avi_container->parseRiff(m_mjpeg_frames))
|
||||
{
|
||||
close();
|
||||
} else
|
||||
{
|
||||
m_frame_width = m_avi_container->getWidth();
|
||||
m_frame_height = m_avi_container->getHeight();
|
||||
m_fps = m_avi_container->getFps();
|
||||
}
|
||||
|
||||
return isOpened();
|
||||
}
|
||||
|
||||
Ptr<IVideoCapture> createMotionJpegCapture(const String& filename)
|
||||
{
|
||||
Ptr<MotionJpegCapture> mjdecoder(new MotionJpegCapture(filename));
|
||||
if( mjdecoder->isOpened() )
|
||||
return mjdecoder;
|
||||
return Ptr<MotionJpegCapture>();
|
||||
}
|
||||
|
||||
}
|
||||
1555
3rdparty/opencv-4.5.4/modules/videoio/src/cap_mjpeg_encoder.cpp
vendored
Normal file
1555
3rdparty/opencv-4.5.4/modules/videoio/src/cap_mjpeg_encoder.cpp
vendored
Normal file
File diff suppressed because it is too large
Load Diff
2231
3rdparty/opencv-4.5.4/modules/videoio/src/cap_msmf.cpp
vendored
Normal file
2231
3rdparty/opencv-4.5.4/modules/videoio/src/cap_msmf.cpp
vendored
Normal file
File diff suppressed because it is too large
Load Diff
0
3rdparty/opencv-4.5.4/modules/videoio/src/cap_msmf.hpp
vendored
Normal file
0
3rdparty/opencv-4.5.4/modules/videoio/src/cap_msmf.hpp
vendored
Normal file
1133
3rdparty/opencv-4.5.4/modules/videoio/src/cap_openni2.cpp
vendored
Normal file
1133
3rdparty/opencv-4.5.4/modules/videoio/src/cap_openni2.cpp
vendored
Normal file
File diff suppressed because it is too large
Load Diff
613
3rdparty/opencv-4.5.4/modules/videoio/src/cap_pvapi.cpp
vendored
Normal file
613
3rdparty/opencv-4.5.4/modules/videoio/src/cap_pvapi.cpp
vendored
Normal file
@@ -0,0 +1,613 @@
|
||||
////////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// Intel License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000, Intel Corporation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//
|
||||
|
||||
//
|
||||
// The code has been contributed by Justin G. Eskesen on 2010 Jan
|
||||
//
|
||||
|
||||
#include "precomp.hpp"
|
||||
#include "cap_interface.hpp"
|
||||
|
||||
#ifdef HAVE_PVAPI
|
||||
#if !defined _WIN32 && !defined _LINUX
|
||||
#define _LINUX
|
||||
#endif
|
||||
|
||||
#if defined(_x64) || defined (__x86_64) || defined (_M_X64)
|
||||
#define _x64 1
|
||||
#elif defined(_x86) || defined(__i386) || defined (_M_IX86)
|
||||
#define _x86 1
|
||||
#endif
|
||||
|
||||
#include <PvApi.h>
|
||||
#ifdef _WIN32
|
||||
# include <io.h>
|
||||
#else
|
||||
# include <time.h>
|
||||
# include <unistd.h>
|
||||
#endif
|
||||
|
||||
//#include <arpa/inet.h>
|
||||
|
||||
#define MAX_CAMERAS 10
|
||||
|
||||
/********************* Capturing video from camera via PvAPI *********************/
|
||||
|
||||
class CvCaptureCAM_PvAPI : public CvCapture
|
||||
{
|
||||
public:
|
||||
CvCaptureCAM_PvAPI();
|
||||
virtual ~CvCaptureCAM_PvAPI()
|
||||
{
|
||||
close();
|
||||
}
|
||||
|
||||
virtual bool open( int index );
|
||||
virtual void close();
|
||||
virtual double getProperty(int) const CV_OVERRIDE;
|
||||
virtual bool setProperty(int, double) CV_OVERRIDE;
|
||||
virtual bool grabFrame() CV_OVERRIDE;
|
||||
virtual IplImage* retrieveFrame(int) CV_OVERRIDE;
|
||||
virtual int getCaptureDomain() CV_OVERRIDE
|
||||
{
|
||||
return CV_CAP_PVAPI;
|
||||
}
|
||||
|
||||
protected:
|
||||
#ifndef _WIN32
|
||||
virtual void Sleep(unsigned int time);
|
||||
#endif
|
||||
|
||||
void stopCapture();
|
||||
bool startCapture();
|
||||
bool resizeCaptureFrame (int frameWidth, int frameHeight);
|
||||
|
||||
typedef struct
|
||||
{
|
||||
unsigned long UID;
|
||||
tPvHandle Handle;
|
||||
tPvFrame Frame;
|
||||
} tCamera;
|
||||
|
||||
IplImage *frame;
|
||||
tCamera Camera;
|
||||
tPvErr Errcode;
|
||||
};
|
||||
|
||||
|
||||
CvCaptureCAM_PvAPI::CvCaptureCAM_PvAPI()
|
||||
{
|
||||
frame = NULL;
|
||||
memset(&this->Camera, 0, sizeof(this->Camera));
|
||||
}
|
||||
|
||||
#ifndef _WIN32
|
||||
void CvCaptureCAM_PvAPI::Sleep(unsigned int time)
|
||||
{
|
||||
struct timespec t,r;
|
||||
|
||||
t.tv_sec = time / 1000;
|
||||
t.tv_nsec = (time % 1000) * 1000000;
|
||||
|
||||
while(nanosleep(&t,&r)==-1)
|
||||
t = r;
|
||||
}
|
||||
#endif
|
||||
|
||||
void CvCaptureCAM_PvAPI::close()
|
||||
{
|
||||
// Stop the acquisition & free the camera
|
||||
stopCapture();
|
||||
PvCameraClose(Camera.Handle);
|
||||
PvUnInitialize();
|
||||
}
|
||||
|
||||
// Initialize camera input
|
||||
bool CvCaptureCAM_PvAPI::open( int index )
|
||||
{
|
||||
tPvCameraInfo cameraList[MAX_CAMERAS];
|
||||
|
||||
tPvCameraInfo camInfo;
|
||||
tPvIpSettings ipSettings;
|
||||
|
||||
|
||||
if (PvInitialize()) {
|
||||
}
|
||||
//return false;
|
||||
|
||||
Sleep(1000);
|
||||
|
||||
//close();
|
||||
|
||||
int numCameras=PvCameraList(cameraList, MAX_CAMERAS, NULL);
|
||||
|
||||
if (numCameras <= 0 || index >= numCameras)
|
||||
return false;
|
||||
|
||||
Camera.UID = cameraList[index].UniqueId;
|
||||
|
||||
if (!PvCameraInfo(Camera.UID,&camInfo) && !PvCameraIpSettingsGet(Camera.UID,&ipSettings))
|
||||
{
|
||||
/*
|
||||
struct in_addr addr;
|
||||
addr.s_addr = ipSettings.CurrentIpAddress;
|
||||
printf("Current address:\t%s\n",inet_ntoa(addr));
|
||||
addr.s_addr = ipSettings.CurrentIpSubnet;
|
||||
printf("Current subnet:\t\t%s\n",inet_ntoa(addr));
|
||||
addr.s_addr = ipSettings.CurrentIpGateway;
|
||||
printf("Current gateway:\t%s\n",inet_ntoa(addr));
|
||||
*/
|
||||
}
|
||||
else
|
||||
{
|
||||
fprintf(stderr,"ERROR: could not retrieve camera IP settings.\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
if (PvCameraOpen(Camera.UID, ePvAccessMaster, &(Camera.Handle))==ePvErrSuccess)
|
||||
{
|
||||
tPvUint32 frameWidth, frameHeight;
|
||||
unsigned long maxSize;
|
||||
|
||||
PvAttrUint32Get(Camera.Handle, "Width", &frameWidth);
|
||||
PvAttrUint32Get(Camera.Handle, "Height", &frameHeight);
|
||||
|
||||
// Determine the maximum packet size supported by the system (ethernet adapter)
|
||||
// and then configure the camera to use this value. If the system's NIC only supports
|
||||
// an MTU of 1500 or lower, this will automatically configure an MTU of 1500.
|
||||
// 8228 is the optimal size described by the API in order to enable jumbo frames
|
||||
|
||||
maxSize = 8228;
|
||||
//PvAttrUint32Get(Camera.Handle,"PacketSize",&maxSize);
|
||||
if (PvCaptureAdjustPacketSize(Camera.Handle,maxSize)!=ePvErrSuccess)
|
||||
return false;
|
||||
|
||||
resizeCaptureFrame(frameWidth, frameHeight);
|
||||
|
||||
return startCapture();
|
||||
|
||||
}
|
||||
fprintf(stderr,"Error cannot open camera\n");
|
||||
return false;
|
||||
|
||||
}
|
||||
|
||||
bool CvCaptureCAM_PvAPI::grabFrame()
|
||||
{
|
||||
//if(Camera.Frame.Status != ePvErrUnplugged && Camera.Frame.Status != ePvErrCancelled)
|
||||
return PvCaptureQueueFrame(Camera.Handle, &(Camera.Frame), NULL) == ePvErrSuccess;
|
||||
}
|
||||
|
||||
|
||||
IplImage* CvCaptureCAM_PvAPI::retrieveFrame(int)
|
||||
{
|
||||
if (PvCaptureWaitForFrameDone(Camera.Handle, &(Camera.Frame), 1000) == ePvErrSuccess)
|
||||
{
|
||||
return frame;
|
||||
}
|
||||
else return NULL;
|
||||
}
|
||||
|
||||
double CvCaptureCAM_PvAPI::getProperty( int property_id ) const
|
||||
{
|
||||
tPvUint32 nTemp;
|
||||
|
||||
switch ( property_id )
|
||||
{
|
||||
case CV_CAP_PROP_FRAME_WIDTH:
|
||||
PvAttrUint32Get(Camera.Handle, "Width", &nTemp);
|
||||
return (double)nTemp;
|
||||
case CV_CAP_PROP_FRAME_HEIGHT:
|
||||
PvAttrUint32Get(Camera.Handle, "Height", &nTemp);
|
||||
return (double)nTemp;
|
||||
case CV_CAP_PROP_EXPOSURE:
|
||||
PvAttrUint32Get(Camera.Handle,"ExposureValue",&nTemp);
|
||||
return (double)nTemp;
|
||||
case CV_CAP_PROP_FPS:
|
||||
tPvFloat32 nfTemp;
|
||||
PvAttrFloat32Get(Camera.Handle, "StatFrameRate", &nfTemp);
|
||||
return (double)nfTemp;
|
||||
case CV_CAP_PROP_PVAPI_MULTICASTIP:
|
||||
char mEnable[2];
|
||||
char mIp[11];
|
||||
PvAttrEnumGet(Camera.Handle,"MulticastEnable",mEnable,sizeof(mEnable),NULL);
|
||||
if (strcmp(mEnable, "Off") == 0)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
else
|
||||
{
|
||||
long int ip;
|
||||
int a,b,c,d;
|
||||
PvAttrStringGet(Camera.Handle, "MulticastIPAddress",mIp,sizeof(mIp),NULL);
|
||||
sscanf(mIp, "%d.%d.%d.%d", &a, &b, &c, &d); ip = ((a*256 + b)*256 + c)*256 + d;
|
||||
return (double)ip;
|
||||
}
|
||||
case CV_CAP_PROP_GAIN:
|
||||
PvAttrUint32Get(Camera.Handle, "GainValue", &nTemp);
|
||||
return (double)nTemp;
|
||||
case CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE:
|
||||
char triggerMode[256];
|
||||
PvAttrEnumGet(Camera.Handle, "FrameStartTriggerMode", triggerMode, 256, NULL);
|
||||
if (strcmp(triggerMode, "Freerun")==0)
|
||||
return 0.0;
|
||||
else if (strcmp(triggerMode, "SyncIn1")==0)
|
||||
return 1.0;
|
||||
else if (strcmp(triggerMode, "SyncIn2")==0)
|
||||
return 2.0;
|
||||
else if (strcmp(triggerMode, "FixedRate")==0)
|
||||
return 3.0;
|
||||
else if (strcmp(triggerMode, "Software")==0)
|
||||
return 4.0;
|
||||
else
|
||||
return -1.0;
|
||||
case CV_CAP_PROP_PVAPI_DECIMATIONHORIZONTAL:
|
||||
PvAttrUint32Get(Camera.Handle, "DecimationHorizontal", &nTemp);
|
||||
return (double)nTemp;
|
||||
case CV_CAP_PROP_PVAPI_DECIMATIONVERTICAL:
|
||||
PvAttrUint32Get(Camera.Handle, "DecimationVertical", &nTemp);
|
||||
return (double)nTemp;
|
||||
case CV_CAP_PROP_PVAPI_BINNINGX:
|
||||
PvAttrUint32Get(Camera.Handle,"BinningX",&nTemp);
|
||||
return (double)nTemp;
|
||||
case CV_CAP_PROP_PVAPI_BINNINGY:
|
||||
PvAttrUint32Get(Camera.Handle,"BinningY",&nTemp);
|
||||
return (double)nTemp;
|
||||
case CV_CAP_PROP_PVAPI_PIXELFORMAT:
|
||||
char pixelFormat[256];
|
||||
PvAttrEnumGet(Camera.Handle, "PixelFormat", pixelFormat,256,NULL);
|
||||
if (strcmp(pixelFormat, "Mono8")==0)
|
||||
return 1.0;
|
||||
else if (strcmp(pixelFormat, "Mono16")==0)
|
||||
return 2.0;
|
||||
else if (strcmp(pixelFormat, "Bayer8")==0)
|
||||
return 3.0;
|
||||
else if (strcmp(pixelFormat, "Bayer16")==0)
|
||||
return 4.0;
|
||||
else if (strcmp(pixelFormat, "Rgb24")==0)
|
||||
return 5.0;
|
||||
else if (strcmp(pixelFormat, "Bgr24")==0)
|
||||
return 6.0;
|
||||
else if (strcmp(pixelFormat, "Rgba32")==0)
|
||||
return 7.0;
|
||||
else if (strcmp(pixelFormat, "Bgra32")==0)
|
||||
return 8.0;
|
||||
}
|
||||
return -1.0;
|
||||
}
|
||||
|
||||
bool CvCaptureCAM_PvAPI::setProperty( int property_id, double value )
|
||||
{
|
||||
tPvErr error;
|
||||
|
||||
switch ( property_id )
|
||||
{
|
||||
case CV_CAP_PROP_FRAME_WIDTH:
|
||||
{
|
||||
tPvUint32 currHeight;
|
||||
|
||||
PvAttrUint32Get(Camera.Handle, "Height", &currHeight);
|
||||
|
||||
stopCapture();
|
||||
// Reallocate Frames
|
||||
if (!resizeCaptureFrame(value, currHeight))
|
||||
{
|
||||
startCapture();
|
||||
return false;
|
||||
}
|
||||
|
||||
startCapture();
|
||||
|
||||
break;
|
||||
}
|
||||
case CV_CAP_PROP_FRAME_HEIGHT:
|
||||
{
|
||||
tPvUint32 currWidth;
|
||||
|
||||
PvAttrUint32Get(Camera.Handle, "Width", &currWidth);
|
||||
|
||||
stopCapture();
|
||||
|
||||
// Reallocate Frames
|
||||
if (!resizeCaptureFrame(currWidth, value))
|
||||
{
|
||||
startCapture();
|
||||
return false;
|
||||
}
|
||||
|
||||
startCapture();
|
||||
|
||||
break;
|
||||
}
|
||||
case CV_CAP_PROP_EXPOSURE:
|
||||
if ((PvAttrUint32Set(Camera.Handle,"ExposureValue",(tPvUint32)value)==ePvErrSuccess))
|
||||
break;
|
||||
else
|
||||
return false;
|
||||
case CV_CAP_PROP_PVAPI_MULTICASTIP:
|
||||
if (value==-1)
|
||||
{
|
||||
if ((PvAttrEnumSet(Camera.Handle,"MulticastEnable", "Off")==ePvErrSuccess))
|
||||
break;
|
||||
else
|
||||
return false;
|
||||
}
|
||||
else
|
||||
{
|
||||
cv::String ip=cv::format("%d.%d.%d.%d", ((unsigned int)value>>24)&255, ((unsigned int)value>>16)&255, ((unsigned int)value>>8)&255, (unsigned int)value&255);
|
||||
if ((PvAttrEnumSet(Camera.Handle,"MulticastEnable", "On")==ePvErrSuccess) &&
|
||||
(PvAttrStringSet(Camera.Handle, "MulticastIPAddress", ip.c_str())==ePvErrSuccess))
|
||||
break;
|
||||
else
|
||||
return false;
|
||||
}
|
||||
case CV_CAP_PROP_GAIN:
|
||||
if (PvAttrUint32Set(Camera.Handle,"GainValue",(tPvUint32)value)!=ePvErrSuccess)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
break;
|
||||
case CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE:
|
||||
if (value==0)
|
||||
error = PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "Freerun");
|
||||
else if (value==1)
|
||||
error = PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "SyncIn1");
|
||||
else if (value==2)
|
||||
error = PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "SyncIn2");
|
||||
else if (value==3)
|
||||
error = PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "FixedRate");
|
||||
else if (value==4)
|
||||
error = PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "Software");
|
||||
else
|
||||
error = ePvErrOutOfRange;
|
||||
if(error==ePvErrSuccess)
|
||||
break;
|
||||
else
|
||||
return false;
|
||||
case CV_CAP_PROP_PVAPI_DECIMATIONHORIZONTAL:
|
||||
if (value >= 1 && value <= 8)
|
||||
error = PvAttrUint32Set(Camera.Handle, "DecimationHorizontal", value);
|
||||
else
|
||||
error = ePvErrOutOfRange;
|
||||
if(error==ePvErrSuccess)
|
||||
break;
|
||||
else
|
||||
return false;
|
||||
case CV_CAP_PROP_PVAPI_DECIMATIONVERTICAL:
|
||||
if (value >= 1 && value <= 8)
|
||||
error = PvAttrUint32Set(Camera.Handle, "DecimationVertical", value);
|
||||
else
|
||||
error = ePvErrOutOfRange;
|
||||
if(error==ePvErrSuccess)
|
||||
break;
|
||||
else
|
||||
return false;
|
||||
case CV_CAP_PROP_PVAPI_BINNINGX:
|
||||
error = PvAttrUint32Set(Camera.Handle, "BinningX", value);
|
||||
if(error==ePvErrSuccess)
|
||||
break;
|
||||
else
|
||||
return false;
|
||||
case CV_CAP_PROP_PVAPI_BINNINGY:
|
||||
error = PvAttrUint32Set(Camera.Handle, "BinningY", value);
|
||||
if(error==ePvErrSuccess)
|
||||
break;
|
||||
else
|
||||
return false;
|
||||
case CV_CAP_PROP_PVAPI_PIXELFORMAT:
|
||||
{
|
||||
cv::String pixelFormat;
|
||||
|
||||
if (value==1)
|
||||
pixelFormat = "Mono8";
|
||||
else if (value==2)
|
||||
pixelFormat = "Mono16";
|
||||
else if (value==3)
|
||||
pixelFormat = "Bayer8";
|
||||
else if (value==4)
|
||||
pixelFormat = "Bayer16";
|
||||
else if (value==5)
|
||||
pixelFormat = "Rgb24";
|
||||
else if (value==6)
|
||||
pixelFormat = "Bgr24";
|
||||
else if (value==7)
|
||||
pixelFormat = "Rgba32";
|
||||
else if (value==8)
|
||||
pixelFormat = "Bgra32";
|
||||
else
|
||||
return false;
|
||||
|
||||
if ((PvAttrEnumSet(Camera.Handle,"PixelFormat", pixelFormat.c_str())==ePvErrSuccess))
|
||||
{
|
||||
tPvUint32 currWidth;
|
||||
tPvUint32 currHeight;
|
||||
|
||||
PvAttrUint32Get(Camera.Handle, "Width", &currWidth);
|
||||
PvAttrUint32Get(Camera.Handle, "Height", &currHeight);
|
||||
|
||||
stopCapture();
|
||||
// Reallocate Frames
|
||||
if (!resizeCaptureFrame(currWidth, currHeight))
|
||||
{
|
||||
startCapture();
|
||||
return false;
|
||||
}
|
||||
|
||||
startCapture();
|
||||
return true;
|
||||
}
|
||||
else
|
||||
return false;
|
||||
}
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void CvCaptureCAM_PvAPI::stopCapture()
|
||||
{
|
||||
PvCommandRun(Camera.Handle, "AcquisitionStop");
|
||||
PvCaptureEnd(Camera.Handle);
|
||||
}
|
||||
|
||||
bool CvCaptureCAM_PvAPI::startCapture()
|
||||
{
|
||||
// Start the camera
|
||||
PvCaptureStart(Camera.Handle);
|
||||
|
||||
// Set the camera to capture continuously
|
||||
if(PvAttrEnumSet(Camera.Handle, "AcquisitionMode", "Continuous")!= ePvErrSuccess)
|
||||
{
|
||||
fprintf(stderr,"Could not set PvAPI Acquisition Mode\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
if(PvCommandRun(Camera.Handle, "AcquisitionStart")!= ePvErrSuccess)
|
||||
{
|
||||
fprintf(stderr,"Could not start PvAPI acquisition\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
if(PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "Freerun")!= ePvErrSuccess)
|
||||
{
|
||||
fprintf(stderr,"Error setting PvAPI trigger to \"Freerun\"");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CvCaptureCAM_PvAPI::resizeCaptureFrame (int frameWidth, int frameHeight)
|
||||
{
|
||||
char pixelFormat[256];
|
||||
tPvUint32 frameSize;
|
||||
tPvUint32 sensorHeight;
|
||||
tPvUint32 sensorWidth;
|
||||
|
||||
if (frame)
|
||||
{
|
||||
cvReleaseImage(&frame);
|
||||
frame = NULL;
|
||||
}
|
||||
|
||||
if (PvAttrUint32Get(Camera.Handle, "SensorWidth", &sensorWidth) != ePvErrSuccess)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
if (PvAttrUint32Get(Camera.Handle, "SensorHeight", &sensorHeight) != ePvErrSuccess)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
// Cap out of bounds widths to the max supported by the sensor
|
||||
if ((frameWidth < 0) || ((tPvUint32)frameWidth > sensorWidth))
|
||||
{
|
||||
frameWidth = sensorWidth;
|
||||
}
|
||||
|
||||
if ((frameHeight < 0) || ((tPvUint32)frameHeight > sensorHeight))
|
||||
{
|
||||
frameHeight = sensorHeight;
|
||||
}
|
||||
|
||||
|
||||
if (PvAttrUint32Set(Camera.Handle, "Height", frameHeight) != ePvErrSuccess)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
if (PvAttrUint32Set(Camera.Handle, "Width", frameWidth) != ePvErrSuccess)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
PvAttrEnumGet(Camera.Handle, "PixelFormat", pixelFormat,256,NULL);
|
||||
PvAttrUint32Get(Camera.Handle, "TotalBytesPerFrame", &frameSize);
|
||||
|
||||
|
||||
if ( (strcmp(pixelFormat, "Mono8")==0) || (strcmp(pixelFormat, "Bayer8")==0) )
|
||||
{
|
||||
frame = cvCreateImage(cvSize((int)frameWidth, (int)frameHeight), IPL_DEPTH_8U, 1);
|
||||
frame->widthStep = (int)frameWidth;
|
||||
Camera.Frame.ImageBufferSize = frameSize;
|
||||
Camera.Frame.ImageBuffer = frame->imageData;
|
||||
}
|
||||
else if ( (strcmp(pixelFormat, "Mono16")==0) || (strcmp(pixelFormat, "Bayer16")==0) )
|
||||
{
|
||||
frame = cvCreateImage(cvSize((int)frameWidth, (int)frameHeight), IPL_DEPTH_16U, 1);
|
||||
frame->widthStep = (int)frameWidth*2;
|
||||
Camera.Frame.ImageBufferSize = frameSize;
|
||||
Camera.Frame.ImageBuffer = frame->imageData;
|
||||
}
|
||||
else if ( (strcmp(pixelFormat, "Rgb24")==0) || (strcmp(pixelFormat, "Bgr24")==0) )
|
||||
{
|
||||
frame = cvCreateImage(cvSize((int)frameWidth, (int)frameHeight), IPL_DEPTH_8U, 3);
|
||||
frame->widthStep = (int)frameWidth*3;
|
||||
Camera.Frame.ImageBufferSize = frameSize;
|
||||
Camera.Frame.ImageBuffer = frame->imageData;
|
||||
}
|
||||
else if ( (strcmp(pixelFormat, "Rgba32")==0) || (strcmp(pixelFormat, "Bgra32")==0) )
|
||||
{
|
||||
frame = cvCreateImage(cvSize((int)frameWidth, (int)frameHeight), IPL_DEPTH_8U, 4);
|
||||
frame->widthStep = (int)frameWidth*4;
|
||||
Camera.Frame.ImageBufferSize = frameSize;
|
||||
Camera.Frame.ImageBuffer = frame->imageData;
|
||||
}
|
||||
else
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
cv::Ptr<cv::IVideoCapture> cv::create_PvAPI_capture( int index )
|
||||
{
|
||||
CvCaptureCAM_PvAPI* capture = new CvCaptureCAM_PvAPI;
|
||||
|
||||
if ( capture->open( index ))
|
||||
return cv::makePtr<cv::LegacyCapture>(capture);
|
||||
|
||||
delete capture;
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
514
3rdparty/opencv-4.5.4/modules/videoio/src/cap_ueye.cpp
vendored
Normal file
514
3rdparty/opencv-4.5.4/modules/videoio/src/cap_ueye.cpp
vendored
Normal file
@@ -0,0 +1,514 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
|
||||
/*
|
||||
This file adds support for uEye cameras in OpenCV.
|
||||
|
||||
Cameras can be opened by ID. If 0 is passed as ID the first available camera
|
||||
will be used. For any other number, the camera associated with that ID will be
|
||||
opened (c.f. IDS documentation for is_InitCamera).
|
||||
|
||||
Images are double buffered in a ring buffer of size 2 (called 'image memory
|
||||
sequence' in the uEye SDK c.f. is_AddToSequence). The memory is locked on a
|
||||
'grab' call and copied and unlocked during 'retrieve'. The image queue provided
|
||||
in the uEye SDK is not used since it automatically locks the buffers when a new
|
||||
image arrives, which means the buffer can fill up when frames are retrieved too
|
||||
slowly.
|
||||
*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
#include <ueye.h>
|
||||
|
||||
#include <array>
|
||||
#include <chrono>
|
||||
#include <cstdlib>
|
||||
#include <memory>
|
||||
#include <thread>
|
||||
|
||||
namespace cv
|
||||
{
|
||||
namespace
|
||||
{
|
||||
struct image_buffer
|
||||
{
|
||||
char* data;
|
||||
INT id;
|
||||
};
|
||||
}
|
||||
#define ASSERT_UEYE(expr) { UINT expr_result = expr; if(IS_SUCCESS != expr_result) CV_Error_(Error::StsAssert, ("%s %s %d: failed with code %u", #expr, __FILE__, __LINE__, expr_result)); }
|
||||
#define PRINT_ON_UEYE_ERROR( expr ) { UINT expr_result = expr; if(IS_SUCCESS != expr_result) CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " << #expr << " " << __FILE__ << " " << __LINE__ << ": failed with code " << expr_result); }
|
||||
|
||||
// IVideoCapture implementation for IDS uEye industrial cameras.
// Frames are captured in freerun mode into a two-entry ring buffer;
// grabFrame() locks the most recent buffer, retrieveFrame() copies and unlocks it.
struct VideoCapture_uEye CV_FINAL: public IVideoCapture
{
    int getCaptureDomain() CV_OVERRIDE
    {
        return cv::CAP_UEYE;
    }

    // Opens camera 'camera' (0 selects the first available device).
    VideoCapture_uEye(int camera);

    // 255 is the "no camera" sentinel; valid device ids are 0..254.
    bool isOpened() const CV_OVERRIDE
    {
        return 255 != cam_id;
    }

    ~VideoCapture_uEye() CV_OVERRIDE
    {
        close();
    }

    double getProperty(int property_id) const CV_OVERRIDE;
    bool setProperty(int property_id, double value) CV_OVERRIDE;
    bool grabFrame() CV_OVERRIDE;
    bool retrieveFrame(int outputType, OutputArray frame) CV_OVERRIDE;

    void close();
    void start_camera();
    void stop_camera();

    // Releases the buffer locked by grabFrame() back to the driver.
    void unlock_image_buffer();

    HIDS cam_id = 255;   // uEye camera handle; 255 == closed
    SENSORINFO sensor_info;
    double fps;
    int width;           // current frame width in pixels
    int height;          // current frame height in pixels
    int pitch;           // bytes per image row, as reported by the driver
    // Double-buffered image memory ('image memory sequence', c.f. is_AddToSequence).
    std::array<image_buffer, 2> ring_buffer = {{{nullptr, 0}, {nullptr, 0}}};
    char* locked_image = nullptr;  // buffer locked by grabFrame(), pending retrieveFrame()
};
|
||||
|
||||
// Factory used by the videoio backend registry: constructs a uEye capture for
// the given camera id and returns it as a generic IVideoCapture.
Ptr<IVideoCapture> create_ueye_camera(int camera)
{
    Ptr<IVideoCapture> capture = cv::makePtr<VideoCapture_uEye>(camera);
    return capture;
}
|
||||
|
||||
namespace
|
||||
{
|
||||
// Queries the uEye SDK for every image format the camera supports and returns
// only those usable in freerun (continuous live) capture mode.
std::vector<IMAGE_FORMAT_INFO> get_freerun_formats(HIDS cam_id)
{
    UINT count;
    ASSERT_UEYE(is_ImageFormat(cam_id, IMGFRMT_CMD_GET_NUM_ENTRIES, &count, sizeof(count)));

    // IMAGE_FORMAT_LIST is a variable-length C struct that already embeds one
    // IMAGE_FORMAT_INFO, so (count - 1) extra entries are appended.
    UINT sizeof_list = sizeof(IMAGE_FORMAT_LIST) + (count - 1) * sizeof(IMAGE_FORMAT_INFO);
    // BUGFIX: the storage comes from std::malloc, so it must be released with
    // std::free. The previous plain std::unique_ptr invoked delete on
    // malloc'ed memory, which is undefined behavior.
    std::unique_ptr<IMAGE_FORMAT_LIST, decltype(&std::free)> list(
        static_cast<IMAGE_FORMAT_LIST*>(std::malloc(sizeof_list)), &std::free);

    list->nSizeOfListEntry = sizeof(IMAGE_FORMAT_INFO);
    list->nNumListElements = count;
    ASSERT_UEYE(is_ImageFormat(cam_id, IMGFRMT_CMD_GET_LIST, list.get(), sizeof_list));

    // copy to vector and filter out non-live modes
    std::vector<IMAGE_FORMAT_INFO> formats;
    formats.reserve(count + 1);
    std::copy_if(list->FormatInfo, list->FormatInfo + count, std::back_inserter(formats),
                 [](const IMAGE_FORMAT_INFO& format)
    {
        return (format.nSupportedCaptureModes & CAPTMODE_FREERUN);
    });

    return formats;
}
|
||||
|
||||
// Selects the camera format closest to the requested width/height. For the
// sensor's native resolution, binning/subsampling are disabled and a
// full-frame AOI is set directly, because the SDK format list sometimes omits
// the native mode.
void set_matching_format(HIDS cam_id, const SENSORINFO& sensor_info, int width, int height)
{
    // uEye camera formats sometimes do not include the native resolution (without binning, subsampling or AOI)
    if(width == int(sensor_info.nMaxWidth) && height == int(sensor_info.nMaxHeight))
    {
        ASSERT_UEYE(is_SetBinning(cam_id, IS_BINNING_DISABLE));
        ASSERT_UEYE(is_SetSubSampling(cam_id, IS_SUBSAMPLING_DISABLE));
        IS_RECT rectAOI = {0, 0, width, height};
        ASSERT_UEYE(is_AOI(cam_id, IS_AOI_IMAGE_SET_AOI, &rectAOI, sizeof(rectAOI)));
        return;
    }
    auto formats = get_freerun_formats(cam_id);
    CV_Assert(formats.size() > 0);
    // Heuristic distance between a format and the request: size difference plus
    // offset from a centered AOI. NOTE(review): terms are signed and can cancel
    // each other — looks like a sum of absolute differences was intended;
    // confirm before relying on this ordering.
    auto calc_err = [=](const IMAGE_FORMAT_INFO& format)
    {
        return format.nWidth - width + format.nHeight - height + (sensor_info.nMaxWidth - width)/2 - format.nX0 + (sensor_info.nMaxHeight - height)/2 - format.nY0;
    };

    // Sort ascending by the error metric and apply the best match.
    std::sort(formats.begin(), formats.end(), [=](const IMAGE_FORMAT_INFO& f0, const IMAGE_FORMAT_INFO& f1)
    {
        return calc_err(f0) < calc_err(f1);
    });

    ASSERT_UEYE(is_ImageFormat(cam_id, IMGFRMT_CMD_SET_FORMAT, &formats.front().nFormatID, sizeof(UINT)));
}
|
||||
}
|
||||
|
||||
|
||||
// Opens and initializes camera 'camera' (0 == first available; valid ids are
// 0..254), registers the frame-arrival event, resets the device to defaults,
// reads the current frame rate and starts freerun capture.
// Throws cv::Exception (via ASSERT_UEYE) on any driver failure.
VideoCapture_uEye::VideoCapture_uEye(int camera)
{
    CV_Assert(camera >= 0);
    CV_Assert(camera < 255); // max camera id is 254
    cam_id = static_cast<HIDS>(camera);
    CV_LOG_DEBUG(NULL, "VIDEOIO(UEYE:" << cam_id << "): opening...");
    // is_InitCamera may replace cam_id with the id actually opened (relevant for id 0).
    ASSERT_UEYE(is_InitCamera(&cam_id, nullptr));

    // Register and enable the frame event grabFrame() waits on.
    IS_INIT_EVENT init_event = {IS_SET_EVENT_FRAME, FALSE, FALSE};
    ASSERT_UEYE(is_Event(cam_id, IS_EVENT_CMD_INIT, &init_event, sizeof(init_event)));
    UINT frame_event = IS_SET_EVENT_FRAME;
    ASSERT_UEYE(is_Event(cam_id, IS_EVENT_CMD_ENABLE, &frame_event, sizeof(frame_event)));

    ASSERT_UEYE(is_ResetToDefault(cam_id));

    // Query the default frame rate; used to size timeouts in grabFrame().
    ASSERT_UEYE(is_SetFrameRate(cam_id, IS_GET_FRAMERATE, &fps));

    start_camera();
}
|
||||
|
||||
// Returns the requested capture property; unsupported property ids yield 0.
// Width/height/fps come from cached state; exposure and gain are read from the
// driver.
double VideoCapture_uEye::getProperty(int property_id) const
{
    auto value = 0.;
    switch (property_id)
    {
    case CAP_PROP_FRAME_WIDTH:
        value = width;
        break;
    case CAP_PROP_FRAME_HEIGHT:
        value = height;
        break;
    case CAP_PROP_FPS:
        value = fps;
        break;
    case CAP_PROP_EXPOSURE:
        ASSERT_UEYE(is_Exposure(cam_id, IS_EXPOSURE_CMD_GET_EXPOSURE, (void*)&value, sizeof(value)));
        break;
    case CAP_PROP_GAIN:
        // Driver reports the gain factor scaled by 100 (100 == factor 1.0).
        auto gain = is_SetHWGainFactor(cam_id, IS_GET_MASTER_GAIN_FACTOR, 100);
        value = static_cast<double>(gain)/100.0;
        break;
    }
    return value;
}
|
||||
|
||||
// Sets a capture property. Width/height changes trigger a format
// re-negotiation and a capture restart. Returns false if the camera is closed
// or a driver call fails; failures are logged, not propagated.
bool VideoCapture_uEye::setProperty(int property_id, double value)
{
    if(!isOpened())
        return false;
    try
    {
        bool set_format = false;  // deferred so width and height can change together
        switch (property_id)
        {
        case CAP_PROP_FRAME_WIDTH:
            if(width == value)
                break;
            width = static_cast<int>(value);
            set_format = true;
            break;
        case CAP_PROP_FRAME_HEIGHT:
            if(height == value)
                break;
            height = static_cast<int>(value);
            set_format = true;
            break;
        case CAP_PROP_FPS:
            if(fps == value)
                break;
            // 'fps' receives the rate actually applied by the driver.
            ASSERT_UEYE(is_SetFrameRate(cam_id, value, &fps));
            break;
        case CAP_PROP_EXPOSURE:
            ASSERT_UEYE(is_Exposure(cam_id, IS_EXPOSURE_CMD_SET_EXPOSURE, (void*)&value, sizeof(value)));
            break;
        case CAP_PROP_GAIN:
            // Value is passed through as the driver's scaled gain factor.
            is_SetHWGainFactor(cam_id, IS_SET_MASTER_GAIN_FACTOR, static_cast<int>(value));
            break;
        }
        if(set_format)
        {
            set_matching_format(cam_id, sensor_info, width, height);
            start_camera();  // reallocate image memory for the new geometry
        }
    }
    catch(const cv::Exception& e)
    {
        CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " << e.what());
        return false;
    }

    return true;
}
|
||||
|
||||
// Waits for the next frame event and locks the most recently filled ring
// buffer; the buffer stays locked until retrieveFrame() (or stop_camera())
// releases it. Returns false — and closes the camera — on timeout or driver
// error.
bool VideoCapture_uEye::grabFrame()
{
    if (!isOpened())
        return false;

    try
    {
        IS_WAIT_EVENT wait_event{IS_SET_EVENT_FRAME, static_cast<UINT>(3*1000/fps), 0, 0}; // wait for the time it should take to get 3 frames
        ASSERT_UEYE(is_Event(cam_id, IS_EVENT_CMD_WAIT, &wait_event, sizeof(wait_event)));
        INT current_buffer_id;
        char* current_buffer;
        char* last;
        // 'last' points at the most recently completed image in the sequence.
        ASSERT_UEYE(is_GetActSeqBuf(cam_id, &current_buffer_id, &current_buffer, &last));

        const int lock_tries = 4;
        std::chrono::milliseconds lock_time_out(static_cast<int>(1000/(fps*4))); // wait for a quarter of a frame if not lockable, should not occur in event mode
        UINT ret;
        for(int i = 0; i < lock_tries; i++) // try locking the buffer
        {
            ret = is_LockSeqBuf(cam_id, IS_IGNORE_PARAMETER, last);
            if(IS_SEQ_BUFFER_IS_LOCKED == ret)
                std::this_thread::sleep_for(lock_time_out);
            else
                break;
        }
        ASSERT_UEYE(ret);
        locked_image = last;
    }
    catch(const cv::Exception& e)
    {
        CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " << e.what());
        close();
        return false;
    }
    return true;
}
|
||||
|
||||
// Copies the buffer locked by grabFrame() into 'frame' as a BGR8 image (using
// the driver-reported row pitch), then unlocks the buffer. Returns false if no
// frame was grabbed or the unlock fails.
bool VideoCapture_uEye::retrieveFrame(int /*outputType*/, OutputArray frame)
{
    if(!locked_image)
        return false;
    // Wrap the locked driver memory without copying, then copy out to 'frame'.
    Mat(height, width, CV_8UC3, locked_image, pitch).copyTo(frame);
    try
    {
        unlock_image_buffer();
    }
    catch(const cv::Exception& e)
    {
        CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " << e.what());
        return false;
    }

    return true;
}
|
||||
|
||||
// (Re)starts freerun capture: determines the effective image geometry,
// allocates the two-entry image memory sequence, configures the color mode and
// starts live video. Any previous capture is stopped and freed first.
void VideoCapture_uEye::start_camera()
{
    stop_camera();

    IS_RECT aoi;
    ASSERT_UEYE(is_AOI(cam_id, IS_AOI_IMAGE_GET_AOI, &aoi, sizeof(aoi)));

    UINT x_is_abs_pos;
    UINT y_is_abs_pos;

    // Absolute-position flags indicate the AOI spans the full sensor axis,
    // in which case the sensor's maximum dimension is used.
    ASSERT_UEYE(is_AOI(cam_id, IS_AOI_IMAGE_GET_POS_X_ABS, &x_is_abs_pos , sizeof(x_is_abs_pos)));
    ASSERT_UEYE(is_AOI(cam_id, IS_AOI_IMAGE_GET_POS_Y_ABS, &y_is_abs_pos , sizeof(y_is_abs_pos)));

    ASSERT_UEYE(is_GetSensorInfo(cam_id, &sensor_info));
    width = x_is_abs_pos? sensor_info.nMaxWidth: aoi.s32Width;
    height = y_is_abs_pos? sensor_info.nMaxHeight: aoi.s32Height;

    // allocate ring_buffer
    int bpp = 24;  // bits per pixel, matches IS_CM_BGR8_PACKED below
    for(auto& image_memory: ring_buffer)
    {
        ASSERT_UEYE(is_AllocImageMem(cam_id, width, height, bpp, &image_memory.data, &image_memory.id));
        ASSERT_UEYE(is_AddToSequence(cam_id, image_memory.data, image_memory.id));
    }

    // TODO: this could be set according to sensor_info.nColorMode and CAP_PROP_FOURCC
    ASSERT_UEYE(is_SetColorMode(cam_id, IS_CM_BGR8_PACKED));
    ASSERT_UEYE(is_GetImageMemPitch (cam_id, &pitch));

    ASSERT_UEYE(is_CaptureVideo(cam_id, IS_DONT_WAIT));
}
|
||||
|
||||
void VideoCapture_uEye::stop_camera()
|
||||
{
|
||||
if(is_CaptureVideo(cam_id, IS_GET_LIVE))
|
||||
ASSERT_UEYE(is_StopLiveVideo(cam_id, IS_FORCE_VIDEO_STOP));
|
||||
|
||||
if(locked_image)
|
||||
unlock_image_buffer();
|
||||
ASSERT_UEYE(is_ClearSequence(cam_id));
|
||||
for(auto buffer: ring_buffer)
|
||||
{
|
||||
if(buffer.data)
|
||||
{
|
||||
ASSERT_UEYE(is_FreeImageMem(cam_id, buffer.data, buffer.id));
|
||||
buffer.data = nullptr;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Shuts the camera down: stops capture, disables/destroys the frame event and
// exits the driver session. Errors are logged rather than propagated because
// this is also called from the destructor.
void VideoCapture_uEye::close()
{
    if(!isOpened())
        return;
    CV_LOG_DEBUG(NULL, "VIDEOIO(UEYE:" << cam_id << "): closing...");
    // During closing we do not care about correct error handling as much.
    // Either something has gone wrong already or it has been called from the
    // destructor. Just make sure that all calls are done.
    try
    {
        stop_camera();
    }
    catch(const cv::Exception& e)
    {
        CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " << e.what());
    }
    UINT frame_event = IS_SET_EVENT_FRAME;
    PRINT_ON_UEYE_ERROR(is_Event(cam_id, IS_EVENT_CMD_DISABLE, &frame_event, sizeof(frame_event)));
    PRINT_ON_UEYE_ERROR(is_Event(cam_id, IS_EVENT_CMD_EXIT, &frame_event, sizeof(frame_event)));
    PRINT_ON_UEYE_ERROR(is_ExitCamera(cam_id));
    cam_id = 255;  // sentinel: mark the capture as closed
}
|
||||
|
||||
void VideoCapture_uEye::unlock_image_buffer()
|
||||
{
|
||||
char* tmp_buffer = nullptr;
|
||||
std::swap(locked_image, tmp_buffer);
|
||||
ASSERT_UEYE(is_UnlockSeqBuf(cam_id, IS_IGNORE_PARAMETER, tmp_buffer));
|
||||
}
|
||||
} // namespace cv
|
||||
|
||||
// plugin glue
|
||||
#if defined(BUILD_PLUGIN)
|
||||
|
||||
#define ABI_VERSION 0
|
||||
#define API_VERSION 0
|
||||
#include "plugin_api.hpp"
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
namespace
|
||||
{
|
||||
// Guard helpers for the C plugin ABI: NULL handles and C++ exceptions must not
// cross the plugin boundary, so both are translated into CV_ERROR_FAIL.
#define CV_PLUGIN_NULL_FAIL(ptr) if(!ptr) return CV_ERROR_FAIL;
#define CV_PLUGIN_CALL_BEGIN CV_PLUGIN_NULL_FAIL(handle) try {
#define CV_PLUGIN_CALL_END } catch (...) { return CV_ERROR_FAIL; }
|
||||
|
||||
// Plugin ABI: open camera 'cam_id' (the filename argument is unused by this
// backend) and hand an opaque capture handle back through 'handle'.
CvResult CV_API_CALL cv_capture_open(const char*, int cam_id, CV_OUT CvPluginCapture* handle)
{
    CV_PLUGIN_CALL_BEGIN

    *handle = NULL;
    std::unique_ptr<VideoCapture_uEye> cap(new VideoCapture_uEye(cam_id));
    if (cap->isOpened())
    {
        // Ownership of the capture passes to the caller via the opaque handle.
        *handle = (CvPluginCapture)cap.release();
        return CV_ERROR_OK;
    }
    return CV_ERROR_FAIL;

    CV_PLUGIN_CALL_END
}
|
||||
|
||||
// Plugin ABI: destroy a capture handle previously returned by cv_capture_open.
CvResult CV_API_CALL cv_capture_release(CvPluginCapture handle)
{
    CV_PLUGIN_NULL_FAIL(handle)

    VideoCapture_uEye* instance = (VideoCapture_uEye*)handle;
    delete instance;
    return CV_ERROR_OK;
}
|
||||
|
||||
|
||||
// Plugin ABI: forward a property read to VideoCapture_uEye::getProperty.
CvResult CV_API_CALL cv_capture_get_prop(CvPluginCapture handle, int prop, CV_OUT double* val)
{
    CV_PLUGIN_NULL_FAIL(val)
    CV_PLUGIN_CALL_BEGIN

    VideoCapture_uEye* instance = (VideoCapture_uEye*)handle;
    *val = instance->getProperty(prop);
    return CV_ERROR_OK;

    CV_PLUGIN_CALL_END
}
|
||||
|
||||
// Plugin ABI: forward a property write to VideoCapture_uEye::setProperty.
CvResult CV_API_CALL cv_capture_set_prop(CvPluginCapture handle, int prop, double val)
{
    CV_PLUGIN_CALL_BEGIN

    VideoCapture_uEye* instance = (VideoCapture_uEye*)handle;
    return instance->setProperty(prop, val) ? CV_ERROR_OK : CV_ERROR_FAIL;

    CV_PLUGIN_CALL_END
}
|
||||
|
||||
// Plugin ABI: forward a grab request to VideoCapture_uEye::grabFrame.
CvResult CV_API_CALL cv_capture_grab(CvPluginCapture handle)
{
    CV_PLUGIN_CALL_BEGIN

    VideoCapture_uEye* instance = (VideoCapture_uEye*)handle;
    return instance->grabFrame() ? CV_ERROR_OK : CV_ERROR_FAIL;

    CV_PLUGIN_CALL_END
}
|
||||
|
||||
// Plugin ABI: retrieve the grabbed frame and deliver its pixel data through
// the host-supplied callback (no image data crosses the ABI by ownership).
CvResult CV_API_CALL cv_capture_retrieve(CvPluginCapture handle, int stream_idx, cv_videoio_retrieve_cb_t callback, void* userdata)
{
    CV_PLUGIN_CALL_BEGIN

    VideoCapture_uEye* instance = (VideoCapture_uEye*)handle;
    Mat img;
    if (instance->retrieveFrame(stream_idx, img))
        return callback(stream_idx, img.data, (int)img.step, img.cols, img.rows, img.channels(), userdata);
    return CV_ERROR_FAIL;

    CV_PLUGIN_CALL_END
}
|
||||
|
||||
// Writer-side plugin entry points: this backend is capture-only, so every
// VideoWriter operation reports failure.
CvResult CV_API_CALL cv_writer_open(const char* /*filename*/, int /*fourcc*/, double /*fps*/, int /*width*/, int /*height*/, int /*isColor*/,
                                    CV_OUT CvPluginWriter* /*handle*/)
{
    return CV_ERROR_FAIL;
}

CvResult CV_API_CALL cv_writer_release(CvPluginWriter /*handle*/)
{
    return CV_ERROR_FAIL;
}

CvResult CV_API_CALL cv_writer_get_prop(CvPluginWriter /*handle*/, int /*prop*/, CV_OUT double* /*val*/)
{
    return CV_ERROR_FAIL;
}

CvResult CV_API_CALL cv_writer_set_prop(CvPluginWriter /*handle*/, int /*prop*/, double /*val*/)
{
    return CV_ERROR_FAIL;
}

CvResult CV_API_CALL cv_writer_write(CvPluginWriter /*handle*/, const unsigned char* /*data*/, int /*step*/, int /*width*/, int /*height*/, int /*cn*/)
{
    return CV_ERROR_FAIL;
}
|
||||
|
||||
// Static plugin API table handed to the OpenCV videoio plugin loader.
// Entry order is fixed by the OpenCV_VideoIO_Plugin_API_preview layout.
const OpenCV_VideoIO_Plugin_API_preview plugin_api =
{
    {
        sizeof(OpenCV_VideoIO_Plugin_API_preview), ABI_VERSION, API_VERSION,
        CV_VERSION_MAJOR, CV_VERSION_MINOR, CV_VERSION_REVISION, CV_VERSION_STATUS,
        "uEye OpenCV Video I/O plugin"
    },
    {
        /* 1*/CAP_UEYE,
        /* 2*/cv_capture_open,
        /* 3*/cv_capture_release,
        /* 4*/cv_capture_get_prop,
        /* 5*/cv_capture_set_prop,
        /* 6*/cv_capture_grab,
        /* 7*/cv_capture_retrieve,
        /* 8*/cv_writer_open,
        /* 9*/cv_writer_release,
        /* 10*/cv_writer_get_prop,
        /* 11*/cv_writer_set_prop,
        /* 12*/cv_writer_write
    }
};
|
||||
} // namespace
|
||||
} // namespace cv
|
||||
|
||||
// Exported plugin entry point: returns the API table when the host's requested
// ABI matches exactly and its API version is not newer than ours; NULL otherwise.
const OpenCV_VideoIO_Plugin_API_preview* opencv_videoio_plugin_init_v0(int requested_abi_version, int requested_api_version, void* /*reserved=NULL*/) CV_NOEXCEPT
{
    if (requested_abi_version != ABI_VERSION)
        return NULL;
    if (requested_api_version > API_VERSION)
        return NULL;
    return &cv::plugin_api;
}
|
||||
|
||||
#endif // BUILD_PLUGIN
|
||||
2265
3rdparty/opencv-4.5.4/modules/videoio/src/cap_v4l.cpp
vendored
Normal file
2265
3rdparty/opencv-4.5.4/modules/videoio/src/cap_v4l.cpp
vendored
Normal file
File diff suppressed because it is too large
Load Diff
173
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt/CaptureFrameGrabber.cpp
vendored
Normal file
173
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt/CaptureFrameGrabber.cpp
vendored
Normal file
@@ -0,0 +1,173 @@
|
||||
// Copyright (c) Microsoft. All rights reserved.
|
||||
//
|
||||
// The MIT License (MIT)
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files(the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions :
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
|
||||
#include "MediaStreamSink.hpp"
|
||||
#include "MediaSink.hpp"
|
||||
#include "CaptureFrameGrabber.hpp"
|
||||
|
||||
using namespace Media;
|
||||
using namespace Platform;
|
||||
using namespace Windows::Foundation;
|
||||
using namespace Windows::Media;
|
||||
using namespace Windows::Media::Capture;
|
||||
using namespace Windows::Media::MediaProperties;
|
||||
using namespace concurrency;
|
||||
using namespace Microsoft::WRL::Details;
|
||||
using namespace Microsoft::WRL;
|
||||
|
||||
// Asynchronously creates a frame grabber: routes the capture's preview or
// record stream into the grabber's custom media sink, starts it, and resolves
// with the grabber once the stream is running.
task<Media::CaptureFrameGrabber^> Media::CaptureFrameGrabber::CreateAsync(_In_ MediaCapture^ capture, _In_ VideoEncodingProperties^ props, CaptureStreamType streamType)
{
    auto reader = ref new Media::CaptureFrameGrabber(capture, props, streamType);

    auto profile = ref new MediaEncodingProfile();
    profile->Video = props;

    task<void> task;
    if (reader->_streamType == CaptureStreamType::Preview)
    {
        task = create_task(capture->StartPreviewToCustomSinkAsync(profile, reader->_mediaExtension));
    }
    else
    {
        task = create_task(capture->StartRecordToCustomSinkAsync(profile, reader->_mediaExtension));
    }

    return task.then([reader]()
    {
        reader->_state = State::Started;
        return reader;
    });
}
|
||||
|
||||
// Builds the custom media sink that delivers captured video samples to
// ProcessSample(); the sink is exposed as an IMediaExtension so MediaCapture
// can stream into it.
Media::CaptureFrameGrabber::CaptureFrameGrabber(_In_ MediaCapture^ capture, _In_ VideoEncodingProperties^ props, CaptureStreamType streamType)
    : _state(State::Created)
    , _streamType(streamType)
    , _capture(capture)
{
    auto videoSampleHandler = ref new MediaSampleHandler(this, &Media::CaptureFrameGrabber::ProcessSample);

    // Video-only sink: no audio properties or audio sample handler.
    _mediaSink = Make<MediaSink>(nullptr, props, nullptr, videoSampleHandler);
    _mediaExtension = reinterpret_cast<IMediaExtension^>(static_cast<AWM::IMediaExtension*>(_mediaSink.Get()));
}
|
||||
|
||||
// Best-effort teardown for when FinishAsync() was not awaited: stops the
// running stream (fire-and-forget) and shuts the sink down.
Media::CaptureFrameGrabber::~CaptureFrameGrabber()
{
    if (_state == State::Started)
    {
        if (_streamType == CaptureStreamType::Preview)
        {
            (void)_capture->StopPreviewAsync();
        }
        else
        {
            (void)_capture->StopRecordAsync();
        }
    }

    if (_mediaSink != nullptr)
    {
        (void)_mediaSink->Shutdown();
        _mediaSink = nullptr;
    }
    _mediaExtension = nullptr;
    _capture = nullptr;
}
|
||||
|
||||
// Shows the system camera options UI while capturing. Compiled out (no-op) on
// the phone and PC app families where CameraOptionsUI is unavailable.
void Media::CaptureFrameGrabber::ShowCameraSettings()
{
#if (WINAPI_FAMILY != WINAPI_FAMILY_PHONE_APP) && (WINAPI_FAMILY != WINAPI_FAMILY_PC_APP)
    if (_state == State::Started)
    {
        CameraOptionsUI::Show(_capture.Get());
    }
#endif
}
|
||||
|
||||
// Orderly shutdown; must be called while in the Started state (throws
// otherwise). Shuts down the sink, stops the preview/record stream, and
// transitions to Closed once the stop completes.
task<void> Media::CaptureFrameGrabber::FinishAsync()
{
    auto lock = _lock.LockExclusive();

    if (_state != State::Started)
    {
        throw ref new COMException(E_UNEXPECTED, L"State");
    }
    _state = State::Closing;

    if (_mediaSink != nullptr)
    {
        (void)_mediaSink->Shutdown();
        _mediaSink = nullptr;
    }
    _mediaExtension = nullptr;

    task<void> task;
    if (_streamType == CaptureStreamType::Preview)
    {
        task = create_task(_capture->StopPreviewAsync());
    }
    else
    {
        task = create_task(_capture->StopRecordAsync());
    }

    return task.then([this]()
    {
        // Re-acquire the lock: the continuation may run on another thread.
        auto lock = _lock.LockExclusive();
        _state = State::Closed;
        _capture = nullptr;
    });
}
|
||||
|
||||
// Requests one video sample from the sink and returns a task that completes
// (inside ProcessSample) with the frame's 2D buffer. Must be called while in
// the Started state (throws otherwise).
task<ComPtr<IMF2DBuffer2>> Media::CaptureFrameGrabber::GetFrameAsync()
{
    auto lock = _lock.LockExclusive();

    if (_state != State::Started)
    {
        throw ref new COMException(E_UNEXPECTED, L"State");
    }

    _mediaSink->RequestVideoSample();

    // Queue a completion event; ProcessSample() resolves them in FIFO order.
    task_completion_event<ComPtr<IMF2DBuffer2>> taskEvent;
    _videoSampleRequestQueue.push(taskEvent);

    return create_task(taskEvent);
}
|
||||
|
||||
// Sink callback: pairs the arriving sample with the oldest pending
// GetFrameAsync() request and completes it with a contiguous 2D buffer.
void Media::CaptureFrameGrabber::ProcessSample(_In_ MediaSample^ sample)
{
    task_completion_event<ComPtr<IMF2DBuffer2>> t;

    {
        // Only queue manipulation happens under the lock.
        auto lock = _lock.LockExclusive();

        t = _videoSampleRequestQueue.front();
        _videoSampleRequestQueue.pop();
    }

    ComPtr<IMFMediaBuffer> buffer;
    CHK(sample->Sample->ConvertToContiguousBuffer(&buffer));

    // Dispatch without the lock taken to avoid deadlocks
    t.set(As<IMF2DBuffer2>(buffer));
}
|
||||
85
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt/CaptureFrameGrabber.hpp
vendored
Normal file
85
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt/CaptureFrameGrabber.hpp
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
// Copyright (c) Microsoft. All rights reserved.
|
||||
//
|
||||
// The MIT License (MIT)
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files(the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions :
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "MFIncludes.hpp"
|
||||
|
||||
|
||||
namespace Media {
|
||||
|
||||
class MediaSink;
|
||||
|
||||
// Selects which MediaCapture stream the grabber taps into.
enum class CaptureStreamType
{
    Preview = 0,
    Record
};
|
||||
|
||||
// Pulls individual video frames out of a WinRT MediaCapture by routing its
// preview/record stream into a custom MediaSink. Frames are obtained one at a
// time with GetFrameAsync(); FinishAsync() performs orderly shutdown.
ref class CaptureFrameGrabber sealed
{
public:

    // IClosable
    virtual ~CaptureFrameGrabber();

    virtual void ShowCameraSettings();

internal:

    // Convenience overload: taps the preview stream.
    static concurrency::task<CaptureFrameGrabber^> CreateAsync(_In_ WMC::MediaCapture^ capture, _In_ WMMp::VideoEncodingProperties^ props)
    {
        return CreateAsync(capture, props, CaptureStreamType::Preview);
    }

    static concurrency::task<CaptureFrameGrabber^> CreateAsync(_In_ WMC::MediaCapture^ capture, _In_ WMMp::VideoEncodingProperties^ props, CaptureStreamType streamType);

    // Completes with the next captured frame's 2D buffer.
    concurrency::task<MW::ComPtr<IMF2DBuffer2>> GetFrameAsync();
    concurrency::task<void> FinishAsync();

private:

    CaptureFrameGrabber(_In_ WMC::MediaCapture^ capture, _In_ WMMp::VideoEncodingProperties^ props, CaptureStreamType streamType);

    // Sink callback fulfilling pending GetFrameAsync() requests.
    void ProcessSample(_In_ MediaSample^ sample);

    Platform::Agile<WMC::MediaCapture> _capture;
    ::Windows::Media::IMediaExtension^ _mediaExtension;

    MW::ComPtr<MediaSink> _mediaSink;

    CaptureStreamType _streamType;

    // Lifecycle: Created -> Started -> Closing -> Closed.
    enum class State
    {
        Created,
        Started,
        Closing,
        Closed
    } _state;

    // Pending frame requests, completed in FIFO order by ProcessSample().
    std::queue<concurrency::task_completion_event<MW::ComPtr<IMF2DBuffer2>>> _videoSampleRequestQueue;
    AutoMF _mf;        // keeps Media Foundation initialized for the grabber's lifetime
    MWW::SRWLock _lock;
};
|
||||
|
||||
}
|
||||
172
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt/MFIncludes.hpp
vendored
Normal file
172
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt/MFIncludes.hpp
vendored
Normal file
@@ -0,0 +1,172 @@
|
||||
// Header for standard system include files.
|
||||
|
||||
// Copyright (c) Microsoft. All rights reserved.
|
||||
//
|
||||
// The MIT License (MIT)
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files(the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions :
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <collection.h>
|
||||
#include <ppltasks.h>
|
||||
|
||||
#include <wrl\implements.h>
|
||||
#include <wrl\wrappers\corewrappers.h>
|
||||
#include <Roerrorapi.h>
|
||||
|
||||
#include <queue>
|
||||
#include <sstream>
|
||||
|
||||
#include <robuffer.h>
|
||||
|
||||
#include <mfapi.h>
|
||||
#include <mfidl.h>
|
||||
#include <Mferror.h>
|
||||
|
||||
#include <windows.media.h>
|
||||
#include <windows.media.mediaproperties.h>
|
||||
|
||||
namespace AWM = ::ABI::Windows::Media;
|
||||
namespace AWMMp = ::ABI::Windows::Media::MediaProperties;
|
||||
namespace AWFC = ::ABI::Windows::Foundation::Collections;
|
||||
namespace MW = ::Microsoft::WRL;
|
||||
namespace MWD = ::Microsoft::WRL::Details;
|
||||
namespace MWW = ::Microsoft::WRL::Wrappers;
|
||||
namespace WMC = ::Windows::Media::Capture;
|
||||
namespace WF = ::Windows::Foundation;
|
||||
namespace WMMp = ::Windows::Media::MediaProperties;
|
||||
namespace WSS = ::Windows::Storage::Streams;
|
||||
|
||||
// Exception-based error handling
// CHK: throw Platform::COMException on a failed HRESULT; CHKNULL: throw on null.
#define CHK(statement) {HRESULT _hr = (statement); if (FAILED(_hr)) { throw ref new Platform::COMException(_hr); };}
#define CHKNULL(p) {if ((p) == nullptr) { throw ref new Platform::NullReferenceException(L#p); };}

// Exception-free error handling
// CHK_RETURN: propagate a failed HRESULT to the caller via an 'hr' local in scope.
#define CHK_RETURN(statement) {hr = (statement); if (FAILED(hr)) { return hr; };}
|
||||
|
||||
// Cast a C++/CX smartpointer to an ABI smartpointer (throws via CHK on failure)
template<typename T, typename U>
MW::ComPtr<T> As(U^ in)
{
    MW::ComPtr<T> out;
    CHK(reinterpret_cast<IInspectable*>(in)->QueryInterface(IID_PPV_ARGS(&out)));
    return out;
}

// Cast an ABI smartpointer
template<typename T, typename U>
Microsoft::WRL::ComPtr<T> As(const Microsoft::WRL::ComPtr<U>& in)
{
    Microsoft::WRL::ComPtr<T> out;
    CHK(in.As(&out));
    return out;
}

// Cast an ABI smartpointer (raw-pointer overload)
template<typename T, typename U>
Microsoft::WRL::ComPtr<T> As(U* in)
{
    Microsoft::WRL::ComPtr<T> out;
    CHK(in->QueryInterface(IID_PPV_ARGS(&out)));
    return out;
}
|
||||
|
||||
// Get access to bytes in IBuffer
// Returns a raw pointer into the buffer's storage; only valid while 'buffer' lives.
inline unsigned char* GetData(_In_ WSS::IBuffer^ buffer)
{
    unsigned char* bytes = nullptr;
    CHK(As<WSS::IBufferByteAccess>(buffer)->Buffer(&bytes));
    return bytes;
}
|
||||
|
||||
// Class to start and shutdown Media Foundation
|
||||
class AutoMF
|
||||
{
|
||||
public:
|
||||
AutoMF()
|
||||
: _bInitialized(false)
|
||||
{
|
||||
CHK(MFStartup(MF_VERSION));
|
||||
}
|
||||
|
||||
~AutoMF()
|
||||
{
|
||||
if (_bInitialized)
|
||||
{
|
||||
(void)MFShutdown();
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
bool _bInitialized;
|
||||
};
|
||||
|
||||
// Class to track error origin
// Reports a failed HRESULT (with a message literal) to the WinRT error
// origination machinery; passes the HRESULT through unchanged.
template <size_t N>
HRESULT OriginateError(__in HRESULT hr, __in wchar_t const (&str)[N])
{
    if (FAILED(hr))
    {
        // N includes the terminating null; report only the character count.
        ::RoOriginateErrorW(hr, N - 1, str);
    }
    return hr;
}

// Class to track error origin (message-less variant)
inline HRESULT OriginateError(__in HRESULT hr)
{
    if (FAILED(hr))
    {
        ::RoOriginateErrorW(hr, 0, nullptr);
    }
    return hr;
}
|
||||
|
||||
// Converts exceptions into HRESULTs
// Runs 'lambda' and maps any escaping exception to a failure HRESULT; used at
// ABI boundaries where C++ exceptions must not propagate.
template <typename Lambda>
HRESULT ExceptionBoundary(Lambda&& lambda)
{
    try
    {
        lambda();
        return S_OK;
    }
    catch (Platform::Exception^ e)
    {
        // WinRT exceptions carry their own HRESULT.
        return e->HResult;
    }
    catch (const std::bad_alloc&)
    {
        return E_OUTOFMEMORY;
    }
    catch (const std::exception&)
    {
        return E_FAIL;
    }
}
|
||||
|
||||
// Wraps an IMFSample in a C++/CX class to be able to define a callback delegate
ref class MediaSample sealed
{
internal:
    MW::ComPtr<IMFSample> Sample;
};

// Callback type invoked by the media sink when a sample is available.
delegate void MediaSampleHandler(MediaSample^ sample);
|
||||
396
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt/MediaSink.hpp
vendored
Normal file
396
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt/MediaSink.hpp
vendored
Normal file
@@ -0,0 +1,396 @@
|
||||
// Copyright (c) Microsoft. All rights reserved.
|
||||
//
|
||||
// The MIT License (MIT)
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files(the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions :
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "MediaStreamSink.hpp"
|
||||
#include "MFIncludes.hpp"
|
||||
|
||||
namespace Media {
|
||||
|
||||
const unsigned int c_audioStreamSinkId = 0;
|
||||
const unsigned int c_videoStreamSinkId = 1;
|
||||
|
||||
class MediaSink WrlSealed
|
||||
: public MW::RuntimeClass<
|
||||
MW::RuntimeClassFlags<
|
||||
MW::RuntimeClassType::WinRtClassicComMix>
|
||||
, AWM::IMediaExtension
|
||||
, IMFMediaSink
|
||||
, IMFClockStateSink
|
||||
, MW::FtmBase
|
||||
>
|
||||
{
|
||||
InspectableClass(L"MediaSink", BaseTrust)
|
||||
|
||||
public:
|
||||
|
||||
MediaSink(
|
||||
_In_opt_ WMMp::AudioEncodingProperties^ audioProps,
|
||||
_In_opt_ WMMp::VideoEncodingProperties^ videoProps,
|
||||
_In_opt_ MediaSampleHandler^ audioSampleHandler,
|
||||
_In_opt_ MediaSampleHandler^ videoSampleHandler
|
||||
)
|
||||
: _shutdown(false)
|
||||
{
|
||||
MW::ComPtr<IMFMediaType> audioMT;
|
||||
if (audioProps != nullptr)
|
||||
{
|
||||
CHK(MFCreateMediaTypeFromProperties(As<IUnknown>(audioProps).Get(), &audioMT));
|
||||
_audioStreamSink = MW::Make<MediaStreamSink>(
|
||||
this,
|
||||
c_audioStreamSinkId,
|
||||
audioMT,
|
||||
audioSampleHandler
|
||||
);
|
||||
}
|
||||
|
||||
MW::ComPtr<IMFMediaType> videoMT;
|
||||
if (videoProps != nullptr)
|
||||
{
|
||||
CHK(MFCreateMediaTypeFromProperties(As<IUnknown>(videoProps).Get(), &videoMT));
|
||||
_videoStreamSink = MW::Make<MediaStreamSink>(
|
||||
this,
|
||||
c_videoStreamSinkId,
|
||||
videoMT,
|
||||
videoSampleHandler
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
void RequestAudioSample()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
_audioStreamSink->RequestSample();
|
||||
}
|
||||
|
||||
void RequestVideoSample()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
_videoStreamSink->RequestSample();
|
||||
}
|
||||
|
||||
void SetCurrentAudioMediaType(_In_ IMFMediaType* mt)
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
_audioStreamSink->InternalSetCurrentMediaType(mt);
|
||||
}
|
||||
|
||||
void SetCurrentVideoMediaType(_In_ IMFMediaType* mt)
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
_videoStreamSink->InternalSetCurrentMediaType(mt);
|
||||
}
|
||||
|
||||
//
|
||||
// IMediaExtension
|
||||
//
|
||||
|
||||
IFACEMETHODIMP SetProperties(_In_ AWFC::IPropertySet * /*configuration*/)
|
||||
{
|
||||
return ExceptionBoundary([this]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
_VerifyNotShutdown();
|
||||
});
|
||||
}
|
||||
|
||||
//
|
||||
// IMFMediaSink
|
||||
//
|
||||
|
||||
IFACEMETHODIMP GetCharacteristics(_Out_ DWORD *characteristics)
|
||||
{
|
||||
return ExceptionBoundary([this, characteristics]()
|
||||
{
|
||||
_VerifyNotShutdown();
|
||||
|
||||
CHKNULL(characteristics);
|
||||
*characteristics = MEDIASINK_RATELESS | MEDIASINK_FIXED_STREAMS;
|
||||
});
|
||||
}
|
||||
|
||||
IFACEMETHODIMP AddStreamSink(
|
||||
DWORD /*streamSinkIdentifier*/,
|
||||
_In_ IMFMediaType * /*mediaType*/,
|
||||
_COM_Outptr_ IMFStreamSink **streamSink
|
||||
)
|
||||
{
|
||||
return ExceptionBoundary([this, streamSink]()
|
||||
{
|
||||
_VerifyNotShutdown();
|
||||
|
||||
CHKNULL(streamSink);
|
||||
*streamSink = nullptr;
|
||||
|
||||
CHK(MF_E_STREAMSINKS_FIXED);
|
||||
});
|
||||
}
|
||||
|
||||
IFACEMETHODIMP RemoveStreamSink(DWORD /*streamSinkIdentifier*/)
|
||||
{
|
||||
return ExceptionBoundary([this]()
|
||||
{
|
||||
_VerifyNotShutdown();
|
||||
|
||||
CHK(MF_E_STREAMSINKS_FIXED);
|
||||
});
|
||||
}
|
||||
|
||||
IFACEMETHODIMP GetStreamSinkCount(_Out_ DWORD *streamSinkCount)
|
||||
{
|
||||
return ExceptionBoundary([this, streamSinkCount]()
|
||||
{
|
||||
CHKNULL(streamSinkCount);
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
*streamSinkCount = (_audioStreamSink != nullptr) + (_videoStreamSink != nullptr);
|
||||
});
|
||||
}
|
||||
|
||||
IFACEMETHODIMP GetStreamSinkByIndex(DWORD index, _COM_Outptr_ IMFStreamSink **streamSink)
|
||||
{
|
||||
return ExceptionBoundary([this, index, streamSink]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
CHKNULL(streamSink);
|
||||
*streamSink = nullptr;
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
switch (index)
|
||||
{
|
||||
case 0:
|
||||
if (_audioStreamSink != nullptr)
|
||||
{
|
||||
CHK(_audioStreamSink.CopyTo(streamSink));
|
||||
}
|
||||
else
|
||||
{
|
||||
CHK(_videoStreamSink.CopyTo(streamSink));
|
||||
}
|
||||
break;
|
||||
|
||||
case 1:
|
||||
if ((_audioStreamSink != nullptr) && (_videoStreamSink != nullptr))
|
||||
{
|
||||
CHK(_videoStreamSink.CopyTo(streamSink));
|
||||
}
|
||||
else
|
||||
{
|
||||
CHK(E_INVALIDARG);
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
CHK(E_INVALIDARG);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
IFACEMETHODIMP GetStreamSinkById(DWORD identifier, _COM_Outptr_ IMFStreamSink **streamSink)
|
||||
{
|
||||
return ExceptionBoundary([this, identifier, streamSink]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
CHKNULL(streamSink);
|
||||
*streamSink = nullptr;
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
if ((identifier == 0) && (_audioStreamSink != nullptr))
|
||||
{
|
||||
CHK(_audioStreamSink.CopyTo(streamSink));
|
||||
}
|
||||
else if ((identifier == 1) && (_videoStreamSink != nullptr))
|
||||
{
|
||||
CHK(_videoStreamSink.CopyTo(streamSink));
|
||||
}
|
||||
else
|
||||
{
|
||||
CHK(E_INVALIDARG);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
IFACEMETHODIMP SetPresentationClock(_In_ IMFPresentationClock *clock)
|
||||
{
|
||||
return ExceptionBoundary([this, clock]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
if (_clock != nullptr)
|
||||
{
|
||||
CHK(_clock->RemoveClockStateSink(this));
|
||||
_clock = nullptr;
|
||||
}
|
||||
|
||||
if (clock != nullptr)
|
||||
{
|
||||
CHK(clock->AddClockStateSink(this));
|
||||
_clock = clock;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
IFACEMETHODIMP GetPresentationClock(_COM_Outptr_ IMFPresentationClock **clock)
|
||||
{
|
||||
return ExceptionBoundary([this, clock]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
CHKNULL(clock);
|
||||
*clock = nullptr;
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
if (_clock != nullptr)
|
||||
{
|
||||
CHK(_clock.CopyTo(clock))
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
IFACEMETHODIMP Shutdown()
|
||||
{
|
||||
return ExceptionBoundary([this]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
if (_shutdown)
|
||||
{
|
||||
return;
|
||||
}
|
||||
_shutdown = true;
|
||||
|
||||
if (_audioStreamSink != nullptr)
|
||||
{
|
||||
_audioStreamSink->Shutdown();
|
||||
_audioStreamSink = nullptr;
|
||||
}
|
||||
|
||||
if (_videoStreamSink != nullptr)
|
||||
{
|
||||
_videoStreamSink->Shutdown();
|
||||
_videoStreamSink = nullptr;
|
||||
}
|
||||
|
||||
if (_clock != nullptr)
|
||||
{
|
||||
(void)_clock->RemoveClockStateSink(this);
|
||||
_clock = nullptr;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
//
|
||||
// IMFClockStateSink methods
|
||||
//
|
||||
|
||||
IFACEMETHODIMP OnClockStart(MFTIME /*hnsSystemTime*/, LONGLONG /*llClockStartOffset*/)
|
||||
{
|
||||
return ExceptionBoundary([this]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
_VerifyNotShutdown();
|
||||
});
|
||||
}
|
||||
|
||||
IFACEMETHODIMP OnClockStop(MFTIME /*hnsSystemTime*/)
|
||||
{
|
||||
return ExceptionBoundary([this]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
_VerifyNotShutdown();
|
||||
});
|
||||
}
|
||||
|
||||
IFACEMETHODIMP OnClockPause(MFTIME /*hnsSystemTime*/)
|
||||
{
|
||||
return ExceptionBoundary([this]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
_VerifyNotShutdown();
|
||||
});
|
||||
}
|
||||
|
||||
IFACEMETHODIMP OnClockRestart(MFTIME /*hnsSystemTime*/)
|
||||
{
|
||||
return ExceptionBoundary([this]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
_VerifyNotShutdown();
|
||||
});
|
||||
}
|
||||
|
||||
IFACEMETHODIMP OnClockSetRate(MFTIME /*hnsSystemTime*/, float /*flRate*/)
|
||||
{
|
||||
return ExceptionBoundary([this]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
_VerifyNotShutdown();
|
||||
});
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
bool _shutdown;
|
||||
|
||||
void _VerifyNotShutdown()
|
||||
{
|
||||
if (_shutdown)
|
||||
{
|
||||
CHK(MF_E_SHUTDOWN);
|
||||
}
|
||||
}
|
||||
|
||||
MW::ComPtr<MediaStreamSink> _audioStreamSink;
|
||||
MW::ComPtr<MediaStreamSink> _videoStreamSink;
|
||||
MW::ComPtr<IMFPresentationClock> _clock;
|
||||
|
||||
MWW::SRWLock _lock;
|
||||
};
|
||||
|
||||
}
|
||||
384
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt/MediaStreamSink.cpp
vendored
Normal file
384
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt/MediaStreamSink.cpp
vendored
Normal file
@@ -0,0 +1,384 @@
|
||||
// Copyright (c) Microsoft. All rights reserved.
|
||||
//
|
||||
// The MIT License (MIT)
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files(the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions :
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
|
||||
#include "MediaStreamSink.hpp"
|
||||
#include "MFIncludes.hpp"
|
||||
|
||||
using namespace Media;
|
||||
using namespace Microsoft::WRL;
|
||||
using namespace Platform;
|
||||
using namespace Windows::Foundation;
|
||||
|
||||
MediaStreamSink::MediaStreamSink(
|
||||
__in const MW::ComPtr<IMFMediaSink>& sink,
|
||||
__in DWORD id,
|
||||
__in const MW::ComPtr<IMFMediaType>& mt,
|
||||
__in MediaSampleHandler^ sampleHandler
|
||||
)
|
||||
: _shutdown(false)
|
||||
, _id(-1)
|
||||
, _width(0)
|
||||
, _height(0)
|
||||
{
|
||||
CHK(MFCreateEventQueue(&_eventQueue));
|
||||
CHK(MFCreateMediaType(&_curMT));
|
||||
|
||||
_UpdateMediaType(mt);
|
||||
|
||||
_sink = sink;
|
||||
_id = id;
|
||||
_sampleHandler = sampleHandler;
|
||||
}
|
||||
|
||||
HRESULT MediaStreamSink::GetMediaSink(__deref_out IMFMediaSink **sink)
|
||||
{
|
||||
return ExceptionBoundary([this, sink]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
CHKNULL(sink);
|
||||
*sink = nullptr;
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
CHK(_sink.CopyTo(sink));
|
||||
});
|
||||
}
|
||||
|
||||
HRESULT MediaStreamSink::GetIdentifier(__out DWORD *identifier)
|
||||
{
|
||||
return ExceptionBoundary([this, identifier]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
CHKNULL(identifier);
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
*identifier = _id;
|
||||
});
|
||||
}
|
||||
|
||||
HRESULT MediaStreamSink::GetMediaTypeHandler(__deref_out IMFMediaTypeHandler **handler)
|
||||
{
|
||||
return ExceptionBoundary([this, handler]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
CHKNULL(handler);
|
||||
*handler = nullptr;
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
*handler = this;
|
||||
this->AddRef();
|
||||
|
||||
});
|
||||
}
|
||||
|
||||
void MediaStreamSink::RequestSample()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
CHK(_eventQueue->QueueEventParamVar(MEStreamSinkRequestSample, GUID_NULL, S_OK, nullptr));
|
||||
}
|
||||
|
||||
HRESULT MediaStreamSink::ProcessSample(__in_opt IMFSample *sample)
|
||||
{
|
||||
return ExceptionBoundary([this, sample]()
|
||||
{
|
||||
MediaSampleHandler^ sampleHandler;
|
||||
auto mediaSample = ref new MediaSample();
|
||||
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
if (sample == nullptr)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
mediaSample->Sample = sample;
|
||||
sampleHandler = _sampleHandler;
|
||||
}
|
||||
|
||||
// Call back without the lock taken to avoid deadlocks
|
||||
sampleHandler(mediaSample);
|
||||
});
|
||||
}
|
||||
|
||||
HRESULT MediaStreamSink::PlaceMarker(__in MFSTREAMSINK_MARKER_TYPE /*markerType*/, __in const PROPVARIANT * /*markerValue*/, __in const PROPVARIANT * contextValue)
|
||||
{
|
||||
return ExceptionBoundary([this, contextValue]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
CHKNULL(contextValue);
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
CHK(_eventQueue->QueueEventParamVar(MEStreamSinkMarker, GUID_NULL, S_OK, contextValue));
|
||||
});
|
||||
}
|
||||
|
||||
HRESULT MediaStreamSink::Flush()
|
||||
{
|
||||
return ExceptionBoundary([this]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
_VerifyNotShutdown();
|
||||
});
|
||||
}
|
||||
|
||||
HRESULT MediaStreamSink::GetEvent(__in DWORD flags, __deref_out IMFMediaEvent **event)
|
||||
{
|
||||
return ExceptionBoundary([this, flags, event]()
|
||||
{
|
||||
CHKNULL(event);
|
||||
*event = nullptr;
|
||||
|
||||
ComPtr<IMFMediaEventQueue> eventQueue;
|
||||
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
eventQueue = _eventQueue;
|
||||
}
|
||||
|
||||
// May block for a while
|
||||
CHK(eventQueue->GetEvent(flags, event));
|
||||
});
|
||||
}
|
||||
|
||||
HRESULT MediaStreamSink::BeginGetEvent(__in IMFAsyncCallback *callback, __in_opt IUnknown *state)
|
||||
{
|
||||
return ExceptionBoundary([this, callback, state]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
CHK(_eventQueue->BeginGetEvent(callback, state));
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
HRESULT MediaStreamSink::EndGetEvent(__in IMFAsyncResult *result, __deref_out IMFMediaEvent **event)
|
||||
{
|
||||
return ExceptionBoundary([this, result, event]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
CHKNULL(event);
|
||||
*event = nullptr;
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
CHK(_eventQueue->EndGetEvent(result, event));
|
||||
});
|
||||
}
|
||||
|
||||
HRESULT MediaStreamSink::QueueEvent(
|
||||
__in MediaEventType met,
|
||||
__in REFGUID extendedType,
|
||||
__in HRESULT status,
|
||||
__in_opt const PROPVARIANT *value
|
||||
)
|
||||
{
|
||||
return ExceptionBoundary([this, met, extendedType, status, value]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
CHK(_eventQueue->QueueEventParamVar(met, extendedType, status, value));
|
||||
});
|
||||
}
|
||||
|
||||
HRESULT MediaStreamSink::IsMediaTypeSupported(__in IMFMediaType *mediaType, __deref_out_opt IMFMediaType **closestMediaType)
|
||||
{
|
||||
bool supported = false;
|
||||
|
||||
HRESULT hr = ExceptionBoundary([this, mediaType, closestMediaType, &supported]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
if (closestMediaType != nullptr)
|
||||
{
|
||||
*closestMediaType = nullptr;
|
||||
}
|
||||
|
||||
CHKNULL(mediaType);
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
supported = _IsMediaTypeSupported(mediaType);
|
||||
});
|
||||
|
||||
// Avoid throwing an exception to return MF_E_INVALIDMEDIATYPE as this is not a exceptional case
|
||||
return FAILED(hr) ? hr : supported ? S_OK : MF_E_INVALIDMEDIATYPE;
|
||||
}
|
||||
|
||||
HRESULT MediaStreamSink::GetMediaTypeCount(__out DWORD *typeCount)
|
||||
{
|
||||
return ExceptionBoundary([this, typeCount]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
CHKNULL(typeCount);
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
// No media type provided by default (app needs to specify it)
|
||||
*typeCount = 0;
|
||||
});
|
||||
}
|
||||
|
||||
HRESULT MediaStreamSink::GetMediaTypeByIndex(__in DWORD /*index*/, __deref_out IMFMediaType **mediaType)
|
||||
{
|
||||
HRESULT hr = ExceptionBoundary([this, mediaType]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
CHKNULL(mediaType);
|
||||
*mediaType = nullptr;
|
||||
|
||||
_VerifyNotShutdown();
|
||||
});
|
||||
|
||||
// Avoid throwing an exception to return MF_E_NO_MORE_TYPES as this is not a exceptional case
|
||||
return FAILED(hr) ? hr : MF_E_NO_MORE_TYPES;
|
||||
}
|
||||
|
||||
HRESULT MediaStreamSink::SetCurrentMediaType(__in IMFMediaType *mediaType)
|
||||
{
|
||||
return ExceptionBoundary([this, mediaType]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
CHKNULL(mediaType);
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
if (!_IsMediaTypeSupported(mediaType))
|
||||
{
|
||||
CHK(MF_E_INVALIDMEDIATYPE);
|
||||
}
|
||||
|
||||
_UpdateMediaType(mediaType);
|
||||
});
|
||||
}
|
||||
|
||||
HRESULT MediaStreamSink::GetCurrentMediaType(__deref_out_opt IMFMediaType **mediaType)
|
||||
{
|
||||
return ExceptionBoundary([this, mediaType]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
CHKNULL(mediaType);
|
||||
*mediaType = nullptr;
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
ComPtr<IMFMediaType> mt;
|
||||
CHK(MFCreateMediaType(&mt));
|
||||
CHK(_curMT->CopyAllItems(mt.Get()));
|
||||
*mediaType = mt.Detach();
|
||||
});
|
||||
}
|
||||
|
||||
HRESULT MediaStreamSink::GetMajorType(__out GUID *majorType)
|
||||
{
|
||||
return ExceptionBoundary([this, majorType]()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
CHKNULL(majorType);
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
*majorType = _majorType;
|
||||
});
|
||||
}
|
||||
|
||||
void MediaStreamSink::InternalSetCurrentMediaType(__in const ComPtr<IMFMediaType>& mediaType)
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
CHKNULL(mediaType);
|
||||
|
||||
_VerifyNotShutdown();
|
||||
|
||||
_UpdateMediaType(mediaType);
|
||||
}
|
||||
|
||||
void MediaStreamSink::Shutdown()
|
||||
{
|
||||
auto lock = _lock.LockExclusive();
|
||||
|
||||
if (_shutdown)
|
||||
{
|
||||
return;
|
||||
}
|
||||
_shutdown = true;
|
||||
|
||||
(void)_eventQueue->Shutdown();
|
||||
_eventQueue = nullptr;
|
||||
|
||||
_curMT = nullptr;
|
||||
_sink = nullptr;
|
||||
_sampleHandler = nullptr;
|
||||
}
|
||||
|
||||
bool MediaStreamSink::_IsMediaTypeSupported(__in const ComPtr<IMFMediaType>& mt) const
|
||||
{
|
||||
GUID majorType;
|
||||
GUID subType;
|
||||
if (SUCCEEDED(mt->GetGUID(MF_MT_MAJOR_TYPE, &majorType)) &&
|
||||
SUCCEEDED(mt->GetGUID(MF_MT_SUBTYPE, &subType)) &&
|
||||
(majorType == _majorType) &&
|
||||
(subType == _subType))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void MediaStreamSink::_UpdateMediaType(__in const ComPtr<IMFMediaType>& mt)
|
||||
{
|
||||
CHK(mt->GetGUID(MF_MT_MAJOR_TYPE, &_majorType));
|
||||
CHK(mt->GetGUID(MF_MT_SUBTYPE, &_subType));
|
||||
|
||||
if (_majorType == MFMediaType_Video)
|
||||
{
|
||||
CHK(MFGetAttributeSize(mt.Get(), MF_MT_FRAME_SIZE, &_width, &_height));
|
||||
}
|
||||
|
||||
CHK(mt->CopyAllItems(_curMT.Get()));
|
||||
}
|
||||
114
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt/MediaStreamSink.hpp
vendored
Normal file
114
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt/MediaStreamSink.hpp
vendored
Normal file
@@ -0,0 +1,114 @@
|
||||
// Copyright (c) Microsoft. All rights reserved.
|
||||
//
|
||||
// The MIT License (MIT)
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files(the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions :
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "MFIncludes.hpp"
|
||||
|
||||
namespace Media {
|
||||
|
||||
class MediaStreamSink WrlSealed :
|
||||
public Microsoft::WRL::RuntimeClass<
|
||||
Microsoft::WRL::RuntimeClassFlags<Microsoft::WRL::ClassicCom>,
|
||||
IMFStreamSink,
|
||||
IMFMediaEventGenerator,
|
||||
IMFMediaTypeHandler
|
||||
>
|
||||
{
|
||||
public:
|
||||
|
||||
MediaStreamSink(
|
||||
__in const MW::ComPtr<IMFMediaSink>& sink,
|
||||
__in DWORD id,
|
||||
__in const MW::ComPtr<IMFMediaType>& mt,
|
||||
__in MediaSampleHandler^ sampleHandler
|
||||
);
|
||||
|
||||
//
|
||||
// IMFStreamSink
|
||||
//
|
||||
|
||||
IFACEMETHODIMP GetMediaSink(__deref_out IMFMediaSink **sink);
|
||||
IFACEMETHODIMP GetIdentifier(__out DWORD *identifier);
|
||||
IFACEMETHODIMP GetMediaTypeHandler(__deref_out IMFMediaTypeHandler **handler);
|
||||
IFACEMETHODIMP ProcessSample(__in_opt IMFSample *sample);
|
||||
IFACEMETHODIMP PlaceMarker(__in MFSTREAMSINK_MARKER_TYPE markerType, __in const PROPVARIANT * markerValue, __in const PROPVARIANT * contextValue);
|
||||
IFACEMETHODIMP Flush();
|
||||
|
||||
//
|
||||
// IMFMediaEventGenerator
|
||||
//
|
||||
|
||||
IFACEMETHODIMP GetEvent(__in DWORD flags, __deref_out IMFMediaEvent **event);
|
||||
IFACEMETHODIMP BeginGetEvent(__in IMFAsyncCallback *callback, __in_opt IUnknown *state);
|
||||
IFACEMETHODIMP EndGetEvent(__in IMFAsyncResult *result, __deref_out IMFMediaEvent **event);
|
||||
IFACEMETHODIMP QueueEvent(__in MediaEventType met, __in REFGUID extendedType, __in HRESULT status, __in_opt const PROPVARIANT *value);
|
||||
|
||||
//
|
||||
// IMFMediaTypeHandler
|
||||
//
|
||||
|
||||
IFACEMETHODIMP IsMediaTypeSupported(__in IMFMediaType *mediaType, __deref_out_opt IMFMediaType **closestMediaType);
|
||||
IFACEMETHODIMP GetMediaTypeCount(__out DWORD *typeCount);
|
||||
IFACEMETHODIMP GetMediaTypeByIndex(__in DWORD index, __deref_out IMFMediaType **mediaType);
|
||||
IFACEMETHODIMP SetCurrentMediaType(__in IMFMediaType *mediaType);
|
||||
IFACEMETHODIMP GetCurrentMediaType(__deref_out_opt IMFMediaType **mediaType);
|
||||
IFACEMETHODIMP GetMajorType(__out GUID *majorType);
|
||||
|
||||
//
|
||||
// Misc
|
||||
//
|
||||
|
||||
void InternalSetCurrentMediaType(__in const MW::ComPtr<IMFMediaType>& mediaType);
|
||||
void RequestSample();
|
||||
void Shutdown();
|
||||
|
||||
private:
|
||||
|
||||
bool _IsMediaTypeSupported(__in const MW::ComPtr<IMFMediaType>& mt) const;
|
||||
void _UpdateMediaType(__in const MW::ComPtr<IMFMediaType>& mt);
|
||||
|
||||
void _VerifyNotShutdown()
|
||||
{
|
||||
if (_shutdown)
|
||||
{
|
||||
CHK(MF_E_SHUTDOWN);
|
||||
}
|
||||
}
|
||||
|
||||
MW::ComPtr<IMFMediaSink> _sink;
|
||||
MW::ComPtr<IMFMediaEventQueue> _eventQueue;
|
||||
MW::ComPtr<IMFMediaType> _curMT;
|
||||
|
||||
MediaSampleHandler^ _sampleHandler;
|
||||
|
||||
GUID _majorType;
|
||||
GUID _subType;
|
||||
unsigned int _width;
|
||||
unsigned int _height;
|
||||
DWORD _id;
|
||||
bool _shutdown;
|
||||
|
||||
MWW::SRWLock _lock;
|
||||
};
|
||||
|
||||
}
|
||||
158
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt_bridge.cpp
vendored
Normal file
158
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt_bridge.cpp
vendored
Normal file
@@ -0,0 +1,158 @@
|
||||
// videoio to XAML bridge for OpenCV
|
||||
|
||||
// Copyright (c) Microsoft Open Technologies, Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// (3 - clause BSD License)
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
|
||||
// the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
|
||||
// following disclaimer.
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
|
||||
// following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or
|
||||
// promote products derived from this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
|
||||
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
// PARTICULAR PURPOSE ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
|
||||
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT LIMITED TO,
|
||||
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING
|
||||
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#include "opencv2\videoio\cap_winrt.hpp"
|
||||
#include "cap_winrt_capture.hpp"
|
||||
#include "cap_winrt_bridge.hpp"
|
||||
#include "cap_winrt_video.hpp"
|
||||
|
||||
using namespace Windows::Foundation;
|
||||
using namespace Windows::Media::Capture;
|
||||
using namespace Windows::Media::MediaProperties;
|
||||
using namespace Windows::Devices::Enumeration;
|
||||
|
||||
using namespace Windows::UI::Xaml::Media::Imaging;
|
||||
using namespace Microsoft::WRL;
|
||||
|
||||
using namespace Platform;
|
||||
using namespace ::Concurrency;
|
||||
|
||||
using namespace ::std;
|
||||
|
||||
/***************************** VideoioBridge class ******************************/
|
||||
|
||||
// non-blocking
|
||||
void VideoioBridge::requestForUIthreadAsync(int action)
|
||||
{
|
||||
reporter.report(action);
|
||||
}
|
||||
|
||||
VideoioBridge& VideoioBridge::getInstance()
|
||||
{
|
||||
static VideoioBridge instance;
|
||||
return instance;
|
||||
}
|
||||
|
||||
void VideoioBridge::swapInputBuffers()
|
||||
{
|
||||
// TODO: already locked, check validity
|
||||
// lock_guard<mutex> lock(inputBufferMutex);
|
||||
swap(backInputPtr, frontInputPtr);
|
||||
//if (currentFrame != frameCounter)
|
||||
//{
|
||||
// currentFrame = frameCounter;
|
||||
// swap(backInputPtr, frontInputPtr);
|
||||
//}
|
||||
}
|
||||
|
||||
void VideoioBridge::swapOutputBuffers()
|
||||
{
|
||||
lock_guard<mutex> lock(outputBufferMutex);
|
||||
swap(frontOutputBuffer, backOutputBuffer);
|
||||
}
|
||||
|
||||
void VideoioBridge::allocateOutputBuffers()
|
||||
{
|
||||
frontOutputBuffer = ref new WriteableBitmap(width, height);
|
||||
backOutputBuffer = ref new WriteableBitmap(width, height);
|
||||
}
|
||||
|
||||
// performed on UI thread
|
||||
void VideoioBridge::allocateBuffers(int width, int height)
|
||||
{
|
||||
// allocate input Mats (bgra8 = CV_8UC4, RGB24 = CV_8UC3)
|
||||
frontInputMat.create(height, width, CV_8UC3);
|
||||
backInputMat.create(height, width, CV_8UC3);
|
||||
|
||||
frontInputPtr = frontInputMat.ptr(0);
|
||||
backInputPtr = backInputMat.ptr(0);
|
||||
|
||||
allocateOutputBuffers();
|
||||
}
|
||||
|
||||
// performed on UI thread
|
||||
bool VideoioBridge::openCamera()
|
||||
{
|
||||
// buffers must alloc'd on UI thread
|
||||
allocateBuffers(width, height);
|
||||
|
||||
// nb. video capture device init must be done on UI thread;
|
||||
if (!Video::getInstance().isStarted())
|
||||
{
|
||||
Video::getInstance().initGrabber(deviceIndex, width, height);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// nb on UI thread
|
||||
void VideoioBridge::updateFrameContainer()
|
||||
{
|
||||
// copy output Mat to WBM
|
||||
Video::getInstance().CopyOutput();
|
||||
|
||||
// set XAML image element with image WBM
|
||||
cvImage->Source = backOutputBuffer;
|
||||
}
|
||||
|
||||
void VideoioBridge::imshow()
|
||||
{
|
||||
swapOutputBuffers();
|
||||
requestForUIthreadAsync(cv::UPDATE_IMAGE_ELEMENT);
|
||||
}
|
||||
|
||||
int VideoioBridge::getDeviceIndex()
|
||||
{
|
||||
return deviceIndex;
|
||||
}
|
||||
|
||||
void VideoioBridge::setDeviceIndex(int index)
|
||||
{
|
||||
deviceIndex = index;
|
||||
}
|
||||
|
||||
int VideoioBridge::getWidth()
|
||||
{
|
||||
return width;
|
||||
}
|
||||
|
||||
int VideoioBridge::getHeight()
|
||||
{
|
||||
return height;
|
||||
}
|
||||
|
||||
void VideoioBridge::setWidth(int _width)
|
||||
{
|
||||
width = _width;
|
||||
}
|
||||
|
||||
void VideoioBridge::setHeight(int _height)
|
||||
{
|
||||
height = _height;
|
||||
}
|
||||
|
||||
// end
|
||||
117
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt_bridge.hpp
vendored
Normal file
117
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt_bridge.hpp
vendored
Normal file
@@ -0,0 +1,117 @@
|
||||
// videoio to XAML bridge for OpenCV
|
||||
|
||||
// Copyright (c) Microsoft Open Technologies, Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// (3 - clause BSD License)
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
|
||||
// the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
|
||||
// following disclaimer.
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
|
||||
// following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or
|
||||
// promote products derived from this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
|
||||
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
// PARTICULAR PURPOSE ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
|
||||
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT LIMITED TO,
|
||||
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING
|
||||
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#pragma once
|
||||
|
||||
// this header is included in the XAML App, so it cannot include any
|
||||
// OpenCV headers, or a static assert will be raised
|
||||
|
||||
#include <ppl.h>
|
||||
#include <ppltasks.h>
|
||||
#include <concrt.h>
|
||||
#include <agile.h>
|
||||
#include <opencv2\core.hpp>
|
||||
|
||||
#include <mutex>
|
||||
#include <memory>
|
||||
#include <atomic>
|
||||
#include <functional>
|
||||
|
||||
|
||||
// Class VideoioBridge (singleton) is needed because the interface for
|
||||
// VideoCapture_WinRT in cap_winrt_capture.hpp is fixed by OpenCV.
|
||||
class VideoioBridge
|
||||
{
|
||||
public:
|
||||
|
||||
static VideoioBridge& getInstance();
|
||||
|
||||
// call after initialization
|
||||
void setReporter(Concurrency::progress_reporter<int> pr) { reporter = pr; }
|
||||
|
||||
// to be called from cvMain via cap_winrt on bg thread - non-blocking (async)
|
||||
void requestForUIthreadAsync(int action);
|
||||
|
||||
// TODO: modify in window.cpp: void cv::imshow( const String& winname, InputArray _img )
|
||||
void imshow(/*cv::InputArray matToShow*/); // shows Mat in the cvImage element
|
||||
void swapInputBuffers();
|
||||
void allocateOutputBuffers();
|
||||
void swapOutputBuffers();
|
||||
void updateFrameContainer();
|
||||
bool openCamera();
|
||||
void allocateBuffers(int width, int height);
|
||||
|
||||
int getDeviceIndex();
|
||||
void setDeviceIndex(int index);
|
||||
int getWidth();
|
||||
void setWidth(int width);
|
||||
int getHeight();
|
||||
void setHeight(int height);
|
||||
|
||||
std::atomic<bool> bIsFrameNew;
|
||||
std::mutex inputBufferMutex; // input is double buffered
|
||||
unsigned char * frontInputPtr; // OpenCV reads this
|
||||
unsigned char * backInputPtr; // Video grabber writes this
|
||||
std::atomic<unsigned long> frameCounter;
|
||||
unsigned long currentFrame;
|
||||
|
||||
std::mutex outputBufferMutex; // output is double buffered
|
||||
Windows::UI::Xaml::Media::Imaging::WriteableBitmap^ frontOutputBuffer; // OpenCV write this
|
||||
Windows::UI::Xaml::Media::Imaging::WriteableBitmap^ backOutputBuffer; // XAML reads this
|
||||
Windows::UI::Xaml::Controls::Image ^cvImage;
|
||||
|
||||
private:
|
||||
|
||||
VideoioBridge() {
|
||||
deviceIndex = 0;
|
||||
width = 640;
|
||||
height = 480;
|
||||
deviceReady = false;
|
||||
bIsFrameNew = false;
|
||||
currentFrame = 0;
|
||||
frameCounter = 0;
|
||||
};
|
||||
|
||||
// singleton
|
||||
VideoioBridge(VideoioBridge const &);
|
||||
void operator=(const VideoioBridge &);
|
||||
|
||||
std::atomic<bool> deviceReady;
|
||||
Concurrency::progress_reporter<int> reporter;
|
||||
|
||||
// Mats are wrapped with singleton class, we do not support more than one
|
||||
// capture device simultaneously with the design at this time
|
||||
//
|
||||
// nb. inputBufferMutex was not able to guarantee that OpenCV Mats were
|
||||
// ready to accept data in the UI thread (memory access exceptions were thrown
|
||||
// even though buffer address was good).
|
||||
// Therefore allocation of Mats is also done on the UI thread before the video
|
||||
// device is initialized.
|
||||
cv::Mat frontInputMat;
|
||||
cv::Mat backInputMat;
|
||||
|
||||
int deviceIndex, width, height;
|
||||
};
|
||||
206
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt_capture.cpp
vendored
Normal file
206
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt_capture.cpp
vendored
Normal file
@@ -0,0 +1,206 @@
|
||||
// Capture support for WinRT
|
||||
|
||||
// Copyright (c) Microsoft Open Technologies, Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// (3 - clause BSD License)
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
|
||||
// the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
|
||||
// following disclaimer.
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
|
||||
// following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or
|
||||
// promote products derived from this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
|
||||
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
// PARTICULAR PURPOSE ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
|
||||
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT LIMITED TO,
|
||||
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING
|
||||
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
#include "precomp.hpp"
|
||||
#include "cap_winrt_capture.hpp"
|
||||
#include "cap_winrt_bridge.hpp"
|
||||
#include "cap_winrt_video.hpp"
|
||||
#include <opencv2\videoio\cap_winrt.hpp>
|
||||
|
||||
using namespace Windows::Foundation;
|
||||
using namespace Windows::Media::Capture;
|
||||
using namespace Windows::Media::MediaProperties;
|
||||
using namespace Windows::Devices::Enumeration;
|
||||
|
||||
using namespace Platform;
|
||||
|
||||
using namespace Windows::UI::Xaml::Media::Imaging;
|
||||
using namespace Microsoft::WRL;
|
||||
|
||||
using namespace ::std;
|
||||
|
||||
namespace cv {
|
||||
|
||||
/******************************* exported API functions **************************************/
|
||||
|
||||
template <typename ...Args>
|
||||
void winrt_startMessageLoop(std::function<void(Args...)>&& callback, Args... args)
|
||||
{
|
||||
auto asyncTask = ::concurrency::create_async([=](::concurrency::progress_reporter<int> reporter)
|
||||
{
|
||||
VideoioBridge::getInstance().setReporter(reporter);
|
||||
|
||||
// frame reading loop
|
||||
callback(args...);
|
||||
});
|
||||
|
||||
asyncTask->Progress = ref new AsyncActionProgressHandler<int>([=](IAsyncActionWithProgress<int>^ act, int progress)
|
||||
{
|
||||
int action = progress;
|
||||
|
||||
// these actions will be processed on the UI thread asynchronously
|
||||
switch (action)
|
||||
{
|
||||
case OPEN_CAMERA:
|
||||
VideoioBridge::getInstance().openCamera();
|
||||
break;
|
||||
case CLOSE_CAMERA:
|
||||
Video::getInstance().closeGrabber();
|
||||
break;
|
||||
case UPDATE_IMAGE_ELEMENT:
|
||||
VideoioBridge::getInstance().updateFrameContainer();
|
||||
break;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
template <typename ...Args>
|
||||
void winrt_startMessageLoop(void callback(Args...), Args... args)
|
||||
{
|
||||
winrt_startMessageLoop(std::function<void(Args...)>(callback), args...);
|
||||
}
|
||||
|
||||
void winrt_onVisibilityChanged(bool visible)
|
||||
{
|
||||
if (visible)
|
||||
{
|
||||
VideoioBridge& bridge = VideoioBridge::getInstance();
|
||||
|
||||
// only start the grabber if the camera was opened in OpenCV
|
||||
if (bridge.backInputPtr != nullptr)
|
||||
{
|
||||
if (Video::getInstance().isStarted()) return;
|
||||
|
||||
int device = bridge.getDeviceIndex();
|
||||
int width = bridge.getWidth();
|
||||
int height = bridge.getHeight();
|
||||
|
||||
Video::getInstance().initGrabber(device, width, height);
|
||||
}
|
||||
} else
|
||||
{
|
||||
//grabberStarted = false;
|
||||
Video::getInstance().closeGrabber();
|
||||
}
|
||||
}
|
||||
|
||||
void winrt_imshow()
|
||||
{
|
||||
VideoioBridge::getInstance().imshow();
|
||||
}
|
||||
|
||||
void winrt_setFrameContainer(::Windows::UI::Xaml::Controls::Image^ image)
|
||||
{
|
||||
VideoioBridge::getInstance().cvImage = image;
|
||||
}
|
||||
|
||||
/********************************* VideoCapture_WinRT class ****************************/
|
||||
|
||||
VideoCapture_WinRT::VideoCapture_WinRT(int device) : started(false)
|
||||
{
|
||||
VideoioBridge::getInstance().setDeviceIndex(device);
|
||||
}
|
||||
|
||||
bool VideoCapture_WinRT::isOpened() const
|
||||
{
|
||||
return true; // started;
|
||||
}
|
||||
|
||||
// grab a frame:
|
||||
// this will NOT block per spec
|
||||
// should be called on the image processing thread, not the UI thread
|
||||
bool VideoCapture_WinRT::grabFrame()
|
||||
{
|
||||
// if device is not started we must return true so retrieveFrame() is called to start device
|
||||
// nb. we cannot start the device here because we do not know the size of the input Mat
|
||||
if (!started) return true;
|
||||
|
||||
if (VideoioBridge::getInstance().bIsFrameNew)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
// nb. if blocking is to be added:
|
||||
// unique_lock<mutex> lock(VideoioBridge::getInstance().frameReadyMutex);
|
||||
// VideoioBridge::getInstance().frameReadyEvent.wait(lock);
|
||||
return false;
|
||||
}
|
||||
|
||||
// should be called on the image processing thread after grabFrame
|
||||
// see VideoCapture::read
|
||||
bool VideoCapture_WinRT::retrieveFrame(int channel, cv::OutputArray outArray)
|
||||
{
|
||||
if (!started) {
|
||||
|
||||
int width, height;
|
||||
width = outArray.size().width;
|
||||
height = outArray.size().height;
|
||||
if (width == 0) width = 640;
|
||||
if (height == 0) height = 480;
|
||||
|
||||
VideoioBridge::getInstance().setWidth(width);
|
||||
VideoioBridge::getInstance().setHeight(height);
|
||||
|
||||
// nb. Mats will be alloc'd on UI thread
|
||||
|
||||
// request device init on UI thread - this does not block, and is async
|
||||
VideoioBridge::getInstance().requestForUIthreadAsync(OPEN_CAMERA);
|
||||
|
||||
started = true;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!started) return false;
|
||||
|
||||
return VideoioBridge::getInstance().bIsFrameNew;
|
||||
}
|
||||
|
||||
|
||||
bool VideoCapture_WinRT::setProperty(int property_id, double value)
|
||||
{
|
||||
switch (property_id)
|
||||
{
|
||||
case CAP_PROP_FRAME_WIDTH:
|
||||
size.width = (int)value;
|
||||
break;
|
||||
case CAP_PROP_FRAME_HEIGHT:
|
||||
size.height = (int)value;
|
||||
break;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
Ptr<IVideoCapture> create_WRT_capture(int device)
|
||||
{
|
||||
return makePtr<VideoCapture_WinRT>(device);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// end
|
||||
70
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt_capture.hpp
vendored
Normal file
70
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt_capture.hpp
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
// Capture support for WinRT
|
||||
|
||||
// Copyright (c) Microsoft Open Technologies, Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// (3 - clause BSD License)
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
|
||||
// the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
|
||||
// following disclaimer.
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
|
||||
// following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or
|
||||
// promote products derived from this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
|
||||
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
// PARTICULAR PURPOSE ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
|
||||
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT LIMITED TO,
|
||||
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING
|
||||
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
#include <mutex>
|
||||
#include <memory>
|
||||
#include <condition_variable>
|
||||
#include <atomic>
|
||||
|
||||
#include <agile.h>
|
||||
|
||||
|
||||
// nb. implemented the newer IVideoCapture C++ interface so that we can work
|
||||
// directly with Mat, not the older C cv interface
|
||||
// (which may have added overhead for IPL file conversion)
|
||||
|
||||
namespace cv {
|
||||
|
||||
class VideoCapture_WinRT : public IVideoCapture
|
||||
{
|
||||
public:
|
||||
VideoCapture_WinRT() : started(false) {}
|
||||
VideoCapture_WinRT(int device);
|
||||
virtual ~VideoCapture_WinRT() {}
|
||||
|
||||
// from base class IVideoCapture
|
||||
virtual double getProperty(int) { return 0; }
|
||||
virtual bool setProperty(int, double);
|
||||
virtual bool grabFrame();
|
||||
virtual bool retrieveFrame(int channel, cv::OutputArray outArray);
|
||||
|
||||
virtual int getCaptureDomain() CV_OVERRIDE { return CAP_WINRT; }
|
||||
|
||||
virtual bool isOpened() const;
|
||||
|
||||
protected:
|
||||
|
||||
bool started;
|
||||
CvSize size;
|
||||
int bytesPerPixel;
|
||||
unsigned long frameCurrent;
|
||||
std::atomic<bool> isFrameNew;
|
||||
};
|
||||
}
|
||||
320
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt_video.cpp
vendored
Normal file
320
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt_video.cpp
vendored
Normal file
@@ -0,0 +1,320 @@
|
||||
// Video support with XAML
|
||||
|
||||
// Copyright (c) Microsoft Open Technologies, Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// (3 - clause BSD License)
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
|
||||
// the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
|
||||
// following disclaimer.
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
|
||||
// following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or
|
||||
// promote products derived from this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
|
||||
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
// PARTICULAR PURPOSE ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
|
||||
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT LIMITED TO,
|
||||
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING
|
||||
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#include "cap_winrt_video.hpp"
|
||||
|
||||
#include <ppl.h>
|
||||
#include <ppltasks.h>
|
||||
#include <concrt.h>
|
||||
#include <agile.h>
|
||||
|
||||
#include <atomic>
|
||||
#include <future>
|
||||
#include <vector>
|
||||
|
||||
|
||||
using namespace ::concurrency;
|
||||
using namespace ::Windows::Foundation;
|
||||
using namespace ::std;
|
||||
|
||||
using namespace Microsoft::WRL;
|
||||
using namespace Windows::Media::Devices;
|
||||
using namespace Windows::Media::MediaProperties;
|
||||
using namespace Windows::Media::Capture;
|
||||
using namespace Windows::UI::Xaml::Media::Imaging;
|
||||
using namespace Windows::Devices::Enumeration;
|
||||
|
||||
#include "cap_winrt/CaptureFrameGrabber.hpp"
|
||||
|
||||
// pull in Media Foundation libs
|
||||
#pragma comment(lib, "mfplat")
|
||||
#pragma comment(lib, "mf")
|
||||
#pragma comment(lib, "mfuuid")
|
||||
|
||||
#if (WINAPI_FAMILY!=WINAPI_FAMILY_PHONE_APP) && !defined(_M_ARM)
|
||||
#pragma comment(lib, "Shlwapi")
|
||||
#endif
|
||||
|
||||
#include "cap_winrt_bridge.hpp"
|
||||
|
||||
Video::Video() {}
|
||||
|
||||
Video &Video::getInstance() {
|
||||
static Video v;
|
||||
return v;
|
||||
}
|
||||
|
||||
bool Video::isStarted() {
|
||||
return bGrabberInited.load();
|
||||
}
|
||||
|
||||
void Video::closeGrabber() {
|
||||
// assigning nullptr causes deref of grabber and thus closes the device
|
||||
m_frameGrabber = nullptr;
|
||||
bGrabberInited = false;
|
||||
bGrabberInitInProgress = false;
|
||||
}
|
||||
|
||||
// non-blocking
|
||||
bool Video::initGrabber(int device, int w, int h) {
|
||||
// already started?
|
||||
if (bGrabberInited || bGrabberInitInProgress) return false;
|
||||
|
||||
width = w;
|
||||
height = h;
|
||||
|
||||
bGrabberInited = false;
|
||||
bGrabberInitInProgress = true;
|
||||
|
||||
m_deviceID = device;
|
||||
|
||||
create_task(DeviceInformation::FindAllAsync(DeviceClass::VideoCapture))
|
||||
.then([this](task<DeviceInformationCollection^> findTask)
|
||||
{
|
||||
m_devices = findTask.get();
|
||||
|
||||
// got selected device?
|
||||
if ((unsigned)m_deviceID >= m_devices.Get()->Size)
|
||||
{
|
||||
OutputDebugStringA("Video::initGrabber - no video device found\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
auto devInfo = m_devices.Get()->GetAt(m_deviceID);
|
||||
|
||||
auto settings = ref new MediaCaptureInitializationSettings();
|
||||
settings->StreamingCaptureMode = StreamingCaptureMode::Video; // Video-only capture
|
||||
settings->VideoDeviceId = devInfo->Id;
|
||||
|
||||
auto location = devInfo->EnclosureLocation;
|
||||
bFlipImageX = true;
|
||||
if (location != nullptr && location->Panel == Windows::Devices::Enumeration::Panel::Back)
|
||||
{
|
||||
bFlipImageX = false;
|
||||
}
|
||||
|
||||
m_capture = ref new MediaCapture();
|
||||
create_task(m_capture->InitializeAsync(settings)).then([this](){
|
||||
|
||||
auto props = safe_cast<VideoEncodingProperties^>(m_capture->VideoDeviceController->GetMediaStreamProperties(MediaStreamType::VideoPreview));
|
||||
|
||||
// for 24 bpp
|
||||
props->Subtype = MediaEncodingSubtypes::Rgb24; bytesPerPixel = 3;
|
||||
|
||||
// XAML & WBM use BGRA8, so it would look like
|
||||
// props->Subtype = MediaEncodingSubtypes::Bgra8; bytesPerPixel = 4;
|
||||
|
||||
props->Width = width;
|
||||
props->Height = height;
|
||||
|
||||
return ::Media::CaptureFrameGrabber::CreateAsync(m_capture.Get(), props);
|
||||
|
||||
}).then([this](::Media::CaptureFrameGrabber^ frameGrabber)
|
||||
{
|
||||
m_frameGrabber = frameGrabber;
|
||||
bGrabberInited = true;
|
||||
bGrabberInitInProgress = false;
|
||||
//ready = true;
|
||||
_GrabFrameAsync(frameGrabber);
|
||||
});
|
||||
|
||||
return true;
|
||||
});
|
||||
|
||||
// nb. cannot block here - this will lock the UI thread:
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void Video::_GrabFrameAsync(::Media::CaptureFrameGrabber^ frameGrabber) {
|
||||
// use rgb24 layout
|
||||
create_task(frameGrabber->GetFrameAsync()).then([this, frameGrabber](const ComPtr<IMF2DBuffer2>& buffer)
|
||||
{
|
||||
// do the RGB swizzle while copying the pixels from the IMF2DBuffer2
|
||||
BYTE *pbScanline;
|
||||
LONG plPitch;
|
||||
unsigned int colBytes = width * bytesPerPixel;
|
||||
CHK(buffer->Lock2D(&pbScanline, &plPitch));
|
||||
|
||||
// flip
|
||||
if (bFlipImageX)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(VideoioBridge::getInstance().inputBufferMutex);
|
||||
|
||||
// ptr to input Mat data array
|
||||
auto buf = VideoioBridge::getInstance().backInputPtr;
|
||||
|
||||
for (unsigned int row = 0; row < height; row++)
|
||||
{
|
||||
unsigned int i = 0;
|
||||
unsigned int j = colBytes - 1;
|
||||
|
||||
while (i < colBytes)
|
||||
{
|
||||
// reverse the scan line
|
||||
// as a side effect this also swizzles R and B channels
|
||||
buf[j--] = pbScanline[i++];
|
||||
buf[j--] = pbScanline[i++];
|
||||
buf[j--] = pbScanline[i++];
|
||||
}
|
||||
pbScanline += plPitch;
|
||||
buf += colBytes;
|
||||
}
|
||||
VideoioBridge::getInstance().bIsFrameNew = true;
|
||||
} else
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(VideoioBridge::getInstance().inputBufferMutex);
|
||||
|
||||
// ptr to input Mat data array
|
||||
auto buf = VideoioBridge::getInstance().backInputPtr;
|
||||
|
||||
for (unsigned int row = 0; row < height; row++)
|
||||
{
|
||||
// used for Bgr8:
|
||||
//for (unsigned int i = 0; i < colBytes; i++ )
|
||||
// buf[i] = pbScanline[i];
|
||||
|
||||
// used for RGB24:
|
||||
for (unsigned int i = 0; i < colBytes; i += bytesPerPixel)
|
||||
{
|
||||
// swizzle the R and B values (BGR to RGB)
|
||||
buf[i] = pbScanline[i + 2];
|
||||
buf[i + 1] = pbScanline[i + 1];
|
||||
buf[i + 2] = pbScanline[i];
|
||||
|
||||
// no swizzle
|
||||
//buf[i] = pbScanline[i];
|
||||
//buf[i + 1] = pbScanline[i + 1];
|
||||
//buf[i + 2] = pbScanline[i + 2];
|
||||
}
|
||||
|
||||
pbScanline += plPitch;
|
||||
buf += colBytes;
|
||||
}
|
||||
VideoioBridge::getInstance().bIsFrameNew = true;
|
||||
}
|
||||
CHK(buffer->Unlock2D());
|
||||
|
||||
VideoioBridge::getInstance().frameCounter++;
|
||||
|
||||
if (bGrabberInited)
|
||||
{
|
||||
_GrabFrameAsync(frameGrabber);
|
||||
}
|
||||
}, task_continuation_context::use_current());
|
||||
}
|
||||
|
||||
|
||||
// copy from input Mat to output WBM
|
||||
// must be on UI thread
|
||||
void Video::CopyOutput() {
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(VideoioBridge::getInstance().outputBufferMutex);
|
||||
|
||||
auto inAr = VideoioBridge::getInstance().frontInputPtr;
|
||||
auto outAr = GetData(VideoioBridge::getInstance().frontOutputBuffer->PixelBuffer);
|
||||
|
||||
const unsigned int bytesPerPixel = 3;
|
||||
auto pbScanline = inAr;
|
||||
auto plPitch = width * bytesPerPixel;
|
||||
|
||||
auto buf = outAr;
|
||||
unsigned int colBytes = width * 4;
|
||||
|
||||
// copy RGB24 to bgra8
|
||||
for (unsigned int row = 0; row < height; row++)
|
||||
{
|
||||
// used for Bgr8:
|
||||
// nb. no alpha
|
||||
// for (unsigned int i = 0; i < colBytes; i++ ) buf[i] = pbScanline[i];
|
||||
|
||||
// used for RGB24:
|
||||
// nb. alpha is set to full opaque
|
||||
for (unsigned int i = 0, j = 0; i < plPitch; i += bytesPerPixel, j += 4)
|
||||
{
|
||||
// swizzle the R and B values (RGB24 to Bgr8)
|
||||
buf[j] = pbScanline[i + 2];
|
||||
buf[j + 1] = pbScanline[i + 1];
|
||||
buf[j + 2] = pbScanline[i];
|
||||
buf[j + 3] = 0xff;
|
||||
|
||||
// if no swizzle is desired:
|
||||
//buf[i] = pbScanline[i];
|
||||
//buf[i + 1] = pbScanline[i + 1];
|
||||
//buf[i + 2] = pbScanline[i + 2];
|
||||
//buf[i + 3] = 0xff;
|
||||
}
|
||||
|
||||
pbScanline += plPitch;
|
||||
buf += colBytes;
|
||||
}
|
||||
VideoioBridge::getInstance().frontOutputBuffer->PixelBuffer->Length = width * height * 4;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool Video::listDevicesTask() {
|
||||
std::atomic<bool> ready(false);
|
||||
|
||||
auto settings = ref new MediaCaptureInitializationSettings();
|
||||
|
||||
create_task(DeviceInformation::FindAllAsync(DeviceClass::VideoCapture))
|
||||
.then([this, &ready](task<DeviceInformationCollection^> findTask)
|
||||
{
|
||||
m_devices = findTask.get();
|
||||
|
||||
// TODO: collect device data
|
||||
// for (size_t i = 0; i < m_devices->Size; i++)
|
||||
// {
|
||||
// .. deviceInfo;
|
||||
// auto d = m_devices->GetAt(i);
|
||||
// deviceInfo.bAvailable = true;
|
||||
// deviceInfo.deviceName = PlatformStringToString(d->Name);
|
||||
// deviceInfo.hardwareName = deviceInfo.deviceName;
|
||||
// }
|
||||
|
||||
ready = true;
|
||||
});
|
||||
|
||||
// wait for async task to complete
|
||||
int count = 0;
|
||||
while (!ready)
|
||||
{
|
||||
count++;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
bool Video::listDevices() {
|
||||
// synchronous version of listing video devices on WinRT
|
||||
std::future<bool> result = std::async(std::launch::async, &Video::listDevicesTask, this);
|
||||
return result.get();
|
||||
}
|
||||
|
||||
// end
|
||||
74
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt_video.hpp
vendored
Normal file
74
3rdparty/opencv-4.5.4/modules/videoio/src/cap_winrt_video.hpp
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
// Video support with XAML
|
||||
|
||||
// Copyright (c) Microsoft Open Technologies, Inc.
|
||||
// All rights reserved.
|
||||
//
|
||||
// (3 - clause BSD License)
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
|
||||
// the following conditions are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
|
||||
// following disclaimer.
|
||||
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
|
||||
// following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or
|
||||
// promote products derived from this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
|
||||
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
// PARTICULAR PURPOSE ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
|
||||
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT LIMITED TO,
|
||||
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING
|
||||
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "cap_winrt/CaptureFrameGrabber.hpp"
|
||||
|
||||
#include <mutex>
|
||||
#include <memory>
|
||||
|
||||
class Video {
|
||||
public:
|
||||
|
||||
// non-blocking
|
||||
bool initGrabber(int device, int w, int h);
|
||||
void closeGrabber();
|
||||
bool isStarted();
|
||||
|
||||
// singleton
|
||||
static Video &getInstance();
|
||||
|
||||
void CopyOutput();
|
||||
|
||||
private:
|
||||
// singleton
|
||||
Video();
|
||||
|
||||
void _GrabFrameAsync(::Media::CaptureFrameGrabber^ frameGrabber);
|
||||
|
||||
bool listDevices();
|
||||
|
||||
Platform::Agile<Windows::Media::Capture::MediaCapture> m_capture;
|
||||
Platform::Agile<Windows::Devices::Enumeration::DeviceInformationCollection> m_devices;
|
||||
|
||||
::Media::CaptureFrameGrabber^ m_frameGrabber;
|
||||
|
||||
bool listDevicesTask();
|
||||
|
||||
bool bChooseDevice;
|
||||
bool bVerbose;
|
||||
bool bFlipImageX;
|
||||
//std::atomic<bool> bGrabberInited;
|
||||
int m_deviceID;
|
||||
int attemptFramerate;
|
||||
std::atomic<bool> bIsFrameNew;
|
||||
std::atomic<bool> bGrabberInited;
|
||||
std::atomic<bool> bGrabberInitInProgress;
|
||||
unsigned int width, height;
|
||||
int bytesPerPixel;
|
||||
|
||||
};
|
||||
1794
3rdparty/opencv-4.5.4/modules/videoio/src/cap_ximea.cpp
vendored
Normal file
1794
3rdparty/opencv-4.5.4/modules/videoio/src/cap_ximea.cpp
vendored
Normal file
File diff suppressed because it is too large
Load Diff
355
3rdparty/opencv-4.5.4/modules/videoio/src/cap_xine.cpp
vendored
Normal file
355
3rdparty/opencv-4.5.4/modules/videoio/src/cap_xine.cpp
vendored
Normal file
@@ -0,0 +1,355 @@
|
||||
/*M//////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// Intel License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000, Intel Corporation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
// Authors: Konstantin Dols <dols@ient.rwth-aachen.de>
|
||||
// Mark Asbach <asbach@ient.rwth-aachen.de>
|
||||
//
|
||||
// Institute of Communications Engineering
|
||||
// RWTH Aachen University
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
// required to enable some functions used here...
|
||||
#define XINE_ENABLE_EXPERIMENTAL_FEATURES
|
||||
#include <xine.h>
|
||||
#include <xine/xineutils.h>
|
||||
|
||||
using namespace cv;
|
||||
|
||||
class XINECapture : public IVideoCapture
|
||||
{
|
||||
// method call table
|
||||
xine_t *xine;
|
||||
xine_stream_t *stream;
|
||||
xine_video_port_t *vo_port;
|
||||
xine_video_frame_t xine_frame;
|
||||
Size size;
|
||||
int frame_number;
|
||||
double frame_rate; // fps
|
||||
double frame_duration; // ms
|
||||
bool seekable;
|
||||
|
||||
public:
|
||||
XINECapture()
|
||||
: xine(0), stream(0), vo_port(0), frame_number(-1), frame_rate(0.), frame_duration(0.),
|
||||
seekable(false)
|
||||
{
|
||||
xine_video_frame_t z = {};
|
||||
xine_frame = z;
|
||||
}
|
||||
|
||||
~XINECapture() { close(); }
|
||||
|
||||
bool isOpened() const CV_OVERRIDE { return xine && stream; }
|
||||
|
||||
int getCaptureDomain() CV_OVERRIDE { return CAP_XINE; }
|
||||
|
||||
void close()
|
||||
{
|
||||
if (vo_port && xine_frame.data)
|
||||
{
|
||||
xine_free_video_frame(vo_port, &xine_frame);
|
||||
}
|
||||
if (stream)
|
||||
{
|
||||
xine_close(stream);
|
||||
stream = 0;
|
||||
}
|
||||
if (vo_port)
|
||||
{
|
||||
xine_close_video_driver(xine, vo_port);
|
||||
vo_port = 0;
|
||||
}
|
||||
if (xine)
|
||||
{
|
||||
xine_exit(xine);
|
||||
xine = 0;
|
||||
}
|
||||
}
|
||||
|
||||
bool open(const char *filename)
|
||||
{
|
||||
CV_Assert_N(!xine, !stream, !vo_port);
|
||||
char configfile[2048] = {0};
|
||||
|
||||
xine = xine_new();
|
||||
sprintf(configfile, "%s%s", xine_get_homedir(), "/.xine/config");
|
||||
xine_config_load(xine, configfile);
|
||||
xine_init(xine);
|
||||
xine_engine_set_param(xine, 0, 0);
|
||||
|
||||
vo_port = xine_new_framegrab_video_port(xine);
|
||||
if (!vo_port)
|
||||
return false;
|
||||
|
||||
stream = xine_stream_new(xine, NULL, vo_port);
|
||||
if (!xine_open(stream, filename))
|
||||
return false;
|
||||
|
||||
// reset stream...
|
||||
if (!xine_play(stream, 0, 0))
|
||||
return false;
|
||||
|
||||
// initialize some internals...
|
||||
frame_number = 0;
|
||||
|
||||
|
||||
if ( !xine_get_next_video_frame( vo_port, &xine_frame ) )
|
||||
return false;
|
||||
|
||||
size = Size( xine_frame.width, xine_frame.height );
|
||||
|
||||
xine_free_video_frame( vo_port, &xine_frame );
|
||||
xine_frame.data = 0;
|
||||
|
||||
{
|
||||
xine_video_frame_t tmp;
|
||||
if (!xine_play( stream, 0, 300 )) /* 300msec */
|
||||
return false;
|
||||
if (!xine_get_next_video_frame( vo_port, &tmp ))
|
||||
return false;
|
||||
seekable = ( tmp.frame_number != 0 );
|
||||
xine_free_video_frame( vo_port, &tmp );
|
||||
if (!xine_play( stream, 0, 0 ))
|
||||
return false;
|
||||
}
|
||||
|
||||
frame_duration = xine_get_stream_info( stream, XINE_STREAM_INFO_FRAME_DURATION ) / 90.;
|
||||
frame_rate = frame_duration > 0 ? 1000 / frame_duration : 0.;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool grabFrame() CV_OVERRIDE
|
||||
{
|
||||
CV_Assert(vo_port);
|
||||
bool res = xine_get_next_video_frame(vo_port, &xine_frame);
|
||||
if (res)
|
||||
frame_number++;
|
||||
return res;
|
||||
}
|
||||
|
||||
bool retrieveFrame(int, OutputArray out) CV_OVERRIDE
|
||||
{
|
||||
CV_Assert(stream);
|
||||
CV_Assert(vo_port);
|
||||
|
||||
if (xine_frame.data == 0)
|
||||
return false;
|
||||
|
||||
bool res = false;
|
||||
Mat frame_bgr;
|
||||
|
||||
switch (xine_frame.colorspace)
|
||||
{
|
||||
case XINE_IMGFMT_YV12: // actual format seems to be I420 (or IYUV)
|
||||
{
|
||||
Mat frame(Size(xine_frame.width, xine_frame.height * 3 / 2), CV_8UC1, xine_frame.data);
|
||||
cv::cvtColor(frame, out, cv::COLOR_YUV2BGR_I420);
|
||||
res = true;
|
||||
}
|
||||
break;
|
||||
|
||||
case XINE_IMGFMT_YUY2:
|
||||
{
|
||||
Mat frame(Size(xine_frame.width, xine_frame.height), CV_8UC2, xine_frame.data);
|
||||
cv::cvtColor(frame, out, cv::COLOR_YUV2BGR_YUY2);
|
||||
res = true;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
// always release last xine_frame, not needed anymore
|
||||
xine_free_video_frame(vo_port, &xine_frame);
|
||||
xine_frame.data = 0;
|
||||
return res;
|
||||
}
|
||||
|
||||
double getProperty(int property_id) const CV_OVERRIDE
|
||||
{
|
||||
CV_Assert_N(xine, vo_port, stream);
|
||||
int pos_t, pos_l, length;
|
||||
bool res = (bool)xine_get_pos_length(stream, &pos_l, &pos_t, &length);
|
||||
|
||||
switch (property_id)
|
||||
{
|
||||
case CV_CAP_PROP_POS_MSEC: return res ? pos_t : 0;
|
||||
case CV_CAP_PROP_POS_FRAMES: return frame_number;
|
||||
case CV_CAP_PROP_POS_AVI_RATIO: return length && res ? pos_l / 65535.0 : 0.0;
|
||||
case CV_CAP_PROP_FRAME_WIDTH: return size.width;
|
||||
case CV_CAP_PROP_FRAME_HEIGHT: return size.height;
|
||||
case CV_CAP_PROP_FPS: return frame_rate;
|
||||
case CV_CAP_PROP_FOURCC: return (double)xine_get_stream_info(stream, XINE_STREAM_INFO_VIDEO_FOURCC);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool setProperty(int property_id, double value) CV_OVERRIDE
|
||||
{
|
||||
CV_Assert(stream);
|
||||
CV_Assert(vo_port);
|
||||
switch (property_id)
|
||||
{
|
||||
case CV_CAP_PROP_POS_MSEC: return seekTime((int)value);
|
||||
case CV_CAP_PROP_POS_FRAMES: return seekFrame((int)value);
|
||||
case CV_CAP_PROP_POS_AVI_RATIO: return seekRatio(value);
|
||||
default: return false;
|
||||
}
|
||||
}
|
||||
|
||||
protected:
|
||||
bool oldSeekFrame(int f)
|
||||
{
|
||||
CV_Assert_N(xine, vo_port, stream);
|
||||
// no need to seek if we are already there...
|
||||
if (f == frame_number)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
else if (f > frame_number)
|
||||
{
|
||||
// if the requested position is behind out actual position,
|
||||
// we just need to read the remaining amount of frames until we are there.
|
||||
for (; frame_number < f; frame_number++)
|
||||
{
|
||||
// un-increment framenumber grabbing failed
|
||||
if (!xine_get_next_video_frame(vo_port, &xine_frame))
|
||||
{
|
||||
frame_number--;
|
||||
break;
|
||||
}
|
||||
else
|
||||
{
|
||||
xine_free_video_frame(vo_port, &xine_frame);
|
||||
}
|
||||
}
|
||||
}
|
||||
else // f < frame_number
|
||||
{
|
||||
// otherwise we need to reset the stream and
|
||||
// start reading frames from the beginning.
|
||||
// reset stream, should also work with non-seekable input
|
||||
xine_play(stream, 0, 0);
|
||||
// read frames until we are at the requested frame
|
||||
for (frame_number = 0; frame_number < f; frame_number++)
|
||||
{
|
||||
// un-increment last framenumber if grabbing failed
|
||||
if (!xine_get_next_video_frame(vo_port, &xine_frame))
|
||||
{
|
||||
frame_number--;
|
||||
break;
|
||||
}
|
||||
else
|
||||
{
|
||||
xine_free_video_frame(vo_port, &xine_frame);
|
||||
}
|
||||
}
|
||||
}
|
||||
return f == frame_number;
|
||||
}
|
||||
|
||||
bool seekFrame(int f)
|
||||
{
|
||||
CV_Assert_N(xine, vo_port, stream);
|
||||
if (seekable)
|
||||
{
|
||||
int new_time = (int)((f + 1) * (float)frame_duration);
|
||||
if (xine_play(stream, 0, new_time))
|
||||
{
|
||||
frame_number = f;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
return oldSeekFrame(f);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool seekTime(int t)
|
||||
{
|
||||
CV_Assert_N(xine, vo_port, stream);
|
||||
if (seekable)
|
||||
{
|
||||
if (xine_play(stream, 0, t))
|
||||
{
|
||||
frame_number = (int)((double)t * frame_rate / 1000);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
int new_frame = (int)((double)t * frame_rate / 1000);
|
||||
return oldSeekFrame(new_frame);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool seekRatio(double ratio)
|
||||
{
|
||||
CV_Assert_N(xine, vo_port, stream);
|
||||
if (ratio > 1 || ratio < 0)
|
||||
return false;
|
||||
if (seekable)
|
||||
{
|
||||
// TODO: FIX IT, DOESN'T WORK PROPERLY, YET...!
|
||||
int pos_t, pos_l, length;
|
||||
bool res = (bool)xine_get_pos_length(stream, &pos_l, &pos_t, &length);
|
||||
if (res && xine_play(stream, (int)(ratio * (double)length), 0))
|
||||
{
|
||||
frame_number = (int)(ratio * length / frame_duration);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
Ptr<IVideoCapture> cv::createXINECapture(const std::string &filename)
|
||||
{
|
||||
Ptr<XINECapture> res = makePtr<XINECapture>();
|
||||
if (res && res->open(filename.c_str()))
|
||||
return res;
|
||||
return Ptr<IVideoCapture>();
|
||||
}
|
||||
1024
3rdparty/opencv-4.5.4/modules/videoio/src/container_avi.cpp
vendored
Normal file
1024
3rdparty/opencv-4.5.4/modules/videoio/src/container_avi.cpp
vendored
Normal file
File diff suppressed because it is too large
Load Diff
300
3rdparty/opencv-4.5.4/modules/videoio/src/ffmpeg_codecs.hpp
vendored
Normal file
300
3rdparty/opencv-4.5.4/modules/videoio/src/ffmpeg_codecs.hpp
vendored
Normal file
@@ -0,0 +1,300 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#if !defined(_WIN32) || defined(__MINGW32__)
|
||||
// some versions of FFMPEG assume a C99 compiler, and don't define INT64_C
|
||||
#include <stdint.h>
|
||||
|
||||
// some versions of FFMPEG assume a C99 compiler, and don't define INT64_C
|
||||
#ifndef INT64_C
|
||||
#define INT64_C(c) (c##LL)
|
||||
#endif
|
||||
|
||||
#ifndef UINT64_C
|
||||
#define UINT64_C(c) (c##ULL)
|
||||
#endif
|
||||
|
||||
#include <errno.h>
|
||||
#endif
|
||||
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef MKTAG
|
||||
#define MKTAG(a,b,c,d) (a | (b << 8) | (c << 16) | (d << 24))
|
||||
#endif
|
||||
|
||||
// required to look up the correct codec ID depending on the FOURCC code,
|
||||
// this is just a snipped from the file riff.c from ffmpeg/libavformat
|
||||
typedef struct AVCodecTag {
|
||||
int id;
|
||||
unsigned int tag;
|
||||
} AVCodecTag;
|
||||
|
||||
#if (LIBAVCODEC_VERSION_INT <= AV_VERSION_INT(54, 51, 100))
|
||||
#define AV_CODEC_ID_H264 CODEC_ID_H264
|
||||
#define AV_CODEC_ID_H263 CODEC_ID_H263
|
||||
#define AV_CODEC_ID_H263P CODEC_ID_H263P
|
||||
#define AV_CODEC_ID_H263I CODEC_ID_H263I
|
||||
#define AV_CODEC_ID_H261 CODEC_ID_H261
|
||||
#define AV_CODEC_ID_MPEG4 CODEC_ID_MPEG4
|
||||
#define AV_CODEC_ID_MSMPEG4V3 CODEC_ID_MSMPEG4V3
|
||||
#define AV_CODEC_ID_MSMPEG4V2 CODEC_ID_MSMPEG4V2
|
||||
#define AV_CODEC_ID_MSMPEG4V1 CODEC_ID_MSMPEG4V1
|
||||
#define AV_CODEC_ID_WMV1 CODEC_ID_WMV1
|
||||
#define AV_CODEC_ID_WMV2 CODEC_ID_WMV1
|
||||
#define AV_CODEC_ID_DVVIDEO CODEC_ID_DVVIDEO
|
||||
#define AV_CODEC_ID_MPEG1VIDEO CODEC_ID_MPEG1VIDEO
|
||||
#define AV_CODEC_ID_MPEG2VIDEO CODEC_ID_MPEG2VIDEO
|
||||
#define AV_CODEC_ID_MJPEG CODEC_ID_MJPEG
|
||||
#define AV_CODEC_ID_LJPEG CODEC_ID_LJPEG
|
||||
#define AV_CODEC_ID_HUFFYUV CODEC_ID_HUFFYUV
|
||||
#define AV_CODEC_ID_FFVHUFF CODEC_ID_FFVHUFF
|
||||
#define AV_CODEC_ID_CYUV CODEC_ID_CYUV
|
||||
#define AV_CODEC_ID_RAWVIDEO CODEC_ID_RAWVIDEO
|
||||
#define AV_CODEC_ID_INDEO3 CODEC_ID_INDEO3
|
||||
#define AV_CODEC_ID_VP3 CODEC_ID_VP3
|
||||
#define AV_CODEC_ID_ASV1 CODEC_ID_ASV1
|
||||
#define AV_CODEC_ID_ASV2 CODEC_ID_ASV2
|
||||
#define AV_CODEC_ID_VCR1 CODEC_ID_VCR1
|
||||
#define AV_CODEC_ID_FFV1 CODEC_ID_FFV1
|
||||
#define AV_CODEC_ID_XAN_WC4 CODEC_ID_XAN_WC4
|
||||
#define AV_CODEC_ID_MSRLE CODEC_ID_MSRLE
|
||||
#define AV_CODEC_ID_MSVIDEO1 CODEC_ID_MSVIDEO1
|
||||
#define AV_CODEC_ID_CINEPAK CODEC_ID_CINEPAK
|
||||
#define AV_CODEC_ID_TRUEMOTION1 CODEC_ID_TRUEMOTION1
|
||||
#define AV_CODEC_ID_MSZH CODEC_ID_MSZH
|
||||
#define AV_CODEC_ID_ZLIB CODEC_ID_ZLIB
|
||||
#define AV_CODEC_ID_SNOW CODEC_ID_SNOW
|
||||
#define AV_CODEC_ID_4XM CODEC_ID_4XM
|
||||
#define AV_CODEC_ID_FLV1 CODEC_ID_FLV1
|
||||
#define AV_CODEC_ID_SVQ1 CODEC_ID_SVQ1
|
||||
#define AV_CODEC_ID_TSCC CODEC_ID_TSCC
|
||||
#define AV_CODEC_ID_ULTI CODEC_ID_ULTI
|
||||
#define AV_CODEC_ID_VIXL CODEC_ID_VIXL
|
||||
#define AV_CODEC_ID_QPEG CODEC_ID_QPEG
|
||||
#define AV_CODEC_ID_WMV3 CODEC_ID_WMV3
|
||||
#define AV_CODEC_ID_LOCO CODEC_ID_LOCO
|
||||
#define AV_CODEC_ID_THEORA CODEC_ID_THEORA
|
||||
#define AV_CODEC_ID_WNV1 CODEC_ID_WNV1
|
||||
#define AV_CODEC_ID_AASC CODEC_ID_AASC
|
||||
#define AV_CODEC_ID_INDEO2 CODEC_ID_INDEO2
|
||||
#define AV_CODEC_ID_FRAPS CODEC_ID_FRAPS
|
||||
#define AV_CODEC_ID_TRUEMOTION2 CODEC_ID_TRUEMOTION2
|
||||
#define AV_CODEC_ID_FLASHSV CODEC_ID_FLASHSV
|
||||
#define AV_CODEC_ID_JPEGLS CODEC_ID_JPEGLS
|
||||
#define AV_CODEC_ID_VC1 CODEC_ID_VC1
|
||||
#define AV_CODEC_ID_CSCD CODEC_ID_CSCD
|
||||
#define AV_CODEC_ID_ZMBV CODEC_ID_ZMBV
|
||||
#define AV_CODEC_ID_KMVC CODEC_ID_KMVC
|
||||
#define AV_CODEC_ID_VP5 CODEC_ID_VP5
|
||||
#define AV_CODEC_ID_VP6 CODEC_ID_VP6
|
||||
#define AV_CODEC_ID_VP6F CODEC_ID_VP6F
|
||||
#define AV_CODEC_ID_JPEG2000 CODEC_ID_JPEG2000
|
||||
#define AV_CODEC_ID_VMNC CODEC_ID_VMNC
|
||||
#define AV_CODEC_ID_TARGA CODEC_ID_TARGA
|
||||
#define AV_CODEC_ID_NONE CODEC_ID_NONE
|
||||
#endif
|
||||
|
||||
const AVCodecTag codec_bmp_tags[] = {
|
||||
{ AV_CODEC_ID_H264, MKTAG('H', '2', '6', '4') },
|
||||
{ AV_CODEC_ID_H264, MKTAG('h', '2', '6', '4') },
|
||||
{ AV_CODEC_ID_H264, MKTAG('X', '2', '6', '4') },
|
||||
{ AV_CODEC_ID_H264, MKTAG('x', '2', '6', '4') },
|
||||
{ AV_CODEC_ID_H264, MKTAG('a', 'v', 'c', '1') },
|
||||
{ AV_CODEC_ID_H264, MKTAG('V', 'S', 'S', 'H') },
|
||||
|
||||
{ AV_CODEC_ID_H263, MKTAG('H', '2', '6', '3') },
|
||||
{ AV_CODEC_ID_H263P, MKTAG('H', '2', '6', '3') },
|
||||
{ AV_CODEC_ID_H263I, MKTAG('I', '2', '6', '3') }, /* intel h263 */
|
||||
{ AV_CODEC_ID_H261, MKTAG('H', '2', '6', '1') },
|
||||
|
||||
/* added based on MPlayer */
|
||||
{ AV_CODEC_ID_H263P, MKTAG('U', '2', '6', '3') },
|
||||
{ AV_CODEC_ID_H263P, MKTAG('v', 'i', 'v', '1') },
|
||||
|
||||
{ AV_CODEC_ID_MPEG4, MKTAG('F', 'M', 'P', '4') },
|
||||
{ AV_CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', 'X') },
|
||||
{ AV_CODEC_ID_MPEG4, MKTAG('D', 'X', '5', '0') },
|
||||
{ AV_CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D') },
|
||||
{ AV_CODEC_ID_MPEG4, MKTAG('M', 'P', '4', 'S') },
|
||||
{ AV_CODEC_ID_MPEG4, MKTAG('M', '4', 'S', '2') },
|
||||
{ AV_CODEC_ID_MPEG4, MKTAG(0x04, 0, 0, 0) }, /* some broken avi use this */
|
||||
|
||||
/* added based on MPlayer */
|
||||
{ AV_CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', '1') },
|
||||
{ AV_CODEC_ID_MPEG4, MKTAG('B', 'L', 'Z', '0') },
|
||||
{ AV_CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') },
|
||||
{ AV_CODEC_ID_MPEG4, MKTAG('U', 'M', 'P', '4') },
|
||||
{ AV_CODEC_ID_MPEG4, MKTAG('W', 'V', '1', 'F') },
|
||||
{ AV_CODEC_ID_MPEG4, MKTAG('S', 'E', 'D', 'G') },
|
||||
|
||||
{ AV_CODEC_ID_MPEG4, MKTAG('R', 'M', 'P', '4') },
|
||||
|
||||
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '3') }, /* default signature when using MSMPEG4 */
|
||||
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', '4', '3') },
|
||||
|
||||
/* added based on MPlayer */
|
||||
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', 'G', '3') },
|
||||
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '5') },
|
||||
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '6') },
|
||||
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '4') },
|
||||
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('A', 'P', '4', '1') },
|
||||
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('C', 'O', 'L', '1') },
|
||||
{ AV_CODEC_ID_MSMPEG4V3, MKTAG('C', 'O', 'L', '0') },
|
||||
|
||||
{ AV_CODEC_ID_MSMPEG4V2, MKTAG('M', 'P', '4', '2') },
|
||||
|
||||
/* added based on MPlayer */
|
||||
{ AV_CODEC_ID_MSMPEG4V2, MKTAG('D', 'I', 'V', '2') },
|
||||
|
||||
{ AV_CODEC_ID_MSMPEG4V1, MKTAG('M', 'P', 'G', '4') },
|
||||
|
||||
{ AV_CODEC_ID_WMV1, MKTAG('W', 'M', 'V', '1') },
|
||||
|
||||
/* added based on MPlayer */
|
||||
{ AV_CODEC_ID_WMV2, MKTAG('W', 'M', 'V', '2') },
|
||||
{ AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 's', 'd') },
|
||||
{ AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', 'd') },
|
||||
{ AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', 's', 'l') },
|
||||
{ AV_CODEC_ID_DVVIDEO, MKTAG('d', 'v', '2', '5') },
|
||||
{ AV_CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'g', '1') },
|
||||
{ AV_CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'g', '2') },
|
||||
{ AV_CODEC_ID_MPEG2VIDEO, MKTAG('m', 'p', 'g', '2') },
|
||||
{ AV_CODEC_ID_MPEG2VIDEO, MKTAG('M', 'P', 'E', 'G') },
|
||||
{ AV_CODEC_ID_MPEG1VIDEO, MKTAG('P', 'I', 'M', '1') },
|
||||
{ AV_CODEC_ID_MPEG1VIDEO, MKTAG('V', 'C', 'R', '2') },
|
||||
{ AV_CODEC_ID_MPEG1VIDEO, 0x10000001 },
|
||||
{ AV_CODEC_ID_MPEG2VIDEO, 0x10000002 },
|
||||
{ AV_CODEC_ID_MPEG2VIDEO, MKTAG('D', 'V', 'R', ' ') },
|
||||
{ AV_CODEC_ID_MPEG2VIDEO, MKTAG('M', 'M', 'E', 'S') },
|
||||
{ AV_CODEC_ID_MJPEG, MKTAG('M', 'J', 'P', 'G') },
|
||||
{ AV_CODEC_ID_MJPEG, MKTAG('L', 'J', 'P', 'G') },
|
||||
{ AV_CODEC_ID_LJPEG, MKTAG('L', 'J', 'P', 'G') },
|
||||
{ AV_CODEC_ID_MJPEG, MKTAG('J', 'P', 'G', 'L') }, /* Pegasus lossless JPEG */
|
||||
{ AV_CODEC_ID_MJPEG, MKTAG('M', 'J', 'L', 'S') }, /* JPEG-LS custom FOURCC for avi - decoder */
|
||||
{ AV_CODEC_ID_MJPEG, MKTAG('j', 'p', 'e', 'g') },
|
||||
{ AV_CODEC_ID_MJPEG, MKTAG('I', 'J', 'P', 'G') },
|
||||
{ AV_CODEC_ID_MJPEG, MKTAG('A', 'V', 'R', 'n') },
|
||||
{ AV_CODEC_ID_HUFFYUV, MKTAG('H', 'F', 'Y', 'U') },
|
||||
{ AV_CODEC_ID_FFVHUFF, MKTAG('F', 'F', 'V', 'H') },
|
||||
{ AV_CODEC_ID_CYUV, MKTAG('C', 'Y', 'U', 'V') },
|
||||
{ AV_CODEC_ID_RAWVIDEO, 0 },
|
||||
{ AV_CODEC_ID_RAWVIDEO, MKTAG('I', '4', '2', '0') },
|
||||
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', 'U', 'Y', '2') },
|
||||
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '4', '2', '2') },
|
||||
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', 'V', '1', '2') },
|
||||
{ AV_CODEC_ID_RAWVIDEO, MKTAG('U', 'Y', 'V', 'Y') },
|
||||
{ AV_CODEC_ID_RAWVIDEO, MKTAG('I', 'Y', 'U', 'V') },
|
||||
{ AV_CODEC_ID_RAWVIDEO, MKTAG('Y', '8', '0', '0') },
|
||||
{ AV_CODEC_ID_RAWVIDEO, MKTAG('H', 'D', 'Y', 'C') },
|
||||
{ AV_CODEC_ID_INDEO3, MKTAG('I', 'V', '3', '1') },
|
||||
{ AV_CODEC_ID_INDEO3, MKTAG('I', 'V', '3', '2') },
|
||||
{ AV_CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') },
|
||||
{ AV_CODEC_ID_VP3, MKTAG('V', 'P', '3', '0') },
|
||||
{ AV_CODEC_ID_ASV1, MKTAG('A', 'S', 'V', '1') },
|
||||
{ AV_CODEC_ID_ASV2, MKTAG('A', 'S', 'V', '2') },
|
||||
{ AV_CODEC_ID_VCR1, MKTAG('V', 'C', 'R', '1') },
|
||||
{ AV_CODEC_ID_FFV1, MKTAG('F', 'F', 'V', '1') },
|
||||
{ AV_CODEC_ID_XAN_WC4, MKTAG('X', 'x', 'a', 'n') },
|
||||
{ AV_CODEC_ID_MSRLE, MKTAG('m', 'r', 'l', 'e') },
|
||||
{ AV_CODEC_ID_MSRLE, MKTAG(0x1, 0x0, 0x0, 0x0) },
|
||||
{ AV_CODEC_ID_MSVIDEO1, MKTAG('M', 'S', 'V', 'C') },
|
||||
{ AV_CODEC_ID_MSVIDEO1, MKTAG('m', 's', 'v', 'c') },
|
||||
{ AV_CODEC_ID_MSVIDEO1, MKTAG('C', 'R', 'A', 'M') },
|
||||
{ AV_CODEC_ID_MSVIDEO1, MKTAG('c', 'r', 'a', 'm') },
|
||||
{ AV_CODEC_ID_MSVIDEO1, MKTAG('W', 'H', 'A', 'M') },
|
||||
{ AV_CODEC_ID_MSVIDEO1, MKTAG('w', 'h', 'a', 'm') },
|
||||
{ AV_CODEC_ID_CINEPAK, MKTAG('c', 'v', 'i', 'd') },
|
||||
{ AV_CODEC_ID_TRUEMOTION1, MKTAG('D', 'U', 'C', 'K') },
|
||||
{ AV_CODEC_ID_MSZH, MKTAG('M', 'S', 'Z', 'H') },
|
||||
{ AV_CODEC_ID_ZLIB, MKTAG('Z', 'L', 'I', 'B') },
|
||||
{ AV_CODEC_ID_4XM, MKTAG('4', 'X', 'M', 'V') },
|
||||
{ AV_CODEC_ID_FLV1, MKTAG('F', 'L', 'V', '1') },
|
||||
{ AV_CODEC_ID_SVQ1, MKTAG('s', 'v', 'q', '1') },
|
||||
{ AV_CODEC_ID_TSCC, MKTAG('t', 's', 'c', 'c') },
|
||||
{ AV_CODEC_ID_ULTI, MKTAG('U', 'L', 'T', 'I') },
|
||||
{ AV_CODEC_ID_VIXL, MKTAG('V', 'I', 'X', 'L') },
|
||||
{ AV_CODEC_ID_QPEG, MKTAG('Q', 'P', 'E', 'G') },
|
||||
{ AV_CODEC_ID_QPEG, MKTAG('Q', '1', '.', '0') },
|
||||
{ AV_CODEC_ID_QPEG, MKTAG('Q', '1', '.', '1') },
|
||||
{ AV_CODEC_ID_WMV3, MKTAG('W', 'M', 'V', '3') },
|
||||
{ AV_CODEC_ID_LOCO, MKTAG('L', 'O', 'C', 'O') },
|
||||
{ AV_CODEC_ID_THEORA, MKTAG('t', 'h', 'e', 'o') },
|
||||
#if LIBAVCODEC_VERSION_INT>0x000409
|
||||
{ AV_CODEC_ID_WNV1, MKTAG('W', 'N', 'V', '1') },
|
||||
{ AV_CODEC_ID_AASC, MKTAG('A', 'A', 'S', 'C') },
|
||||
{ AV_CODEC_ID_INDEO2, MKTAG('R', 'T', '2', '1') },
|
||||
{ AV_CODEC_ID_FRAPS, MKTAG('F', 'P', 'S', '1') },
|
||||
{ AV_CODEC_ID_TRUEMOTION2, MKTAG('T', 'M', '2', '0') },
|
||||
#endif
|
||||
#if LIBAVCODEC_VERSION_INT>((50<<16)+(1<<8)+0)
|
||||
{ AV_CODEC_ID_FLASHSV, MKTAG('F', 'S', 'V', '1') },
|
||||
{ AV_CODEC_ID_JPEGLS,MKTAG('M', 'J', 'L', 'S') }, /* JPEG-LS custom FOURCC for avi - encoder */
|
||||
{ AV_CODEC_ID_VC1, MKTAG('W', 'V', 'C', '1') },
|
||||
{ AV_CODEC_ID_VC1, MKTAG('W', 'M', 'V', 'A') },
|
||||
{ AV_CODEC_ID_CSCD, MKTAG('C', 'S', 'C', 'D') },
|
||||
{ AV_CODEC_ID_ZMBV, MKTAG('Z', 'M', 'B', 'V') },
|
||||
{ AV_CODEC_ID_KMVC, MKTAG('K', 'M', 'V', 'C') },
|
||||
#endif
|
||||
#if LIBAVCODEC_VERSION_INT>((51<<16)+(11<<8)+0)
|
||||
{ AV_CODEC_ID_VP5, MKTAG('V', 'P', '5', '0') },
|
||||
{ AV_CODEC_ID_VP6, MKTAG('V', 'P', '6', '0') },
|
||||
{ AV_CODEC_ID_VP6, MKTAG('V', 'P', '6', '1') },
|
||||
{ AV_CODEC_ID_VP6, MKTAG('V', 'P', '6', '2') },
|
||||
{ AV_CODEC_ID_VP6F, MKTAG('V', 'P', '6', 'F') },
|
||||
{ AV_CODEC_ID_JPEG2000, MKTAG('M', 'J', '2', 'C') },
|
||||
{ AV_CODEC_ID_VMNC, MKTAG('V', 'M', 'n', 'c') },
|
||||
#endif
|
||||
#if LIBAVCODEC_VERSION_INT>=((51<<16)+(49<<8)+0)
|
||||
// this tag seems not to exist in older versions of FFMPEG
|
||||
{ AV_CODEC_ID_TARGA, MKTAG('t', 'g', 'a', ' ') },
|
||||
#endif
|
||||
{ AV_CODEC_ID_NONE, 0 },
|
||||
};
|
||||
228
3rdparty/opencv-4.5.4/modules/videoio/src/plugin_api.hpp
vendored
Normal file
228
3rdparty/opencv-4.5.4/modules/videoio/src/plugin_api.hpp
vendored
Normal file
@@ -0,0 +1,228 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
|
||||
//
|
||||
// DEPRECATED. Do not use in new plugins
|
||||
//
|
||||
|
||||
#ifndef PLUGIN_API_HPP
|
||||
#define PLUGIN_API_HPP
|
||||
|
||||
#include <opencv2/core/cvdef.h>
|
||||
#include <opencv2/core/llapi/llapi.h>
|
||||
|
||||
#if !defined(BUILD_PLUGIN)
|
||||
|
||||
/// increased for backward-compatible changes, e.g. add new function
|
||||
/// Caller API <= Plugin API -> plugin is fully compatible
|
||||
/// Caller API > Plugin API -> plugin is not fully compatible, caller should use extra checks to use plugins with older API
|
||||
#define API_VERSION 1 // preview
|
||||
|
||||
/// increased for incompatible changes, e.g. remove function argument
|
||||
/// Caller ABI == Plugin ABI -> plugin is compatible
|
||||
/// Caller ABI > Plugin ABI -> plugin is not compatible, caller should use shim code to use old ABI plugins (caller may know how lower ABI works, so it is possible)
|
||||
/// Caller ABI < Plugin ABI -> plugin can't be used (plugin should provide interface with lower ABI to handle that)
|
||||
#define ABI_VERSION 0 // preview
|
||||
|
||||
#else // !defined(BUILD_PLUGIN)
|
||||
|
||||
#if !defined(ABI_VERSION) || !defined(API_VERSION)
|
||||
#error "Plugin must define ABI_VERSION and API_VERSION before including plugin_api.hpp"
|
||||
#endif
|
||||
|
||||
#endif // !defined(BUILD_PLUGIN)
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef CvResult (CV_API_CALL *cv_videoio_retrieve_cb_t)(int stream_idx, unsigned const char* data, int step, int width, int height, int cn, void* userdata);
|
||||
|
||||
typedef struct CvPluginCapture_t* CvPluginCapture;
|
||||
typedef struct CvPluginWriter_t* CvPluginWriter;
|
||||
|
||||
struct OpenCV_VideoIO_Plugin_API_v0_0_api_entries
|
||||
{
|
||||
/** OpenCV capture ID (VideoCaptureAPIs)
|
||||
@note API-ENTRY 1, API-Version == 0
|
||||
*/
|
||||
int captureAPI;
|
||||
|
||||
/** @brief Open video capture
|
||||
|
||||
@param filename File name or NULL to use camera_index instead
|
||||
@param camera_index Camera index (used if filename == NULL)
|
||||
@param[out] handle pointer on Capture handle
|
||||
|
||||
@note API-CALL 2, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Capture_open)(const char* filename, int camera_index, CV_OUT CvPluginCapture* handle);
|
||||
|
||||
/** @brief Release Capture handle
|
||||
|
||||
@param handle Capture handle
|
||||
|
||||
@note API-CALL 3, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Capture_release)(CvPluginCapture handle);
|
||||
|
||||
/** @brief Get property value
|
||||
|
||||
@param handle Capture handle
|
||||
@param prop Property index
|
||||
@param[out] val property value
|
||||
|
||||
@note API-CALL 4, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Capture_getProperty)(CvPluginCapture handle, int prop, CV_OUT double* val);
|
||||
|
||||
/** @brief Set property value
|
||||
|
||||
@param handle Capture handle
|
||||
@param prop Property index
|
||||
@param val property value
|
||||
|
||||
@note API-CALL 5, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Capture_setProperty)(CvPluginCapture handle, int prop, double val);
|
||||
|
||||
/** @brief Grab frame
|
||||
|
||||
@param handle Capture handle
|
||||
|
||||
@note API-CALL 6, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Capture_grab)(CvPluginCapture handle);
|
||||
|
||||
/** @brief Retrieve frame
|
||||
|
||||
@param handle Capture handle
|
||||
@param stream_idx stream index to retrieve (BGR/IR/depth data)
|
||||
@param callback retrieve callback (synchronous)
|
||||
@param userdata callback context data
|
||||
|
||||
@note API-CALL 7, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Capture_retreive)(CvPluginCapture handle, int stream_idx, cv_videoio_retrieve_cb_t callback, void* userdata);
|
||||
|
||||
|
||||
/** @brief Try to open video writer
|
||||
|
||||
@param filename Destination location
|
||||
@param fourcc FOURCC code
|
||||
@param fps FPS
|
||||
@param width frame width
|
||||
@param height frame height
|
||||
@param isColor true if video stream should save color frames
|
||||
@param[out] handle pointer on Writer handle
|
||||
|
||||
@note API-CALL 8, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Writer_open)(const char* filename, int fourcc, double fps, int width, int height, int isColor,
|
||||
CV_OUT CvPluginWriter* handle);
|
||||
|
||||
/** @brief Release Writer handle
|
||||
|
||||
@param handle Writer handle
|
||||
|
||||
@note API-CALL 9, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Writer_release)(CvPluginWriter handle);
|
||||
|
||||
/** @brief Get property value
|
||||
|
||||
@param handle Writer handle
|
||||
@param prop Property index
|
||||
@param[out] val property value
|
||||
|
||||
@note API-CALL 10, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Writer_getProperty)(CvPluginWriter handle, int prop, CV_OUT double* val);
|
||||
|
||||
/** @brief Set property value
|
||||
|
||||
@param handle Writer handle
|
||||
@param prop Property index
|
||||
@param val property value
|
||||
|
||||
@note API-CALL 11, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Writer_setProperty)(CvPluginWriter handle, int prop, double val);
|
||||
|
||||
/** @brief Write frame
|
||||
|
||||
@param handle Writer handle
|
||||
@param data frame data
|
||||
@param step step in bytes
|
||||
@param width frame width in pixels
|
||||
@param height frame height
|
||||
@param cn number of channels per pixel
|
||||
|
||||
@note API-CALL 12, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Writer_write)(CvPluginWriter handle, const unsigned char *data, int step, int width, int height, int cn);
|
||||
}; // OpenCV_VideoIO_Plugin_API_v0_0_api_entries
|
||||
|
||||
struct OpenCV_VideoIO_Plugin_API_v0_1_api_entries
|
||||
{
|
||||
/** @brief Try to open video writer
|
||||
|
||||
@param filename Destination location
|
||||
@param fourcc FOURCC code
|
||||
@param fps FPS
|
||||
@param width frame width
|
||||
@param height frame height
|
||||
@param params pointer on 2*n_params array of 'key,value' pairs
|
||||
@param n_params number of passed parameters
|
||||
@param[out] handle pointer on Writer handle
|
||||
|
||||
@note API-CALL 13, API-Version == 1
|
||||
*/
|
||||
CvResult (CV_API_CALL* Writer_open_with_params)(
|
||||
const char* filename, int fourcc, double fps, int width, int height,
|
||||
int* params, unsigned n_params,
|
||||
CV_OUT CvPluginWriter* handle
|
||||
);
|
||||
}; // OpenCV_VideoIO_Plugin_API_v0_1_api_entries
|
||||
|
||||
typedef struct OpenCV_VideoIO_Plugin_API_preview_v0
|
||||
{
|
||||
OpenCV_API_Header api_header;
|
||||
struct OpenCV_VideoIO_Plugin_API_v0_0_api_entries v0;
|
||||
} OpenCV_VideoIO_Plugin_API_preview_v0;
|
||||
|
||||
typedef struct OpenCV_VideoIO_Plugin_API_preview_v1
|
||||
{
|
||||
OpenCV_API_Header api_header;
|
||||
struct OpenCV_VideoIO_Plugin_API_v0_0_api_entries v0;
|
||||
struct OpenCV_VideoIO_Plugin_API_v0_1_api_entries v1;
|
||||
} OpenCV_VideoIO_Plugin_API_preview_v1;
|
||||
|
||||
|
||||
#if ABI_VERSION == 0 && API_VERSION == 1
|
||||
typedef struct OpenCV_VideoIO_Plugin_API_preview_v1 OpenCV_VideoIO_Plugin_API_preview;
|
||||
#elif ABI_VERSION == 0 && API_VERSION == 0
|
||||
typedef struct OpenCV_VideoIO_Plugin_API_preview_v0 OpenCV_VideoIO_Plugin_API_preview;
|
||||
#else
|
||||
#error "Not supported configuration: check ABI_VERSION/API_VERSION"
|
||||
#endif
|
||||
|
||||
#ifdef BUILD_PLUGIN
|
||||
|
||||
CV_PLUGIN_EXPORTS
|
||||
const OpenCV_VideoIO_Plugin_API_preview* CV_API_CALL opencv_videoio_plugin_init_v0
|
||||
(int requested_abi_version, int requested_api_version, void* reserved /*NULL*/) CV_NOEXCEPT;
|
||||
|
||||
#else // BUILD_PLUGIN
|
||||
typedef const OpenCV_VideoIO_Plugin_API_preview* (CV_API_CALL *FN_opencv_videoio_plugin_init_t)
|
||||
(int requested_abi_version, int requested_api_version, void* reserved /*NULL*/);
|
||||
#endif // BUILD_PLUGIN
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // PLUGIN_API_HPP
|
||||
161
3rdparty/opencv-4.5.4/modules/videoio/src/plugin_capture_api.hpp
vendored
Normal file
161
3rdparty/opencv-4.5.4/modules/videoio/src/plugin_capture_api.hpp
vendored
Normal file
@@ -0,0 +1,161 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
|
||||
#ifndef PLUGIN_CAPTURE_API_HPP
|
||||
#define PLUGIN_CAPTURE_API_HPP
|
||||
|
||||
#include <opencv2/core/cvdef.h>
|
||||
#include <opencv2/core/llapi/llapi.h>
|
||||
|
||||
#if !defined(BUILD_PLUGIN)
|
||||
|
||||
/// increased for backward-compatible changes, e.g. add new function
|
||||
/// Caller API <= Plugin API -> plugin is fully compatible
|
||||
/// Caller API > Plugin API -> plugin is not fully compatible, caller should use extra checks to use plugins with older API
|
||||
#define CAPTURE_API_VERSION 1
|
||||
|
||||
/// increased for incompatible changes, e.g. remove function argument
|
||||
/// Caller ABI == Plugin ABI -> plugin is compatible
|
||||
/// Caller ABI > Plugin ABI -> plugin is not compatible, caller should use shim code to use old ABI plugins (caller may know how lower ABI works, so it is possible)
|
||||
/// Caller ABI < Plugin ABI -> plugin can't be used (plugin should provide interface with lower ABI to handle that)
|
||||
#define CAPTURE_ABI_VERSION 1
|
||||
|
||||
#else // !defined(BUILD_PLUGIN)
|
||||
|
||||
#if !defined(CAPTURE_ABI_VERSION) || !defined(CAPTURE_API_VERSION)
|
||||
#error "Plugin must define CAPTURE_ABI_VERSION and CAPTURE_API_VERSION before including plugin_capture_api.hpp"
|
||||
#endif
|
||||
|
||||
#endif // !defined(BUILD_PLUGIN)
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef CvResult (CV_API_CALL *cv_videoio_capture_retrieve_cb_t)(int stream_idx, unsigned const char* data, int step, int width, int height, int type, void* userdata);
|
||||
|
||||
typedef struct CvPluginCapture_t* CvPluginCapture;
|
||||
|
||||
struct OpenCV_VideoIO_Capture_Plugin_API_v1_0_api_entries
|
||||
{
|
||||
/** OpenCV capture ID (VideoCaptureAPIs)
|
||||
@note API-ENTRY 1, API-Version == 0
|
||||
*/
|
||||
int id;
|
||||
|
||||
/** @brief Open video capture
|
||||
|
||||
@param filename File name or NULL to use camera_index instead
|
||||
@param camera_index Camera index (used if filename == NULL)
|
||||
@param[out] handle pointer on Capture handle
|
||||
|
||||
@note API-CALL 2, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Capture_open)(const char* filename, int camera_index, CV_OUT CvPluginCapture* handle);
|
||||
|
||||
/** @brief Release Capture handle
|
||||
|
||||
@param handle Capture handle
|
||||
|
||||
@note API-CALL 3, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Capture_release)(CvPluginCapture handle);
|
||||
|
||||
/** @brief Get property value
|
||||
|
||||
@param handle Capture handle
|
||||
@param prop Property index
|
||||
@param[out] val property value
|
||||
|
||||
@note API-CALL 4, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Capture_getProperty)(CvPluginCapture handle, int prop, CV_OUT double* val);
|
||||
|
||||
/** @brief Set property value
|
||||
|
||||
@param handle Capture handle
|
||||
@param prop Property index
|
||||
@param val property value
|
||||
|
||||
@note API-CALL 5, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Capture_setProperty)(CvPluginCapture handle, int prop, double val);
|
||||
|
||||
/** @brief Grab frame
|
||||
|
||||
@param handle Capture handle
|
||||
|
||||
@note API-CALL 6, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Capture_grab)(CvPluginCapture handle);
|
||||
|
||||
/** @brief Retrieve frame
|
||||
|
||||
@param handle Capture handle
|
||||
@param stream_idx stream index to retrieve (BGR/IR/depth data)
|
||||
@param callback retrieve callback (synchronous)
|
||||
@param userdata callback context data
|
||||
|
||||
@note API-CALL 7, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Capture_retreive)(CvPluginCapture handle, int stream_idx, cv_videoio_capture_retrieve_cb_t callback, void* userdata);
|
||||
}; // OpenCV_VideoIO_Capture_Plugin_API_v1_0_api_entries
|
||||
|
||||
struct OpenCV_VideoIO_Capture_Plugin_API_v1_1_api_entries
|
||||
{
|
||||
/** @brief Open video capture with parameters
|
||||
|
||||
@param filename File name or NULL to use camera_index instead
|
||||
@param camera_index Camera index (used if filename == NULL)
|
||||
@param params pointer on 2*n_params array of 'key,value' pairs
|
||||
@param n_params number of passed parameters
|
||||
@param[out] handle pointer on Capture handle
|
||||
|
||||
@note API-CALL 8, API-Version == 1
|
||||
*/
|
||||
CvResult (CV_API_CALL *Capture_open_with_params)(
|
||||
const char* filename, int camera_index,
|
||||
int* params, unsigned n_params,
|
||||
CV_OUT CvPluginCapture* handle);
|
||||
}; // OpenCV_VideoIO_Capture_Plugin_API_v1_1_api_entries
|
||||
|
||||
typedef struct OpenCV_VideoIO_Capture_Plugin_API_v1_0
|
||||
{
|
||||
OpenCV_API_Header api_header;
|
||||
struct OpenCV_VideoIO_Capture_Plugin_API_v1_0_api_entries v0;
|
||||
} OpenCV_VideoIO_Capture_Plugin_API_v1_0;
|
||||
|
||||
typedef struct OpenCV_VideoIO_Capture_Plugin_API_v1_1
|
||||
{
|
||||
OpenCV_API_Header api_header;
|
||||
struct OpenCV_VideoIO_Capture_Plugin_API_v1_0_api_entries v0;
|
||||
struct OpenCV_VideoIO_Capture_Plugin_API_v1_1_api_entries v1;
|
||||
} OpenCV_VideoIO_Capture_Plugin_API_v1_1;
|
||||
|
||||
#if CAPTURE_ABI_VERSION == 1 && CAPTURE_API_VERSION == 1
|
||||
typedef struct OpenCV_VideoIO_Capture_Plugin_API_v1_1 OpenCV_VideoIO_Capture_Plugin_API;
|
||||
#elif CAPTURE_ABI_VERSION == 1 && CAPTURE_API_VERSION == 0
|
||||
typedef struct OpenCV_VideoIO_Capture_Plugin_API_v1_0 OpenCV_VideoIO_Capture_Plugin_API;
|
||||
#else
|
||||
#error "Not supported configuration: check CAPTURE_ABI_VERSION/CAPTURE_API_VERSION"
|
||||
#endif
|
||||
|
||||
#ifdef BUILD_PLUGIN
|
||||
|
||||
CV_PLUGIN_EXPORTS
|
||||
const OpenCV_VideoIO_Capture_Plugin_API* CV_API_CALL opencv_videoio_capture_plugin_init_v1
|
||||
(int requested_abi_version, int requested_api_version, void* reserved /*NULL*/) CV_NOEXCEPT;
|
||||
|
||||
#else // BUILD_PLUGIN
|
||||
typedef const OpenCV_VideoIO_Capture_Plugin_API* (CV_API_CALL *FN_opencv_videoio_capture_plugin_init_t)
|
||||
(int requested_abi_version, int requested_api_version, void* reserved /*NULL*/);
|
||||
#endif // BUILD_PLUGIN
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // PLUGIN_CAPTURE_API_HPP
|
||||
163
3rdparty/opencv-4.5.4/modules/videoio/src/plugin_writer_api.hpp
vendored
Normal file
163
3rdparty/opencv-4.5.4/modules/videoio/src/plugin_writer_api.hpp
vendored
Normal file
@@ -0,0 +1,163 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
|
||||
#ifndef PLUGIN_WRITER_API_HPP
|
||||
#define PLUGIN_WRITER_API_HPP
|
||||
|
||||
#include <opencv2/core/cvdef.h>
|
||||
#include <opencv2/core/llapi/llapi.h>
|
||||
|
||||
#if !defined(BUILD_PLUGIN)
|
||||
|
||||
/// increased for backward-compatible changes, e.g. add new function
|
||||
/// Caller API <= Plugin API -> plugin is fully compatible
|
||||
/// Caller API > Plugin API -> plugin is not fully compatible, caller should use extra checks to use plugins with older API
|
||||
#define WRITER_API_VERSION 1
|
||||
|
||||
/// increased for incompatible changes, e.g. remove function argument
|
||||
/// Caller ABI == Plugin ABI -> plugin is compatible
|
||||
/// Caller ABI > Plugin ABI -> plugin is not compatible, caller should use shim code to use old ABI plugins (caller may know how lower ABI works, so it is possible)
|
||||
/// Caller ABI < Plugin ABI -> plugin can't be used (plugin should provide interface with lower ABI to handle that)
|
||||
#define WRITER_ABI_VERSION 1
|
||||
|
||||
#else // !defined(BUILD_PLUGIN)
|
||||
|
||||
#if !defined(WRITER_ABI_VERSION) || !defined(WRITER_API_VERSION)
|
||||
#error "Plugin must define WRITER_ABI_VERSION and WRITER_API_VERSION before including plugin_writer_api.hpp"
|
||||
#endif
|
||||
|
||||
#endif // !defined(BUILD_PLUGIN)
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef struct CvPluginWriter_t* CvPluginWriter;
|
||||
|
||||
struct OpenCV_VideoIO_Writer_Plugin_API_v1_0_api_entries
|
||||
{
|
||||
/** OpenCV capture ID (VideoCaptureAPIs)
|
||||
@note API-ENTRY 1, API-Version == 0
|
||||
*/
|
||||
int id;
|
||||
|
||||
/** @brief Try to open video writer
|
||||
|
||||
@param filename Destination location
|
||||
@param fourcc FOURCC code
|
||||
@param fps FPS
|
||||
@param width frame width
|
||||
@param height frame height
|
||||
@param isColor true if video stream should save color frames
|
||||
@param[out] handle pointer on Writer handle
|
||||
|
||||
@note API-CALL 2, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Writer_open)(const char* filename, int fourcc, double fps, int width, int height, int isColor,
|
||||
CV_OUT CvPluginWriter* handle);
|
||||
|
||||
/** @brief Release Writer handle
|
||||
|
||||
@param handle Writer handle
|
||||
|
||||
@note API-CALL 3, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Writer_release)(CvPluginWriter handle);
|
||||
|
||||
/** @brief Get property value
|
||||
|
||||
@param handle Writer handle
|
||||
@param prop Property index
|
||||
@param[out] val property value
|
||||
|
||||
@note API-CALL 4, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Writer_getProperty)(CvPluginWriter handle, int prop, CV_OUT double* val);
|
||||
|
||||
/** @brief Set property value
|
||||
|
||||
@param handle Writer handle
|
||||
@param prop Property index
|
||||
@param val property value
|
||||
|
||||
@note API-CALL 5, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Writer_setProperty)(CvPluginWriter handle, int prop, double val);
|
||||
|
||||
/** @brief Write frame
|
||||
|
||||
@param handle Writer handle
|
||||
@param data frame data
|
||||
@param step step in bytes
|
||||
@param width frame width in pixels
|
||||
@param height frame height
|
||||
@param cn number of channels per pixel
|
||||
|
||||
@note API-CALL 6, API-Version == 0
|
||||
*/
|
||||
CvResult (CV_API_CALL *Writer_write)(CvPluginWriter handle, const unsigned char *data, int step, int width, int height, int cn);
|
||||
}; // OpenCV_VideoIO_Writer_Plugin_API_v1_0_api_entries
|
||||
|
||||
struct OpenCV_VideoIO_Writer_Plugin_API_v1_1_api_entries
|
||||
{
|
||||
/** @brief Try to open video writer
|
||||
|
||||
@param filename Destination location
|
||||
@param fourcc FOURCC code
|
||||
@param fps FPS
|
||||
@param width frame width
|
||||
@param height frame height
|
||||
@param params pointer on 2*n_params array of 'key,value' pairs
|
||||
@param n_params number of passed parameters
|
||||
@param[out] handle pointer on Writer handle
|
||||
|
||||
@note API-CALL 7, API-Version == 1
|
||||
*/
|
||||
CvResult (CV_API_CALL* Writer_open_with_params)(
|
||||
const char* filename, int fourcc, double fps, int width, int height,
|
||||
int* params, unsigned n_params,
|
||||
CV_OUT CvPluginWriter* handle
|
||||
);
|
||||
}; // OpenCV_VideoIO_Writer_Plugin_API_v1_1_api_entries
|
||||
|
||||
typedef struct OpenCV_VideoIO_Writer_Plugin_API_v1_0
|
||||
{
|
||||
OpenCV_API_Header api_header;
|
||||
struct OpenCV_VideoIO_Writer_Plugin_API_v1_0_api_entries v0;
|
||||
} OpenCV_VideoIO_Writer_Plugin_API_v1_0;
|
||||
|
||||
typedef struct OpenCV_VideoIO_Writer_Plugin_API_v1_1
|
||||
{
|
||||
OpenCV_API_Header api_header;
|
||||
struct OpenCV_VideoIO_Writer_Plugin_API_v1_0_api_entries v0;
|
||||
struct OpenCV_VideoIO_Writer_Plugin_API_v1_1_api_entries v1;
|
||||
} OpenCV_VideoIO_Writer_Plugin_API_v1_1;
|
||||
|
||||
|
||||
#if WRITER_ABI_VERSION == 1 && WRITER_API_VERSION == 1
|
||||
typedef struct OpenCV_VideoIO_Writer_Plugin_API_v1_1 OpenCV_VideoIO_Writer_Plugin_API;
|
||||
#elif WRITER_ABI_VERSION == 1 && WRITER_API_VERSION == 0
|
||||
typedef struct OpenCV_VideoIO_Writer_Plugin_API_v1_0 OpenCV_VideoIO_Writer_Plugin_API;
|
||||
#else
|
||||
#error "Not supported configuration: check WRITER_ABI_VERSION/WRITER_API_VERSION"
|
||||
#endif
|
||||
|
||||
#ifdef BUILD_PLUGIN
|
||||
|
||||
CV_PLUGIN_EXPORTS
|
||||
const OpenCV_VideoIO_Writer_Plugin_API* CV_API_CALL opencv_videoio_writer_plugin_init_v1
|
||||
(int requested_abi_version, int requested_api_version, void* reserved /*NULL*/) CV_NOEXCEPT;
|
||||
|
||||
#else // BUILD_PLUGIN
|
||||
typedef const OpenCV_VideoIO_Writer_Plugin_API* (CV_API_CALL *FN_opencv_videoio_writer_plugin_init_t)
|
||||
(int requested_abi_version, int requested_api_version, void* reserved /*NULL*/);
|
||||
#endif // BUILD_PLUGIN
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // PLUGIN_WRITER_API_HPP
|
||||
105
3rdparty/opencv-4.5.4/modules/videoio/src/precomp.hpp
vendored
Normal file
105
3rdparty/opencv-4.5.4/modules/videoio/src/precomp.hpp
vendored
Normal file
@@ -0,0 +1,105 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// Intel License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000, Intel Corporation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifndef __VIDEOIO_H_
|
||||
#define __VIDEOIO_H_
|
||||
|
||||
#if defined(__OPENCV_BUILD) && defined(BUILD_PLUGIN)
|
||||
#undef __OPENCV_BUILD // allow public API only
|
||||
#define OPENCV_HAVE_CVCONFIG_H 1 // but we still have access to cvconfig.h (TODO remove this)
|
||||
#include <opencv2/core.hpp>
|
||||
#include <opencv2/core/utils/trace.hpp>
|
||||
#endif
|
||||
|
||||
#if defined __linux__ || defined __APPLE__ || defined __HAIKU__
|
||||
#include <unistd.h> // -D_FORTIFY_SOURCE=2 workaround: https://github.com/opencv/opencv/issues/15020
|
||||
#endif
|
||||
|
||||
|
||||
#include "opencv2/videoio.hpp"
|
||||
#include "opencv2/videoio/legacy/constants_c.h"
|
||||
|
||||
#include "opencv2/core/utility.hpp"
|
||||
#ifdef __OPENCV_BUILD
|
||||
#include "opencv2/core/private.hpp"
|
||||
#endif
|
||||
|
||||
#include <opencv2/core/utils/configuration.private.hpp>
|
||||
#include <opencv2/core/utils/logger.defines.hpp>
|
||||
#ifdef NDEBUG
|
||||
#define CV_LOG_STRIP_LEVEL CV_LOG_LEVEL_DEBUG + 1
|
||||
#else
|
||||
#define CV_LOG_STRIP_LEVEL CV_LOG_LEVEL_VERBOSE + 1
|
||||
#endif
|
||||
#include <opencv2/core/utils/logger.hpp>
|
||||
|
||||
#include "opencv2/imgcodecs.hpp"
|
||||
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/imgproc/imgproc_c.h"
|
||||
#include "opencv2/videoio/videoio_c.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <limits.h>
|
||||
#include <ctype.h>
|
||||
#include <assert.h> // FIXIT remove this
|
||||
|
||||
#if defined _WIN32 || defined WINCE
|
||||
#if !defined _WIN32_WINNT
|
||||
#ifdef HAVE_MSMF
|
||||
#define _WIN32_WINNT 0x0600 // Windows Vista
|
||||
#else
|
||||
#define _WIN32_WINNT 0x0501 // Windows XP
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <windows.h>
|
||||
#undef small
|
||||
#undef min
|
||||
#undef max
|
||||
#undef abs
|
||||
#endif
|
||||
|
||||
#include "cap_interface.hpp"
|
||||
|
||||
#endif /* __VIDEOIO_H_ */
|
||||
91
3rdparty/opencv-4.5.4/modules/videoio/src/videoio_c.cpp
vendored
Normal file
91
3rdparty/opencv-4.5.4/modules/videoio/src/videoio_c.cpp
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
|
||||
#include "precomp.hpp"
|
||||
#include "opencv2/videoio/registry.hpp"
|
||||
#include "videoio_registry.hpp"
|
||||
|
||||
using namespace cv;
|
||||
|
||||
// Legacy C-like API
|
||||
|
||||
CV_IMPL CvCapture* cvCreateCameraCapture(int)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "cvCreateCameraCapture doesn't support legacy API anymore.")
|
||||
return NULL;
|
||||
}
|
||||
|
||||
CV_IMPL CvCapture* cvCreateFileCaptureWithPreference(const char*, int)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "cvCreateFileCaptureWithPreference doesn't support legacy API anymore.")
|
||||
return NULL;
|
||||
}
|
||||
|
||||
CV_IMPL CvCapture* cvCreateFileCapture(const char * filename)
|
||||
{
|
||||
return cvCreateFileCaptureWithPreference(filename, CAP_ANY);
|
||||
}
|
||||
|
||||
CV_IMPL CvVideoWriter* cvCreateVideoWriter(const char*, int, double, CvSize, int)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "cvCreateVideoWriter doesn't support legacy API anymore.")
|
||||
return NULL;
|
||||
}
|
||||
|
||||
CV_IMPL int cvWriteFrame(CvVideoWriter* writer, const IplImage* image)
|
||||
{
|
||||
return writer ? writer->writeFrame(image) : 0;
|
||||
}
|
||||
|
||||
CV_IMPL void cvReleaseVideoWriter(CvVideoWriter** pwriter)
|
||||
{
|
||||
if( pwriter && *pwriter )
|
||||
{
|
||||
delete *pwriter;
|
||||
*pwriter = 0;
|
||||
}
|
||||
}
|
||||
|
||||
CV_IMPL void cvReleaseCapture(CvCapture** pcapture)
|
||||
{
|
||||
if (pcapture && *pcapture)
|
||||
{
|
||||
delete *pcapture;
|
||||
*pcapture = 0;
|
||||
}
|
||||
}
|
||||
|
||||
CV_IMPL IplImage* cvQueryFrame(CvCapture* capture)
|
||||
{
|
||||
if (!capture)
|
||||
return 0;
|
||||
if (!capture->grabFrame())
|
||||
return 0;
|
||||
return capture->retrieveFrame(0);
|
||||
}
|
||||
|
||||
CV_IMPL int cvGrabFrame(CvCapture* capture)
|
||||
{
|
||||
return capture ? capture->grabFrame() : 0;
|
||||
}
|
||||
|
||||
CV_IMPL IplImage* cvRetrieveFrame(CvCapture* capture, int idx)
|
||||
{
|
||||
return capture ? capture->retrieveFrame(idx) : 0;
|
||||
}
|
||||
|
||||
CV_IMPL double cvGetCaptureProperty(CvCapture* capture, int id)
|
||||
{
|
||||
return capture ? capture->getProperty(id) : 0;
|
||||
}
|
||||
|
||||
CV_IMPL int cvSetCaptureProperty(CvCapture* capture, int id, double value)
|
||||
{
|
||||
return capture ? capture->setProperty(id, value) : 0;
|
||||
}
|
||||
|
||||
CV_IMPL int cvGetCaptureDomain(CvCapture* capture)
|
||||
{
|
||||
return capture ? capture->getCaptureDomain() : 0;
|
||||
}
|
||||
489
3rdparty/opencv-4.5.4/modules/videoio/src/videoio_registry.cpp
vendored
Normal file
489
3rdparty/opencv-4.5.4/modules/videoio/src/videoio_registry.cpp
vendored
Normal file
@@ -0,0 +1,489 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
#include "videoio_registry.hpp"
|
||||
|
||||
#include "opencv2/videoio/registry.hpp"
|
||||
|
||||
#include "opencv2/core/utils/filesystem.private.hpp" // OPENCV_HAVE_FILESYSTEM_SUPPORT
|
||||
|
||||
#include "cap_librealsense.hpp"
|
||||
#include "cap_dshow.hpp"
|
||||
|
||||
#ifdef HAVE_MFX
|
||||
#include "cap_mfx_reader.hpp"
|
||||
#include "cap_mfx_writer.hpp"
|
||||
#endif
|
||||
|
||||
// All WinRT versions older than 8.0 should provide classes used for video support
|
||||
#if defined(WINRT) && !defined(WINRT_8_0) && defined(__cplusplus_winrt)
|
||||
# include "cap_winrt_capture.hpp"
|
||||
# include "cap_winrt_bridge.hpp"
|
||||
# define WINRT_VIDEO
|
||||
#endif
|
||||
|
||||
#if defined _M_X64 && defined _MSC_VER && !defined CV_ICC
|
||||
#pragma optimize("",off)
|
||||
#pragma warning(disable: 4748)
|
||||
#endif
|
||||
|
||||
using namespace cv;
|
||||
|
||||
namespace cv {
|
||||
|
||||
namespace {
|
||||
|
||||
#if OPENCV_HAVE_FILESYSTEM_SUPPORT && defined(ENABLE_PLUGINS)
|
||||
#define DECLARE_DYNAMIC_BACKEND(cap, name, mode) \
|
||||
{ \
|
||||
cap, (BackendMode)(mode), 1000, name, createPluginBackendFactory(cap, name) \
|
||||
},
|
||||
#else
|
||||
#define DECLARE_DYNAMIC_BACKEND(cap, name, mode) /* nothing */
|
||||
#endif
|
||||
|
||||
#define DECLARE_STATIC_BACKEND(cap, name, mode, createCaptureFile, createCaptureCamera, createWriter) \
|
||||
{ \
|
||||
cap, (BackendMode)(mode), 1000, name, createBackendFactory(createCaptureFile, createCaptureCamera, createWriter) \
|
||||
},
|
||||
|
||||
/** Ordering guidelines:
|
||||
- modern optimized, multi-platform libraries: ffmpeg, gstreamer, Media SDK
|
||||
- platform specific universal SDK: WINRT, AVFOUNDATION, MSMF/DSHOW, V4L/V4L2
|
||||
- RGB-D: OpenNI/OpenNI2, REALSENSE
|
||||
- special OpenCV (file-based): "images", "mjpeg"
|
||||
- special camera SDKs, including stereo: other special SDKs: FIREWIRE/1394, XIMEA/ARAVIS/GIGANETIX/PVAPI(GigE)/uEye
|
||||
- other: XINE, gphoto2, etc
|
||||
*/
|
||||
static const struct VideoBackendInfo builtin_backends[] =
|
||||
{
|
||||
#ifdef HAVE_FFMPEG
|
||||
DECLARE_STATIC_BACKEND(CAP_FFMPEG, "FFMPEG", MODE_CAPTURE_BY_FILENAME | MODE_WRITER, cvCreateFileCapture_FFMPEG_proxy, 0, cvCreateVideoWriter_FFMPEG_proxy)
|
||||
#elif defined(ENABLE_PLUGINS) || defined(HAVE_FFMPEG_WRAPPER)
|
||||
DECLARE_DYNAMIC_BACKEND(CAP_FFMPEG, "FFMPEG", MODE_CAPTURE_BY_FILENAME | MODE_WRITER)
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_GSTREAMER
|
||||
DECLARE_STATIC_BACKEND(CAP_GSTREAMER, "GSTREAMER", MODE_CAPTURE_ALL | MODE_WRITER, createGStreamerCapture_file, createGStreamerCapture_cam, create_GStreamer_writer)
|
||||
#elif defined(ENABLE_PLUGINS)
|
||||
DECLARE_DYNAMIC_BACKEND(CAP_GSTREAMER, "GSTREAMER", MODE_CAPTURE_ALL | MODE_WRITER)
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_MFX // Media SDK
|
||||
DECLARE_STATIC_BACKEND(CAP_INTEL_MFX, "INTEL_MFX", MODE_CAPTURE_BY_FILENAME | MODE_WRITER, create_MFX_capture, 0, create_MFX_writer)
|
||||
#elif defined(ENABLE_PLUGINS)
|
||||
DECLARE_DYNAMIC_BACKEND(CAP_INTEL_MFX, "INTEL_MFX", MODE_CAPTURE_BY_FILENAME | MODE_WRITER)
|
||||
#endif
|
||||
|
||||
// Apple platform
|
||||
#ifdef HAVE_AVFOUNDATION
|
||||
DECLARE_STATIC_BACKEND(CAP_AVFOUNDATION, "AVFOUNDATION", MODE_CAPTURE_ALL | MODE_WRITER, create_AVFoundation_capture_file, create_AVFoundation_capture_cam, create_AVFoundation_writer)
|
||||
#endif
|
||||
|
||||
// Windows
|
||||
#ifdef WINRT_VIDEO
|
||||
DECLARE_STATIC_BACKEND(CAP_WINRT, "WINRT", MODE_CAPTURE_BY_INDEX, 0, create_WRT_capture, 0)
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_MSMF
|
||||
DECLARE_STATIC_BACKEND(CAP_MSMF, "MSMF", MODE_CAPTURE_ALL | MODE_WRITER, cvCreateCapture_MSMF, cvCreateCapture_MSMF, cvCreateVideoWriter_MSMF)
|
||||
#elif defined(ENABLE_PLUGINS) && defined(_WIN32)
|
||||
DECLARE_DYNAMIC_BACKEND(CAP_MSMF, "MSMF", MODE_CAPTURE_ALL | MODE_WRITER)
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_DSHOW
|
||||
DECLARE_STATIC_BACKEND(CAP_DSHOW, "DSHOW", MODE_CAPTURE_BY_INDEX, 0, create_DShow_capture, 0)
|
||||
#endif
|
||||
|
||||
// Linux, some Unix
|
||||
#if defined HAVE_CAMV4L2
|
||||
DECLARE_STATIC_BACKEND(CAP_V4L2, "V4L2", MODE_CAPTURE_ALL, create_V4L_capture_file, create_V4L_capture_cam, 0)
|
||||
#elif defined HAVE_VIDEOIO
|
||||
DECLARE_STATIC_BACKEND(CAP_V4L, "V4L_BSD", MODE_CAPTURE_ALL, create_V4L_capture_file, create_V4L_capture_cam, 0)
|
||||
#endif
|
||||
|
||||
|
||||
// RGB-D universal
|
||||
#ifdef HAVE_OPENNI2
|
||||
DECLARE_STATIC_BACKEND(CAP_OPENNI2, "OPENNI2", MODE_CAPTURE_ALL, create_OpenNI2_capture_file, create_OpenNI2_capture_cam, 0)
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_LIBREALSENSE
|
||||
DECLARE_STATIC_BACKEND(CAP_REALSENSE, "INTEL_REALSENSE", MODE_CAPTURE_BY_INDEX, 0, create_RealSense_capture, 0)
|
||||
#endif
|
||||
|
||||
// OpenCV file-based only
|
||||
DECLARE_STATIC_BACKEND(CAP_IMAGES, "CV_IMAGES", MODE_CAPTURE_BY_FILENAME | MODE_WRITER, create_Images_capture, 0, create_Images_writer)
|
||||
DECLARE_STATIC_BACKEND(CAP_OPENCV_MJPEG, "CV_MJPEG", MODE_CAPTURE_BY_FILENAME | MODE_WRITER, createMotionJpegCapture, 0, createMotionJpegWriter)
|
||||
|
||||
// special interfaces / stereo cameras / other SDKs
|
||||
#if defined(HAVE_DC1394_2)
|
||||
DECLARE_STATIC_BACKEND(CAP_FIREWIRE, "FIREWIRE", MODE_CAPTURE_BY_INDEX, 0, create_DC1394_capture, 0)
|
||||
#endif
|
||||
// GigE
|
||||
#ifdef HAVE_PVAPI
|
||||
DECLARE_STATIC_BACKEND(CAP_PVAPI, "PVAPI", MODE_CAPTURE_BY_INDEX, 0, create_PvAPI_capture, 0)
|
||||
#endif
|
||||
#ifdef HAVE_XIMEA
|
||||
DECLARE_STATIC_BACKEND(CAP_XIAPI, "XIMEA", MODE_CAPTURE_ALL, create_XIMEA_capture_file, create_XIMEA_capture_cam, 0)
|
||||
#endif
|
||||
#ifdef HAVE_ARAVIS_API
|
||||
DECLARE_STATIC_BACKEND(CAP_ARAVIS, "ARAVIS", MODE_CAPTURE_BY_INDEX, 0, create_Aravis_capture, 0)
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_UEYE // uEye
|
||||
DECLARE_STATIC_BACKEND(CAP_UEYE, "UEYE", MODE_CAPTURE_BY_INDEX, 0, create_ueye_camera, 0)
|
||||
#elif defined(ENABLE_PLUGINS)
|
||||
DECLARE_DYNAMIC_BACKEND(CAP_UEYE, "UEYE", MODE_CAPTURE_BY_INDEX)
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_GPHOTO2
|
||||
DECLARE_STATIC_BACKEND(CAP_GPHOTO2, "GPHOTO2", MODE_CAPTURE_ALL, createGPhoto2Capture, createGPhoto2Capture, 0)
|
||||
#endif
|
||||
#ifdef HAVE_XINE
|
||||
DECLARE_STATIC_BACKEND(CAP_XINE, "XINE", MODE_CAPTURE_BY_FILENAME, createXINECapture, 0, 0)
|
||||
#endif
|
||||
#if defined(HAVE_ANDROID_MEDIANDK) || defined(HAVE_ANDROID_NATIVE_CAMERA)
|
||||
DECLARE_STATIC_BACKEND(CAP_ANDROID, "ANDROID_NATIVE",
|
||||
#ifdef HAVE_ANDROID_MEDIANDK
|
||||
MODE_CAPTURE_BY_FILENAME
|
||||
#else
|
||||
0
|
||||
#endif
|
||||
|
|
||||
#ifdef HAVE_ANDROID_NATIVE_CAMERA
|
||||
MODE_CAPTURE_BY_INDEX,
|
||||
#else
|
||||
0,
|
||||
#endif
|
||||
#ifdef HAVE_ANDROID_MEDIANDK
|
||||
createAndroidCapture_file,
|
||||
#else
|
||||
0,
|
||||
#endif
|
||||
#ifdef HAVE_ANDROID_NATIVE_CAMERA
|
||||
createAndroidCapture_cam,
|
||||
#else
|
||||
0,
|
||||
#endif
|
||||
0)
|
||||
#endif
|
||||
// dropped backends: MIL, TYZX
|
||||
};
|
||||
|
||||
bool sortByPriority(const VideoBackendInfo &lhs, const VideoBackendInfo &rhs)
|
||||
{
|
||||
return lhs.priority > rhs.priority;
|
||||
}
|
||||
|
||||
/** @brief Manages list of enabled backends
|
||||
*/
|
||||
class VideoBackendRegistry
|
||||
{
|
||||
protected:
|
||||
std::vector<VideoBackendInfo> enabledBackends;
|
||||
VideoBackendRegistry()
|
||||
{
|
||||
const int N = sizeof(builtin_backends)/sizeof(builtin_backends[0]);
|
||||
enabledBackends.assign(builtin_backends, builtin_backends + N);
|
||||
for (int i = 0; i < N; i++)
|
||||
{
|
||||
VideoBackendInfo& info = enabledBackends[i];
|
||||
info.priority = 1000 - i * 10;
|
||||
}
|
||||
CV_LOG_DEBUG(NULL, "VIDEOIO: Builtin backends(" << N << "): " << dumpBackends());
|
||||
if (readPrioritySettings())
|
||||
{
|
||||
CV_LOG_INFO(NULL, "VIDEOIO: Updated backends priorities: " << dumpBackends());
|
||||
}
|
||||
int enabled = 0;
|
||||
for (int i = 0; i < N; i++)
|
||||
{
|
||||
VideoBackendInfo& info = enabledBackends[enabled];
|
||||
if (enabled != i)
|
||||
info = enabledBackends[i];
|
||||
size_t param_priority = utils::getConfigurationParameterSizeT(cv::format("OPENCV_VIDEOIO_PRIORITY_%s", info.name).c_str(), (size_t)info.priority);
|
||||
CV_Assert(param_priority == (size_t)(int)param_priority); // overflow check
|
||||
if (param_priority > 0)
|
||||
{
|
||||
info.priority = (int)param_priority;
|
||||
enabled++;
|
||||
}
|
||||
else
|
||||
{
|
||||
CV_LOG_INFO(NULL, "VIDEOIO: Disable backend: " << info.name);
|
||||
}
|
||||
}
|
||||
enabledBackends.resize(enabled);
|
||||
CV_LOG_DEBUG(NULL, "VIDEOIO: Available backends(" << enabled << "): " << dumpBackends());
|
||||
std::sort(enabledBackends.begin(), enabledBackends.end(), sortByPriority);
|
||||
CV_LOG_INFO(NULL, "VIDEOIO: Enabled backends(" << enabled << ", sorted by priority): " << dumpBackends());
|
||||
}
|
||||
|
||||
static std::vector<std::string> tokenize_string(const std::string& input, char token)
|
||||
{
|
||||
std::vector<std::string> result;
|
||||
std::string::size_type prev_pos = 0, pos = 0;
|
||||
while((pos = input.find(token, pos)) != std::string::npos)
|
||||
{
|
||||
result.push_back(input.substr(prev_pos, pos-prev_pos));
|
||||
prev_pos = ++pos;
|
||||
}
|
||||
result.push_back(input.substr(prev_pos));
|
||||
return result;
|
||||
}
|
||||
bool readPrioritySettings()
|
||||
{
|
||||
bool hasChanges = false;
|
||||
cv::String prioritized_backends = utils::getConfigurationParameterString("OPENCV_VIDEOIO_PRIORITY_LIST", NULL);
|
||||
if (prioritized_backends.empty())
|
||||
return hasChanges;
|
||||
CV_LOG_INFO(NULL, "VIDEOIO: Configured priority list (OPENCV_VIDEOIO_PRIORITY_LIST): " << prioritized_backends);
|
||||
const std::vector<std::string> names = tokenize_string(prioritized_backends, ',');
|
||||
for (size_t i = 0; i < names.size(); i++)
|
||||
{
|
||||
const std::string& name = names[i];
|
||||
bool found = false;
|
||||
for (size_t k = 0; k < enabledBackends.size(); k++)
|
||||
{
|
||||
VideoBackendInfo& info = enabledBackends[k];
|
||||
if (name == info.name)
|
||||
{
|
||||
info.priority = (int)(100000 + (names.size() - i) * 1000);
|
||||
CV_LOG_DEBUG(NULL, "VIDEOIO: New backend priority: '" << name << "' => " << info.priority);
|
||||
found = true;
|
||||
hasChanges = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!found)
|
||||
{
|
||||
CV_LOG_WARNING(NULL, "VIDEOIO: Can't prioritize unknown/unavailable backend: '" << name << "'");
|
||||
}
|
||||
}
|
||||
return hasChanges;
|
||||
}
|
||||
public:
|
||||
std::string dumpBackends() const
|
||||
{
|
||||
std::ostringstream os;
|
||||
for (size_t i = 0; i < enabledBackends.size(); i++)
|
||||
{
|
||||
if (i > 0) os << "; ";
|
||||
const VideoBackendInfo& info = enabledBackends[i];
|
||||
os << info.name << '(' << info.priority << ')';
|
||||
}
|
||||
return os.str();
|
||||
}
|
||||
|
||||
static VideoBackendRegistry& getInstance()
|
||||
{
|
||||
static VideoBackendRegistry g_instance;
|
||||
return g_instance;
|
||||
}
|
||||
|
||||
inline std::vector<VideoBackendInfo> getEnabledBackends() const { return enabledBackends; }
|
||||
|
||||
inline std::vector<VideoBackendInfo> getAvailableBackends_CaptureByIndex() const
|
||||
{
|
||||
std::vector<VideoBackendInfo> result;
|
||||
for (size_t i = 0; i < enabledBackends.size(); i++)
|
||||
{
|
||||
const VideoBackendInfo& info = enabledBackends[i];
|
||||
if (info.mode & MODE_CAPTURE_BY_INDEX)
|
||||
result.push_back(info);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
inline std::vector<VideoBackendInfo> getAvailableBackends_CaptureByFilename() const
|
||||
{
|
||||
std::vector<VideoBackendInfo> result;
|
||||
for (size_t i = 0; i < enabledBackends.size(); i++)
|
||||
{
|
||||
const VideoBackendInfo& info = enabledBackends[i];
|
||||
if (info.mode & MODE_CAPTURE_BY_FILENAME)
|
||||
result.push_back(info);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
inline std::vector<VideoBackendInfo> getAvailableBackends_Writer() const
|
||||
{
|
||||
std::vector<VideoBackendInfo> result;
|
||||
for (size_t i = 0; i < enabledBackends.size(); i++)
|
||||
{
|
||||
const VideoBackendInfo& info = enabledBackends[i];
|
||||
if (info.mode & MODE_WRITER)
|
||||
result.push_back(info);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
namespace videoio_registry {
|
||||
|
||||
std::vector<VideoBackendInfo> getAvailableBackends_CaptureByIndex()
|
||||
{
|
||||
const std::vector<VideoBackendInfo> result = VideoBackendRegistry::getInstance().getAvailableBackends_CaptureByIndex();
|
||||
return result;
|
||||
}
|
||||
std::vector<VideoBackendInfo> getAvailableBackends_CaptureByFilename()
|
||||
{
|
||||
const std::vector<VideoBackendInfo> result = VideoBackendRegistry::getInstance().getAvailableBackends_CaptureByFilename();
|
||||
return result;
|
||||
}
|
||||
std::vector<VideoBackendInfo> getAvailableBackends_Writer()
|
||||
{
|
||||
const std::vector<VideoBackendInfo> result = VideoBackendRegistry::getInstance().getAvailableBackends_Writer();
|
||||
return result;
|
||||
}
|
||||
|
||||
cv::String getBackendName(VideoCaptureAPIs api)
|
||||
{
|
||||
if (api == CAP_ANY)
|
||||
return "CAP_ANY"; // special case, not a part of backends list
|
||||
const int N = sizeof(builtin_backends)/sizeof(builtin_backends[0]);
|
||||
for (size_t i = 0; i < N; i++)
|
||||
{
|
||||
const VideoBackendInfo& backend = builtin_backends[i];
|
||||
if (backend.id == api)
|
||||
return backend.name;
|
||||
}
|
||||
return cv::format("UnknownVideoAPI(%d)", (int)api);
|
||||
}
|
||||
|
||||
std::vector<VideoCaptureAPIs> getBackends()
|
||||
{
|
||||
std::vector<VideoBackendInfo> backends = VideoBackendRegistry::getInstance().getEnabledBackends();
|
||||
std::vector<VideoCaptureAPIs> result;
|
||||
for (size_t i = 0; i < backends.size(); i++)
|
||||
result.push_back((VideoCaptureAPIs)backends[i].id);
|
||||
return result;
|
||||
}
|
||||
|
||||
std::vector<VideoCaptureAPIs> getCameraBackends()
|
||||
{
|
||||
const std::vector<VideoBackendInfo> backends = VideoBackendRegistry::getInstance().getAvailableBackends_CaptureByIndex();
|
||||
std::vector<VideoCaptureAPIs> result;
|
||||
for (size_t i = 0; i < backends.size(); i++)
|
||||
result.push_back((VideoCaptureAPIs)backends[i].id);
|
||||
return result;
|
||||
|
||||
}
|
||||
|
||||
std::vector<VideoCaptureAPIs> getStreamBackends()
|
||||
{
|
||||
const std::vector<VideoBackendInfo> backends = VideoBackendRegistry::getInstance().getAvailableBackends_CaptureByFilename();
|
||||
std::vector<VideoCaptureAPIs> result;
|
||||
for (size_t i = 0; i < backends.size(); i++)
|
||||
result.push_back((VideoCaptureAPIs)backends[i].id);
|
||||
return result;
|
||||
|
||||
}
|
||||
|
||||
std::vector<VideoCaptureAPIs> getWriterBackends()
|
||||
{
|
||||
const std::vector<VideoBackendInfo> backends = VideoBackendRegistry::getInstance().getAvailableBackends_Writer();
|
||||
std::vector<VideoCaptureAPIs> result;
|
||||
for (size_t i = 0; i < backends.size(); i++)
|
||||
result.push_back((VideoCaptureAPIs)backends[i].id);
|
||||
return result;
|
||||
}
|
||||
|
||||
bool hasBackend(VideoCaptureAPIs api)
|
||||
{
|
||||
std::vector<VideoBackendInfo> backends = VideoBackendRegistry::getInstance().getEnabledBackends();
|
||||
for (size_t i = 0; i < backends.size(); i++)
|
||||
{
|
||||
const VideoBackendInfo& info = backends[i];
|
||||
if (api == info.id)
|
||||
{
|
||||
CV_Assert(!info.backendFactory.empty());
|
||||
return !info.backendFactory->getBackend().empty();
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool isBackendBuiltIn(VideoCaptureAPIs api)
|
||||
{
|
||||
std::vector<VideoBackendInfo> backends = VideoBackendRegistry::getInstance().getEnabledBackends();
|
||||
for (size_t i = 0; i < backends.size(); i++)
|
||||
{
|
||||
const VideoBackendInfo& info = backends[i];
|
||||
if (api == info.id)
|
||||
{
|
||||
CV_Assert(!info.backendFactory.empty());
|
||||
return info.backendFactory->isBuiltIn();
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string getCameraBackendPluginVersion(VideoCaptureAPIs api,
|
||||
CV_OUT int& version_ABI,
|
||||
CV_OUT int& version_API
|
||||
)
|
||||
{
|
||||
const std::vector<VideoBackendInfo> backends = VideoBackendRegistry::getInstance().getAvailableBackends_CaptureByIndex();
|
||||
for (size_t i = 0; i < backends.size(); i++)
|
||||
{
|
||||
const VideoBackendInfo& info = backends[i];
|
||||
if (api == info.id)
|
||||
{
|
||||
CV_Assert(!info.backendFactory.empty());
|
||||
CV_Assert(!info.backendFactory->isBuiltIn());
|
||||
return getCapturePluginVersion(info.backendFactory, version_ABI, version_API);
|
||||
}
|
||||
}
|
||||
CV_Error(Error::StsError, "Unknown or wrong backend ID");
|
||||
}
|
||||
|
||||
std::string getStreamBackendPluginVersion(VideoCaptureAPIs api,
|
||||
CV_OUT int& version_ABI,
|
||||
CV_OUT int& version_API
|
||||
)
|
||||
{
|
||||
const std::vector<VideoBackendInfo> backends = VideoBackendRegistry::getInstance().getAvailableBackends_CaptureByFilename();
|
||||
for (size_t i = 0; i < backends.size(); i++)
|
||||
{
|
||||
const VideoBackendInfo& info = backends[i];
|
||||
if (api == info.id)
|
||||
{
|
||||
CV_Assert(!info.backendFactory.empty());
|
||||
CV_Assert(!info.backendFactory->isBuiltIn());
|
||||
return getCapturePluginVersion(info.backendFactory, version_ABI, version_API);
|
||||
}
|
||||
}
|
||||
CV_Error(Error::StsError, "Unknown or wrong backend ID");
|
||||
}
|
||||
|
||||
|
||||
/** @brief Returns description and ABI/API version of videoio plugin's writer interface */
|
||||
std::string getWriterBackendPluginVersion(VideoCaptureAPIs api,
|
||||
CV_OUT int& version_ABI,
|
||||
CV_OUT int& version_API
|
||||
)
|
||||
{
|
||||
const std::vector<VideoBackendInfo> backends = VideoBackendRegistry::getInstance().getAvailableBackends_Writer();
|
||||
for (size_t i = 0; i < backends.size(); i++)
|
||||
{
|
||||
const VideoBackendInfo& info = backends[i];
|
||||
if (api == info.id)
|
||||
{
|
||||
CV_Assert(!info.backendFactory.empty());
|
||||
CV_Assert(!info.backendFactory->isBuiltIn());
|
||||
return getWriterPluginVersion(info.backendFactory, version_ABI, version_API);
|
||||
}
|
||||
}
|
||||
CV_Error(Error::StsError, "Unknown or wrong backend ID");
|
||||
}
|
||||
|
||||
|
||||
} // namespace registry
|
||||
|
||||
} // namespace
|
||||
41
3rdparty/opencv-4.5.4/modules/videoio/src/videoio_registry.hpp
vendored
Normal file
41
3rdparty/opencv-4.5.4/modules/videoio/src/videoio_registry.hpp
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
|
||||
#ifndef __OPENCV_VIDEOIO_VIDEOIO_REGISTRY_HPP__
|
||||
#define __OPENCV_VIDEOIO_VIDEOIO_REGISTRY_HPP__
|
||||
|
||||
#include "backend.hpp"
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
/** Capabilities bitmask */
|
||||
enum BackendMode {
|
||||
MODE_CAPTURE_BY_INDEX = 1 << 0, //!< device index
|
||||
MODE_CAPTURE_BY_FILENAME = 1 << 1, //!< filename or device path (v4l2)
|
||||
MODE_WRITER = 1 << 4, //!< writer
|
||||
|
||||
MODE_CAPTURE_ALL = MODE_CAPTURE_BY_INDEX + MODE_CAPTURE_BY_FILENAME,
|
||||
};
|
||||
|
||||
struct VideoBackendInfo {
|
||||
VideoCaptureAPIs id;
|
||||
BackendMode mode;
|
||||
int priority; // 1000-<index*10> - default builtin priority
|
||||
// 0 - disabled (OPENCV_VIDEOIO_PRIORITY_<name> = 0)
|
||||
// >10000 - prioritized list (OPENCV_VIDEOIO_PRIORITY_LIST)
|
||||
const char* name;
|
||||
Ptr<IBackendFactory> backendFactory;
|
||||
};
|
||||
|
||||
namespace videoio_registry {
|
||||
|
||||
std::vector<VideoBackendInfo> getAvailableBackends_CaptureByIndex();
|
||||
std::vector<VideoBackendInfo> getAvailableBackends_CaptureByFilename();
|
||||
std::vector<VideoBackendInfo> getAvailableBackends_Writer();
|
||||
|
||||
} // namespace
|
||||
|
||||
} // namespace
|
||||
#endif // __OPENCV_VIDEOIO_VIDEOIO_REGISTRY_HPP__
|
||||
565
3rdparty/opencv-4.5.4/modules/videoio/src/wrl.h
vendored
Normal file
565
3rdparty/opencv-4.5.4/modules/videoio/src/wrl.h
vendored
Normal file
@@ -0,0 +1,565 @@
|
||||
#pragma once
|
||||
|
||||
#include <winstring.h>
|
||||
#include <stdio.h>
|
||||
#include <tchar.h>
|
||||
#include <crtdbg.h>
|
||||
#include <array>
|
||||
#include <vector>
|
||||
|
||||
#include <wrl\implements.h>
|
||||
#include <wrl\event.h>
|
||||
#include <inspectable.h>
|
||||
#ifndef __cplusplus_winrt
|
||||
#include <windows.foundation.h>
|
||||
|
||||
__declspec(noreturn) void __stdcall __abi_WinRTraiseException(long);
|
||||
|
||||
inline void __abi_ThrowIfFailed(long __hrArg)
|
||||
{
|
||||
if (__hrArg < 0)
|
||||
{
|
||||
__abi_WinRTraiseException(__hrArg);
|
||||
}
|
||||
}
|
||||
|
||||
struct Guid
|
||||
{
|
||||
public:
|
||||
Guid();
|
||||
Guid(__rcGUID_t);
|
||||
operator ::__rcGUID_t();
|
||||
bool Equals(Guid __guidArg);
|
||||
bool Equals(__rcGUID_t __guidArg);
|
||||
Guid(unsigned int __aArg, unsigned short __bArg, unsigned short __cArg, unsigned __int8 __dArg,
|
||||
unsigned __int8 __eArg, unsigned __int8 __fArg, unsigned __int8 __gArg, unsigned __int8 __hArg,
|
||||
unsigned __int8 __iArg, unsigned __int8 __jArg, unsigned __int8 __kArg);
|
||||
Guid(unsigned int __aArg, unsigned short __bArg, unsigned short __cArg, const unsigned __int8* __dArg);
|
||||
private:
|
||||
unsigned long __a;
|
||||
unsigned short __b;
|
||||
unsigned short __c;
|
||||
unsigned char __d;
|
||||
unsigned char __e;
|
||||
unsigned char __f;
|
||||
unsigned char __g;
|
||||
unsigned char __h;
|
||||
unsigned char __i;
|
||||
unsigned char __j;
|
||||
unsigned char __k;
|
||||
};
|
||||
|
||||
static_assert(sizeof(Guid) == sizeof(::_GUID), "Incorrect size for Guid");
|
||||
static_assert(sizeof(__rcGUID_t) == sizeof(::_GUID), "Incorrect size for __rcGUID_t");
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
inline Guid::Guid() : __a(0), __b(0), __c(0), __d(0), __e(0), __f(0), __g(0), __h(0), __i(0), __j(0), __k(0)
|
||||
{
|
||||
}
|
||||
|
||||
inline Guid::Guid(__rcGUID_t __guid) :
|
||||
__a(reinterpret_cast<const __s_GUID&>(__guid).Data1),
|
||||
__b(reinterpret_cast<const __s_GUID&>(__guid).Data2),
|
||||
__c(reinterpret_cast<const __s_GUID&>(__guid).Data3),
|
||||
__d(reinterpret_cast<const __s_GUID&>(__guid).Data4[0]),
|
||||
__e(reinterpret_cast<const __s_GUID&>(__guid).Data4[1]),
|
||||
__f(reinterpret_cast<const __s_GUID&>(__guid).Data4[2]),
|
||||
__g(reinterpret_cast<const __s_GUID&>(__guid).Data4[3]),
|
||||
__h(reinterpret_cast<const __s_GUID&>(__guid).Data4[4]),
|
||||
__i(reinterpret_cast<const __s_GUID&>(__guid).Data4[5]),
|
||||
__j(reinterpret_cast<const __s_GUID&>(__guid).Data4[6]),
|
||||
__k(reinterpret_cast<const __s_GUID&>(__guid).Data4[7])
|
||||
{
|
||||
}
|
||||
|
||||
inline Guid::operator ::__rcGUID_t()
|
||||
{
|
||||
return reinterpret_cast<__rcGUID_t>(*this);
|
||||
}
|
||||
|
||||
inline bool Guid::Equals(Guid __guidArg)
|
||||
{
|
||||
return *this == __guidArg;
|
||||
}
|
||||
|
||||
inline bool Guid::Equals(__rcGUID_t __guidArg)
|
||||
{
|
||||
return *this == static_cast< Guid>(__guidArg);
|
||||
}
|
||||
|
||||
inline bool operator==(Guid __aArg, Guid __bArg)
|
||||
{
|
||||
auto __a = reinterpret_cast<unsigned long*>(&__aArg);
|
||||
auto __b = reinterpret_cast<unsigned long*>(&__bArg);
|
||||
|
||||
return (__a[0] == __b[0] && __a[1] == __b[1] && __a[2] == __b[2] && __a[3] == __b[3]);
|
||||
}
|
||||
|
||||
inline bool operator!=(Guid __aArg, Guid __bArg)
|
||||
{
|
||||
return !(__aArg == __bArg);
|
||||
}
|
||||
|
||||
inline bool operator<(Guid __aArg, Guid __bArg)
|
||||
{
|
||||
auto __a = reinterpret_cast<unsigned long*>(&__aArg);
|
||||
auto __b = reinterpret_cast<unsigned long*>(&__bArg);
|
||||
|
||||
if (__a[0] != __b[0])
|
||||
{
|
||||
return __a[0] < __b[0];
|
||||
}
|
||||
|
||||
if (__a[1] != __b[1])
|
||||
{
|
||||
return __a[1] < __b[1];
|
||||
}
|
||||
|
||||
if (__a[2] != __b[2])
|
||||
{
|
||||
return __a[2] < __b[2];
|
||||
}
|
||||
|
||||
if (__a[3] != __b[3])
|
||||
{
|
||||
return __a[3] < __b[3];
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
inline Guid::Guid(unsigned int __aArg, unsigned short __bArg, unsigned short __cArg, unsigned __int8 __dArg,
|
||||
unsigned __int8 __eArg, unsigned __int8 __fArg, unsigned __int8 __gArg, unsigned __int8 __hArg,
|
||||
unsigned __int8 __iArg, unsigned __int8 __jArg, unsigned __int8 __kArg) :
|
||||
__a(__aArg), __b(__bArg), __c(__cArg), __d(__dArg), __e(__eArg), __f(__fArg), __g(__gArg), __h(__hArg), __i(__iArg), __j(__jArg), __k(__kArg)
|
||||
{
|
||||
}
|
||||
|
||||
inline Guid::Guid(unsigned int __aArg, unsigned short __bArg, unsigned short __cArg, const unsigned __int8 __dArg[8]) :
|
||||
__a(__aArg), __b(__bArg), __c(__cArg)
|
||||
{
|
||||
__d = __dArg[0];
|
||||
__e = __dArg[1];
|
||||
__f = __dArg[2];
|
||||
__g = __dArg[3];
|
||||
__h = __dArg[4];
|
||||
__i = __dArg[5];
|
||||
__j = __dArg[6];
|
||||
__k = __dArg[7];
|
||||
}
|
||||
|
||||
__declspec(selectany) Guid __winrt_GUID_NULL(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
|
||||
|
||||
//
|
||||
//// Don't want to define the real IUnknown from unknown.h here. That would means if the user has
|
||||
//// any broken code that uses it, compile errors will take the form of e.g.:
|
||||
//// predefined C++ WinRT types (compiler internal)(41) : see declaration of 'IUnknown::QueryInterface'
|
||||
//// This is not helpful. If they use IUnknown, we still need to point them to the actual unknown.h so
|
||||
//// that they can see the original definition.
|
||||
////
|
||||
//// For WinRT, we'll instead have a parallel COM interface hierarchy for basic interfaces starting with _.
|
||||
//// The type mismatch is not an issue. COM passes types through GUID / void* combos - the original type
|
||||
//// doesn't come into play unless the user static_casts an implementation type to one of these, but
|
||||
//// the WinRT implementation types are hidden.
|
||||
__interface __declspec(uuid("00000000-0000-0000-C000-000000000046")) __abi_IUnknown
|
||||
{
|
||||
public:
|
||||
virtual long __stdcall __abi_QueryInterface(Guid&, void**) = 0;
|
||||
virtual unsigned long __stdcall __abi_AddRef() = 0;
|
||||
virtual unsigned long __stdcall __abi_Release() = 0;
|
||||
};
|
||||
|
||||
__declspec(dllexport) __declspec(noreturn) void __stdcall __abi_WinRTraiseNotImplementedException();
|
||||
__declspec(dllexport) __declspec(noreturn) void __stdcall __abi_WinRTraiseInvalidCastException();
|
||||
__declspec(dllexport) __declspec(noreturn) void __stdcall __abi_WinRTraiseNullReferenceException();
|
||||
__declspec(dllexport) __declspec(noreturn) void __stdcall __abi_WinRTraiseOperationCanceledException();
|
||||
__declspec(dllexport) __declspec(noreturn) void __stdcall __abi_WinRTraiseFailureException();
|
||||
__declspec(dllexport) __declspec(noreturn) void __stdcall __abi_WinRTraiseAccessDeniedException();
|
||||
__declspec(dllexport) __declspec(noreturn) void __stdcall __abi_WinRTraiseOutOfMemoryException();
|
||||
__declspec(dllexport) __declspec(noreturn) void __stdcall __abi_WinRTraiseInvalidArgumentException();
|
||||
__declspec(dllexport) __declspec(noreturn) void __stdcall __abi_WinRTraiseOutOfBoundsException();
|
||||
__declspec(dllexport) __declspec(noreturn) void __stdcall __abi_WinRTraiseChangedStateException();
|
||||
__declspec(dllexport) __declspec(noreturn) void __stdcall __abi_WinRTraiseClassNotRegisteredException();
|
||||
__declspec(dllexport) __declspec(noreturn) void __stdcall __abi_WinRTraiseWrongThreadException();
|
||||
__declspec(dllexport) __declspec(noreturn) void __stdcall __abi_WinRTraiseDisconnectedException();
|
||||
__declspec(dllexport) __declspec(noreturn) void __stdcall __abi_WinRTraiseObjectDisposedException();
|
||||
__declspec(dllexport) __declspec(noreturn) void __stdcall __abi_WinRTraiseCOMException(long);
|
||||
|
||||
__declspec(noreturn) inline void __stdcall __abi_WinRTraiseException(long __hrArg)
|
||||
{
|
||||
switch (__hrArg)
|
||||
{
|
||||
case 0x80004001L: // E_NOTIMPL
|
||||
__abi_WinRTraiseNotImplementedException();
|
||||
|
||||
case 0x80004002L: // E_NOINTERFACE
|
||||
__abi_WinRTraiseInvalidCastException();
|
||||
|
||||
case 0x80004003L: // E_POINTER
|
||||
__abi_WinRTraiseNullReferenceException();
|
||||
|
||||
case 0x80004004L: // E_ABORT
|
||||
__abi_WinRTraiseOperationCanceledException();
|
||||
|
||||
case 0x80004005L: // E_FAIL
|
||||
__abi_WinRTraiseFailureException();
|
||||
|
||||
case 0x80070005L: // E_ACCESSDENIED
|
||||
__abi_WinRTraiseAccessDeniedException();
|
||||
|
||||
case 0x8007000EL: // E_OUTOFMEMORY
|
||||
__abi_WinRTraiseOutOfMemoryException();
|
||||
|
||||
case 0x80070057L: // E_INVALIDARG
|
||||
__abi_WinRTraiseInvalidArgumentException();
|
||||
|
||||
case 0x8000000BL: // E_BOUNDS
|
||||
__abi_WinRTraiseOutOfBoundsException();
|
||||
|
||||
case 0x8000000CL: // E_CHANGED_STATE
|
||||
__abi_WinRTraiseChangedStateException();
|
||||
|
||||
case 0x80040154L: // REGDB_E_CLASSNOTREG
|
||||
__abi_WinRTraiseClassNotRegisteredException();
|
||||
|
||||
case 0x8001010EL: // RPC_E_WRONG_THREAD
|
||||
__abi_WinRTraiseWrongThreadException();
|
||||
|
||||
case 0x80010108L: // RPC_E_DISCONNECTED
|
||||
__abi_WinRTraiseDisconnectedException();
|
||||
|
||||
case 0x80000013L: // RO_E_CLOSED
|
||||
__abi_WinRTraiseObjectDisposedException();
|
||||
|
||||
default:
|
||||
__abi_WinRTraiseCOMException(__hrArg);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
struct __abi_CaptureBase
|
||||
{
|
||||
protected:
|
||||
virtual __stdcall ~__abi_CaptureBase() {}
|
||||
|
||||
public:
|
||||
static const size_t __smallCaptureSize = 4 * sizeof(void*);
|
||||
void* operator new(size_t __sizeArg, void* __pSmallCaptureArg)
|
||||
{
|
||||
if (__sizeArg > __smallCaptureSize)
|
||||
{
|
||||
return reinterpret_cast<__abi_CaptureBase*>(HeapAlloc(GetProcessHeap(), 0, __sizeArg));
|
||||
}
|
||||
|
||||
return __pSmallCaptureArg;
|
||||
}
|
||||
|
||||
void operator delete(void* __ptrArg, void* __pSmallCaptureArg)
|
||||
{
|
||||
__abi_CaptureBase* __pThis = static_cast<__abi_CaptureBase*>(__ptrArg);
|
||||
__pThis->Delete(__pThis, __pSmallCaptureArg);
|
||||
}
|
||||
|
||||
inline void* GetVFunction(int __slotArg)
|
||||
{
|
||||
return (*reinterpret_cast<void***>(this))[__slotArg];
|
||||
}
|
||||
|
||||
void Delete(__abi_CaptureBase* __pThisArg, void* __pSmallCaptureArg)
|
||||
{
|
||||
__pThisArg->~__abi_CaptureBase();
|
||||
if (__pThisArg != __pSmallCaptureArg)
|
||||
{
|
||||
HeapFree(GetProcessHeap(), 0, __pThisArg);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
struct __abi_CapturePtr
|
||||
{
|
||||
char* smallCapture[__abi_CaptureBase::__smallCaptureSize];
|
||||
__abi_CaptureBase* ptr;
|
||||
__abi_CapturePtr() : ptr(reinterpret_cast<__abi_CaptureBase*>(smallCapture)) {}
|
||||
~__abi_CapturePtr()
|
||||
{
|
||||
ptr->Delete(ptr, smallCapture);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename __TFunctor, typename __TReturnType>
|
||||
struct __abi_FunctorCapture0 : public __abi_CaptureBase
|
||||
{
|
||||
__TFunctor functor;
|
||||
__abi_FunctorCapture0(__TFunctor __functor) : functor(__functor) {}
|
||||
virtual __TReturnType __stdcall Invoke() { return functor(); }
|
||||
};
|
||||
template <typename __TFunctor, typename __TReturnType, typename __TArg0>
|
||||
struct __abi_FunctorCapture1 : public __abi_CaptureBase
|
||||
{
|
||||
__TFunctor functor;
|
||||
__abi_FunctorCapture1(__TFunctor __functor) : functor(__functor) {}
|
||||
virtual __TReturnType __stdcall Invoke(__TArg0 __arg0) { return functor(__arg0); }
|
||||
};
|
||||
template <typename __TFunctor, typename __TReturnType, typename __TArg0, typename __TArg1>
|
||||
struct __abi_FunctorCapture2 : public __abi_CaptureBase
|
||||
{
|
||||
__TFunctor functor;
|
||||
__abi_FunctorCapture2(__TFunctor __functor) : functor(__functor) {}
|
||||
virtual __TReturnType __stdcall Invoke(__TArg0 __arg0, __TArg1 __arg1) { return functor(__arg0, __arg1); }
|
||||
};
|
||||
template <typename __TFunctor, typename __TReturnType, typename __TArg0, typename __TArg1, typename __TArg2>
|
||||
struct __abi_FunctorCapture3 : public __abi_CaptureBase
|
||||
{
|
||||
__TFunctor functor;
|
||||
__abi_FunctorCapture3(__TFunctor __functor) : functor(__functor) {}
|
||||
virtual __TReturnType __stdcall Invoke(__TArg0 __arg0, __TArg1 __arg1, __TArg2 __arg2) { return functor(__arg0, __arg1, __arg2); }
|
||||
};
|
||||
template <typename __TFunctor, typename __TReturnType, typename __TArg0, typename __TArg1, typename __TArg2, typename __TArg3>
|
||||
struct __abi_FunctorCapture4 : public __abi_CaptureBase
|
||||
{
|
||||
__TFunctor functor;
|
||||
__abi_FunctorCapture4(__TFunctor __functor) : functor(__functor) {}
|
||||
virtual __TReturnType __stdcall Invoke(__TArg0 __arg0, __TArg1 __arg1, __TArg2 __arg2, __TArg3 __arg3) { return functor(__arg0, __arg1, __arg2, __arg3); }
|
||||
};
|
||||
template <typename __TFunctor, typename __TReturnType, typename __TArg0, typename __TArg1, typename __TArg2, typename __TArg3, typename __TArg4>
|
||||
struct __abi_FunctorCapture5 : public __abi_CaptureBase
|
||||
{
|
||||
__TFunctor functor;
|
||||
__abi_FunctorCapture5(__TFunctor __functor) : functor(__functor) {}
|
||||
virtual __TReturnType __stdcall Invoke(__TArg0 __arg0, __TArg1 __arg1, __TArg2 __arg2, __TArg3 __arg3, __TArg4 __arg4) { return functor(__arg0, __arg1, __arg2, __arg3, __arg4); }
|
||||
};
|
||||
template <typename __TFunctor, typename __TReturnType, typename __TArg0, typename __TArg1, typename __TArg2, typename __TArg3, typename __TArg4, typename __TArg5>
|
||||
struct __abi_FunctorCapture6 : public __abi_CaptureBase
|
||||
{
|
||||
__TFunctor functor;
|
||||
__abi_FunctorCapture6(__TFunctor __functor) : functor(__functor) {}
|
||||
virtual __TReturnType __stdcall Invoke(__TArg0 __arg0, __TArg1 __arg1, __TArg2 __arg2, __TArg3 __arg3, __TArg4 __arg4, __TArg5 __arg5) { return functor(__arg0, __arg1, __arg2, __arg3, __arg4, __arg5); }
|
||||
};
|
||||
template <typename __TFunctor, typename __TReturnType, typename __TArg0, typename __TArg1, typename __TArg2, typename __TArg3, typename __TArg4, typename __TArg5, typename __TArg6>
|
||||
struct __abi_FunctorCapture7 : public __abi_CaptureBase
|
||||
{
|
||||
__TFunctor functor;
|
||||
__abi_FunctorCapture7(__TFunctor __functor) : functor(__functor) {}
|
||||
virtual __TReturnType __stdcall Invoke(__TArg0 __arg0, __TArg1 __arg1, __TArg2 __arg2, __TArg3 __arg3, __TArg4 __arg4, __TArg5 __arg5, __TArg6 __arg6) { return functor(__arg0, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6); }
|
||||
};
|
||||
template <typename __TFunctor, typename __TReturnType, typename __TArg0, typename __TArg1, typename __TArg2, typename __TArg3, typename __TArg4, typename __TArg5, typename __TArg6, typename __TArg7>
|
||||
struct __abi_FunctorCapture8 : public __abi_CaptureBase
|
||||
{
|
||||
__TFunctor functor;
|
||||
__abi_FunctorCapture8(__TFunctor __functor) : functor(__functor) {}
|
||||
virtual __TReturnType __stdcall Invoke(__TArg0 __arg0, __TArg1 __arg1, __TArg2 __arg2, __TArg3 __arg3, __TArg4 __arg4, __TArg5 __arg5, __TArg6 __arg6, __TArg7 __arg7) { return functor(__arg0, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6, __arg7); }
|
||||
};
|
||||
template <typename __TFunctor, typename __TReturnType, typename __TArg0, typename __TArg1, typename __TArg2, typename __TArg3, typename __TArg4, typename __TArg5, typename __TArg6, typename __TArg7, typename __TArg8>
|
||||
struct __abi_FunctorCapture9 : public __abi_CaptureBase
|
||||
{
|
||||
__TFunctor functor;
|
||||
__abi_FunctorCapture9(__TFunctor __functor) : functor(__functor) {}
|
||||
virtual __TReturnType __stdcall Invoke(__TArg0 __arg0, __TArg1 __arg1, __TArg2 __arg2, __TArg3 __arg3, __TArg4 __arg4, __TArg5 __arg5, __TArg6 __arg6, __TArg7 __arg7, __TArg8 __arg8) { return functor(__arg0, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6, __arg7, __arg8); }
|
||||
};
|
||||
template <typename __TFunctor, typename __TReturnType, typename __TArg0, typename __TArg1, typename __TArg2, typename __TArg3, typename __TArg4, typename __TArg5, typename __TArg6, typename __TArg7, typename __TArg8, typename __TArg9>
|
||||
struct __abi_FunctorCapture10 : public __abi_CaptureBase
|
||||
{
|
||||
__TFunctor functor;
|
||||
__abi_FunctorCapture10(__TFunctor __functor) : functor(__functor) {}
|
||||
virtual __TReturnType __stdcall Invoke(__TArg0 __arg0, __TArg1 __arg1, __TArg2 __arg2, __TArg3 __arg3, __TArg4 __arg4, __TArg5 __arg5, __TArg6 __arg6, __TArg7 __arg7, __TArg8 __arg8, __TArg9 __arg9) { return functor(__arg0, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6, __arg7, __arg8, __arg9); }
|
||||
};
|
||||
|
||||
#define __is_winrt_array(type) (type == ABI::Windows::Foundation::PropertyType::PropertyType_UInt8Array || type == ABI::Windows::Foundation::PropertyType::PropertyType_Int16Array ||\
|
||||
type == ABI::Windows::Foundation::PropertyType::PropertyType_UInt16Array || type == ABI::Windows::Foundation::PropertyType::PropertyType_Int32Array ||\
|
||||
type == ABI::Windows::Foundation::PropertyType::PropertyType_UInt32Array || type == ABI::Windows::Foundation::PropertyType::PropertyType_Int64Array ||\
|
||||
type == ABI::Windows::Foundation::PropertyType::PropertyType_UInt64Array || type == ABI::Windows::Foundation::PropertyType::PropertyType_SingleArray ||\
|
||||
type == ABI::Windows::Foundation::PropertyType::PropertyType_DoubleArray || type == ABI::Windows::Foundation::PropertyType::PropertyType_Char16Array ||\
|
||||
type == ABI::Windows::Foundation::PropertyType::PropertyType_BooleanArray || type == ABI::Windows::Foundation::PropertyType::PropertyType_StringArray ||\
|
||||
type == ABI::Windows::Foundation::PropertyType::PropertyType_InspectableArray || type == ABI::Windows::Foundation::PropertyType::PropertyType_DateTimeArray ||\
|
||||
type == ABI::Windows::Foundation::PropertyType::PropertyType_TimeSpanArray || type == ABI::Windows::Foundation::PropertyType::PropertyType_GuidArray ||\
|
||||
type == ABI::Windows::Foundation::PropertyType::PropertyType_PointArray || type == ABI::Windows::Foundation::PropertyType::PropertyType_SizeArray ||\
|
||||
type == ABI::Windows::Foundation::PropertyType::PropertyType_RectArray || type == ABI::Windows::Foundation::PropertyType::PropertyType_OtherTypeArray)
|
||||
|
||||
template<typename _Type, bool bUnknown = std::is_base_of<IUnknown, _Type>::value>
|
||||
struct winrt_type
|
||||
{
|
||||
};
|
||||
template<typename _Type>
|
||||
struct winrt_type<_Type, true>
|
||||
{
|
||||
static IUnknown* create(_Type* _ObjInCtx) {
|
||||
return reinterpret_cast<IUnknown*>(_ObjInCtx);
|
||||
}
|
||||
static IID getuuid() { return __uuidof(_Type); }
|
||||
static const ABI::Windows::Foundation::PropertyType _PropType = ABI::Windows::Foundation::PropertyType::PropertyType_OtherType;
|
||||
};
|
||||
template <typename _Type>
|
||||
struct winrt_type<_Type, false>
|
||||
{
|
||||
static IUnknown* create(_Type* _ObjInCtx) {
|
||||
Microsoft::WRL::ComPtr<IInspectable> _PObj;
|
||||
Microsoft::WRL::ComPtr<IActivationFactory> objFactory;
|
||||
HRESULT hr = Windows::Foundation::GetActivationFactory(Microsoft::WRL::Wrappers::HStringReference(RuntimeClass_Windows_Foundation_PropertyValue).Get(), objFactory.ReleaseAndGetAddressOf());
|
||||
if (FAILED(hr)) return nullptr;
|
||||
Microsoft::WRL::ComPtr<ABI::Windows::Foundation::IPropertyValueStatics> spPropVal;
|
||||
if (SUCCEEDED(hr))
|
||||
hr = objFactory.As(&spPropVal);
|
||||
if (SUCCEEDED(hr)) {
|
||||
hr = winrt_type<_Type>::create(spPropVal.Get(), _ObjInCtx, _PObj.GetAddressOf());
|
||||
if (SUCCEEDED(hr))
|
||||
return reinterpret_cast<IUnknown*>(_PObj.Detach());
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
static IID getuuid() { return __uuidof(ABI::Windows::Foundation::IPropertyValue); }
|
||||
static const ABI::Windows::Foundation::PropertyType _PropType = ABI::Windows::Foundation::PropertyType::PropertyType_OtherType;
|
||||
};
|
||||
|
||||
template<>
|
||||
struct winrt_type<void>
|
||||
{
|
||||
static HRESULT create(ABI::Windows::Foundation::IPropertyValueStatics* spPropVal, void* _ObjInCtx, IInspectable** ppInsp) {
|
||||
(void)_ObjInCtx;
|
||||
return spPropVal->CreateEmpty(ppInsp);
|
||||
}
|
||||
static const ABI::Windows::Foundation::PropertyType _PropType = ABI::Windows::Foundation::PropertyType::PropertyType_Empty;
|
||||
};
|
||||
#define MAKE_TYPE(Type, Name) template<>\
|
||||
struct winrt_type<Type>\
|
||||
{\
|
||||
static HRESULT create(ABI::Windows::Foundation::IPropertyValueStatics* spPropVal, Type* _ObjInCtx, IInspectable** ppInsp) {\
|
||||
return spPropVal->Create##Name(*_ObjInCtx, ppInsp);\
|
||||
}\
|
||||
static const ABI::Windows::Foundation::PropertyType _PropType = ABI::Windows::Foundation::PropertyType::PropertyType_##Name;\
|
||||
};
|
||||
|
||||
template<typename _Type>
|
||||
struct winrt_array_type
|
||||
{
|
||||
static IUnknown* create(_Type* _ObjInCtx, size_t N) {
|
||||
Microsoft::WRL::ComPtr<IInspectable> _PObj;
|
||||
Microsoft::WRL::ComPtr<IActivationFactory> objFactory;
|
||||
HRESULT hr = Windows::Foundation::GetActivationFactory(Microsoft::WRL::Wrappers::HStringReference(RuntimeClass_Windows_Foundation_PropertyValue).Get(), objFactory.ReleaseAndGetAddressOf());
|
||||
if (FAILED(hr)) return nullptr;
|
||||
Microsoft::WRL::ComPtr<ABI::Windows::Foundation::IPropertyValueStatics> spPropVal;
|
||||
if (SUCCEEDED(hr))
|
||||
hr = objFactory.As(&spPropVal);
|
||||
if (SUCCEEDED(hr)) {
|
||||
hr = winrt_array_type<_Type>::create(spPropVal.Get(), N, _ObjInCtx, _PObj.GetAddressOf());
|
||||
if (SUCCEEDED(hr))
|
||||
return reinterpret_cast<IUnknown*>(_PObj.Detach());
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
static const ABI::Windows::Foundation::PropertyType _PropType = ABI::Windows::Foundation::PropertyType::PropertyType_OtherTypeArray;
|
||||
};
|
||||
template<int>
|
||||
struct winrt_prop_type {};
|
||||
|
||||
template <>
|
||||
struct winrt_prop_type<ABI::Windows::Foundation::PropertyType_Empty> {
|
||||
typedef void _Type;
|
||||
};
|
||||
|
||||
template <>
|
||||
struct winrt_prop_type<ABI::Windows::Foundation::PropertyType_OtherType> {
|
||||
typedef void _Type;
|
||||
};
|
||||
|
||||
template <>
|
||||
struct winrt_prop_type<ABI::Windows::Foundation::PropertyType_OtherTypeArray> {
|
||||
typedef void _Type;
|
||||
};
|
||||
|
||||
#define MAKE_PROP(Prop, Type) template <>\
|
||||
struct winrt_prop_type<ABI::Windows::Foundation::PropertyType_##Prop> {\
|
||||
typedef Type _Type;\
|
||||
};
|
||||
|
||||
#define MAKE_ARRAY_TYPE(Type, Name) MAKE_PROP(Name, Type)\
|
||||
MAKE_PROP(Name##Array, Type*)\
|
||||
MAKE_TYPE(Type, Name)\
|
||||
template<>\
|
||||
struct winrt_array_type<Type*>\
|
||||
{\
|
||||
static HRESULT create(ABI::Windows::Foundation::IPropertyValueStatics* spPropVal, UINT32 __valueSize, Type** _ObjInCtx, IInspectable** ppInsp) {\
|
||||
return spPropVal->Create##Name##Array(__valueSize, *_ObjInCtx, ppInsp);\
|
||||
}\
|
||||
static const ABI::Windows::Foundation::PropertyType _PropType = ABI::Windows::Foundation::PropertyType::PropertyType_##Name##Array;\
|
||||
static std::vector<Type> PropertyValueToVector(ABI::Windows::Foundation::IPropertyValue* propValue)\
|
||||
{\
|
||||
UINT32 uLen = 0;\
|
||||
Type* pArray = nullptr;\
|
||||
propValue->Get##Name##Array(&uLen, &pArray);\
|
||||
return std::vector<Type>(pArray, pArray + uLen);\
|
||||
}\
|
||||
};
|
||||
// Instantiate the winrt_prop_type / winrt_array_type mappings for every
// concrete WinRT property type (each scalar plus its array form).
MAKE_ARRAY_TYPE(BYTE, UInt8)
MAKE_ARRAY_TYPE(INT16, Int16)
MAKE_ARRAY_TYPE(UINT16, UInt16)
MAKE_ARRAY_TYPE(INT32, Int32)
MAKE_ARRAY_TYPE(UINT32, UInt32)
MAKE_ARRAY_TYPE(INT64, Int64)
MAKE_ARRAY_TYPE(UINT64, UInt64)
MAKE_ARRAY_TYPE(FLOAT, Single)
MAKE_ARRAY_TYPE(DOUBLE, Double)
MAKE_ARRAY_TYPE(WCHAR, Char16)
//MAKE_ARRAY_TYPE(boolean, Boolean) //conflict with identical type in C++ of BYTE/UInt8
MAKE_ARRAY_TYPE(HSTRING, String)
MAKE_ARRAY_TYPE(IInspectable*, Inspectable)
MAKE_ARRAY_TYPE(GUID, Guid)
MAKE_ARRAY_TYPE(ABI::Windows::Foundation::DateTime, DateTime)
MAKE_ARRAY_TYPE(ABI::Windows::Foundation::TimeSpan, TimeSpan)
MAKE_ARRAY_TYPE(ABI::Windows::Foundation::Point, Point)
MAKE_ARRAY_TYPE(ABI::Windows::Foundation::Size, Size)
MAKE_ARRAY_TYPE(ABI::Windows::Foundation::Rect, Rect)
|
||||
|
||||
// DerefHelper<T>::DerefType yields T unchanged for non-pointer types; a
// partial specialization for T* (below in this file) strips one level of
// indirection. Used by __is_valid_winrt_type to check whether a pointee is
// IInspectable-derived.
template <typename T>
struct DerefHelper
{
    using DerefType = T;
};
|
||||
|
||||
template < typename T >
|
||||
struct DerefHelper<T*>
|
||||
{
|
||||
typedef T DerefType;
|
||||
};
|
||||
|
||||
// __is_valid_winrt_type(_Type): compile-time predicate — true when _Type is
// one of the types that can cross the WinRT IPropertyValue boundary: void,
// the scalar ABI types (BYTE..WCHAR, boolean, HSTRING, IInspectable*, GUID,
// the Windows.Foundation structs), a WRL RuntimeClassBase-derived class, an
// IInspectable-derived pointee (via DerefHelper), or a pointer to any of the
// scalars/structs (the *Array forms).
// NOTE(review): kept as a macro rather than a constexpr/trait, presumably
// for compatibility with the original toolchain — confirm before
// modernizing.
#define __is_valid_winrt_type(_Type) (std::is_void<_Type>::value || \
std::is_same<_Type, BYTE>::value || \
std::is_same<_Type, INT16>::value || \
std::is_same<_Type, UINT16>::value || \
std::is_same<_Type, INT32>::value || \
std::is_same<_Type, UINT32>::value || \
std::is_same<_Type, INT64>::value || \
std::is_same<_Type, UINT64>::value || \
std::is_same<_Type, FLOAT>::value || \
std::is_same<_Type, DOUBLE>::value || \
std::is_same<_Type, WCHAR>::value || \
std::is_same<_Type, boolean>::value || \
std::is_same<_Type, HSTRING>::value || \
std::is_same<_Type, IInspectable *>::value || \
std::is_base_of<Microsoft::WRL::Details::RuntimeClassBase, _Type>::value || \
std::is_base_of<IInspectable, typename DerefHelper<_Type>::DerefType>::value || \
std::is_same<_Type, GUID>::value || \
std::is_same<_Type, ABI::Windows::Foundation::DateTime>::value || \
std::is_same<_Type, ABI::Windows::Foundation::TimeSpan>::value || \
std::is_same<_Type, ABI::Windows::Foundation::Point>::value || \
std::is_same<_Type, ABI::Windows::Foundation::Size>::value || \
std::is_same<_Type, ABI::Windows::Foundation::Rect>::value || \
std::is_same<_Type, BYTE*>::value || \
std::is_same<_Type, INT16*>::value || \
std::is_same<_Type, UINT16*>::value || \
std::is_same<_Type, INT32*>::value || \
std::is_same<_Type, UINT32*>::value || \
std::is_same<_Type, INT64*>::value || \
std::is_same<_Type, UINT64*>::value || \
std::is_same<_Type, FLOAT*>::value || \
std::is_same<_Type, DOUBLE*>::value || \
std::is_same<_Type, WCHAR*>::value || \
std::is_same<_Type, boolean*>::value || \
std::is_same<_Type, HSTRING*>::value || \
std::is_same<_Type, IInspectable **>::value || \
std::is_same<_Type, GUID*>::value || \
std::is_same<_Type, ABI::Windows::Foundation::DateTime*>::value || \
std::is_same<_Type, ABI::Windows::Foundation::TimeSpan*>::value || \
std::is_same<_Type, ABI::Windows::Foundation::Point*>::value || \
std::is_same<_Type, ABI::Windows::Foundation::Size*>::value || \
std::is_same<_Type, ABI::Windows::Foundation::Rect*>::value)
|
||||
#endif
|
||||
Reference in New Issue
Block a user