feat: switch backend to PaddleOCR-NCNN, switch project to CMake

1. The project backend has been migrated wholesale to the PaddleOCR-NCNN algorithm and has passed basic compatibility testing.
2. The project is now organized with CMake; to stay compatible with third-party libraries going forward, a QMake project will no longer be provided.
3. The rights/license declaration files and the code tree have been reorganized to minimize the risk of infringement.

Log: switch backend to PaddleOCR-NCNN, switch project to CMake
Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
Author: wangzhengyang
Date: 2022-05-10 09:54:44 +08:00
Parent: ecdd171c6f
Commit: 718c41634f
10018 changed files with 3593797 additions and 186748 deletions
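Since the build now uses CMake, a minimal consumer-side sketch of pulling in ncnn could look like the lines below. This is an illustrative assumption, not code from this commit; the executable name ocr_demo and the file main.cpp are placeholders (ncnn installs a CMake package config that exports an ncnn target when built with install support).

cmake_minimum_required(VERSION 3.10)
project(paddleocr_ncnn_demo CXX)
# Assumes an installed ncnn that exports its CMake package (ncnnConfig.cmake).
find_package(ncnn REQUIRED)
add_executable(ocr_demo main.cpp) # main.cpp is a placeholder source file
target_link_libraries(ocr_demo PRIVATE ncnn)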


@ -0,0 +1,286 @@
set(VIDEOIO_ENABLE_PLUGINS_DEFAULT ON)
if(EMSCRIPTEN OR IOS OR WINRT)
set(VIDEOIO_ENABLE_PLUGINS_DEFAULT OFF)
endif()
set(VIDEOIO_PLUGIN_LIST "" CACHE STRING "List of videoio backends to be compiled as plugins (ffmpeg, gstreamer, mfx, msmf or special value 'all')")
set(VIDEOIO_ENABLE_PLUGINS "${VIDEOIO_ENABLE_PLUGINS_DEFAULT}" CACHE BOOL "Allow building and using of videoio plugins")
mark_as_advanced(VIDEOIO_PLUGIN_LIST VIDEOIO_ENABLE_PLUGINS)
string(REPLACE "," ";" VIDEOIO_PLUGIN_LIST "${VIDEOIO_PLUGIN_LIST}") # support comma-separated list (,) too
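# Usage sketch (configure-time; option names taken from the cache entries above):
#   cmake -DVIDEOIO_ENABLE_PLUGINS=ON -DVIDEOIO_PLUGIN_LIST=ffmpeg,gstreamer <opencv-src>
#   cmake -DVIDEOIO_PLUGIN_LIST=all <opencv-src>   # build every supported backend as a plugin
# Both comma- and semicolon-separated lists are accepted thanks to the REPLACE above.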
if(NOT VIDEOIO_ENABLE_PLUGINS)
if(VIDEOIO_PLUGIN_LIST)
message(WARNING "VideoIO: plugins are disabled through VIDEOIO_ENABLE_PLUGINS, so VIDEOIO_PLUGIN_LIST='${VIDEOIO_PLUGIN_LIST}' is ignored")
set(VIDEOIO_PLUGIN_LIST "")
endif()
else()
# Make virtual opencv_videoio_plugins target
if(NOT TARGET opencv_videoio_plugins)
add_custom_target(opencv_videoio_plugins ALL)
endif()
endif()
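# opencv_videoio_plugins is only an umbrella target: each plugin produced by
# ocv_create_builtin_videoio_plugin() (see cmake/plugin.cmake below) attaches
# itself via add_dependencies(), so building this one target builds all
# enabled videoio plugins at once.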
ocv_add_module(videoio opencv_imgproc opencv_imgcodecs WRAP java objc python)
set(videoio_hdrs ${CMAKE_CURRENT_LIST_DIR}/src/precomp.hpp)
set(videoio_srcs
"${CMAKE_CURRENT_LIST_DIR}/src/videoio_registry.cpp"
"${CMAKE_CURRENT_LIST_DIR}/src/videoio_c.cpp"
"${CMAKE_CURRENT_LIST_DIR}/src/cap.cpp"
"${CMAKE_CURRENT_LIST_DIR}/src/cap_images.cpp"
"${CMAKE_CURRENT_LIST_DIR}/src/cap_mjpeg_encoder.cpp"
"${CMAKE_CURRENT_LIST_DIR}/src/cap_mjpeg_decoder.cpp"
"${CMAKE_CURRENT_LIST_DIR}/src/backend_plugin.cpp"
"${CMAKE_CURRENT_LIST_DIR}/src/backend_static.cpp"
"${CMAKE_CURRENT_LIST_DIR}/src/container_avi.cpp")
file(GLOB videoio_ext_hdrs
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/*.hpp"
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/*.hpp"
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/*.h"
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/legacy/*.h")
if(OPENCV_DEBUG_POSTFIX)
ocv_append_source_file_compile_definitions("${CMAKE_CURRENT_LIST_DIR}/src/backend_plugin.cpp" "DEBUG_POSTFIX=${OPENCV_DEBUG_POSTFIX}")
endif()
# Removing WinRT API headers by default
list(REMOVE_ITEM videoio_ext_hdrs "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/cap_winrt.hpp")
if(DEFINED WINRT AND NOT DEFINED ENABLE_WINRT_MODE_NATIVE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /ZW")
endif()
# Dependencies used by the implementation referenced
# below are not available on WinRT 8.0.
# Enabling it for WinRT 8.1+ only.
if(DEFINED WINRT AND NOT DEFINED WINRT_8_0 AND NOT DEFINED ENABLE_WINRT_MODE_NATIVE)
# WinRT detected. Adding WinRT API header
message(STATUS " ${name}: WinRT detected. Adding WinRT API header")
list(APPEND videoio_ext_hdrs "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/cap_winrt.hpp")
# Adding WinRT internal sources and headers
list(APPEND videoio_srcs
${CMAKE_CURRENT_LIST_DIR}/src/cap_winrt_capture.cpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_winrt_bridge.cpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_winrt_video.cpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_winrt/CaptureFrameGrabber.cpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_winrt/MediaStreamSink.cpp)
list(APPEND videoio_hdrs
${CMAKE_CURRENT_LIST_DIR}/src/cap_winrt_capture.hpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_winrt_bridge.hpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_winrt_video.hpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_winrt/MFIncludes.hpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_winrt/CaptureFrameGrabber.hpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_winrt/MediaSink.hpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_winrt/MediaStreamSink.hpp)
endif()
include(${CMAKE_CURRENT_LIST_DIR}/cmake/plugin.cmake)
set(tgts "PRIVATE")
if(TARGET ocv.3rdparty.mediasdk)
if("mfx" IN_LIST VIDEOIO_PLUGIN_LIST OR VIDEOIO_PLUGIN_LIST STREQUAL "all")
ocv_create_builtin_videoio_plugin("opencv_videoio_intel_mfx" ocv.3rdparty.mediasdk "cap_mfx_common.cpp" "cap_mfx_reader.cpp" "cap_mfx_writer.cpp" "cap_mfx_plugin.cpp")
else()
list(APPEND videoio_srcs
${CMAKE_CURRENT_LIST_DIR}/src/cap_mfx_common.cpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_mfx_reader.cpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_mfx_writer.cpp)
list(APPEND videoio_hdrs
${CMAKE_CURRENT_LIST_DIR}/src/cap_mfx_common.hpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_mfx_reader.hpp
${CMAKE_CURRENT_LIST_DIR}/src/cap_mfx_writer.hpp)
list(APPEND tgts ocv.3rdparty.mediasdk)
endif()
endif()
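# The same dispatch pattern repeats for every backend below: when the backend
# is named in VIDEOIO_PLUGIN_LIST (or the list is 'all') it is built as a
# standalone plugin; otherwise its sources are compiled into the module and
# its ocv.3rdparty.* target is appended to ${tgts} for linking.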
if(TARGET ocv.3rdparty.dshow)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_dshow.cpp)
list(APPEND videoio_hdrs ${CMAKE_CURRENT_LIST_DIR}/src/cap_dshow.hpp)
list(APPEND tgts ocv.3rdparty.dshow)
endif()
if(TARGET ocv.3rdparty.msmf)
if("msmf" IN_LIST VIDEOIO_PLUGIN_LIST OR VIDEOIO_PLUGIN_LIST STREQUAL "all")
ocv_create_builtin_videoio_plugin("opencv_videoio_msmf" ocv.3rdparty.msmf "cap_msmf.cpp")
else()
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_msmf.hpp)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_msmf.cpp)
list(APPEND tgts ocv.3rdparty.msmf)
endif()
endif()
if(TARGET ocv.3rdparty.xine)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_xine.cpp)
list(APPEND tgts ocv.3rdparty.xine)
endif()
if(TARGET ocv.3rdparty.dc1394_2)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_dc1394_v2.cpp)
list(APPEND tgts ocv.3rdparty.dc1394_2)
endif()
if(TARGET ocv.3rdparty.gstreamer)
if("gstreamer" IN_LIST VIDEOIO_PLUGIN_LIST OR VIDEOIO_PLUGIN_LIST STREQUAL "all")
ocv_create_builtin_videoio_plugin("opencv_videoio_gstreamer" ocv.3rdparty.gstreamer "cap_gstreamer.cpp")
else()
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_gstreamer.cpp)
list(APPEND tgts ocv.3rdparty.gstreamer)
endif()
endif()
if(TARGET ocv.3rdparty.v4l)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_v4l.cpp)
list(APPEND tgts ocv.3rdparty.v4l)
endif()
if(TARGET ocv.3rdparty.openni2)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_openni2.cpp)
list(APPEND tgts ocv.3rdparty.openni2)
endif()
if(TARGET ocv.3rdparty.ximea)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_ximea.cpp)
list(APPEND tgts ocv.3rdparty.ximea)
endif()
if(TARGET ocv.3rdparty.ueye)
if("ueye" IN_LIST VIDEOIO_PLUGIN_LIST OR VIDEOIO_PLUGIN_LIST STREQUAL "all")
ocv_create_builtin_videoio_plugin("opencv_videoio_ueye" ocv.3rdparty.ueye "cap_ueye.cpp")
else()
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_ueye.cpp)
list(APPEND tgts ocv.3rdparty.ueye)
endif()
endif()
if(TARGET ocv.3rdparty.ffmpeg)
if(HAVE_FFMPEG_WRAPPER)
list(APPEND tgts ocv.3rdparty.ffmpeg)
elseif("ffmpeg" IN_LIST VIDEOIO_PLUGIN_LIST OR VIDEOIO_PLUGIN_LIST STREQUAL "all")
ocv_create_builtin_videoio_plugin("opencv_videoio_ffmpeg" ocv.3rdparty.ffmpeg "cap_ffmpeg.cpp")
if(TARGET ocv.3rdparty.ffmpeg.plugin_deps)
ocv_target_link_libraries(opencv_videoio_ffmpeg ocv.3rdparty.ffmpeg.plugin_deps)
endif()
if(TARGET ocv.3rdparty.mediasdk
AND NOT OPENCV_FFMPEG_DISABLE_MEDIASDK
)
ocv_target_link_libraries(opencv_videoio_ffmpeg ocv.3rdparty.mediasdk)
endif()
else()
list(APPEND videoio_hdrs ${CMAKE_CURRENT_LIST_DIR}/src/cap_ffmpeg_impl.hpp)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_ffmpeg.cpp)
list(APPEND tgts ocv.3rdparty.ffmpeg)
if(TARGET ocv.3rdparty.ffmpeg.builtin_deps)
list(APPEND tgts ocv.3rdparty.ffmpeg.builtin_deps)
endif()
if(TARGET ocv.3rdparty.mediasdk
AND NOT OPENCV_FFMPEG_DISABLE_MEDIASDK
)
list(APPEND tgts ocv.3rdparty.mediasdk)
endif()
endif()
endif()
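# Three mutually exclusive FFmpeg paths above: the prebuilt Windows wrapper
# (HAVE_FFMPEG_WRAPPER) only links the 3rdparty target; plugin mode builds a
# separate opencv_videoio_ffmpeg module; builtin mode compiles cap_ffmpeg.cpp
# directly into opencv_videoio, optionally pulling in MediaSDK.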
if(TARGET ocv.3rdparty.pvapi)
set(videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_pvapi.cpp ${videoio_srcs})
list(APPEND tgts ocv.3rdparty.pvapi)
endif()
if(TARGET ocv.3rdparty.aravis)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_aravis.cpp)
list(APPEND tgts ocv.3rdparty.aravis)
endif()
if(TARGET ocv.3rdparty.avfoundation)
if(IOS)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_avfoundation.mm)
else()
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_avfoundation_mac.mm)
endif()
list(APPEND tgts ocv.3rdparty.avfoundation)
endif()
if(TARGET ocv.3rdparty.librealsense)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_librealsense.cpp)
list(APPEND tgts ocv.3rdparty.librealsense)
endif()
if(TARGET ocv.3rdparty.gphoto2)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_gphoto2.cpp)
list(APPEND tgts ocv.3rdparty.gphoto2)
endif()
if(TARGET ocv.3rdparty.cap_ios)
list(APPEND videoio_srcs
${CMAKE_CURRENT_LIST_DIR}/src/cap_ios_abstract_camera.mm
${CMAKE_CURRENT_LIST_DIR}/src/cap_ios_photo_camera.mm
${CMAKE_CURRENT_LIST_DIR}/src/cap_ios_video_camera.mm)
list(APPEND tgts ocv.3rdparty.cap_ios)
endif()
if(TARGET ocv.3rdparty.android_mediandk)
list(APPEND videoio_srcs
${CMAKE_CURRENT_LIST_DIR}/src/cap_android_mediandk.cpp)
list(APPEND tgts ocv.3rdparty.android_mediandk)
endif()
if(TARGET ocv.3rdparty.android_native_camera)
list(APPEND videoio_srcs
${CMAKE_CURRENT_LIST_DIR}/src/cap_android_camera.cpp)
list(APPEND tgts ocv.3rdparty.android_native_camera)
endif()
if(tgts STREQUAL "PRIVATE")
set(tgts "")
endif()
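# ${tgts} was seeded with the "PRIVATE" keyword so it can be expanded straight
# into ocv_target_link_libraries() below; if no backend target was ever
# appended, drop the lone keyword rather than pass an empty PRIVATE clause.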
# install used dependencies only
if(NOT BUILD_SHARED_LIBS
AND NOT (CMAKE_VERSION VERSION_LESS "3.13.0") # upgrade CMake: https://gitlab.kitware.com/cmake/cmake/-/merge_requests/2152
)
foreach(tgt ${tgts})
if(tgt MATCHES "^ocv\\.3rdparty\\.")
install(TARGETS ${tgt} EXPORT OpenCVModules)
endif()
endforeach()
endif()
ocv_set_module_sources(HEADERS ${videoio_ext_hdrs} ${videoio_hdrs} SOURCES ${videoio_srcs})
ocv_module_include_directories()
ocv_create_module()
ocv_add_accuracy_tests(${tgts})
ocv_add_perf_tests(${tgts})
if(VIDEOIO_ENABLE_PLUGINS)
ocv_target_compile_definitions(${the_module} PRIVATE ENABLE_PLUGINS)
endif()
ocv_target_link_libraries(${the_module} LINK_PRIVATE ${tgts})
# copy FFmpeg dll to the output folder
if(WIN32 AND HAVE_FFMPEG_WRAPPER)
if(CMAKE_SIZEOF_VOID_P EQUAL 8)
set(FFMPEG_SUFFIX _64)
endif()
set(ffmpeg_dir "${OpenCV_BINARY_DIR}/3rdparty/ffmpeg")
set(ffmpeg_bare_name "opencv_videoio_ffmpeg${FFMPEG_SUFFIX}.dll")
set(ffmpeg_bare_name_ver "opencv_videoio_ffmpeg${OPENCV_DLLVERSION}${FFMPEG_SUFFIX}.dll")
set(ffmpeg_path "${ffmpeg_dir}/${ffmpeg_bare_name}")
if(MSVC_IDE)
execute_process(
COMMAND ${CMAKE_COMMAND} -E copy_if_different "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/Release/${ffmpeg_bare_name_ver}"
COMMAND ${CMAKE_COMMAND} -E copy_if_different "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/Debug/${ffmpeg_bare_name_ver}")
elseif(MSVC AND (CMAKE_GENERATOR MATCHES "Visual"))
execute_process(COMMAND ${CMAKE_COMMAND} -E copy_if_different "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/${CMAKE_BUILD_TYPE}/${ffmpeg_bare_name_ver}")
else()
execute_process(COMMAND ${CMAKE_COMMAND} -E copy_if_different "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/${ffmpeg_bare_name_ver}")
endif()
install(FILES "${ffmpeg_path}" DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT libs RENAME "${ffmpeg_bare_name_ver}")
if(INSTALL_CREATE_DISTRIB)
install(FILES "${ffmpeg_dir}/opencv_videoio_ffmpeg${FFMPEG_SUFFIX}.dll" DESTINATION "bin/" COMPONENT libs RENAME "opencv_videoio_ffmpeg${OPENCV_DLLVERSION}${FFMPEG_SUFFIX}.dll")
endif()
endif()


@ -0,0 +1,6 @@
# if(ANDROID AND ANDROID_NATIVE_API_LEVEL GREATER_EQUAL 24) <-- would be nicer but requires CMake 3.7 or later
if(ANDROID AND ANDROID_NATIVE_API_LEVEL GREATER 23)
set(HAVE_ANDROID_NATIVE_CAMERA TRUE)
set(libs "-landroid -llog -lcamera2ndk")
ocv_add_external_target(android_native_camera "" "${libs}" "HAVE_ANDROID_NATIVE_CAMERA")
endif()
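# ocv_add_external_target(<name> <include-dirs> <link-libs> <defines>) is the
# OpenCV helper used throughout these detect_*.cmake files; it wraps the
# dependency as an interface target named ocv.3rdparty.<name>, which is what
# the TARGET checks in the module CMakeLists above test for.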


@ -0,0 +1,6 @@
# if(ANDROID AND ANDROID_NATIVE_API_LEVEL GREATER_EQUAL 21) <-- would be nicer but requires CMake 3.7 or later
if(ANDROID AND ANDROID_NATIVE_API_LEVEL GREATER 20)
set(HAVE_ANDROID_MEDIANDK TRUE)
set(libs "-landroid -llog -lmediandk")
ocv_add_external_target(android_mediandk "" "${libs}" "HAVE_ANDROID_MEDIANDK")
endif()


@ -0,0 +1,32 @@
# --- Aravis SDK ---
if(NOT HAVE_ARAVIS_API AND PKG_CONFIG_FOUND)
ocv_check_modules(ARAVIS aravis-0.6 QUIET)
if(ARAVIS_FOUND)
set(HAVE_ARAVIS_API TRUE)
endif()
endif()
if(NOT HAVE_ARAVIS_API)
find_path(ARAVIS_INCLUDE "arv.h"
PATHS "${ARAVIS_ROOT}" ENV ARAVIS_ROOT
PATH_SUFFIXES "include/aravis-0.6"
NO_DEFAULT_PATH)
find_library(ARAVIS_LIBRARY "aravis-0.6"
PATHS "${ARAVIS_ROOT}" ENV ARAVIS_ROOT
PATH_SUFFIXES "lib"
NO_DEFAULT_PATH)
if(ARAVIS_INCLUDE AND ARAVIS_LIBRARY)
set(HAVE_ARAVIS_API TRUE)
file(STRINGS "${ARAVIS_INCLUDE}/arvversion.h" ver_strings REGEX "#define +ARAVIS_(MAJOR|MINOR|MICRO)_VERSION.*")
string(REGEX REPLACE ".*ARAVIS_MAJOR_VERSION[^0-9]+([0-9]+).*" "\\1" ver_major "${ver_strings}")
string(REGEX REPLACE ".*ARAVIS_MINOR_VERSION[^0-9]+([0-9]+).*" "\\1" ver_minor "${ver_strings}")
string(REGEX REPLACE ".*ARAVIS_MICRO_VERSION[^0-9]+([0-9]+).*" "\\1" ver_micro "${ver_strings}")
set(ARAVIS_VERSION "${ver_major}.${ver_minor}.${ver_micro}") # informational
set(ARAVIS_INCLUDE_DIRS "${ARAVIS_INCLUDE}")
set(ARAVIS_LIBRARIES "${ARAVIS_LIBRARY}")
endif()
endif()
if(HAVE_ARAVIS_API)
ocv_add_external_target(aravis "${ARAVIS_INCLUDE_DIRS}" "${ARAVIS_LIBRARIES}" "HAVE_ARAVIS_API")
endif()


@ -0,0 +1,16 @@
if(APPLE)
set(HAVE_AVFOUNDATION TRUE)
if(IOS)
set(libs "-framework AVFoundation" "-framework QuartzCore")
else()
set(libs
"-framework Cocoa"
"-framework Accelerate"
"-framework AVFoundation"
"-framework CoreGraphics"
"-framework CoreMedia"
"-framework CoreVideo"
"-framework QuartzCore")
endif()
ocv_add_external_target(avfoundation "" "${libs}" "HAVE_AVFOUNDATION")
endif()


@ -0,0 +1,28 @@
# --- Dc1394 ---
if(NOT HAVE_DC1394_2 AND PKG_CONFIG_FOUND)
ocv_check_modules(DC1394_2 libdc1394-2)
if(DC1394_2_FOUND)
set(HAVE_DC1394_2 TRUE)
endif()
endif()
if(NOT HAVE_DC1394_2)
find_path(DC1394_INCLUDE "dc1394/dc1394.h"
PATHS "${DC1394_ROOT}" ENV DC1394_ROOT
PATH_SUFFIXES "include"
NO_DEFAULT_PATH)
find_library(DC1394_LIBRARY "dc1394"
PATHS "${DC1394_ROOT}" ENV DC1394_ROOT
PATH_SUFFIXES "lib"
NO_DEFAULT_PATH)
if(DC1394_INCLUDE AND DC1394_LIBRARY)
set(HAVE_DC1394_2 TRUE)
set(DC1394_2_INCLUDE_DIRS "${DC1394_INCLUDE}")
set(DC1394_2_LIBRARIES "${DC1394_LIBRARY}")
set(DC1394_2_VERSION "unknown") # informational
endif()
endif()
if(HAVE_DC1394_2)
ocv_add_external_target(dc1394_2 "${DC1394_2_INCLUDE_DIRS}" "${DC1394_2_LIBRARIES}" "HAVE_DC1394_2")
endif()


@ -0,0 +1,12 @@
# --- VideoInput/DirectShow ---
if(NOT HAVE_DSHOW AND MSVC AND NOT MSVC_VERSION LESS 1500)
set(HAVE_DSHOW TRUE)
endif()
if(NOT HAVE_DSHOW)
check_include_file(dshow.h HAVE_DSHOW)
endif()
if(HAVE_DSHOW)
ocv_add_external_target(dshow "" "" "HAVE_DSHOW")
endif()


@ -0,0 +1,129 @@
# --- FFMPEG ---
if(NOT HAVE_FFMPEG AND OPENCV_FFMPEG_USE_FIND_PACKAGE)
if(OPENCV_FFMPEG_USE_FIND_PACKAGE STREQUAL "1" OR OPENCV_FFMPEG_USE_FIND_PACKAGE STREQUAL "ON")
set(OPENCV_FFMPEG_USE_FIND_PACKAGE "FFMPEG")
endif()
find_package(${OPENCV_FFMPEG_USE_FIND_PACKAGE}) # Required components: AVCODEC AVFORMAT AVUTIL SWSCALE
if(FFMPEG_FOUND OR FFmpeg_FOUND)
set(HAVE_FFMPEG TRUE)
endif()
endif()
if(NOT HAVE_FFMPEG AND WIN32 AND NOT ARM AND NOT OPENCV_FFMPEG_SKIP_DOWNLOAD)
include("${OpenCV_SOURCE_DIR}/3rdparty/ffmpeg/ffmpeg.cmake")
download_win_ffmpeg(FFMPEG_CMAKE_SCRIPT)
if(FFMPEG_CMAKE_SCRIPT)
include("${FFMPEG_CMAKE_SCRIPT}")
set(HAVE_FFMPEG TRUE)
set(HAVE_FFMPEG_WRAPPER TRUE)
endif()
endif()
set(_required_ffmpeg_libraries libavcodec libavformat libavutil libswscale)
set(_used_ffmpeg_libraries ${_required_ffmpeg_libraries})
if(NOT HAVE_FFMPEG AND PKG_CONFIG_FOUND)
ocv_check_modules(FFMPEG libavcodec libavformat libavutil libswscale)
if(FFMPEG_FOUND)
ocv_check_modules(FFMPEG_libavresample libavresample) # optional
if(FFMPEG_libavresample_FOUND)
list(APPEND FFMPEG_LIBRARIES ${FFMPEG_libavresample_LIBRARIES})
list(APPEND _used_ffmpeg_libraries libavresample)
endif()
set(HAVE_FFMPEG TRUE)
else()
set(_missing_ffmpeg_libraries "")
foreach (ffmpeg_lib ${_required_ffmpeg_libraries})
if (NOT FFMPEG_${ffmpeg_lib}_FOUND)
list(APPEND _missing_ffmpeg_libraries ${ffmpeg_lib})
endif()
endforeach ()
message(STATUS "FFMPEG is disabled. Required libraries: ${_required_ffmpeg_libraries}."
" Missing libraries: ${_missing_ffmpeg_libraries}")
unset(_missing_ffmpeg_libraries)
endif()
endif()
#=================================
# Version checks.
if(HAVE_FFMPEG AND NOT HAVE_FFMPEG_WRAPPER)
set(_min_libavcodec_version 54.35.0)
set(_min_libavformat_version 54.20.4)
set(_min_libavutil_version 52.3.0)
set(_min_libswscale_version 2.1.1)
set(_min_libavresample_version 1.0.1)
foreach(ffmpeg_lib ${_used_ffmpeg_libraries})
if(FFMPEG_${ffmpeg_lib}_VERSION VERSION_LESS _min_${ffmpeg_lib}_version)
message(STATUS "FFMPEG is disabled. Can't find suitable ${ffmpeg_lib} library"
" (minimal ${_min_${ffmpeg_lib}_version}, found ${FFMPEG_${ffmpeg_lib}_VERSION}).")
set(HAVE_FFMPEG FALSE)
endif()
endforeach()
if(NOT HAVE_FFMPEG)
message(STATUS "FFMPEG libraries version check failed "
"(minimal libav release 9.20, minimal FFMPEG release 1.1.16).")
endif()
unset(_min_libavcodec_version)
unset(_min_libavformat_version)
unset(_min_libavutil_version)
unset(_min_libswscale_version)
unset(_min_libavresample_version)
endif()
#==================================
if(HAVE_FFMPEG AND NOT HAVE_FFMPEG_WRAPPER AND NOT OPENCV_FFMPEG_SKIP_BUILD_CHECK)
try_compile(__VALID_FFMPEG
"${OpenCV_BINARY_DIR}"
"${OpenCV_SOURCE_DIR}/cmake/checks/ffmpeg_test.cpp"
CMAKE_FLAGS "-DINCLUDE_DIRECTORIES:STRING=${FFMPEG_INCLUDE_DIRS}"
"-DLINK_LIBRARIES:STRING=${FFMPEG_LIBRARIES}"
OUTPUT_VARIABLE TRY_OUT
)
if(NOT __VALID_FFMPEG)
# message(FATAL_ERROR "FFMPEG: test check build log:\n${TRY_OUT}")
message(STATUS "WARNING: Can't build ffmpeg test code")
set(HAVE_FFMPEG FALSE)
endif()
endif()
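# The try_compile() above builds cmake/checks/ffmpeg_test.cpp against the
# detected headers and libraries, weeding out installs that pkg-config reports
# but that cannot actually be linked; the full build log lands in TRY_OUT (see
# the commented FATAL_ERROR line for debugging).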
#==================================
unset(_required_ffmpeg_libraries)
unset(_used_ffmpeg_libraries)
if(HAVE_FFMPEG_WRAPPER)
ocv_add_external_target(ffmpeg "" "" "HAVE_FFMPEG_WRAPPER")
elseif(HAVE_FFMPEG)
ocv_add_external_target(ffmpeg "${FFMPEG_INCLUDE_DIRS}" "${FFMPEG_LIBRARIES}" "HAVE_FFMPEG")
set(__builtin_defines "")
set(__builtin_include_dirs "")
set(__builtin_libs "")
set(__plugin_defines "")
set(__plugin_include_dirs "")
set(__plugin_libs "")
if(HAVE_OPENCL)
set(__opencl_dirs "")
if(OPENCL_INCLUDE_DIRS)
set(__opencl_dirs "${OPENCL_INCLUDE_DIRS}")
elseif(OPENCL_INCLUDE_DIR)
set(__opencl_dirs "${OPENCL_INCLUDE_DIR}")
else()
set(__opencl_dirs "${OpenCV_SOURCE_DIR}/3rdparty/include/opencl/1.2")
endif()
# extra dependencies for builtin code (OpenCL dir is required for extensions like cl_d3d11.h)
# builtin HAVE_OPENCL is already defined through cvconfig.h
list(APPEND __builtin_include_dirs "${__opencl_dirs}")
# extra dependencies for plugins
list(APPEND __plugin_defines "HAVE_OPENCL")
list(APPEND __plugin_include_dirs "${__opencl_dirs}")
endif()
# TODO: libva, d3d11
if(__builtin_include_dirs OR __builtin_defines OR __builtin_libs)
ocv_add_external_target(ffmpeg.builtin_deps "${__builtin_include_dirs}" "${__builtin_libs}" "${__builtin_defines}")
endif()
if(VIDEOIO_ENABLE_PLUGINS AND (__plugin_include_dirs OR __plugin_defines OR __plugin_libs))
ocv_add_external_target(ffmpeg.plugin_deps "${__plugin_include_dirs}" "${__plugin_libs}" "${__plugin_defines}")
endif()
endif()


@ -0,0 +1,11 @@
# --- gPhoto2 ---
if(NOT HAVE_GPHOTO2 AND PKG_CONFIG_FOUND)
ocv_check_modules(GPHOTO2 libgphoto2)
if(GPHOTO2_FOUND)
set(HAVE_GPHOTO2 TRUE)
endif()
endif()
if(HAVE_GPHOTO2)
ocv_add_external_target(gphoto2 "${GPHOTO2_INCLUDE_DIRS}" "${GPHOTO2_LIBRARIES}" "HAVE_GPHOTO2")
endif()


@ -0,0 +1,106 @@
# --- GStreamer ---
if(NOT HAVE_GSTREAMER AND WIN32)
set(env_paths "${GSTREAMER_DIR}" ENV GSTREAMER_ROOT)
if(X86_64)
list(APPEND env_paths ENV GSTREAMER_1_0_ROOT_X86_64 ENV GSTREAMER_ROOT_X86_64)
else()
list(APPEND env_paths ENV GSTREAMER_1_0_ROOT_X86 ENV GSTREAMER_ROOT_X86)
endif()
find_path(GSTREAMER_gst_INCLUDE_DIR
gst/gst.h
PATHS ${env_paths}
PATH_SUFFIXES "include/gstreamer-1.0")
find_path(GSTREAMER_glib_INCLUDE_DIR
glib.h
PATHS ${env_paths}
PATH_SUFFIXES "include/glib-2.0")
find_path(GSTREAMER_glibconfig_INCLUDE_DIR
glibconfig.h
PATHS ${env_paths}
PATH_SUFFIXES "lib/glib-2.0/include")
find_library(GSTREAMER_gstreamer_LIBRARY
NAMES gstreamer gstreamer-1.0
PATHS ${env_paths}
PATH_SUFFIXES "lib")
find_library(GSTREAMER_app_LIBRARY
NAMES gstapp gstapp-1.0
PATHS ${env_paths}
PATH_SUFFIXES "lib")
find_library(GSTREAMER_base_LIBRARY
NAMES gstbase gstbase-1.0
PATHS ${env_paths}
PATH_SUFFIXES "lib")
find_library(GSTREAMER_pbutils_LIBRARY
NAMES gstpbutils gstpbutils-1.0
PATHS ${env_paths}
PATH_SUFFIXES "lib")
find_library(GSTREAMER_riff_LIBRARY
NAMES gstriff gstriff-1.0
PATHS ${env_paths}
PATH_SUFFIXES "lib")
find_library(GSTREAMER_video_LIBRARY
NAMES gstvideo gstvideo-1.0
PATHS ${env_paths}
PATH_SUFFIXES "lib")
find_library(GSTREAMER_glib_LIBRARY
NAMES glib-2.0
PATHS ${env_paths}
PATH_SUFFIXES "lib")
find_library(GSTREAMER_gobject_LIBRARY
NAMES gobject-2.0
PATHS ${env_paths}
PATH_SUFFIXES "lib")
if(GSTREAMER_gst_INCLUDE_DIR
AND GSTREAMER_glib_INCLUDE_DIR
AND GSTREAMER_glibconfig_INCLUDE_DIR
AND GSTREAMER_gstreamer_LIBRARY
AND GSTREAMER_app_LIBRARY
AND GSTREAMER_base_LIBRARY
AND GSTREAMER_pbutils_LIBRARY
AND GSTREAMER_riff_LIBRARY
AND GSTREAMER_video_LIBRARY
AND GSTREAMER_glib_LIBRARY
AND GSTREAMER_gobject_LIBRARY)
file(STRINGS "${GSTREAMER_gst_INCLUDE_DIR}/gst/gstversion.h" ver_strings REGEX "#define +GST_VERSION_(MAJOR|MINOR|MICRO|NANO).*")
string(REGEX REPLACE ".*GST_VERSION_MAJOR[^0-9]+([0-9]+).*" "\\1" ver_major "${ver_strings}")
string(REGEX REPLACE ".*GST_VERSION_MINOR[^0-9]+([0-9]+).*" "\\1" ver_minor "${ver_strings}")
string(REGEX REPLACE ".*GST_VERSION_MICRO[^0-9]+([0-9]+).*" "\\1" ver_micro "${ver_strings}")
set(GSTREAMER_VERSION "${ver_major}.${ver_minor}.${ver_micro}") # informational
set(HAVE_GSTREAMER TRUE)
set(GSTREAMER_LIBRARIES
${GSTREAMER_gstreamer_LIBRARY}
${GSTREAMER_base_LIBRARY}
${GSTREAMER_app_LIBRARY}
${GSTREAMER_riff_LIBRARY}
${GSTREAMER_video_LIBRARY}
${GSTREAMER_pbutils_LIBRARY}
${GSTREAMER_glib_LIBRARY}
${GSTREAMER_gobject_LIBRARY})
set(GSTREAMER_INCLUDE_DIRS
${GSTREAMER_gst_INCLUDE_DIR}
${GSTREAMER_glib_INCLUDE_DIR}
${GSTREAMER_glibconfig_INCLUDE_DIR})
endif()
endif()
if(NOT HAVE_GSTREAMER AND PKG_CONFIG_FOUND)
ocv_check_modules(GSTREAMER_base gstreamer-base-1.0)
ocv_check_modules(GSTREAMER_app gstreamer-app-1.0)
ocv_check_modules(GSTREAMER_riff gstreamer-riff-1.0)
ocv_check_modules(GSTREAMER_pbutils gstreamer-pbutils-1.0)
ocv_check_modules(GSTREAMER_video gstreamer-video-1.0)
if(GSTREAMER_base_FOUND AND GSTREAMER_app_FOUND AND GSTREAMER_riff_FOUND AND GSTREAMER_pbutils_FOUND AND GSTREAMER_video_FOUND)
set(HAVE_GSTREAMER TRUE)
set(GSTREAMER_VERSION ${GSTREAMER_base_VERSION}) # informational
set(GSTREAMER_LIBRARIES ${GSTREAMER_base_LIBRARIES} ${GSTREAMER_app_LIBRARIES} ${GSTREAMER_riff_LIBRARIES} ${GSTREAMER_pbutils_LIBRARIES} ${GSTREAMER_video_LIBRARIES})
set(GSTREAMER_INCLUDE_DIRS ${GSTREAMER_base_INCLUDE_DIRS} ${GSTREAMER_app_INCLUDE_DIRS} ${GSTREAMER_riff_INCLUDE_DIRS} ${GSTREAMER_pbutils_INCLUDE_DIRS} ${GSTREAMER_video_INCLUDE_DIRS})
endif()
endif()
if(HAVE_GSTREAMER)
ocv_add_external_target(gstreamer "${GSTREAMER_INCLUDE_DIRS}" "${GSTREAMER_LIBRARIES}" "HAVE_GSTREAMER")
endif()


@ -0,0 +1,13 @@
if(APPLE AND IOS)
set(HAVE_CAP_IOS TRUE)
set(libs
"-framework Accelerate"
"-framework AVFoundation"
"-framework CoreGraphics"
"-framework CoreImage"
"-framework CoreMedia"
"-framework CoreVideo"
"-framework QuartzCore"
"-framework UIKit")
ocv_add_external_target(cap_ios "" "${libs}" "HAVE_CAP_IOS")
endif()


@ -0,0 +1,72 @@
set(MFX_DEFS "")
if(NOT HAVE_MFX)
find_package(VPL QUIET)
if(VPL_FOUND)
set(MFX_INCLUDE_DIRS "")
set(MFX_LIBRARIES "${VPL_IMPORTED_TARGETS}")
set(HAVE_MFX TRUE)
list(APPEND MFX_DEFS "HAVE_ONEVPL")
endif()
endif()
if(NOT HAVE_MFX)
set(paths "${MFX_HOME}" ENV "MFX_HOME" ENV "INTELMEDIASDKROOT")
if(MSVC)
if(MSVC_VERSION LESS 1900)
set(vs_suffix)
else()
set(vs_suffix "_vs2015")
endif()
if(X86_64)
set(vs_arch "x64")
else()
set(vs_arch "win32")
endif()
endif()
find_path(MFX_INCLUDE mfxdefs.h
PATHS ${paths}
PATH_SUFFIXES "include" "include/mfx"
NO_DEFAULT_PATH)
find_library(MFX_LIBRARY NAMES mfx libmfx${vs_suffix}
PATHS ${paths}
PATH_SUFFIXES "lib64" "lib/lin_x64" "lib/${vs_arch}"
NO_DEFAULT_PATH)
if(MFX_INCLUDE AND MFX_LIBRARY)
set(HAVE_MFX TRUE)
set(MFX_INCLUDE_DIRS "${MFX_INCLUDE}")
set(MFX_LIBRARIES "${MFX_LIBRARY}")
list(APPEND MFX_DEFS "HAVE_MFX_PLUGIN")
endif()
endif()
if(NOT HAVE_MFX AND PKG_CONFIG_FOUND)
ocv_check_modules(MFX mfx)
endif()
if(HAVE_MFX AND UNIX)
foreach(mode NO_DEFAULT_PATH "")
find_path(MFX_va_INCLUDE va/va.h PATHS ${paths} PATH_SUFFIXES "include" ${mode})
find_library(MFX_va_LIBRARY va PATHS ${paths} PATH_SUFFIXES "lib64" "lib/lin_x64" ${mode})
find_library(MFX_va_drm_LIBRARY va-drm PATHS ${paths} PATH_SUFFIXES "lib64" "lib/lin_x64" ${mode})
if(MFX_va_INCLUDE AND MFX_va_LIBRARY AND MFX_va_drm_LIBRARY)
list(APPEND MFX_INCLUDE_DIRS "${MFX_va_INCLUDE}")
list(APPEND MFX_LIBRARIES "${MFX_va_LIBRARY}" "${MFX_va_drm_LIBRARY}")
# list(APPEND MFX_LIBRARIES "-Wl,--exclude-libs=libmfx")
break()
endif()
unset(MFX_va_INCLUDE CACHE)
unset(MFX_va_LIBRARY CACHE)
unset(MFX_va_drm_LIBRARY CACHE)
endforeach()
if(NOT(MFX_va_INCLUDE AND MFX_va_LIBRARY AND MFX_va_drm_LIBRARY))
set(HAVE_MFX FALSE)
endif()
endif()
if(HAVE_MFX)
list(APPEND MFX_DEFS "HAVE_MFX")
ocv_add_external_target(mediasdk "${MFX_INCLUDE_DIRS}" "${MFX_LIBRARIES}" "${MFX_DEFS}")
endif()


@ -0,0 +1,22 @@
# --- VideoInput/Microsoft Media Foundation ---
if(NOT HAVE_MSMF)
check_include_file(mfapi.h HAVE_MFAPI)
if(HAVE_MFAPI)
set(HAVE_MSMF TRUE)
endif()
endif()
if(HAVE_MSMF)
if(WITH_MSMF_DXVA)
check_include_file(d3d11.h HAVE_D3D11)
check_include_file(d3d11_4.h HAVE_D3D11_4)
if(HAVE_D3D11 AND HAVE_D3D11_4)
set(HAVE_MSMF_DXVA TRUE)
endif()
endif()
set(defs "HAVE_MSMF")
if(HAVE_MSMF_DXVA)
list(APPEND defs "HAVE_MSMF_DXVA")
endif()
ocv_add_external_target(msmf "" "" "${defs}")
endif()


@ -0,0 +1,47 @@
# --- OpenNI2 ---
if(NOT HAVE_OPENNI2)
set(paths "${OPENNI2_DIR}")
if(MSVC AND X86_64)
list(APPEND paths ENV OPENNI2_INCLUDE64 ENV OPENNI2_LIB64 ENV OPENNI2_REDIST64)
else()
list(APPEND paths ENV OPENNI2_INCLUDE ENV OPENNI2_LIB ENV OPENNI2_REDIST)
endif()
# From SDK
find_path(OPENNI2_INCLUDE "OpenNI.h"
PATHS ${paths}
PATH_SUFFIXES "Include"
NO_DEFAULT_PATH)
find_library(OPENNI2_LIBRARY "OpenNI2"
PATHS ${paths}
PATH_SUFFIXES "Redist" "Lib"
NO_DEFAULT_PATH)
if(OPENNI2_LIBRARY AND OPENNI2_INCLUDE)
set(HAVE_OPENNI2 TRUE)
set(OPENNI2_INCLUDE_DIRS "${OPENNI2_INCLUDE}")
set(OPENNI2_LIBRARIES "${OPENNI2_LIBRARY}")
endif()
endif()
if(NOT HAVE_OPENNI2)
# From system
find_path(OPENNI2_SYS_INCLUDE "OpenNI.h" PATH_SUFFIXES "openni2" "ni2")
find_library(OPENNI2_SYS_LIBRARY "OpenNI2")
if(OPENNI2_SYS_LIBRARY AND OPENNI2_SYS_INCLUDE)
set(HAVE_OPENNI2 TRUE)
set(OPENNI2_INCLUDE_DIRS "${OPENNI2_SYS_INCLUDE}")
set(OPENNI2_LIBRARIES "${OPENNI2_SYS_LIBRARY}")
endif()
endif()
if(HAVE_OPENNI2)
file(STRINGS "${OPENNI2_INCLUDE_DIRS}/OniVersion.h" ver_strings REGEX "#define +ONI_VERSION_(MAJOR|MINOR|MAINTENANCE|BUILD).*")
string(REGEX REPLACE ".*ONI_VERSION_MAJOR[^0-9]+([0-9]+).*" "\\1" ver_major "${ver_strings}")
string(REGEX REPLACE ".*ONI_VERSION_MINOR[^0-9]+([0-9]+).*" "\\1" ver_minor "${ver_strings}")
string(REGEX REPLACE ".*ONI_VERSION_MAINTENANCE[^0-9]+([0-9]+).*" "\\1" ver_maint "${ver_strings}")
set(OPENNI2_VERSION "${ver_major}.${ver_minor}.${ver_maint}") # informational
ocv_add_external_target(openni2 "${OPENNI2_INCLUDE_DIRS}" "${OPENNI2_LIBRARIES}" "HAVE_OPENNI2")
endif()


@ -0,0 +1,21 @@
# --- PvApi ---
if(NOT HAVE_PVAPI)
if(X86_64)
set(arch x64)
else()
set(arch x86)
endif()
find_path(PVAPI_INCLUDE "PvApi.h"
PATHS "${PVAPI_ROOT}" ENV PVAPI_ROOT
PATH_SUFFIXES "inc-pc")
find_library(PVAPI_LIBRARY "PvAPI"
PATHS "${PVAPI_ROOT}" ENV PVAPI_ROOT
PATH_SUFFIXES "bin-pc/${arch}/${gcc}")
if(PVAPI_INCLUDE AND PVAPI_LIBRARY)
set(HAVE_PVAPI TRUE)
endif()
endif()
if(HAVE_PVAPI)
ocv_add_external_target(pvapi "${PVAPI_INCLUDE}" "${PVAPI_LIBRARY}" "HAVE_PVAPI")
endif()


@ -0,0 +1,28 @@
# --- Intel librealsense ---
if(NOT HAVE_LIBREALSENSE)
find_package(realsense2 QUIET)
if(realsense2_FOUND)
set(HAVE_LIBREALSENSE TRUE)
set(LIBREALSENSE_VERSION "${realsense2_VERSION}") # informational
ocv_add_external_target(librealsense "" "${realsense2_LIBRARY}" "HAVE_LIBREALSENSE")
endif()
endif()
if(NOT HAVE_LIBREALSENSE)
find_path(LIBREALSENSE_INCLUDE_DIR "librealsense2/rs.hpp"
PATHS "${LIBREALSENSE_INCLUDE}" ENV LIBREALSENSE_INCLUDE)
find_library(LIBREALSENSE_LIBRARIES "realsense2"
PATHS "${LIBREALSENSE_LIB}" ENV LIBREALSENSE_LIB)
if(LIBREALSENSE_INCLUDE_DIR AND LIBREALSENSE_LIBRARIES)
set(HAVE_LIBREALSENSE TRUE)
file(STRINGS "${LIBREALSENSE_INCLUDE_DIR}/librealsense2/rs.h" ver_strings REGEX "#define +RS2_API_(MAJOR|MINOR|PATCH|BUILD)_VERSION.*")
string(REGEX REPLACE ".*RS2_API_MAJOR_VERSION[^0-9]+([0-9]+).*" "\\1" ver_major "${ver_strings}")
string(REGEX REPLACE ".*RS2_API_MINOR_VERSION[^0-9]+([0-9]+).*" "\\1" ver_minor "${ver_strings}")
string(REGEX REPLACE ".*RS2_API_PATCH_VERSION[^0-9]+([0-9]+).*" "\\1" ver_patch "${ver_strings}")
set(LIBREALSENSE_VERSION "${ver_major}.${ver_minor}.${ver_patch}") # informational
ocv_add_external_target(librealsense "${LIBREALSENSE_INCLUDE_DIR}" "${LIBREALSENSE_LIBRARIES}" "HAVE_LIBREALSENSE")
endif()
endif()
set(HAVE_LIBREALSENSE ${HAVE_LIBREALSENSE} PARENT_SCOPE)


@ -0,0 +1,23 @@
if(NOT HAVE_UEYE)
if(WIN32)
if(X86_64)
set(_WIN_LIB_SUFFIX "_64")
endif()
endif()
find_path(UEYE_INCLUDE "ueye.h"
PATHS "${UEYE_ROOT}" ENV UEYE_ROOT "/usr" "C:/Program Files/IDS/uEye/Develop"
HINTS "${regpath}"
PATH_SUFFIXES "include")
find_library(UEYE_LIBRARY ueye_api${_WIN_LIB_SUFFIX}
PATHS "${UEYE_ROOT}" ENV UEYE_ROOT "/usr" "C:/Program Files/IDS/uEye/Develop"
HINTS "${regpath}"
PATH_SUFFIXES "lib")
if(UEYE_INCLUDE AND UEYE_LIBRARY)
set(HAVE_UEYE TRUE)
endif()
endif()
unset(_WIN_LIB_SUFFIX)
if(HAVE_UEYE)
ocv_add_external_target(ueye "${UEYE_INCLUDE}" "${UEYE_LIBRARY}" "HAVE_UEYE")
endif()


@ -0,0 +1,17 @@
# --- V4L ---
if(NOT HAVE_V4L)
set(CMAKE_REQUIRED_QUIET TRUE) # for check_include_file
check_include_file(linux/videodev2.h HAVE_CAMV4L2)
check_include_file(sys/videoio.h HAVE_VIDEOIO)
if(HAVE_CAMV4L2 OR HAVE_VIDEOIO)
set(HAVE_V4L TRUE)
set(defs)
if(HAVE_CAMV4L2)
list(APPEND defs "HAVE_CAMV4L2")
endif()
if(HAVE_VIDEOIO)
list(APPEND defs "HAVE_VIDEOIO")
endif()
ocv_add_external_target(v4l "" "" "${defs}")
endif()
endif()
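# HAVE_CAMV4L2 / HAVE_VIDEOIO end up as compile definitions on the external
# target, so the capture code can pick the matching kernel header
# (linux/videodev2.h vs sys/videoio.h) at build time; presumably consumed by
# cap_v4l.cpp, which the module CMakeLists above compiles in.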


@ -0,0 +1,30 @@
if(NOT HAVE_XIMEA)
if(WIN32)
get_filename_component(regpath "[HKEY_CURRENT_USER\\Software\\XIMEA\\CamSupport\\API;Path]" ABSOLUTE)
if(NOT EXISTS ${regpath})
get_filename_component(regpath "[HKEY_LOCAL_MACHINE\\SOFTWARE\\XIMEA\\API_SoftwarePackage;Path]" ABSOLUTE)
endif()
endif()
if(X86_64)
set(lib_dir "API/x64" "API/64bit")
set(lib_suffix "64")
else()
set(lib_dir "API/x86" "API/32bit")
set(lib_suffix "32")
endif()
find_path(XIMEA_INCLUDE "xiApi.h"
PATHS "${XIMEA_ROOT}" ENV XIMEA_ROOT "/opt/XIMEA"
HINTS "${regpath}"
PATH_SUFFIXES "include" "API")
find_library(XIMEA_LIBRARY m3api xiapi${lib_suffix}
PATHS "${XIMEA_ROOT}" ENV XIMEA_ROOT "/opt/XIMEA"
HINTS "${regpath}"
PATH_SUFFIXES ${lib_dir})
if(XIMEA_INCLUDE AND XIMEA_LIBRARY)
set(HAVE_XIMEA TRUE)
endif()
endif()
if(HAVE_XIMEA)
ocv_add_external_target(ximea "${XIMEA_INCLUDE}" "${XIMEA_LIBRARY}" "HAVE_XIMEA")
endif()


@ -0,0 +1,7 @@
if(NOT HAVE_XINE AND PKG_CONFIG_FOUND)
ocv_check_modules(XINE libxine QUIET)
if(XINE_FOUND)
set(HAVE_XINE TRUE)
endif()
endif()
if(HAVE_XINE)
ocv_add_external_target(xine "${XINE_INCLUDE_DIRS}" "${XINE_LIBRARIES}" "HAVE_XINE")
endif()


@ -0,0 +1,33 @@
if(NOT PROJECT_NAME STREQUAL "OpenCV")
include(FindPkgConfig)
endif()
macro(add_backend backend_id cond_var)
if(${cond_var})
include("${CMAKE_CURRENT_LIST_DIR}/detect_${backend_id}.cmake")
endif()
endmacro()
add_backend("ffmpeg" WITH_FFMPEG)
add_backend("gstreamer" WITH_GSTREAMER)
add_backend("v4l" WITH_V4L)
add_backend("aravis" WITH_ARAVIS)
add_backend("dc1394" WITH_1394)
add_backend("gphoto" WITH_GPHOTO2)
add_backend("msdk" WITH_MFX)
add_backend("openni2" WITH_OPENNI2)
add_backend("pvapi" WITH_PVAPI)
add_backend("realsense" WITH_LIBREALSENSE)
add_backend("ueye" WITH_UEYE)
add_backend("ximea" WITH_XIMEA)
add_backend("xine" WITH_XINE)
add_backend("avfoundation" WITH_AVFOUNDATION)
add_backend("ios" WITH_CAP_IOS)
add_backend("dshow" WITH_DSHOW)
add_backend("msmf" WITH_MSMF)
add_backend("android_mediandk" WITH_ANDROID_MEDIANDK)
add_backend("android_camera" WITH_ANDROID_NATIVE_CAMERA)


@ -0,0 +1,56 @@
function(ocv_create_builtin_videoio_plugin name target)
ocv_debug_message("ocv_create_builtin_videoio_plugin(${ARGV})")
if(NOT TARGET ${target})
message(FATAL_ERROR "${target} does not exist!")
endif()
if(NOT OpenCV_SOURCE_DIR)
message(FATAL_ERROR "OpenCV_SOURCE_DIR must be set to build the plugin!")
endif()
message(STATUS "Video I/O: add builtin plugin '${name}'")
foreach(src ${ARGN})
list(APPEND sources "${CMAKE_CURRENT_LIST_DIR}/src/${src}")
endforeach()
add_library(${name} MODULE ${sources})
target_include_directories(${name} PRIVATE "${CMAKE_CURRENT_BINARY_DIR}")
target_compile_definitions(${name} PRIVATE BUILD_PLUGIN)
target_link_libraries(${name} PRIVATE ${target})
foreach(mod opencv_videoio opencv_core opencv_imgproc opencv_imgcodecs)
ocv_target_link_libraries(${name} LINK_PRIVATE ${mod})
ocv_target_include_directories(${name} "${OPENCV_MODULE_${mod}_LOCATION}/include")
endforeach()
if(WIN32)
set(OPENCV_PLUGIN_VERSION "${OPENCV_DLLVERSION}" CACHE STRING "")
if(CMAKE_CXX_SIZEOF_DATA_PTR EQUAL 8)
set(OPENCV_PLUGIN_ARCH "_64" CACHE STRING "")
else()
set(OPENCV_PLUGIN_ARCH "" CACHE STRING "")
endif()
else()
set(OPENCV_PLUGIN_VERSION "" CACHE STRING "")
set(OPENCV_PLUGIN_ARCH "" CACHE STRING "")
endif()
set_target_properties(${name} PROPERTIES
CXX_STANDARD 11
CXX_VISIBILITY_PRESET hidden
DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
OUTPUT_NAME "${name}${OPENCV_PLUGIN_VERSION}${OPENCV_PLUGIN_ARCH}"
)
if(WIN32)
set_target_properties(${name} PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${EXECUTABLE_OUTPUT_PATH})
install(TARGETS ${name} OPTIONAL LIBRARY DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT plugins)
else()
install(TARGETS ${name} OPTIONAL LIBRARY DESTINATION ${OPENCV_LIB_INSTALL_PATH} COMPONENT plugins)
endif()
add_dependencies(opencv_videoio_plugins ${name})
endfunction()
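# Example invocation (taken from the module CMakeLists above):
#   ocv_create_builtin_videoio_plugin("opencv_videoio_gstreamer"
#                                     ocv.3rdparty.gstreamer "cap_gstreamer.cpp")
# i.e. <name> <3rdparty target> <sources relative to src/>.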


@ -0,0 +1,877 @@
[SVG: "OpenCV video I/O Structure" (opencv-videoio-structure.svg, Inkscape drawing by PkLab.net, CC-BY 3.0). The diagram stacks, top to bottom: USER APPLICATION; the OpenCV Video I/O layer with its C / C++ / JAVA API beside the Manufacturer Driver, exchanging "cv::Mat from buffer", "set / get properties" and "grab / write frame"; the OpenCV Video I/O API Backends row (FFMPEG, DShow, MSMF, VFW, V4L, AVF/IOS, OPENNI, etc...); the SYSTEM layer (Manufacturer Library, Backends Libraries, O.S. Libraries); and MEDIA DEVICES (Camera, Video File, ...).]
<text
sodipodi:linespacing="125%"
id="text4363-5-2-2-6-5-2-03-1"
y="827.14954"
x="2074.5935"
style="font-style:normal;font-weight:normal;font-size:12.5px;line-height:125%;font-family:Verdana;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
xml:space="preserve"
transform="scale(1.0001622,0.99983783)"><tspan
y="827.14954"
x="2074.5935"
sodipodi:role="line"
id="tspan5372-4-2-6-7">Network Stream</tspan></text>
<rect
style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:1;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
id="rect8223"
width="538.99994"
height="499.00003"
x="1630.5001"
y="362.86227"
rx="10"
ry="10" />
<text
sodipodi:linespacing="125%"
id="text4363-5-2-2-7-3-0-2"
y="419.27219"
x="1869.9166"
style="font-style:normal;font-weight:normal;font-size:12.5px;line-height:125%;font-family:Verdana;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
xml:space="preserve"
transform="scale(1.0001622,0.99983783)"><tspan
y="419.27219"
x="1869.9166"
id="tspan4365-6-3-6-2-2-1-2"
sodipodi:role="line"
style="font-size:12.5px;text-align:center;text-anchor:middle">create / open</tspan><tspan
y="434.89719"
x="1869.9166"
sodipodi:role="line"
id="tspan5401-7-6-8"
style="font-size:12.5px;text-align:center;text-anchor:middle">device</tspan></text>
<rect
ry="10"
rx="10"
y="712.86224"
x="1938"
height="39"
width="99"
id="rect4615-2-7-0-2-8-7-8"
style="fill:#c87137;fill-opacity:1;stroke:#000000;stroke-width:1;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
<text
sodipodi:linespacing="125%"
id="text4363-5-2-2-6-7-2-5-2"
y="729.32245"
x="1986.8268"
style="font-style:normal;font-weight:normal;font-size:12.5px;line-height:125%;font-family:Verdana;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
xml:space="preserve"
transform="scale(1.0001622,0.99983783)"><tspan
y="729.32245"
x="1986.8268"
sodipodi:role="line"
id="tspan5372-3-26-8-5">CODECS</tspan><tspan
y="744.94745"
x="1986.8268"
sodipodi:role="line"
id="tspan4295">(fourcc)</tspan></text>
<rect
ry="10"
rx="9.3800001"
y="507.86713"
x="1841.6537"
height="43.995113"
width="90.999962"
id="rect4615-2-8-4-2"
style="fill:#6b98c9;fill-opacity:1;stroke:#000000;stroke-width:1;stroke-miterlimit:4;stroke-dasharray:1, 1;stroke-dashoffset:0;stroke-opacity:1" />
<rect
ry="10"
rx="9.3800011"
y="507.86713"
x="1945.6538"
height="43.995113"
width="198.99985"
id="rect4615-2-8-4-2-4"
style="fill:#6b98c9;fill-opacity:1;stroke:#000000;stroke-width:1;stroke-miterlimit:4;stroke-dasharray:1, 1;stroke-dashoffset:0;stroke-opacity:1" />
<rect
ry="10"
rx="9.3800001"
y="483.86713"
x="1833.6537"
height="29"
width="149"
id="rect4615-2-8-4"
style="fill:#4d82be;fill-opacity:1;stroke:#000000;stroke-width:0.99999988;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
<text
sodipodi:linespacing="125%"
id="text4363-5-2-3-3"
y="501.93918"
x="1908.0701"
style="font-style:normal;font-weight:normal;font-size:12.5px;line-height:125%;font-family:Verdana;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
xml:space="preserve"
transform="scale(1.0001622,0.99983783)"><tspan
y="501.93918"
x="1908.0701"
id="tspan4365-6-3-90-7"
sodipodi:role="line">VideoCapture</tspan></text>
<rect
ry="10"
rx="9.3800001"
y="483.86713"
x="2003.6537"
height="29"
width="149"
id="rect4615-2-9-5-7"
style="fill:#4d82be;fill-opacity:1;stroke:#000000;stroke-width:0.99999982;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
<text
sodipodi:linespacing="125%"
id="text4363-5-2-8-1-8"
y="503.1019"
x="2077.719"
style="font-style:normal;font-weight:normal;font-size:12.5px;line-height:125%;font-family:Verdana;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
xml:space="preserve"
transform="scale(1.0001622,0.99983783)"><tspan
y="503.1019"
x="2077.719"
id="tspan4365-6-3-9-0-6"
sodipodi:role="line">VideoWriter</tspan></text>
<text
sodipodi:linespacing="125%"
id="text4363-5-2-3-3-4"
y="534.48248"
x="1887.043"
style="font-style:normal;font-weight:normal;font-size:12.5px;line-height:125%;font-family:Verdana;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
xml:space="preserve"
transform="scale(1.0001622,0.99983783)"><tspan
y="534.48248"
x="1887.043"
id="tspan4365-6-3-90-7-2"
sodipodi:role="line">Camera</tspan></text>
<text
sodipodi:linespacing="125%"
id="text4363-5-2-3-3-6"
y="526.79205"
x="2044.752"
style="font-style:normal;font-weight:normal;font-size:12.5px;line-height:125%;font-family:Verdana;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
xml:space="preserve"
transform="scale(1.0001622,0.99983783)"><tspan
y="526.79205"
x="2044.752"
sodipodi:role="line"
id="tspan4367">File or URL stream</tspan><tspan
y="542.41705"
x="2044.752"
sodipodi:role="line"
id="tspan4371">+ fourcc codec</tspan></text>
</g>
</svg>


View File

@ -0,0 +1,97 @@
Video I/O with OpenCV Overview {#videoio_overview}
===================================
### See also:
- @ref videoio "Video I/O Code Reference"
- Tutorials: @ref tutorial_table_of_content_app
General Information
===================
The OpenCV @ref videoio module is a set of classes and functions to read and write videos or image sequences.
Basically, the module provides the cv::VideoCapture and cv::VideoWriter classes as a 2-layer interface to the many video
I/O APIs used as backends.
![Video I/O with OpenCV](pics/videoio_overview.svg)
Some backends, such as Direct Show (DSHOW), Microsoft Media Foundation (MSMF) and
Video 4 Linux (V4L), are interfaces to the video I/O libraries provided by the operating system.
Other backends, like OpenNI2 for Kinect, the Intel Perceptual Computing SDK, GStreamer and the
XIMEA Camera API, are interfaces to proprietary drivers or to external libraries.
See the list of supported backends here: cv::VideoCaptureAPIs
@warning Some backends are experimental; use them at your own risk
@note Each backend supports device properties (cv::VideoCaptureProperties) in a different way, or might not support any property at all.
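For reference, a minimal frame-reading loop built on this two-layer interface (a sketch with error handling kept to a minimum):
```cpp
#include <opencv2/core.hpp>
#include <opencv2/videoio.hpp>

int main()
{
    cv::VideoCapture cap(0);      // open the default camera with the first available backend
    if (!cap.isOpened())
        return 1;                 // no backend could open the device
    cv::Mat frame;
    for (int i = 0; i < 100; i++) // read a fixed number of frames for this sketch
    {
        if (!cap.read(frame))     // grab and decode the next frame
            break;
        // process `frame` with any OpenCV function here
    }
    return 0;                     // `cap` is released automatically by its destructor
}
```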
Select the backend at runtime
-----------------------------
By default, OpenCV automatically selects and uses the first available backend (`apiPreference=cv::CAP_ANY`).
As an advanced usage, you can select the backend to use at runtime. Currently this option is
available only with %VideoCapture.
For example, to grab from the default camera using Direct Show as a backend:
```cpp
//declare a capture object
cv::VideoCapture cap(0, cv::CAP_DSHOW);
//or specify the apiPreference with open
cap.open(0, cv::CAP_DSHOW);
```
If you want to grab from a file using Direct Show as a backend:
```cpp
//declare a capture object
cv::VideoCapture cap(filename, cv::CAP_DSHOW);
//or specify the apiPreference with open
cap.open(filename, cv::CAP_DSHOW);
```
@sa cv::VideoCapture::open() , cv::VideoCapture::VideoCapture()
#### How to enable backends
There are two kinds of videoio backends: built-in backends and plugins, which are loaded at runtime (available since OpenCV 4.1.0). Use the functions cv::videoio_registry::getBackends, cv::videoio_registry::hasBackend and cv::videoio_registry::getBackendName to check the actual presence of a backend at runtime.
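For example, a quick runtime check for the GStreamer backend (a small sketch using the registry functions above):
```cpp
#include <opencv2/videoio/registry.hpp>
#include <iostream>

int main()
{
    if (cv::videoio_registry::hasBackend(cv::CAP_GSTREAMER))
        std::cout << cv::videoio_registry::getBackendName(cv::CAP_GSTREAMER)
                  << " is available" << std::endl;
    else
        std::cout << "GStreamer backend is not available" << std::endl;
    return 0;
}
```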
To enable built-in videoio backends:
1. Enable corresponding CMake option, e.g. `-DWITH_GSTREAMER=ON`
2. Rebuild OpenCV
To enable a dynamically-loaded videoio backend (currently supported: GStreamer and FFmpeg on Linux; MediaSDK on Linux and Windows):
1. Enable the backend and add it to the list of plugins: `-DWITH_GSTREAMER=ON -DVIDEOIO_PLUGIN_LIST=gstreamer` CMake options
2. Rebuild OpenCV
3. Check that the `libopencv_videoio_gstreamer.so` library exists in the `lib` directory
@note Don't forget to clean the CMake cache when switching between these two modes
#### Use 3rd party drivers or cameras
Many industrial cameras and some video I/O devices don't provide standard driver interfaces
for the operating system, so you can't use VideoCapture or VideoWriter with these devices.
To get access to their devices, manufacturers provide their own C++ API and library that you have to
include and link with your OpenCV application.
It is a common case that these libraries read/write images from/to a memory buffer. If so, it is
possible to make a `Mat` header for the memory buffer (user-allocated data) and process it
in-place using OpenCV functions. See cv::Mat::Mat() for more details.
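As a sketch, assuming a hypothetical manufacturer call `vendor_grab_frame()` that fills a user-allocated BGR buffer:
```cpp
#include <opencv2/core.hpp>
#include <vector>

void processVendorFrame()
{
    const int width = 640, height = 480;                  // illustrative dimensions
    std::vector<unsigned char> buffer(width * height * 3);
    // vendor_grab_frame(buffer.data());                  // hypothetical manufacturer API

    // Wrap the buffer without copying it: the Mat header points at user-allocated data
    cv::Mat frame(height, width, CV_8UC3, buffer.data());
    // ... process `frame` in-place; `buffer` must outlive `frame`
}
```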
The FFmpeg library
------------------
OpenCV can use the FFmpeg library (http://ffmpeg.org/) as a backend to record, convert and stream audio and video.
FFmpeg is a complete, cross-platform solution. If you enable FFmpeg while configuring OpenCV, then
CMake will download and install the binaries in `OPENCV_SOURCE_CODE/3rdparty/ffmpeg/`. To use
FFmpeg at runtime, you must deploy the FFmpeg binaries with your application.
@note FFmpeg is licensed under the GNU Lesser General Public License (LGPL) version 2.1 or later.
See `OPENCV_SOURCE_CODE/3rdparty/ffmpeg/readme.txt` and http://ffmpeg.org/legal.html for details and
licensing information.

File diff suppressed because it is too large

View File

@ -0,0 +1,150 @@
/* For iOS video I/O
* by Eduard Feicho on 29/07/12
* Copyright 2012. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#import <UIKit/UIKit.h>
#import <Accelerate/Accelerate.h>
#import <AVFoundation/AVFoundation.h>
#import <ImageIO/ImageIO.h>
#include "opencv2/core.hpp"
//! @addtogroup videoio_ios
//! @{
/////////////////////////////////////// CvAbstractCamera /////////////////////////////////////
@class CvAbstractCamera;
CV_EXPORTS @interface CvAbstractCamera : NSObject
{
UIDeviceOrientation currentDeviceOrientation;
BOOL cameraAvailable;
}
@property (nonatomic, strong) AVCaptureSession* captureSession;
@property (nonatomic, strong) AVCaptureConnection* videoCaptureConnection;
@property (nonatomic, readonly) BOOL running;
@property (nonatomic, readonly) BOOL captureSessionLoaded;
@property (nonatomic, assign) int defaultFPS;
@property (nonatomic, readonly) AVCaptureVideoPreviewLayer *captureVideoPreviewLayer;
@property (nonatomic, assign) AVCaptureDevicePosition defaultAVCaptureDevicePosition;
@property (nonatomic, assign) AVCaptureVideoOrientation defaultAVCaptureVideoOrientation;
@property (nonatomic, assign) BOOL useAVCaptureVideoPreviewLayer;
@property (nonatomic, strong) NSString *const defaultAVCaptureSessionPreset;
@property (nonatomic, assign) int imageWidth;
@property (nonatomic, assign) int imageHeight;
@property (nonatomic, strong) UIView* parentView;
- CV_UNUSED(start);
- CV_UNUSED(stop);
- CV_UNUSED(switchCameras);
- (id)initWithParentView:(UIView*)parent;
- CV_UNUSED(createCaptureOutput);
- CV_UNUSED(createVideoPreviewLayer);
- CV_UNUSED(updateOrientation);
- CV_UNUSED(lockFocus);
- CV_UNUSED(unlockFocus);
- CV_UNUSED(lockExposure);
- CV_UNUSED(unlockExposure);
- CV_UNUSED(lockBalance);
- CV_UNUSED(unlockBalance);
@end
///////////////////////////////// CvVideoCamera ///////////////////////////////////////////
@class CvVideoCamera;
CV_EXPORTS @protocol CvVideoCameraDelegate <NSObject>
#ifdef __cplusplus
// delegate method for processing image frames
- (void)processImage:(cv::Mat&)image;
#endif
@end
CV_EXPORTS @interface CvVideoCamera : CvAbstractCamera<AVCaptureVideoDataOutputSampleBufferDelegate>
{
AVCaptureVideoDataOutput *videoDataOutput;
dispatch_queue_t videoDataOutputQueue;
CALayer *customPreviewLayer;
CMTime lastSampleTime;
}
@property (nonatomic, weak) id<CvVideoCameraDelegate> delegate;
@property (nonatomic, assign) BOOL grayscaleMode;
@property (nonatomic, assign) BOOL recordVideo;
@property (nonatomic, assign) BOOL rotateVideo;
@property (nonatomic, strong) AVAssetWriterInput* recordAssetWriterInput;
@property (nonatomic, strong) AVAssetWriterInputPixelBufferAdaptor* recordPixelBufferAdaptor;
@property (nonatomic, strong) AVAssetWriter* recordAssetWriter;
- (void)adjustLayoutToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation;
- CV_UNUSED(layoutPreviewLayer);
- CV_UNUSED(saveVideo);
- (NSURL *)videoFileURL;
- (NSString *)videoFileString;
@end
///////////////////////////////// CvPhotoCamera ///////////////////////////////////////////
@class CvPhotoCamera;
CV_EXPORTS @protocol CvPhotoCameraDelegate <NSObject>
- (void)photoCamera:(CvPhotoCamera*)photoCamera capturedImage:(UIImage *)image;
- (void)photoCameraCancel:(CvPhotoCamera*)photoCamera;
@end
CV_EXPORTS @interface CvPhotoCamera : CvAbstractCamera
{
AVCaptureStillImageOutput *stillImageOutput;
}
@property (nonatomic, weak) id<CvPhotoCameraDelegate> delegate;
- CV_UNUSED(takePicture);
@end
//! @} videoio_ios

View File

@ -0,0 +1,132 @@
// Video support for Windows Runtime
// Copyright (c) Microsoft Open Technologies, Inc.
// All rights reserved.
//
// (3 - clause BSD License)
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or
// promote products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <ppl.h>
#include <functional>
#include <concrt.h>
#include <agile.h>
#include "opencv2/core/cvdef.h"
namespace cv
{
//! @addtogroup videoio_winrt
//! @{
enum {
OPEN_CAMERA = 300,
CLOSE_CAMERA,
UPDATE_IMAGE_ELEMENT,
SHOW_TRACKBAR
};
/********************************** WinRT API ************************************************/
template <typename ...Args>
CV_EXPORTS void winrt_startMessageLoop(std::function<void(Args...)>&& callback, Args... args);
template <typename ...Args>
CV_EXPORTS void winrt_startMessageLoop(void callback(Args...), Args... args);
/** @brief
@note
Starts (1) the frame-grabbing loop and (2) the message loop
1. The function passed as an argument must implement the common OCV frame-reading
pattern (see cv::VideoCapture documentation) AND call cv::winrt_imshow().
2. A message processing loop is required to overcome WinRT container and type
conversion restrictions. OCV provides a default implementation
Here is how the class can be used:
@code
void cvMain()
{
Mat frame;
VideoCapture cam;
cam.open(0);
while (1)
{
cam >> frame;
// don't reprocess the same frame again
if (!cam.grab()) continue;
// your processing logic goes here
// obligatory step to get XAML image component updated
winrt_imshow();
}
}
MainPage::MainPage()
{
InitializeComponent();
cv::winrt_setFrameContainer(cvImage);
cv::winrt_startMessageLoop(cvMain);
}
@endcode
*/
template
CV_EXPORTS void winrt_startMessageLoop(void callback(void));
/** @brief
@note
Must be called from WinRT specific callback to handle image grabber state.
Here is how the class can be used:
@code
MainPage::MainPage()
{
// ...
Window::Current->VisibilityChanged += ref new Windows::UI::Xaml::WindowVisibilityChangedEventHandler(this, &Application::MainPage::OnVisibilityChanged);
// ...
}
void Application::MainPage::OnVisibilityChanged(Platform::Object ^sender,
Windows::UI::Core::VisibilityChangedEventArgs ^e)
{
cv::winrt_onVisibilityChanged(e->Visible);
}
@endcode
*/
CV_EXPORTS void winrt_onVisibilityChanged(bool visible);
/** @brief
@note
Must be called to assign WinRT control holding image you're working with.
Code sample is available for winrt_startMessageLoop().
*/
CV_EXPORTS void winrt_setFrameContainer(::Windows::UI::Xaml::Controls::Image^ image);
/** @brief
@note
Must be called to update attached image source.
Code sample is available for winrt_startMessageLoop().
*/
CV_EXPORTS void winrt_imshow();
//! @} videoio_winrt
} // cv

View File

@ -0,0 +1,192 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef CONTAINER_AVI_HPP
#define CONTAINER_AVI_HPP
#ifndef __OPENCV_BUILD
# error this is a private header which should not be used from outside of the OpenCV library
#endif
#include "opencv2/core/cvdef.h"
#include "opencv2/videoio/videoio_c.h"
#include <deque>
namespace cv
{
/*
AVI struct:
RIFF ('AVI '
LIST ('hdrl'
'avih'(<Main AVI Header>)
LIST ('strl'
'strh'(<Stream header>)
'strf'(<Stream format>)
[ 'strd'(<Additional header data>) ]
[ 'strn'(<Stream name>) ]
[ 'indx'(<Odml index data>) ]
...
)
[LIST ('strl' ...)]
[LIST ('strl' ...)]
...
[LIST ('odml'
'dmlh'(<ODML header data>)
...
)
]
...
)
[LIST ('INFO' ...)]
[JUNK]
LIST ('movi'
{{xxdb|xxdc|xxpc|xxwb}(<Data>) | LIST ('rec '
{xxdb|xxdc|xxpc|xxwb}(<Data>)
{xxdb|xxdc|xxpc|xxwb}(<Data>)
...
)
...
}
...
)
['idx1' (<AVI Index>) ]
)
{xxdb|xxdc|xxpc|xxwb}
xx - stream number: 00, 01, 02, ...
db - uncompressed video frame
dc - compressed video frame
pc - palette change
wb - audio frame
JUNK section may pad any data section and must be ignored
*/
typedef std::deque< std::pair<uint64_t, uint32_t> > frame_list;
typedef frame_list::iterator frame_iterator;
struct RiffChunk;
struct RiffList;
class VideoInputStream;
enum Codecs { MJPEG };
//Represents a single MJPEG video stream within a single AVI/AVIX entry
//Multiple video streams within a single AVI/AVIX entry are not supported
//ODML index is not supported
class CV_EXPORTS AVIReadContainer
{
public:
AVIReadContainer();
void initStream(const String& filename);
void initStream(Ptr<VideoInputStream> m_file_stream_);
void close();
//stores found frames in m_frame_list, which can be accessed via getFrames
bool parseAvi(Codecs codec_) { return parseAviWithFrameList(m_frame_list, codec_); }
//stores found frames in in_frame_list; getFrames() would then return an empty list
bool parseAvi(frame_list& in_frame_list, Codecs codec_) { return parseAviWithFrameList(in_frame_list, codec_); }
size_t getFramesCount() { return m_frame_list.size(); }
frame_list& getFrames() { return m_frame_list; }
unsigned int getWidth() { return m_width; }
unsigned int getHeight() { return m_height; }
double getFps() { return m_fps; }
std::vector<char> readFrame(frame_iterator it);
bool parseRiff(frame_list &m_mjpeg_frames);
protected:
bool parseAviWithFrameList(frame_list& in_frame_list, Codecs codec_);
void skipJunk(RiffChunk& chunk);
void skipJunk(RiffList& list);
bool parseHdrlList(Codecs codec_);
bool parseIndex(unsigned int index_size, frame_list& in_frame_list);
bool parseMovi(frame_list& in_frame_list)
{
//not implemented
CV_UNUSED(in_frame_list);
// FIXIT: in_frame_list.empty();
return true;
}
bool parseStrl(char stream_id, Codecs codec_);
bool parseInfo()
{
//not implemented
return true;
}
void printError(RiffList& list, unsigned int expected_fourcc);
void printError(RiffChunk& chunk, unsigned int expected_fourcc);
Ptr<VideoInputStream> m_file_stream;
unsigned int m_stream_id;
unsigned long long int m_movi_start;
unsigned long long int m_movi_end;
frame_list m_frame_list;
unsigned int m_width;
unsigned int m_height;
double m_fps;
bool m_is_indx_present;
};
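A rough usage sketch of this reader, inferred from the declarations above (it is a private-header class, shown here for illustration only; `video.avi` is a placeholder):
```cpp
cv::AVIReadContainer avi;
avi.initStream("video.avi");                        // placeholder file name
cv::frame_list frames;                              // (offset, size) pairs
if (avi.parseRiff(frames))
{
    for (cv::frame_iterator it = frames.begin(); it != frames.end(); ++it)
    {
        std::vector<char> jpeg = avi.readFrame(it); // one complete MJPEG-encoded frame
        // hand `jpeg` to a JPEG decoder (e.g. cv::imdecode)
    }
}
avi.close();
```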
enum { COLORSPACE_GRAY=0, COLORSPACE_RGBA=1, COLORSPACE_BGR=2, COLORSPACE_YUV444P=3 };
enum StreamType { db, dc, pc, wb };
class BitStream;
// {xxdb|xxdc|xxpc|xxwb}
// xx - stream number: 00, 01, 02, ...
// db - uncompressed video frame
// dc - compressed video frame
// pc - palette change
// wb - audio frame
class CV_EXPORTS AVIWriteContainer
{
public:
AVIWriteContainer();
~AVIWriteContainer();
bool initContainer(const String& filename, double fps, Size size, bool iscolor);
void startWriteAVI(int stream_count);
void writeStreamHeader(Codecs codec_);
void startWriteChunk(uint32_t fourcc);
void endWriteChunk();
int getAVIIndex(int stream_number, StreamType strm_type);
void writeIndex(int stream_number, StreamType strm_type);
void finishWriteAVI();
bool isOpenedStream() const;
bool isEmptyFrameOffset() const { return frameOffset.empty(); }
int getWidth() const { return width; }
int getHeight() const { return height; }
int getChannels() const { return channels; }
size_t getMoviPointer() const { return moviPointer; }
size_t getStreamPos() const;
void pushFrameOffset(size_t elem) { frameOffset.push_back(elem); }
void pushFrameSize(size_t elem) { frameSize.push_back(elem); }
bool isEmptyFrameSize() const { return frameSize.empty(); }
size_t atFrameSize(size_t i) const { return frameSize[i]; }
size_t countFrameSize() const { return frameSize.size(); }
void jputStreamShort(int val);
void putStreamBytes(const uchar* buf, int count);
void putStreamByte(int val);
void jputStream(unsigned currval);
void jflushStream(unsigned currval, int bitIdx);
private:
Ptr<BitStream> strm;
int outfps;
int width, height, channels;
size_t moviPointer;
std::vector<size_t> frameOffset, frameSize, AVIChunkSizeIndex, frameNumIndexes;
};
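And a rough, hypothetical sketch of the writer's call order, likewise inferred only from the declarations above (the real driver of this class is the MJPEG encoder; producing the encoded frame bytes is out of scope here):
```cpp
cv::AVIWriteContainer avi;
if (avi.initContainer("out.avi", 30.0, cv::Size(640, 480), true))
{
    avi.startWriteAVI(1);                             // a single video stream
    avi.writeStreamHeader(cv::MJPEG);                 // the only codec declared above
    std::vector<uchar> encoded;                       // ... one pre-encoded MJPEG frame ...
    avi.startWriteChunk(avi.getAVIIndex(0, cv::dc));  // '00dc' compressed-frame chunk
    avi.putStreamBytes(encoded.data(), (int)encoded.size());
    avi.endWriteChunk();
    avi.writeIndex(0, cv::dc);
    avi.finishWriteAVI();
}
```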
}
#endif //CONTAINER_AVI_HPP

View File

@ -0,0 +1,34 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// This file should not be used with compiler (documentation only)
//
namespace cv {
/** @addtogroup videoio_hwaccel
This section contains information about the API used to control hardware-accelerated video decoding and encoding.
@note Check the [Wiki page](https://github.com/opencv/opencv/wiki/Video-IO-hardware-acceleration)
for a description of the supported hardware / software configurations and available benchmarks
cv::VideoCapture properties:
- #CAP_PROP_HW_ACCELERATION (as #VideoAccelerationType)
- #CAP_PROP_HW_DEVICE
cv::VideoWriter properties:
- #VIDEOWRITER_PROP_HW_ACCELERATION (as #VideoAccelerationType)
- #VIDEOWRITER_PROP_HW_DEVICE
Properties are supported by these backends:
- #CAP_FFMPEG
- #CAP_GSTREAMER
- #CAP_MSMF (Windows)
@{
*/
/** @} */
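For illustration, a sketch that requests any available hardware acceleration when opening a file via FFmpeg (this assumes the params-based cv::VideoCapture constructor available in recent OpenCV 4.x):
```cpp
#include <opencv2/videoio.hpp>

cv::VideoCapture openAccelerated(const std::string& filename)
{
    // Ask the FFmpeg backend for any available HW acceleration; it falls back
    // to software decoding when no suitable device is found.
    return cv::VideoCapture(filename, cv::CAP_FFMPEG,
        { cv::CAP_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_ANY });
}
```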
} // namespace

View File

@ -0,0 +1,434 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_VIDEOIO_LEGACY_CONSTANTS_H
#define OPENCV_VIDEOIO_LEGACY_CONSTANTS_H
enum
{
CV_CAP_ANY =0, // autodetect
CV_CAP_MIL =100, // MIL proprietary drivers
CV_CAP_VFW =200, // platform native
CV_CAP_V4L =200,
CV_CAP_V4L2 =200,
CV_CAP_FIREWARE =300, // IEEE 1394 drivers
CV_CAP_FIREWIRE =300,
CV_CAP_IEEE1394 =300,
CV_CAP_DC1394 =300,
CV_CAP_CMU1394 =300,
CV_CAP_STEREO =400, // TYZX proprietary drivers
CV_CAP_TYZX =400,
CV_TYZX_LEFT =400,
CV_TYZX_RIGHT =401,
CV_TYZX_COLOR =402,
CV_TYZX_Z =403,
CV_CAP_QT =500, // QuickTime
CV_CAP_UNICAP =600, // Unicap drivers
CV_CAP_DSHOW =700, // DirectShow (via videoInput)
CV_CAP_MSMF =1400, // Microsoft Media Foundation (via videoInput)
CV_CAP_PVAPI =800, // PvAPI, Prosilica GigE SDK
CV_CAP_OPENNI =900, // OpenNI (for Kinect)
CV_CAP_OPENNI_ASUS =910, // OpenNI (for Asus Xtion)
CV_CAP_ANDROID =1000, // Android - not used
CV_CAP_ANDROID_BACK =CV_CAP_ANDROID+99, // Android back camera - not used
CV_CAP_ANDROID_FRONT =CV_CAP_ANDROID+98, // Android front camera - not used
CV_CAP_XIAPI =1100, // XIMEA Camera API
CV_CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API)
CV_CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK
CV_CAP_INTELPERC = 1500, // Intel Perceptual Computing
CV_CAP_OPENNI2 = 1600, // OpenNI2 (for Kinect)
CV_CAP_GPHOTO2 = 1700,
CV_CAP_GSTREAMER = 1800, // GStreamer
CV_CAP_FFMPEG = 1900, // FFMPEG
CV_CAP_IMAGES = 2000, // OpenCV Image Sequence (e.g. img_%02d.jpg)
CV_CAP_ARAVIS = 2100 // Aravis GigE SDK
};
enum
{
// modes of the controlling registers (can be: auto, manual, auto single push, absolute; the latter is allowed with any other mode)
// every feature can have only one mode turned on at a time
CV_CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically)
CV_CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user
CV_CAP_PROP_DC1394_MODE_AUTO = -2,
CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1,
CV_CAP_PROP_POS_MSEC =0,
CV_CAP_PROP_POS_FRAMES =1,
CV_CAP_PROP_POS_AVI_RATIO =2,
CV_CAP_PROP_FRAME_WIDTH =3,
CV_CAP_PROP_FRAME_HEIGHT =4,
CV_CAP_PROP_FPS =5,
CV_CAP_PROP_FOURCC =6,
CV_CAP_PROP_FRAME_COUNT =7,
CV_CAP_PROP_FORMAT =8,
CV_CAP_PROP_MODE =9,
CV_CAP_PROP_BRIGHTNESS =10,
CV_CAP_PROP_CONTRAST =11,
CV_CAP_PROP_SATURATION =12,
CV_CAP_PROP_HUE =13,
CV_CAP_PROP_GAIN =14,
CV_CAP_PROP_EXPOSURE =15,
CV_CAP_PROP_CONVERT_RGB =16,
CV_CAP_PROP_WHITE_BALANCE_BLUE_U =17,
CV_CAP_PROP_RECTIFICATION =18,
CV_CAP_PROP_MONOCHROME =19,
CV_CAP_PROP_SHARPNESS =20,
CV_CAP_PROP_AUTO_EXPOSURE =21, // exposure control done by camera,
// user can adjust reference level
// using this feature
CV_CAP_PROP_GAMMA =22,
CV_CAP_PROP_TEMPERATURE =23,
CV_CAP_PROP_TRIGGER =24,
CV_CAP_PROP_TRIGGER_DELAY =25,
CV_CAP_PROP_WHITE_BALANCE_RED_V =26,
CV_CAP_PROP_ZOOM =27,
CV_CAP_PROP_FOCUS =28,
CV_CAP_PROP_GUID =29,
CV_CAP_PROP_ISO_SPEED =30,
CV_CAP_PROP_MAX_DC1394 =31,
CV_CAP_PROP_BACKLIGHT =32,
CV_CAP_PROP_PAN =33,
CV_CAP_PROP_TILT =34,
CV_CAP_PROP_ROLL =35,
CV_CAP_PROP_IRIS =36,
CV_CAP_PROP_SETTINGS =37,
CV_CAP_PROP_BUFFERSIZE =38,
CV_CAP_PROP_AUTOFOCUS =39,
CV_CAP_PROP_SAR_NUM =40,
CV_CAP_PROP_SAR_DEN =41,
CV_CAP_PROP_AUTOGRAB =1024, // property for videoio class CvCapture_Android only
CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING=1025, // readonly, tricky property, returns const char* indeed
CV_CAP_PROP_PREVIEW_FORMAT=1026, // readonly, tricky property, returns const char* indeed
// OpenNI map generators
CV_CAP_OPENNI_DEPTH_GENERATOR = 1 << 31,
CV_CAP_OPENNI_IMAGE_GENERATOR = 1 << 30,
CV_CAP_OPENNI_IR_GENERATOR = 1 << 29,
CV_CAP_OPENNI_GENERATORS_MASK = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_OPENNI_IR_GENERATOR,
// Properties of cameras available through OpenNI interfaces
CV_CAP_PROP_OPENNI_OUTPUT_MODE = 100,
CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm
CV_CAP_PROP_OPENNI_BASELINE = 102, // in mm
CV_CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels
CV_CAP_PROP_OPENNI_REGISTRATION = 104, // flag
CV_CAP_PROP_OPENNI_REGISTRATION_ON = CV_CAP_PROP_OPENNI_REGISTRATION, // flag that synchronizes the remapping depth map to image map
// by changing depth generator's view point (if the flag is "on") or
// sets this view point to its normal one (if the flag is "off").
CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105,
CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106,
CV_CAP_PROP_OPENNI_CIRCLE_BUFFER = 107,
CV_CAP_PROP_OPENNI_MAX_TIME_DURATION = 108,
CV_CAP_PROP_OPENNI_GENERATOR_PRESENT = 109,
CV_CAP_PROP_OPENNI2_SYNC = 110,
CV_CAP_PROP_OPENNI2_MIRROR = 111,
CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT,
CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_OUTPUT_MODE,
CV_CAP_OPENNI_DEPTH_GENERATOR_PRESENT = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT,
CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_BASELINE,
CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_FOCAL_LENGTH,
CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_REGISTRATION,
CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION,
CV_CAP_OPENNI_IR_GENERATOR_PRESENT = CV_CAP_OPENNI_IR_GENERATOR + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT,
// Properties of cameras available through GStreamer interface
CV_CAP_GSTREAMER_QUEUE_LENGTH = 200, // default is 1
// PVAPI
CV_CAP_PROP_PVAPI_MULTICASTIP = 300, // IP to enable multicast master mode. 0 to disable multicast
CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE = 301, // FrameStartTriggerMode: Determines how a frame is initiated
CV_CAP_PROP_PVAPI_DECIMATIONHORIZONTAL = 302, // Horizontal sub-sampling of the image
CV_CAP_PROP_PVAPI_DECIMATIONVERTICAL = 303, // Vertical sub-sampling of the image
CV_CAP_PROP_PVAPI_BINNINGX = 304, // Horizontal binning factor
CV_CAP_PROP_PVAPI_BINNINGY = 305, // Vertical binning factor
CV_CAP_PROP_PVAPI_PIXELFORMAT = 306, // Pixel format
// Properties of cameras available through XIMEA SDK interface
CV_CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.
CV_CAP_PROP_XI_DATA_FORMAT = 401, // Output data format.
CV_CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels).
CV_CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels).
CV_CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger.
CV_CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE.
CV_CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input
CV_CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode
CV_CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level
CV_CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output
CV_CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode
CV_CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED
CV_CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality
CV_CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition)
CV_CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance
CV_CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain
CV_CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%).
CV_CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure
CV_CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure
CV_CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %)
CV_CAP_PROP_XI_TIMEOUT = 420, // Image capture timeout in milliseconds
CV_CAP_PROP_XI_EXPOSURE = 421, // Exposure time in microseconds
CV_CAP_PROP_XI_EXPOSURE_BURST_COUNT = 422, // Sets the number of times of exposure in one frame.
CV_CAP_PROP_XI_GAIN_SELECTOR = 423, // Gain selector for parameter Gain allows to select different type of gains.
CV_CAP_PROP_XI_GAIN = 424, // Gain in dB
CV_CAP_PROP_XI_DOWNSAMPLING_TYPE = 426, // Change image downsampling type.
CV_CAP_PROP_XI_BINNING_SELECTOR = 427, // Binning engine selector.
CV_CAP_PROP_XI_BINNING_VERTICAL = 428, // Vertical Binning - number of vertical photo-sensitive cells to combine together.
CV_CAP_PROP_XI_BINNING_HORIZONTAL = 429, // Horizontal Binning - number of horizontal photo-sensitive cells to combine together.
CV_CAP_PROP_XI_BINNING_PATTERN = 430, // Binning pattern type.
CV_CAP_PROP_XI_DECIMATION_SELECTOR = 431, // Decimation engine selector.
CV_CAP_PROP_XI_DECIMATION_VERTICAL = 432, // Vertical Decimation - vertical sub-sampling of the image - reduces the vertical resolution of the image by the specified vertical decimation factor.
CV_CAP_PROP_XI_DECIMATION_HORIZONTAL = 433, // Horizontal Decimation - horizontal sub-sampling of the image - reduces the horizontal resolution of the image by the specified horizontal decimation factor.
CV_CAP_PROP_XI_DECIMATION_PATTERN = 434, // Decimation pattern type.
CV_CAP_PROP_XI_TEST_PATTERN_GENERATOR_SELECTOR = 587, // Selects which test pattern generator is controlled by the TestPattern feature.
CV_CAP_PROP_XI_TEST_PATTERN = 588, // Selects which test pattern type is generated by the selected generator.
CV_CAP_PROP_XI_IMAGE_DATA_FORMAT = 435, // Output data format.
CV_CAP_PROP_XI_SHUTTER_TYPE = 436, // Change sensor shutter type(CMOS sensor).
CV_CAP_PROP_XI_SENSOR_TAPS = 437, // Number of taps
CV_CAP_PROP_XI_AEAG_ROI_OFFSET_X = 439, // Automatic exposure/gain ROI offset X
CV_CAP_PROP_XI_AEAG_ROI_OFFSET_Y = 440, // Automatic exposure/gain ROI offset Y
CV_CAP_PROP_XI_AEAG_ROI_WIDTH = 441, // Automatic exposure/gain ROI Width
CV_CAP_PROP_XI_AEAG_ROI_HEIGHT = 442, // Automatic exposure/gain ROI Height
CV_CAP_PROP_XI_BPC = 445, // Correction of bad pixels
CV_CAP_PROP_XI_WB_KR = 448, // White balance red coefficient
CV_CAP_PROP_XI_WB_KG = 449, // White balance green coefficient
CV_CAP_PROP_XI_WB_KB = 450, // White balance blue coefficient
CV_CAP_PROP_XI_WIDTH = 451, // Width of the Image provided by the device (in pixels).
CV_CAP_PROP_XI_HEIGHT = 452, // Height of the Image provided by the device (in pixels).
CV_CAP_PROP_XI_REGION_SELECTOR = 589, // Selects Region in Multiple ROI which parameters are set by width, height, ... ,region mode
CV_CAP_PROP_XI_REGION_MODE = 595, // Activates/deactivates Region selected by Region Selector
CV_CAP_PROP_XI_LIMIT_BANDWIDTH = 459, // Set/get bandwidth(datarate)(in Megabits)
CV_CAP_PROP_XI_SENSOR_DATA_BIT_DEPTH = 460, // Sensor output data bit depth.
CV_CAP_PROP_XI_OUTPUT_DATA_BIT_DEPTH = 461, // Device output data bit depth.
CV_CAP_PROP_XI_IMAGE_DATA_BIT_DEPTH = 462, // bitdepth of data returned by function xiGetImage
CV_CAP_PROP_XI_OUTPUT_DATA_PACKING = 463, // Device output data packing (or grouping) enabled. Packing could be enabled if output_data_bit_depth > 8 and packing capability is available.
CV_CAP_PROP_XI_OUTPUT_DATA_PACKING_TYPE = 464, // Data packing type. Some cameras supports only specific packing type.
CV_CAP_PROP_XI_IS_COOLED = 465, // Returns 1 for cameras that support cooling.
CV_CAP_PROP_XI_COOLING = 466, // Start camera cooling.
CV_CAP_PROP_XI_TARGET_TEMP = 467, // Set sensor target temperature for cooling.
CV_CAP_PROP_XI_CHIP_TEMP = 468, // Camera sensor temperature
CV_CAP_PROP_XI_HOUS_TEMP = 469, // Camera housing temperature
CV_CAP_PROP_XI_HOUS_BACK_SIDE_TEMP = 590, // Camera housing back side temperature
CV_CAP_PROP_XI_SENSOR_BOARD_TEMP = 596, // Camera sensor board temperature
CV_CAP_PROP_XI_CMS = 470, // Mode of color management system.
CV_CAP_PROP_XI_APPLY_CMS = 471, // Enable applying of CMS profiles to xiGetImage (see XI_PRM_INPUT_CMS_PROFILE, XI_PRM_OUTPUT_CMS_PROFILE).
CV_CAP_PROP_XI_IMAGE_IS_COLOR = 474, // Returns 1 for color cameras.
CV_CAP_PROP_XI_COLOR_FILTER_ARRAY = 475, // Returns color filter array type of RAW data.
CV_CAP_PROP_XI_GAMMAY = 476, // Luminosity gamma
CV_CAP_PROP_XI_GAMMAC = 477, // Chromaticity gamma
CV_CAP_PROP_XI_SHARPNESS = 478, // Sharpness Strength
CV_CAP_PROP_XI_CC_MATRIX_00 = 479, // Color Correction Matrix element [0][0]
CV_CAP_PROP_XI_CC_MATRIX_01 = 480, // Color Correction Matrix element [0][1]
CV_CAP_PROP_XI_CC_MATRIX_02 = 481, // Color Correction Matrix element [0][2]
CV_CAP_PROP_XI_CC_MATRIX_03 = 482, // Color Correction Matrix element [0][3]
CV_CAP_PROP_XI_CC_MATRIX_10 = 483, // Color Correction Matrix element [1][0]
CV_CAP_PROP_XI_CC_MATRIX_11 = 484, // Color Correction Matrix element [1][1]
CV_CAP_PROP_XI_CC_MATRIX_12 = 485, // Color Correction Matrix element [1][2]
CV_CAP_PROP_XI_CC_MATRIX_13 = 486, // Color Correction Matrix element [1][3]
CV_CAP_PROP_XI_CC_MATRIX_20 = 487, // Color Correction Matrix element [2][0]
CV_CAP_PROP_XI_CC_MATRIX_21 = 488, // Color Correction Matrix element [2][1]
CV_CAP_PROP_XI_CC_MATRIX_22 = 489, // Color Correction Matrix element [2][2]
CV_CAP_PROP_XI_CC_MATRIX_23 = 490, // Color Correction Matrix element [2][3]
CV_CAP_PROP_XI_CC_MATRIX_30 = 491, // Color Correction Matrix element [3][0]
CV_CAP_PROP_XI_CC_MATRIX_31 = 492, // Color Correction Matrix element [3][1]
CV_CAP_PROP_XI_CC_MATRIX_32 = 493, // Color Correction Matrix element [3][2]
CV_CAP_PROP_XI_CC_MATRIX_33 = 494, // Color Correction Matrix element [3][3]
CV_CAP_PROP_XI_DEFAULT_CC_MATRIX = 495, // Set default Color Correction Matrix
CV_CAP_PROP_XI_TRG_SELECTOR = 498, // Selects the type of trigger.
CV_CAP_PROP_XI_ACQ_FRAME_BURST_COUNT = 499, // Sets number of frames acquired by burst. This burst is used only if trigger is set to FrameBurstStart
CV_CAP_PROP_XI_DEBOUNCE_EN = 507, // Enable/Disable debounce to selected GPI
CV_CAP_PROP_XI_DEBOUNCE_T0 = 508, // Debounce time (x * 10us)
CV_CAP_PROP_XI_DEBOUNCE_T1 = 509, // Debounce time (x * 10us)
CV_CAP_PROP_XI_DEBOUNCE_POL = 510, // Debounce polarity (pol = 1 t0 - falling edge, t1 - rising edge)
CV_CAP_PROP_XI_LENS_MODE = 511, // Status of lens control interface. This shall be set to XI_ON before any Lens operations.
CV_CAP_PROP_XI_LENS_APERTURE_VALUE = 512, // Current lens aperture value in stops. Examples: 2.8, 4, 5.6, 8, 11
CV_CAP_PROP_XI_LENS_FOCUS_MOVEMENT_VALUE = 513, // Lens current focus movement value to be used by XI_PRM_LENS_FOCUS_MOVE in motor steps.
CV_CAP_PROP_XI_LENS_FOCUS_MOVE = 514, // Moves lens focus motor by steps set in XI_PRM_LENS_FOCUS_MOVEMENT_VALUE.
CV_CAP_PROP_XI_LENS_FOCUS_DISTANCE = 515, // Lens focus distance in cm.
CV_CAP_PROP_XI_LENS_FOCAL_LENGTH = 516, // Lens focal distance in mm.
CV_CAP_PROP_XI_LENS_FEATURE_SELECTOR = 517, // Selects the current feature which is accessible by XI_PRM_LENS_FEATURE.
CV_CAP_PROP_XI_LENS_FEATURE = 518, // Allows access to lens feature value currently selected by XI_PRM_LENS_FEATURE_SELECTOR.
CV_CAP_PROP_XI_DEVICE_MODEL_ID = 521, // Return device model id
CV_CAP_PROP_XI_DEVICE_SN = 522, // Return device serial number
CV_CAP_PROP_XI_IMAGE_DATA_FORMAT_RGB32_ALPHA = 529, // The alpha channel of RGB32 output image format.
CV_CAP_PROP_XI_IMAGE_PAYLOAD_SIZE = 530, // Buffer size in bytes sufficient for output image returned by xiGetImage
CV_CAP_PROP_XI_TRANSPORT_PIXEL_FORMAT = 531, // Current format of pixels on transport layer.
CV_CAP_PROP_XI_SENSOR_CLOCK_FREQ_HZ = 532, // Sensor clock frequency in Hz.
CV_CAP_PROP_XI_SENSOR_CLOCK_FREQ_INDEX = 533, // Sensor clock frequency index. Sensor with selected frequencies have possibility to set the frequency only by this index.
CV_CAP_PROP_XI_SENSOR_OUTPUT_CHANNEL_COUNT = 534, // Number of output channels from sensor used for data transfer.
CV_CAP_PROP_XI_FRAMERATE = 535, // Define framerate in Hz
CV_CAP_PROP_XI_COUNTER_SELECTOR = 536, // Select counter
CV_CAP_PROP_XI_COUNTER_VALUE = 537, // Counter status
CV_CAP_PROP_XI_ACQ_TIMING_MODE = 538, // Type of sensor frames timing.
CV_CAP_PROP_XI_AVAILABLE_BANDWIDTH = 539, // Calculate and return available interface bandwidth (in Megabits)
CV_CAP_PROP_XI_BUFFER_POLICY = 540, // Data move policy
CV_CAP_PROP_XI_LUT_EN = 541, // Activates LUT.
CV_CAP_PROP_XI_LUT_INDEX = 542, // Control the index (offset) of the coefficient to access in the LUT.
CV_CAP_PROP_XI_LUT_VALUE = 543, // Value at entry LUTIndex of the LUT
CV_CAP_PROP_XI_TRG_DELAY = 544, // Specifies the delay in microseconds (us) to apply after the trigger reception before activating it.
CV_CAP_PROP_XI_TS_RST_MODE = 545, // Defines how time stamp reset engine will be armed
CV_CAP_PROP_XI_TS_RST_SOURCE = 546, // Defines which source will be used for timestamp reset. Writing this parameter will trigger settings of engine (arming)
CV_CAP_PROP_XI_IS_DEVICE_EXIST = 547, // Returns 1 if camera connected and works properly.
CV_CAP_PROP_XI_ACQ_BUFFER_SIZE = 548, // Acquisition buffer size in buffer_size_unit. Default bytes.
CV_CAP_PROP_XI_ACQ_BUFFER_SIZE_UNIT = 549, // Acquisition buffer size unit in bytes. Default 1. E.g. Value 1024 means that buffer_size is in KiBytes
CV_CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_SIZE = 550, // Acquisition transport buffer size in bytes
CV_CAP_PROP_XI_BUFFERS_QUEUE_SIZE = 551, // Queue of field/frame buffers
CV_CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_COMMIT = 552, // Number of buffers to commit to low level
CV_CAP_PROP_XI_RECENT_FRAME = 553, // GetImage returns most recent frame
CV_CAP_PROP_XI_DEVICE_RESET = 554, // Resets the camera to default state.
CV_CAP_PROP_XI_COLUMN_FPN_CORRECTION = 555, // Correction of column FPN
CV_CAP_PROP_XI_ROW_FPN_CORRECTION = 591, // Correction of row FPN
CV_CAP_PROP_XI_SENSOR_MODE = 558, // Current sensor mode. Allows to select sensor mode by one integer. Setting of this parameter affects: image dimensions and downsampling.
CV_CAP_PROP_XI_HDR = 559, // Enable High Dynamic Range feature.
CV_CAP_PROP_XI_HDR_KNEEPOINT_COUNT = 560, // The number of kneepoints in the PWLR.
CV_CAP_PROP_XI_HDR_T1 = 561, // position of first kneepoint(in % of XI_PRM_EXPOSURE)
CV_CAP_PROP_XI_HDR_T2 = 562, // position of second kneepoint (in % of XI_PRM_EXPOSURE)
CV_CAP_PROP_XI_KNEEPOINT1 = 563, // value of first kneepoint (% of sensor saturation)
CV_CAP_PROP_XI_KNEEPOINT2 = 564, // value of second kneepoint (% of sensor saturation)
CV_CAP_PROP_XI_IMAGE_BLACK_LEVEL = 565, // Last image black level counts. Can be used for Offline processing to recall it.
CV_CAP_PROP_XI_HW_REVISION = 571, // Returns hardware revision number.
CV_CAP_PROP_XI_DEBUG_LEVEL = 572, // Set debug level
CV_CAP_PROP_XI_AUTO_BANDWIDTH_CALCULATION = 573, // Automatic bandwidth calculation,
CV_CAP_PROP_XI_FFS_FILE_ID = 594, // File number.
CV_CAP_PROP_XI_FFS_FILE_SIZE = 580, // Size of file.
CV_CAP_PROP_XI_FREE_FFS_SIZE = 581, // Size of free camera FFS.
CV_CAP_PROP_XI_USED_FFS_SIZE = 582, // Size of used camera FFS.
CV_CAP_PROP_XI_FFS_ACCESS_KEY = 583, // Setting of key enables file operations on some cameras.
CV_CAP_PROP_XI_SENSOR_FEATURE_SELECTOR = 585, // Selects the current feature which is accessible by XI_PRM_SENSOR_FEATURE_VALUE.
CV_CAP_PROP_XI_SENSOR_FEATURE_VALUE = 586, // Allows access to sensor feature value currently selected by XI_PRM_SENSOR_FEATURE_SELECTOR.
// Properties for Android cameras
CV_CAP_PROP_ANDROID_FLASH_MODE = 8001,
CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002,
CV_CAP_PROP_ANDROID_WHITE_BALANCE = 8003,
CV_CAP_PROP_ANDROID_ANTIBANDING = 8004,
CV_CAP_PROP_ANDROID_FOCAL_LENGTH = 8005,
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006,
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007,
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008,
CV_CAP_PROP_ANDROID_EXPOSE_LOCK = 8009,
CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK = 8010,
// Properties of cameras available through AVFOUNDATION interface
CV_CAP_PROP_IOS_DEVICE_FOCUS = 9001,
CV_CAP_PROP_IOS_DEVICE_EXPOSURE = 9002,
CV_CAP_PROP_IOS_DEVICE_FLASH = 9003,
CV_CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004,
CV_CAP_PROP_IOS_DEVICE_TORCH = 9005,
// Properties of cameras available through Smartek Giganetix Ethernet Vision interface
/* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */
CV_CAP_PROP_GIGA_FRAME_OFFSET_X = 10001,
CV_CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002,
CV_CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003,
CV_CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004,
CV_CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005,
CV_CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006,
CV_CAP_PROP_INTELPERC_PROFILE_COUNT = 11001,
CV_CAP_PROP_INTELPERC_PROFILE_IDX = 11002,
CV_CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE = 11003,
CV_CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE = 11004,
CV_CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD = 11005,
CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ = 11006,
CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT = 11007,
// Intel PerC streams
CV_CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29,
CV_CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28,
CV_CAP_INTELPERC_GENERATORS_MASK = CV_CAP_INTELPERC_DEPTH_GENERATOR + CV_CAP_INTELPERC_IMAGE_GENERATOR
};
enum
{
// Data given from depth generator.
CV_CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1)
CV_CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3)
CV_CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1)
CV_CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1)
CV_CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1
// Data given from RGB image generator.
CV_CAP_OPENNI_BGR_IMAGE = 5,
CV_CAP_OPENNI_GRAY_IMAGE = 6,
// Data given from IR image generator.
CV_CAP_OPENNI_IR_IMAGE = 7
};
// Supported output modes of OpenNI image generator
enum
{
CV_CAP_OPENNI_VGA_30HZ = 0,
CV_CAP_OPENNI_SXGA_15HZ = 1,
CV_CAP_OPENNI_SXGA_30HZ = 2,
CV_CAP_OPENNI_QVGA_30HZ = 3,
CV_CAP_OPENNI_QVGA_60HZ = 4
};
enum
{
CV_CAP_INTELPERC_DEPTH_MAP = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth.
CV_CAP_INTELPERC_UVDEPTH_MAP = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates.
CV_CAP_INTELPERC_IR_MAP = 2, // Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam.
CV_CAP_INTELPERC_IMAGE = 3
};
// gPhoto2 properties, if propertyId is less than 0 then work on widget with that __additive inversed__ camera setting ID
// Get IDs by using CAP_PROP_GPHOTO2_WIDGET_ENUMERATE.
// @see CvCaptureCAM_GPHOTO2 for more info
enum
{
CV_CAP_PROP_GPHOTO2_PREVIEW = 17001, // Capture only preview from liveview mode.
CV_CAP_PROP_GPHOTO2_WIDGET_ENUMERATE = 17002, // Readonly, returns (const char *).
CV_CAP_PROP_GPHOTO2_RELOAD_CONFIG = 17003, // Trigger, only by set. Reload camera settings.
CV_CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE = 17004, // Reload all settings on set.
CV_CAP_PROP_GPHOTO2_COLLECT_MSGS = 17005, // Collect messages with details.
CV_CAP_PROP_GPHOTO2_FLUSH_MSGS = 17006, // Readonly, returns (const char *).
CV_CAP_PROP_SPEED = 17007, // Exposure speed. Can be readonly, depends on camera program.
CV_CAP_PROP_APERTURE = 17008, // Aperture. Can be readonly, depends on camera program.
CV_CAP_PROP_EXPOSUREPROGRAM = 17009, // Camera exposure program.
CV_CAP_PROP_VIEWFINDER = 17010 // Enter liveview mode.
};
//! Macro to construct the fourcc code of the codec. Same as CV_FOURCC()
#define CV_FOURCC_MACRO(c1, c2, c3, c4) (((c1) & 255) + (((c2) & 255) << 8) + (((c3) & 255) << 16) + (((c4) & 255) << 24))
/** @brief Constructs the fourcc code of the codec
Simply call it with the 4 chars of the fourcc code, like `CV_FOURCC('I', 'Y', 'U', 'V')`.
A list of codes can be obtained at the [Video Codecs by FOURCC](http://www.fourcc.org/codecs.php) page.
FFMPEG backend with MP4 container natively uses other values as fourcc code:
see [ObjectType](http://mp4ra.org/#/codecs).
*/
CV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4)
{
return CV_FOURCC_MACRO(c1, c2, c3, c4);
}
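For example, passing a fourcc to cv::VideoWriter (file name and parameters are illustrative):
```cpp
cv::VideoWriter writer("out.avi",
                       CV_FOURCC('M', 'J', 'P', 'G'), // codec fourcc
                       30.0,                          // frames per second
                       cv::Size(640, 480));           // frame size
```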
//! (Windows only) Open Codec Selection Dialog
#define CV_FOURCC_PROMPT -1
//! (Linux only) Use default codec for specified filename
#define CV_FOURCC_DEFAULT CV_FOURCC('I', 'Y', 'U', 'V')
#endif // OPENCV_VIDEOIO_LEGACY_CONSTANTS_H

View File

@ -0,0 +1,72 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_VIDEOIO_REGISTRY_HPP
#define OPENCV_VIDEOIO_REGISTRY_HPP
#include <opencv2/videoio.hpp>
namespace cv { namespace videoio_registry {
/** @addtogroup videoio_registry
This section describes the API used to query/configure the available Video I/O backends.
Runtime configuration options:
- enable debug mode: `OPENCV_VIDEOIO_DEBUG=1`
- change backend priority: `OPENCV_VIDEOIO_PRIORITY_<backend>=9999`
- disable backend: `OPENCV_VIDEOIO_PRIORITY_<backend>=0`
- specify list of backends with high priority (>100000): `OPENCV_VIDEOIO_PRIORITY_LIST=FFMPEG,GSTREAMER`
@{
*/
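A small sketch that lists every available backend by name:
```cpp
#include <opencv2/videoio/registry.hpp>
#include <iostream>

int main()
{
    for (cv::VideoCaptureAPIs api : cv::videoio_registry::getBackends())
        std::cout << cv::videoio_registry::getBackendName(api) << std::endl;
    return 0;
}
```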
/** @brief Returns backend API name or "UnknownVideoAPI(xxx)"
@param api backend ID (#VideoCaptureAPIs)
*/
CV_EXPORTS_W cv::String getBackendName(VideoCaptureAPIs api);
/** @brief Returns list of all available backends */
CV_EXPORTS_W std::vector<VideoCaptureAPIs> getBackends();
/** @brief Returns list of available backends which work via `cv::VideoCapture(int index)` */
CV_EXPORTS_W std::vector<VideoCaptureAPIs> getCameraBackends();
/** @brief Returns list of available backends which work via `cv::VideoCapture(filename)` */
CV_EXPORTS_W std::vector<VideoCaptureAPIs> getStreamBackends();
/** @brief Returns list of available backends which work via `cv::VideoWriter()` */
CV_EXPORTS_W std::vector<VideoCaptureAPIs> getWriterBackends();
/** @brief Returns true if backend is available */
CV_EXPORTS_W bool hasBackend(VideoCaptureAPIs api);
/** @brief Returns true if backend is built in (false if backend is used as plugin) */
CV_EXPORTS_W bool isBackendBuiltIn(VideoCaptureAPIs api);
/** @brief Returns description and ABI/API version of videoio plugin's camera interface */
CV_EXPORTS_W std::string getCameraBackendPluginVersion(
VideoCaptureAPIs api,
CV_OUT int& version_ABI,
CV_OUT int& version_API
);
/** @brief Returns description and ABI/API version of videoio plugin's stream capture interface */
CV_EXPORTS_W std::string getStreamBackendPluginVersion(
VideoCaptureAPIs api,
CV_OUT int& version_ABI,
CV_OUT int& version_API
);
/** @brief Returns description and ABI/API version of videoio plugin's writer interface */
CV_EXPORTS_W std::string getWriterBackendPluginVersion(
VideoCaptureAPIs api,
CV_OUT int& version_ABI,
CV_OUT int& version_API
);
//! @}
}} // namespace
#endif // OPENCV_VIDEOIO_REGISTRY_HPP

View File

@ -0,0 +1,48 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifdef __OPENCV_BUILD
#error this is a compatibility header which should not be used inside the OpenCV library
#endif
#include "opencv2/videoio.hpp"


@ -0,0 +1,153 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_VIDEOIO_H
#define OPENCV_VIDEOIO_H
#include "opencv2/core/core_c.h"
#include "opencv2/videoio/legacy/constants_c.h"
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/**
@addtogroup videoio_c
@{
*/
/****************************************************************************************\
* Working with Video Files and Cameras *
\****************************************************************************************/
/** @brief "black box" capture structure
In C++ use cv::VideoCapture
*/
typedef struct CvCapture CvCapture;
/** @brief start capturing frames from video file
*/
CVAPI(CvCapture*) cvCreateFileCapture( const char* filename );
/** @brief start capturing frames from video file; allows specifying a preferred API to use
*/
CVAPI(CvCapture*) cvCreateFileCaptureWithPreference( const char* filename , int apiPreference);
/** @brief start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*)
*/
CVAPI(CvCapture*) cvCreateCameraCapture( int index );
/** @brief grab a frame; returns 1 on success, 0 on failure.
This function is intended to be fast
*/
CVAPI(int) cvGrabFrame( CvCapture* capture );
/** @brief get the frame grabbed with cvGrabFrame(..).
This function may apply some frame processing, such as
frame decompression, flipping, etc.
@warning !!!DO NOT RELEASE or MODIFY the retrieved frame!!!
*/
CVAPI(IplImage*) cvRetrieveFrame( CvCapture* capture, int streamIdx CV_DEFAULT(0) );
/** @brief Just a combination of cvGrabFrame and cvRetrieveFrame
@warning !!!DO NOT RELEASE or MODIFY the retrieved frame!!!
*/
CVAPI(IplImage*) cvQueryFrame( CvCapture* capture );
/** @brief stop capturing/reading and free resources
*/
CVAPI(void) cvReleaseCapture( CvCapture** capture );
/** @brief retrieve capture properties
*/
CVAPI(double) cvGetCaptureProperty( CvCapture* capture, int property_id );
/** @brief set capture properties
*/
CVAPI(int) cvSetCaptureProperty( CvCapture* capture, int property_id, double value );
/** @brief Return the type of the capturer (e.g. ::CV_CAP_VFW, ::CV_CAP_UNICAP).
The type is unknown if the capture was created with ::CV_CAP_ANY
*/
CVAPI(int) cvGetCaptureDomain( CvCapture* capture);
/** @brief "black box" video file writer structure
In C++ use cv::VideoWriter
*/
typedef struct CvVideoWriter CvVideoWriter;
/** @brief initialize video file writer
*/
CVAPI(CvVideoWriter*) cvCreateVideoWriter( const char* filename, int fourcc,
double fps, CvSize frame_size,
int is_color CV_DEFAULT(1));
/** @brief write frame to video file
*/
CVAPI(int) cvWriteFrame( CvVideoWriter* writer, const IplImage* image );
/** @brief close video file writer
*/
CVAPI(void) cvReleaseVideoWriter( CvVideoWriter** writer );
// ***************************************************************************************
//! @name Obsolete functions/synonyms
//! @{
#define cvCaptureFromCAM cvCreateCameraCapture //!< @deprecated use cvCreateCameraCapture() instead
#define cvCaptureFromFile cvCreateFileCapture //!< @deprecated use cvCreateFileCapture() instead
#define cvCaptureFromAVI cvCaptureFromFile //!< @deprecated use cvCreateFileCapture() instead
#define cvCreateAVIWriter cvCreateVideoWriter //!< @deprecated use cvCreateVideoWriter() instead
#define cvWriteToAVI cvWriteFrame //!< @deprecated use cvWriteFrame() instead
//! @} Obsolete...
//! @} videoio_c
#ifdef __cplusplus
}
#endif
#endif //OPENCV_VIDEOIO_H
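For reference, a minimal usage sketch of this legacy C API (compiles as C or C++; the include path is assumed to be the installed location of this header, and error handling is abbreviated):

#include <stdio.h>
#include <opencv2/videoio/videoio_c.h> /* assumed install path of this header */

int main(void)
{
    CvCapture* cap = cvCreateFileCapture("video.avi");
    if (!cap)
    {
        fprintf(stderr, "cannot open video.avi\n");
        return 1;
    }
    printf("fps=%f\n", cvGetCaptureProperty(cap, CV_CAP_PROP_FPS));
    IplImage* frame;
    while ((frame = cvQueryFrame(cap)) != NULL)
    {
        /* process 'frame' here; do NOT release or modify it (see warning above) */
    }
    cvReleaseCapture(&cap);
    return 0;
}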


@ -0,0 +1,81 @@
#!/bin/bash
set -e
if [ -z "$1" ] ; then
echo "$0 <destination directory>"
exit 1
fi
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
OCV="$( cd "${DIR}/../../.." >/dev/null 2>&1 && pwd )"
mkdir -p "${1}" # Docker creates non-existent mount directories owned by 'root'; ensure the directory exists under the current user to avoid "Permission denied" problems
DST="$( cd "$1" >/dev/null 2>&1 && pwd )"
CFG=$2
do_build()
{
TAG=$1
D=$2
F=$3
shift 3
docker build \
--build-arg http_proxy \
--build-arg https_proxy \
"$@" \
-t $TAG \
-f "${D}/${F}" \
"${D}"
}
do_run()
{
TAG=$1
shift 1
docker run \
-it \
--rm \
-v "${OCV}":/opencv:ro \
-v "${DST}":/dst \
-e CFG=$CFG \
--user $(id -u):$(id -g) \
$TAG \
"$@"
}
build_gstreamer()
{
TAG=opencv_gstreamer_builder
do_build $TAG "${DIR}/plugin_gstreamer" Dockerfile
do_run $TAG /opencv/modules/videoio/misc/plugin_gstreamer/build.sh /dst $CFG
}
build_ffmpeg_ubuntu()
{
VER=$1
TAG=opencv_ffmpeg_ubuntu_builder:${VER}
do_build $TAG "${DIR}/plugin_ffmpeg" Dockerfile-ubuntu --build-arg VER=${VER}
do_run $TAG /opencv/modules/videoio/misc/plugin_ffmpeg/build-ubuntu.sh /dst ${VER} ${CFG}
}
build_ffmpeg()
{
VER=$1
TAG=opencv_ffmpeg_builder:${VER}
ARCHIVE="${DIR}/plugin_ffmpeg/ffmpeg-${VER}.tar.xz"
if [ ! -f "${ARCHIVE}" ] ; then
wget https://www.ffmpeg.org/releases/ffmpeg-${VER}.tar.xz -O "${ARCHIVE}"
fi
do_build $TAG "${DIR}/plugin_ffmpeg" Dockerfile-ffmpeg --build-arg VER=${VER}
do_run $TAG /opencv/modules/videoio/misc/plugin_ffmpeg/build-standalone.sh /dst ${VER} ${CFG}
}
echo "OpenCV: ${OCV}"
echo "Destination: ${DST}"
build_gstreamer
build_ffmpeg_ubuntu 18.04
build_ffmpeg_ubuntu 16.04
build_ffmpeg 4.1
build_ffmpeg 3.4.5
build_ffmpeg 2.8.15


@ -0,0 +1,63 @@
{
"const_ignore_list": [
"CV_CAP_OPENNI",
"CV_CAP_OPENNI2",
"CV_CAP_PROP_OPENNI_",
"CV_CAP_INTELPERC",
"CV_CAP_PROP_INTELPERC_",
"CV_CAP_ANY",
"CV_CAP_MIL",
"CV_CAP_VFW",
"CV_CAP_V4L",
"CV_CAP_V4L2",
"CV_CAP_FIREWARE",
"CV_CAP_FIREWIRE",
"CV_CAP_IEEE1394",
"CV_CAP_DC1394",
"CV_CAP_CMU1394",
"CV_CAP_STEREO",
"CV_CAP_TYZX",
"CV_CAP_QT",
"CV_CAP_UNICAP",
"CV_CAP_DSHOW",
"CV_CAP_PVAPI",
"CV_CAP_ARAVIS",
"CV_CAP_PROP_DC1394_OFF",
"CV_CAP_PROP_DC1394_MODE_MANUAL",
"CV_CAP_PROP_DC1394_MODE_AUTO",
"CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO",
"CV_CAP_PROP_POS_MSEC",
"CV_CAP_PROP_POS_FRAMES",
"CV_CAP_PROP_POS_AVI_RATIO",
"CV_CAP_PROP_FPS",
"CV_CAP_PROP_FOURCC",
"CV_CAP_PROP_FRAME_COUNT",
"CV_CAP_PROP_FORMAT",
"CV_CAP_PROP_MODE",
"CV_CAP_PROP_BRIGHTNESS",
"CV_CAP_PROP_CONTRAST",
"CV_CAP_PROP_SATURATION",
"CV_CAP_PROP_HUE",
"CV_CAP_PROP_GAIN",
"CV_CAP_PROP_EXPOSURE",
"CV_CAP_PROP_CONVERT_RGB",
"CV_CAP_PROP_WHITE_BALANCE_BLUE_U",
"CV_CAP_PROP_RECTIFICATION",
"CV_CAP_PROP_MONOCHROME",
"CV_CAP_PROP_SHARPNESS",
"CV_CAP_PROP_AUTO_EXPOSURE",
"CV_CAP_PROP_GAMMA",
"CV_CAP_PROP_TEMPERATURE",
"CV_CAP_PROP_TRIGGER",
"CV_CAP_PROP_TRIGGER_DELAY",
"CV_CAP_PROP_WHITE_BALANCE_RED_V",
"CV_CAP_PROP_MAX_DC1394",
"CV_CAP_GSTREAMER_QUEUE_LENGTH",
"CV_CAP_PROP_PVAPI_MULTICASTIP",
"CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING",
"CV_TYZX_LEFT",
"CV_TYZX_RIGHT",
"CV_TYZX_COLOR",
"CV_TYZX_Z"
]
}


@ -0,0 +1,64 @@
package org.opencv.test.videoio;
import java.util.List;
import org.opencv.core.Size;
import org.opencv.videoio.Videoio;
import org.opencv.videoio.VideoCapture;
import org.opencv.test.OpenCVTestCase;
public class VideoCaptureTest extends OpenCVTestCase {
private VideoCapture capture;
private boolean isOpened;
private boolean isSucceed;
@Override
protected void setUp() throws Exception {
super.setUp();
capture = null;
isTestCaseEnabled = false;
isSucceed = false;
isOpened = false;
}
public void testGrab() {
capture = new VideoCapture();
isSucceed = capture.grab();
assertFalse(isSucceed);
}
public void testIsOpened() {
capture = new VideoCapture();
assertFalse(capture.isOpened());
}
public void testDefaultConstructor() {
capture = new VideoCapture();
assertNotNull(capture);
assertFalse(capture.isOpened());
}
public void testConstructorWithFilename() {
capture = new VideoCapture("some_file.avi");
assertNotNull(capture);
}
public void testConstructorWithFilenameAndExplicitlySpecifiedAPI() {
capture = new VideoCapture("some_file.avi", Videoio.CAP_ANY);
assertNotNull(capture);
}
public void testConstructorWithIndex() {
capture = new VideoCapture(0);
assertNotNull(capture);
}
public void testConstructorWithIndexAndExplicitlySpecifiedAPI() {
capture = new VideoCapture(0, Videoio.CAP_ANY);
assertNotNull(capture);
}
}


@ -0,0 +1,20 @@
{
"AdditionalImports" : {
"Videoio" :
[ "\"videoio/registry.hpp\"" ]
},
"ManualFuncs" : {
"VideoCapture" : {
"release" : {"declaration" : [""], "implementation" : [""] }
},
"VideoWriter" : {
"release" : {"declaration" : [""], "implementation" : [""] }
}
},
"func_arg_fix" : {
"VideoCapture" : {
"(BOOL)open:(int)index apiPreference:(int)apiPreference" : { "open" : {"name" : "openWithIndex"} },
"(BOOL)open:(int)index apiPreference:(int)apiPreference params:(IntVector*)params" : { "open" : {"name" : "openWithIndexAndParameters"} }
}
}
}


@ -0,0 +1,445 @@
//
// CvAbstractCamera2.mm
//
// Created by Giles Payne on 2020/04/01.
//
#import "CvCamera2.h"
#pragma mark - Private Interface
@interface CvAbstractCamera2 ()
@property (nonatomic, strong) AVCaptureVideoPreviewLayer* captureVideoPreviewLayer;
- (void)deviceOrientationDidChange:(NSNotification*)notification;
- (void)startCaptureSession;
- (void)setDesiredCameraPosition:(AVCaptureDevicePosition)desiredPosition;
- (void)updateSize;
@end
#pragma mark - Implementation
@implementation CvAbstractCamera2
#pragma mark - Constructors
- (id)init;
{
self = [super init];
if (self) {
// react to device orientation notifications
[[NSNotificationCenter defaultCenter] addObserver:self
selector:@selector(deviceOrientationDidChange:)
name:UIDeviceOrientationDidChangeNotification
object:nil];
[[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
self.currentDeviceOrientation = [[UIDevice currentDevice] orientation];
// check if camera available
self.cameraAvailable = [UIImagePickerController isSourceTypeAvailable:UIImagePickerControllerSourceTypeCamera];
NSLog(@"camera available: %@", (self.cameraAvailable ? @"YES" : @"NO") );
_running = NO;
// set camera default configuration
self.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront;
self.defaultAVCaptureVideoOrientation = AVCaptureVideoOrientationLandscapeLeft;
self.defaultFPS = 15;
self.defaultAVCaptureSessionPreset = AVCaptureSessionPreset352x288;
self.parentView = nil;
self.useAVCaptureVideoPreviewLayer = NO;
}
return self;
}
- (id)initWithParentView:(UIView*)parent;
{
self = [super init];
if (self) {
// react to device orientation notifications
[[NSNotificationCenter defaultCenter] addObserver:self
selector:@selector(deviceOrientationDidChange:)
name:UIDeviceOrientationDidChangeNotification
object:nil];
[[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
self.currentDeviceOrientation = [[UIDevice currentDevice] orientation];
// check if camera available
self.cameraAvailable = [UIImagePickerController isSourceTypeAvailable:UIImagePickerControllerSourceTypeCamera];
NSLog(@"camera available: %@", (self.cameraAvailable ? @"YES" : @"NO") );
_running = NO;
// set camera default configuration
self.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront;
self.defaultAVCaptureVideoOrientation = AVCaptureVideoOrientationLandscapeLeft;
self.defaultFPS = 15;
self.defaultAVCaptureSessionPreset = AVCaptureSessionPreset640x480;
self.parentView = parent;
self.useAVCaptureVideoPreviewLayer = YES;
}
return self;
}
- (void)dealloc;
{
[[NSNotificationCenter defaultCenter] removeObserver:self];
[[UIDevice currentDevice] endGeneratingDeviceOrientationNotifications];
}
#pragma mark - Public interface
- (void)start;
{
if (![NSThread isMainThread]) {
NSLog(@"[Camera] Warning: Call start only from main thread");
[self performSelectorOnMainThread:@selector(start) withObject:nil waitUntilDone:NO];
return;
}
if (self.running == YES) {
return;
}
_running = YES;
// TODO: update image size data before actually starting (needed for recording)
[self updateSize];
if (self.cameraAvailable) {
[self startCaptureSession];
}
}
- (void)pause;
{
_running = NO;
[self.captureSession stopRunning];
}
- (void)stop;
{
_running = NO;
// Release any retained subviews of the main view.
// e.g. self.myOutlet = nil;
if (self.captureSession) {
for (AVCaptureInput *input in self.captureSession.inputs) {
[self.captureSession removeInput:input];
}
for (AVCaptureOutput *output in self.captureSession.outputs) {
[self.captureSession removeOutput:output];
}
[self.captureSession stopRunning];
}
_captureSessionLoaded = NO;
}
// use front/back camera
- (void)switchCameras;
{
BOOL was_running = self.running;
if (was_running) {
[self stop];
}
if (self.defaultAVCaptureDevicePosition == AVCaptureDevicePositionFront) {
self.defaultAVCaptureDevicePosition = AVCaptureDevicePositionBack;
} else {
self.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront;
}
if (was_running) {
[self start];
}
}
#pragma mark - Device Orientation Changes
- (void)deviceOrientationDidChange:(NSNotification*)notification
{
(void)notification;
UIDeviceOrientation orientation = [UIDevice currentDevice].orientation;
switch (orientation)
{
case UIDeviceOrientationPortrait:
case UIDeviceOrientationPortraitUpsideDown:
case UIDeviceOrientationLandscapeLeft:
case UIDeviceOrientationLandscapeRight:
self.currentDeviceOrientation = orientation;
break;
case UIDeviceOrientationFaceUp:
case UIDeviceOrientationFaceDown:
default:
break;
}
NSLog(@"deviceOrientationDidChange: %d", (int)orientation);
[self updateOrientation];
}
#pragma mark - Private Interface
- (void)createCaptureSession;
{
// set a av capture session preset
self.captureSession = [[AVCaptureSession alloc] init];
if ([self.captureSession canSetSessionPreset:self.defaultAVCaptureSessionPreset]) {
[self.captureSession setSessionPreset:self.defaultAVCaptureSessionPreset];
} else if ([self.captureSession canSetSessionPreset:AVCaptureSessionPresetLow]) {
[self.captureSession setSessionPreset:AVCaptureSessionPresetLow];
} else {
NSLog(@"[Camera] Error: could not set session preset");
}
}
- (void)createCaptureDevice;
{
// setup the device
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
[self setDesiredCameraPosition:self.defaultAVCaptureDevicePosition];
NSLog(@"[Camera] device connected? %@", device.connected ? @"YES" : @"NO");
NSLog(@"[Camera] device position %@", (device.position == AVCaptureDevicePositionBack) ? @"back" : @"front");
}
- (void)createVideoPreviewLayer;
{
self.captureVideoPreviewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:self.captureSession];
if ([self.captureVideoPreviewLayer.connection isVideoOrientationSupported])
{
[self.captureVideoPreviewLayer.connection setVideoOrientation:self.defaultAVCaptureVideoOrientation];
}
if (self.parentView != nil) {
self.captureVideoPreviewLayer.frame = self.parentView.bounds;
self.captureVideoPreviewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
[self.parentView.layer addSublayer:self.captureVideoPreviewLayer];
}
NSLog(@"[Camera] created AVCaptureVideoPreviewLayer");
}
- (void)setDesiredCameraPosition:(AVCaptureDevicePosition)desiredPosition;
{
for (AVCaptureDevice *device in [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
if ([device position] == desiredPosition) {
[self.captureSession beginConfiguration];
NSError* error = nil;
AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:device error:&error];
if (!input) {
NSLog(@"error creating input %@", [error description]);
}
// support for autofocus
if ([device isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]) {
error = nil;
if ([device lockForConfiguration:&error]) {
device.focusMode = AVCaptureFocusModeContinuousAutoFocus;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for autofocus configuration %@", [error description]);
}
}
// replace any previously configured capture inputs with the new one
for (AVCaptureInput *oldInput in self.captureSession.inputs) {
[self.captureSession removeInput:oldInput];
}
[self.captureSession addInput:input];
[self.captureSession commitConfiguration];
break;
}
}
}
- (void)startCaptureSession
{
if (!self.cameraAvailable) {
return;
}
if (self.captureSessionLoaded == NO) {
[self createCaptureSession];
[self createCaptureDevice];
[self createCaptureOutput];
// setup preview layer
if (self.useAVCaptureVideoPreviewLayer) {
[self createVideoPreviewLayer];
} else {
[self createCustomVideoPreview];
}
_captureSessionLoaded = YES;
}
[self.captureSession startRunning];
}
- (void)createCaptureOutput;
{
[NSException raise:NSInternalInconsistencyException
format:@"You must override %s in a subclass", __FUNCTION__];
}
- (void)createCustomVideoPreview;
{
[NSException raise:NSInternalInconsistencyException
format:@"You must override %s in a subclass", __FUNCTION__];
}
- (void)updateOrientation;
{
// nothing to do here
}
- (void)updateSize;
{
if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPresetPhoto]) {
//TODO: find the correct resolution
self.imageWidth = 640;
self.imageHeight = 480;
} else if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPresetHigh]) {
//TODO: find the correct resolution
self.imageWidth = 640;
self.imageHeight = 480;
} else if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPresetMedium]) {
//TODO: find the correct resolution
self.imageWidth = 640;
self.imageHeight = 480;
} else if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPresetLow]) {
//TODO: find the correct resolution
self.imageWidth = 640;
self.imageHeight = 480;
} else if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPreset352x288]) {
self.imageWidth = 352;
self.imageHeight = 288;
} else if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPreset640x480]) {
self.imageWidth = 640;
self.imageHeight = 480;
} else if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPreset1280x720]) {
self.imageWidth = 1280;
self.imageHeight = 720;
} else {
self.imageWidth = 640;
self.imageHeight = 480;
}
}
- (void)lockFocus;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isFocusModeSupported:AVCaptureFocusModeLocked]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.focusMode = AVCaptureFocusModeLocked;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for locked focus configuration %@", [error description]);
}
}
}
- (void) unlockFocus;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.focusMode = AVCaptureFocusModeContinuousAutoFocus;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for autofocus configuration %@", [error description]);
}
}
}
- (void)lockExposure;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isExposureModeSupported:AVCaptureExposureModeLocked]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.exposureMode = AVCaptureExposureModeLocked;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for locked exposure configuration %@", [error description]);
}
}
}
- (void) unlockExposure;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isExposureModeSupported:AVCaptureExposureModeContinuousAutoExposure]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.exposureMode = AVCaptureExposureModeContinuousAutoExposure;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for autoexposure configuration %@", [error description]);
}
}
}
- (void)lockBalance;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeLocked]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.whiteBalanceMode = AVCaptureWhiteBalanceModeLocked;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for locked white balance configuration %@", [error description]);
}
}
}
- (void) unlockBalance;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.whiteBalanceMode = AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for auto white balance configuration %@", [error description]);
}
}
}
@end


@ -0,0 +1,85 @@
//
// CvCamera2.h
//
// Created by Giles Payne on 2020/03/11.
//
#import <UIKit/UIKit.h>
#import <Accelerate/Accelerate.h>
#import <AVFoundation/AVFoundation.h>
#import <ImageIO/ImageIO.h>
#import "CVObjcUtil.h"
@class Mat;
@class CvAbstractCamera2;
CV_EXPORTS @interface CvAbstractCamera2 : NSObject
@property UIDeviceOrientation currentDeviceOrientation;
@property BOOL cameraAvailable;
@property (nonatomic, strong) AVCaptureSession* captureSession;
@property (nonatomic, strong) AVCaptureConnection* videoCaptureConnection;
@property (nonatomic, readonly) BOOL running;
@property (nonatomic, readonly) BOOL captureSessionLoaded;
@property (nonatomic, assign) int defaultFPS;
@property (nonatomic, readonly) AVCaptureVideoPreviewLayer *captureVideoPreviewLayer;
@property (nonatomic, assign) AVCaptureDevicePosition defaultAVCaptureDevicePosition;
@property (nonatomic, assign) AVCaptureVideoOrientation defaultAVCaptureVideoOrientation;
@property (nonatomic, assign) BOOL useAVCaptureVideoPreviewLayer;
@property (nonatomic, strong) NSString *const defaultAVCaptureSessionPreset;
@property (nonatomic, assign) int imageWidth;
@property (nonatomic, assign) int imageHeight;
@property (nonatomic, strong) UIView* parentView;
- (void)start;
- (void)stop;
- (void)switchCameras;
- (id)initWithParentView:(UIView*)parent;
- (void)createCaptureOutput;
- (void)createVideoPreviewLayer;
- (void)updateOrientation;
- (void)lockFocus;
- (void)unlockFocus;
- (void)lockExposure;
- (void)unlockExposure;
- (void)lockBalance;
- (void)unlockBalance;
@end
///////////////////////////////// CvVideoCamera ///////////////////////////////////////////
@class CvVideoCamera2;
@protocol CvVideoCameraDelegate2 <NSObject>
- (void)processImage:(Mat*)image;
@end
CV_EXPORTS @interface CvVideoCamera2 : CvAbstractCamera2<AVCaptureVideoDataOutputSampleBufferDelegate>
@property (nonatomic, weak) id<CvVideoCameraDelegate2> delegate;
@property (nonatomic, assign) BOOL grayscaleMode;
@property (nonatomic, assign) BOOL recordVideo;
@property (nonatomic, assign) BOOL rotateVideo;
@property (nonatomic, strong) AVAssetWriterInput* recordAssetWriterInput;
@property (nonatomic, strong) AVAssetWriterInputPixelBufferAdaptor* recordPixelBufferAdaptor;
@property (nonatomic, strong) AVAssetWriter* recordAssetWriter;
- (void)adjustLayoutToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation;
- (void)layoutPreviewLayer;
- (void)saveVideo;
- (NSURL *)videoFileURL;
- (NSString *)videoFileString;
@end
///////////////////////////////// CvPhotoCamera ///////////////////////////////////////////
@class CvPhotoCamera2;
@protocol CvPhotoCameraDelegate2 <NSObject>
- (void)photoCamera:(CvPhotoCamera2*)photoCamera capturedImage:(UIImage*)image;
- (void)photoCameraCancel:(CvPhotoCamera2*)photoCamera;
@end
CV_EXPORTS @interface CvPhotoCamera2 : CvAbstractCamera2<AVCapturePhotoCaptureDelegate>
@property (nonatomic, weak) id<CvPhotoCameraDelegate2> delegate;
- (void)takePicture;
@end


@ -0,0 +1,138 @@
//
// CvPhotoCamera2.mm
//
// Created by Giles Payne on 2020/04/01.
//
#import "CvCamera2.h"
#pragma mark - Private Interface
@interface CvPhotoCamera2 ()
{
id<CvPhotoCameraDelegate2> _delegate;
}
@property (nonatomic, strong) AVCaptureStillImageOutput* stillImageOutput;
@end
#pragma mark - Implementation
@implementation CvPhotoCamera2
#pragma mark Public
- (void)setDelegate:(id<CvPhotoCameraDelegate2>)newDelegate {
_delegate = newDelegate;
}
- (id<CvPhotoCameraDelegate2>)delegate {
return _delegate;
}
#pragma mark - Public interface
- (void)takePicture
{
if (self.cameraAvailable == NO) {
return;
}
self.cameraAvailable = NO;
[self.stillImageOutput captureStillImageAsynchronouslyFromConnection:self.videoCaptureConnection
completionHandler:
^(CMSampleBufferRef imageSampleBuffer, NSError *error)
{
if (error == nil && imageSampleBuffer != NULL)
{
// TODO check
// NSNumber* imageOrientation = [UIImage cgImageOrientationForUIDeviceOrientation:currentDeviceOrientation];
// CMSetAttachment(imageSampleBuffer, kCGImagePropertyOrientation, imageOrientation, 1);
NSData *jpegData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageSampleBuffer];
dispatch_async(dispatch_get_main_queue(), ^{
[self.captureSession stopRunning];
// Make sure we create objects on the main thread in the main context
UIImage* newImage = [UIImage imageWithData:jpegData];
//UIImageOrientation orientation = [newImage imageOrientation];
// TODO: only apply rotation, don't scale, since we can set this directly in the camera
/*
switch (orientation) {
case UIImageOrientationUp:
case UIImageOrientationDown:
newImage = [newImage imageWithAppliedRotationAndMaxSize:CGSizeMake(640.0, 480.0)];
break;
case UIImageOrientationLeft:
case UIImageOrientationRight:
newImage = [newImage imageWithMaxSize:CGSizeMake(640.0, 480.0)];
default:
break;
}
*/
// We have captured the image, we can allow the user to take another picture
self.cameraAvailable = YES;
NSLog(@"CvPhotoCamera2 captured image");
[self.delegate photoCamera:self capturedImage:newImage];
[self.captureSession startRunning];
});
}
}];
}
- (void)stop;
{
[super stop];
self.stillImageOutput = nil;
}
#pragma mark - Private Interface
- (void)createStillImageOutput;
{
// setup still image output with jpeg codec
self.stillImageOutput = [[AVCaptureStillImageOutput alloc] init];
NSDictionary *outputSettings = [NSDictionary dictionaryWithObjectsAndKeys:AVVideoCodecJPEG, AVVideoCodecKey, nil];
[self.stillImageOutput setOutputSettings:outputSettings];
[self.captureSession addOutput:self.stillImageOutput];
for (AVCaptureConnection *connection in self.stillImageOutput.connections) {
for (AVCaptureInputPort *port in [connection inputPorts]) {
if ([port.mediaType isEqual:AVMediaTypeVideo]) {
self.videoCaptureConnection = connection;
break;
}
}
if (self.videoCaptureConnection) {
break;
}
}
NSLog(@"[Camera] still image output created");
}
- (void)createCaptureOutput;
{
[self createStillImageOutput];
}
- (void)createCustomVideoPreview;
{
//do nothing, always use AVCaptureVideoPreviewLayer
}
@end


@ -0,0 +1,575 @@
//
// CvVideoCamera2.mm
//
// Created by Giles Payne on 2020/03/11.
//
#import "Mat.h"
#import "CvCamera2.h"
#import <UIKit/UIKit.h>
static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;}
#pragma mark - Private Interface
@interface CvVideoCamera2 () {
int recordingCountDown;
}
- (void)createVideoDataOutput;
- (void)createVideoFileOutput;
@property (nonatomic, strong) CALayer *customPreviewLayer;
@property (nonatomic, strong) AVCaptureVideoDataOutput *videoDataOutput;
@end
#pragma mark - Implementation
@implementation CvVideoCamera2
{
id<CvVideoCameraDelegate2> _delegate;
dispatch_queue_t videoDataOutputQueue;
CMTime lastSampleTime;
}
- (void)setDelegate:(id<CvVideoCameraDelegate2>)newDelegate {
_delegate = newDelegate;
}
- (id<CvVideoCameraDelegate2>)delegate {
return _delegate;
}
#pragma mark - Constructors
- (id)initWithParentView:(UIView*)parent {
self = [super initWithParentView:parent];
if (self) {
parent.contentMode = UIViewContentModeScaleAspectFill;
self.useAVCaptureVideoPreviewLayer = NO;
self.recordVideo = NO;
self.rotateVideo = NO;
self.defaultAVCaptureDevicePosition = AVCaptureDevicePositionBack;
self.defaultAVCaptureSessionPreset = AVCaptureSessionPresetHigh;
self.defaultAVCaptureVideoOrientation = AVCaptureVideoOrientationPortrait;
self.defaultFPS = 30;
self.grayscaleMode = NO;
}
return self;
}
#pragma mark - Public interface
- (void)start {
if (self.running == YES) {
return;
}
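// drop the first few frames after start; captureOutput: only appends sample
// buffers to the recording once this countdown goes negative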
recordingCountDown = 10;
[super start];
if (self.recordVideo == YES) {
NSError* error = nil;
if ([[NSFileManager defaultManager] fileExistsAtPath:[self videoFileString]]) {
[[NSFileManager defaultManager] removeItemAtPath:[self videoFileString] error:&error];
}
if (error == nil) {
NSLog(@"[Camera] Delete file %@", [self videoFileString]);
}
}
}
- (void)stop {
if (self.running == YES) {
[super stop];
if (self.recordVideo == YES) {
if (self.recordAssetWriter) {
if (self.recordAssetWriter.status == AVAssetWriterStatusWriting) {
[self.recordAssetWriter finishWritingWithCompletionHandler:^void() {
NSLog(@"[Camera] recording stopped");
}];
} else {
NSLog(@"[Camera] Recording Error: asset writer status is not writing");
}
}
}
if (self.customPreviewLayer) {
[self.customPreviewLayer removeFromSuperlayer];
self.customPreviewLayer = nil;
}
}
}
// TODO fix
- (void)adjustLayoutToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation {
NSLog(@"layout preview layer");
if (self.parentView != nil) {
CALayer* layer = self.customPreviewLayer;
CGRect bounds = self.customPreviewLayer.bounds;
int rotation_angle = 0;
bool flip_bounds = false;
switch (interfaceOrientation) {
case UIInterfaceOrientationPortrait:
NSLog(@"to Portrait");
rotation_angle = 270;
break;
case UIInterfaceOrientationPortraitUpsideDown:
rotation_angle = 90;
NSLog(@"to UpsideDown");
break;
case UIInterfaceOrientationLandscapeLeft:
rotation_angle = 0;
NSLog(@"to LandscapeLeft");
break;
case UIInterfaceOrientationLandscapeRight:
rotation_angle = 180;
NSLog(@"to LandscapeRight");
break;
default:
break; // leave the layer in its last known orientation
}
switch (self.defaultAVCaptureVideoOrientation) {
case AVCaptureVideoOrientationLandscapeRight:
rotation_angle += 180;
break;
case AVCaptureVideoOrientationPortraitUpsideDown:
rotation_angle += 270;
break;
case AVCaptureVideoOrientationPortrait:
rotation_angle += 90;
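// intentional fall-through: the LandscapeLeft case below adds no extra rotation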
case AVCaptureVideoOrientationLandscapeLeft:
break;
default:
break;
}
rotation_angle = rotation_angle % 360;
if (rotation_angle == 90 || rotation_angle == 270) {
flip_bounds = true;
}
if (flip_bounds) {
NSLog(@"flip bounds");
bounds = CGRectMake(0, 0, bounds.size.height, bounds.size.width);
}
layer.position = CGPointMake(self.parentView.frame.size.width/2., self.parentView.frame.size.height/2.);
self.customPreviewLayer.bounds = CGRectMake(0, 0, self.parentView.frame.size.width, self.parentView.frame.size.height);
layer.affineTransform = CGAffineTransformMakeRotation( DegreesToRadians(rotation_angle) );
layer.bounds = bounds;
}
}
// TODO fix
- (void)layoutPreviewLayer {
NSLog(@"layout preview layer");
if (self.parentView != nil) {
CALayer* layer = self.customPreviewLayer;
CGRect bounds = self.customPreviewLayer.bounds;
int rotation_angle = 0;
bool flip_bounds = false;
switch (self.currentDeviceOrientation) {
case UIDeviceOrientationPortrait:
rotation_angle = 270;
break;
case UIDeviceOrientationPortraitUpsideDown:
rotation_angle = 90;
break;
case UIDeviceOrientationLandscapeLeft:
NSLog(@"left");
rotation_angle = 180;
break;
case UIDeviceOrientationLandscapeRight:
NSLog(@"right");
rotation_angle = 0;
break;
case UIDeviceOrientationFaceUp:
case UIDeviceOrientationFaceDown:
default:
break; // leave the layer in its last known orientation
}
switch (self.defaultAVCaptureVideoOrientation) {
case AVCaptureVideoOrientationLandscapeRight:
rotation_angle += 180;
break;
case AVCaptureVideoOrientationPortraitUpsideDown:
rotation_angle += 270;
break;
case AVCaptureVideoOrientationPortrait:
rotation_angle += 90;
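// intentional fall-through: the LandscapeLeft case below adds no extra rotation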
case AVCaptureVideoOrientationLandscapeLeft:
break;
default:
break;
}
rotation_angle = rotation_angle % 360;
if (rotation_angle == 90 || rotation_angle == 270) {
flip_bounds = true;
}
if (flip_bounds) {
NSLog(@"flip bounds");
bounds = CGRectMake(0, 0, bounds.size.height, bounds.size.width);
}
layer.position = CGPointMake(self.parentView.frame.size.width/2., self.parentView.frame.size.height/2.);
layer.affineTransform = CGAffineTransformMakeRotation( DegreesToRadians(rotation_angle) );
layer.bounds = bounds;
}
}
#pragma mark - Private Interface
- (void)createVideoDataOutput {
// Make a video data output
self.videoDataOutput = [AVCaptureVideoDataOutput new];
// In grayscale mode we want YUV (YpCbCr 4:2:0) so we can directly access the graylevel intensity values (Y component)
// In color mode, the BGRA format is used
OSType format = self.grayscaleMode ? kCVPixelFormatType_420YpCbCr8BiPlanarFullRange : kCVPixelFormatType_32BGRA;
self.videoDataOutput.videoSettings = [NSDictionary dictionaryWithObject:[NSNumber numberWithUnsignedInt:format]
forKey:(id)kCVPixelBufferPixelFormatTypeKey];
// discard if the data output queue is blocked (as we process the still image)
[self.videoDataOutput setAlwaysDiscardsLateVideoFrames:YES];
if ( [self.captureSession canAddOutput:self.videoDataOutput] ) {
[self.captureSession addOutput:self.videoDataOutput];
}
[[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo] setEnabled:YES];
// set default FPS
AVCaptureDeviceInput *currentInput = [self.captureSession.inputs objectAtIndex:0];
AVCaptureDevice *device = currentInput.device;
NSError *error = nil;
[device lockForConfiguration:&error];
float maxRate = ((AVFrameRateRange*) [device.activeFormat.videoSupportedFrameRateRanges objectAtIndex:0]).maxFrameRate;
if (maxRate > self.defaultFPS - 1 && error == nil) {
[device setActiveVideoMinFrameDuration:CMTimeMake(1, self.defaultFPS)];
[device setActiveVideoMaxFrameDuration:CMTimeMake(1, self.defaultFPS)];
NSLog(@"[Camera] FPS set to %d", self.defaultFPS);
} else {
NSLog(@"[Camera] unable to set defaultFPS at %d FPS, max is %f FPS", self.defaultFPS, maxRate);
}
if (error != nil) {
NSLog(@"[Camera] unable to set defaultFPS: %@", error);
}
[device unlockForConfiguration];
// set video mirroring for front camera (more intuitive)
if ([self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].supportsVideoMirroring) {
if (self.defaultAVCaptureDevicePosition == AVCaptureDevicePositionFront) {
[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoMirrored = YES;
} else {
[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoMirrored = NO;
}
}
// set default video orientation
if ([self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].supportsVideoOrientation) {
[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoOrientation = self.defaultAVCaptureVideoOrientation;
}
// create a custom preview layer
self.customPreviewLayer = [CALayer layer];
self.customPreviewLayer.bounds = CGRectMake(0, 0, self.parentView.frame.size.width, self.parentView.frame.size.height);
self.customPreviewLayer.position = CGPointMake(self.parentView.frame.size.width/2., self.parentView.frame.size.height/2.);
[self updateOrientation];
// create a serial dispatch queue used for the sample buffer delegate as well as when a still image is captured
// a serial dispatch queue must be used to guarantee that video frames will be delivered in order
// see the header doc for setSampleBufferDelegate:queue: for more information
videoDataOutputQueue = dispatch_queue_create("VideoDataOutputQueue", DISPATCH_QUEUE_SERIAL);
[self.videoDataOutput setSampleBufferDelegate:self queue:videoDataOutputQueue];
NSLog(@"[Camera] created AVCaptureVideoDataOutput");
}
- (void)createVideoFileOutput {
/* Video File Output in H.264, via AVAssetWriter */
NSLog(@"Create Video with dimensions %dx%d", self.imageWidth, self.imageHeight);
NSDictionary *outputSettings
= [NSDictionary dictionaryWithObjectsAndKeys:[NSNumber numberWithInt:self.imageWidth], AVVideoWidthKey,
[NSNumber numberWithInt:self.imageHeight], AVVideoHeightKey,
AVVideoCodecH264, AVVideoCodecKey,
nil
];
self.recordAssetWriterInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:outputSettings];
int pixelBufferFormat = (self.grayscaleMode == YES) ? kCVPixelFormatType_420YpCbCr8BiPlanarFullRange : kCVPixelFormatType_32BGRA;
self.recordPixelBufferAdaptor =
[[AVAssetWriterInputPixelBufferAdaptor alloc]
initWithAssetWriterInput:self.recordAssetWriterInput
sourcePixelBufferAttributes:[NSDictionary dictionaryWithObjectsAndKeys:[NSNumber numberWithInt:pixelBufferFormat], kCVPixelBufferPixelFormatTypeKey, nil]];
NSError* error = nil;
NSLog(@"Create AVAssetWriter with url: %@", [self videoFileURL]);
self.recordAssetWriter = [AVAssetWriter assetWriterWithURL:[self videoFileURL]
fileType:AVFileTypeMPEG4
error:&error];
if (error != nil) {
NSLog(@"[Camera] Unable to create AVAssetWriter: %@", error);
}
[self.recordAssetWriter addInput:self.recordAssetWriterInput];
self.recordAssetWriterInput.expectsMediaDataInRealTime = YES;
NSLog(@"[Camera] created AVAssetWriter");
}
- (void)createCaptureOutput {
[self createVideoDataOutput];
if (self.recordVideo == YES) {
[self createVideoFileOutput];
}
}
- (void)createCustomVideoPreview {
[self.parentView.layer addSublayer:self.customPreviewLayer];
}
- (CVPixelBufferRef) pixelBufferFromCGImage: (CGImageRef) image {
CGSize frameSize = CGSizeMake(CGImageGetWidth(image), CGImageGetHeight(image));
NSDictionary *options = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithBool:NO], kCVPixelBufferCGImageCompatibilityKey,
[NSNumber numberWithBool:NO], kCVPixelBufferCGBitmapContextCompatibilityKey,
nil];
CVPixelBufferRef pxbuffer = NULL;
CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, frameSize.width,
frameSize.height, kCVPixelFormatType_32ARGB, (CFDictionaryRef) CFBridgingRetain(options),
&pxbuffer);
NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);
CVPixelBufferLockBaseAddress(pxbuffer, 0);
void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(pxdata, frameSize.width,
frameSize.height, 8, 4*frameSize.width, rgbColorSpace,
kCGImageAlphaPremultipliedFirst);
CGContextDrawImage(context, CGRectMake(0, 0, CGImageGetWidth(image),
CGImageGetHeight(image)), image);
CGColorSpaceRelease(rgbColorSpace);
CGContextRelease(context);
CVPixelBufferUnlockBaseAddress(pxbuffer, 0);
return pxbuffer;
}
#pragma mark - Protocol AVCaptureVideoDataOutputSampleBufferDelegate
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
(void)captureOutput;
(void)connection;
auto strongDelegate = self.delegate;
if (strongDelegate) {
// convert from Core Media to Core Video
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(imageBuffer, 0);
void* bufferAddress;
size_t width;
size_t height;
size_t bytesPerRow;
CGColorSpaceRef colorSpace;
CGContextRef context;
int format_opencv;
OSType format = CVPixelBufferGetPixelFormatType(imageBuffer);
if (format == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) {
format_opencv = CV_8UC1;
bufferAddress = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0);
width = CVPixelBufferGetWidthOfPlane(imageBuffer, 0);
height = CVPixelBufferGetHeightOfPlane(imageBuffer, 0);
bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, 0);
} else { // expect kCVPixelFormatType_32BGRA
format_opencv = CV_8UC4;
bufferAddress = CVPixelBufferGetBaseAddress(imageBuffer);
width = CVPixelBufferGetWidth(imageBuffer);
height = CVPixelBufferGetHeight(imageBuffer);
bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
}
// delegate image processing to the delegate
cv::Mat image((int)height, (int)width, format_opencv, bufferAddress, bytesPerRow);
CGImage* dstImage;
if ([strongDelegate respondsToSelector:@selector(processImage:)]) {
[strongDelegate processImage:[Mat fromNative:image]];
}
// check if matrix data pointer or dimensions were changed by the delegate
bool iOSimage = false;
if (height == (size_t)image.rows && width == (size_t)image.cols && format_opencv == image.type() && bufferAddress == image.data && bytesPerRow == image.step) {
iOSimage = true;
}
// (create color space, create graphics context, render buffer)
CGBitmapInfo bitmapInfo;
// basically we decide if it's a grayscale, rgb or rgba image
if (image.channels() == 1) {
colorSpace = CGColorSpaceCreateDeviceGray();
bitmapInfo = kCGImageAlphaNone;
} else if (image.channels() == 3) {
colorSpace = CGColorSpaceCreateDeviceRGB();
bitmapInfo = kCGImageAlphaNone;
if (iOSimage) {
bitmapInfo |= kCGBitmapByteOrder32Little;
} else {
bitmapInfo |= kCGBitmapByteOrder32Big;
}
} else {
colorSpace = CGColorSpaceCreateDeviceRGB();
bitmapInfo = kCGImageAlphaPremultipliedFirst;
if (iOSimage) {
bitmapInfo |= kCGBitmapByteOrder32Little;
} else {
bitmapInfo |= kCGBitmapByteOrder32Big;
}
}
if (iOSimage) {
context = CGBitmapContextCreate(bufferAddress, width, height, 8, bytesPerRow, colorSpace, bitmapInfo);
dstImage = CGBitmapContextCreateImage(context);
CGContextRelease(context);
} else {
NSData *data = [NSData dataWithBytes:image.data length:image.elemSize()*image.total()];
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
// Creating CGImage from cv::Mat
dstImage = CGImageCreate(image.cols, // width
image.rows, // height
8, // bits per component
8 * image.elemSize(), // bits per pixel
image.step, // bytesPerRow
colorSpace, // colorspace
bitmapInfo, // bitmap info
provider, // CGDataProviderRef
NULL, // decode
false, // should interpolate
kCGRenderingIntentDefault // intent
);
CGDataProviderRelease(provider);
}
// render buffer
dispatch_sync(dispatch_get_main_queue(), ^{
self.customPreviewLayer.contents = (__bridge id)dstImage;
});
recordingCountDown--;
if (self.recordVideo == YES && recordingCountDown < 0) {
lastSampleTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
// CMTimeShow(lastSampleTime);
if (self.recordAssetWriter.status != AVAssetWriterStatusWriting) {
[self.recordAssetWriter startWriting];
[self.recordAssetWriter startSessionAtSourceTime:lastSampleTime];
if (self.recordAssetWriter.status != AVAssetWriterStatusWriting) {
NSLog(@"[Camera] Recording Error: asset writer status is not writing: %@", self.recordAssetWriter.error);
return;
} else {
NSLog(@"[Camera] Video recording started");
}
}
if (self.recordAssetWriterInput.readyForMoreMediaData) {
CVImageBufferRef pixelBuffer = [self pixelBufferFromCGImage:dstImage];
if (! [self.recordPixelBufferAdaptor appendPixelBuffer:pixelBuffer
withPresentationTime:lastSampleTime] ) {
NSLog(@"Video Writing Error");
}
if (pixelBuffer != nullptr)
CVPixelBufferRelease(pixelBuffer);
}
}
// cleanup
CGImageRelease(dstImage);
CGColorSpaceRelease(colorSpace);
CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
}
}
- (void)updateOrientation {
if (self.rotateVideo == YES)
{
NSLog(@"rotate..");
self.customPreviewLayer.bounds = CGRectMake(0, 0, self.parentView.frame.size.width, self.parentView.frame.size.height);
[self layoutPreviewLayer];
}
}
- (void)saveVideo {
if (self.recordVideo == NO) {
return;
}
UISaveVideoAtPathToSavedPhotosAlbum([self videoFileString], nil, nil, NULL);
}
- (NSURL *)videoFileURL {
NSString *outputPath = [[NSString alloc] initWithFormat:@"%@%@", NSTemporaryDirectory(), @"output.mov"];
NSURL *outputURL = [NSURL fileURLWithPath:outputPath];
NSFileManager *fileManager = [NSFileManager defaultManager];
if ([fileManager fileExistsAtPath:outputPath]) {
NSLog(@"file exists");
}
return outputURL;
}
- (NSString *)videoFileString {
NSString *outputPath = [[NSString alloc] initWithFormat:@"%@%@", NSTemporaryDirectory(), @"output.mov"];
return outputPath;
}
@end


@ -0,0 +1,18 @@
cmake_minimum_required(VERSION 3.5)
get_filename_component(OpenCV_SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/../../../.." ABSOLUTE)
include("${OpenCV_SOURCE_DIR}/cmake/OpenCVPluginStandalone.cmake")
# scan dependencies
set(WITH_FFMPEG ON)
set(OPENCV_FFMPEG_SKIP_BUILD_CHECK ON)
include("${OpenCV_SOURCE_DIR}/modules/videoio/cmake/init.cmake")
set(OPENCV_PLUGIN_DEPS core imgproc imgcodecs)
ocv_create_plugin(videoio "opencv_videoio_ffmpeg" "ocv.3rdparty.ffmpeg" "FFmpeg" "src/cap_ffmpeg.cpp")
message(STATUS "FFMPEG_libavcodec_VERSION=${FFMPEG_libavcodec_VERSION}")
message(STATUS "FFMPEG_libavformat_VERSION=${FFMPEG_libavformat_VERSION}")
message(STATUS "FFMPEG_libavutil_VERSION=${FFMPEG_libavutil_VERSION}")
message(STATUS "FFMPEG_libswscale_VERSION=${FFMPEG_libswscale_VERSION}")
message(STATUS "FFMPEG_libavresample_VERSION=${FFMPEG_libavresample_VERSION}")


@ -0,0 +1,45 @@
FROM ubuntu:18.04
RUN apt-get update && apt-get --no-install-recommends install -y \
pkg-config \
cmake \
g++ \
ninja-build \
make \
nasm \
&& \
rm -rf /var/lib/apt/lists/*
ARG VER
ADD ffmpeg-${VER}.tar.xz /ffmpeg/
WORKDIR /ffmpeg/ffmpeg-${VER}
RUN ./configure \
--enable-avresample \
--prefix=/ffmpeg-shared \
--enable-shared \
--disable-static \
--disable-programs \
--disable-doc \
--disable-avdevice \
--disable-postproc \
&& make -j8 install \
&& make clean \
&& make distclean
RUN ./configure \
--enable-avresample \
--prefix=/ffmpeg-static \
--disable-shared \
--enable-static \
--enable-pic \
--disable-programs \
--disable-doc \
--disable-avdevice \
--disable-postproc \
&& make -j8 install \
&& make clean \
&& make distclean
WORKDIR /tmp


@ -0,0 +1,17 @@
ARG VER
FROM ubuntu:$VER
RUN apt-get update && apt-get --no-install-recommends install -y \
libavcodec-dev \
libavfilter-dev \
libavformat-dev \
libavresample-dev \
libavutil-dev \
pkg-config \
cmake \
g++ \
ninja-build \
&& \
rm -rf /var/lib/apt/lists/*
WORKDIR /tmp


@ -0,0 +1,24 @@
#!/bin/bash
set -e
mkdir -p build_shared && pushd build_shared
PKG_CONFIG_PATH=/ffmpeg-shared/lib/pkgconfig \
cmake -GNinja \
-DOPENCV_PLUGIN_NAME=opencv_videoio_ffmpeg_shared_$2 \
-DOPENCV_PLUGIN_DESTINATION=$1 \
-DCMAKE_BUILD_TYPE=$3 \
/opencv/modules/videoio/misc/plugin_ffmpeg
ninja
popd
mkdir -p build_static && pushd build_static
PKG_CONFIG_PATH=/ffmpeg-static/lib/pkgconfig \
cmake -GNinja \
-DOPENCV_PLUGIN_NAME=opencv_videoio_ffmpeg_static_$2 \
-DOPENCV_PLUGIN_DESTINATION=$1 \
-DCMAKE_MODULE_LINKER_FLAGS=-Wl,-Bsymbolic \
-DCMAKE_BUILD_TYPE=$3 \
/opencv/modules/videoio/misc/plugin_ffmpeg
ninja
popd


@ -0,0 +1,10 @@
#!/bin/bash
set -e
cmake -GNinja \
-DOPENCV_PLUGIN_NAME=opencv_videoio_ffmpeg_ubuntu_$2 \
-DOPENCV_PLUGIN_DESTINATION=$1 \
-DCMAKE_BUILD_TYPE=$3 \
/opencv/modules/videoio/misc/plugin_ffmpeg
ninja


@ -0,0 +1,13 @@
cmake_minimum_required(VERSION 3.5)
get_filename_component(OpenCV_SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/../../../.." ABSOLUTE)
include("${OpenCV_SOURCE_DIR}/cmake/OpenCVPluginStandalone.cmake")
# scan dependencies
set(WITH_GSTREAMER ON)
include("${OpenCV_SOURCE_DIR}/modules/videoio/cmake/init.cmake")
set(OPENCV_PLUGIN_DEPS core imgproc imgcodecs)
ocv_create_plugin(videoio "opencv_videoio_gstreamer" "ocv.3rdparty.gstreamer" "GStreamer" "src/cap_gstreamer.cpp")
message(STATUS "Using GStreamer: ${GSTREAMER_VERSION}")


@ -0,0 +1,13 @@
FROM ubuntu:18.04
RUN apt-get update && apt-get --no-install-recommends install -y \
libgstreamer-plugins-base1.0-dev \
libgstreamer-plugins-good1.0-dev \
libgstreamer1.0-dev \
cmake \
g++ \
ninja-build \
&& \
rm -rf /var/lib/apt/lists/*
WORKDIR /tmp


@ -0,0 +1,11 @@
#!/bin/bash
set -e
cmake -GNinja \
-DOPENCV_PLUGIN_NAME=opencv_videoio_gstreamer \
-DOPENCV_PLUGIN_DESTINATION=$1 \
-DCMAKE_BUILD_TYPE=$2 \
/opencv/modules/videoio/misc/plugin_gstreamer
ninja


@ -0,0 +1,23 @@
#ifdef HAVE_OPENCV_VIDEOIO
typedef std::vector<VideoCaptureAPIs> vector_VideoCaptureAPIs;
template<> struct pyopencvVecConverter<cv::VideoCaptureAPIs>
{
static bool to(PyObject* obj, std::vector<cv::VideoCaptureAPIs>& value, const ArgInfo& info)
{
return pyopencv_to_generic_vec(obj, value, info);
}
static PyObject* from(const std::vector<cv::VideoCaptureAPIs>& value)
{
return pyopencv_from_generic_vec(value);
}
};
template<>
bool pyopencv_to(PyObject *o, std::vector<cv::VideoCaptureAPIs>& apis, const ArgInfo& info)
{
return pyopencvVecConverter<cv::VideoCaptureAPIs>::to(o, apis, info);
}
#endif // HAVE_OPENCV_VIDEOIO


@ -0,0 +1,25 @@
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests
class Bindings(NewOpenCVTests):
def check_name(self, name):
#print(name)
self.assertIsNotNone(name)
self.assertNotEqual(name, "")
def test_registry(self):
self.check_name(cv.videoio_registry.getBackendName(cv.CAP_ANY))
self.check_name(cv.videoio_registry.getBackendName(cv.CAP_FFMPEG))
self.check_name(cv.videoio_registry.getBackendName(cv.CAP_OPENCV_MJPEG))
backends = cv.videoio_registry.getBackends()
for backend in backends:
self.check_name(cv.videoio_registry.getBackendName(backend))
if __name__ == '__main__':
NewOpenCVTests.bootstrap()


@ -0,0 +1,76 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
// Not a standalone header.
#include <opencv2/core/utils/configuration.private.hpp>
namespace opencv_test {
using namespace perf;
static
utils::Paths getTestCameras()
{
static utils::Paths cameras = utils::getConfigurationParameterPaths("OPENCV_TEST_PERF_CAMERA_LIST");
return cameras;
}
PERF_TEST(VideoCapture_Camera, waitAny_V4L)
{
auto cameraNames = getTestCameras();
if (cameraNames.empty())
throw SkipTestException("No list of tested cameras. Use OPENCV_TEST_PERF_CAMERA_LIST parameter");
const int totalFrames = 50; // number of expected frames (summary for all cameras)
const int64 timeoutNS = 100 * 1000000;
const Size frameSize(640, 480);
const int fpsDefaultEven = 30;
const int fpsDefaultOdd = 15;
std::vector<VideoCapture> cameras;
for (size_t i = 0; i < cameraNames.size(); ++i)
{
const auto& name = cameraNames[i];
int fps = (int)utils::getConfigurationParameterSizeT(cv::format("OPENCV_TEST_CAMERA%d_FPS", (int)i).c_str(), (i & 1) ? fpsDefaultOdd : fpsDefaultEven);
std::cout << "Camera[" << i << "] = '" << name << "', fps=" << fps << std::endl;
VideoCapture cap(name, CAP_V4L);
ASSERT_TRUE(cap.isOpened()) << name;
EXPECT_TRUE(cap.set(CAP_PROP_FRAME_WIDTH, frameSize.width)) << name;
EXPECT_TRUE(cap.set(CAP_PROP_FRAME_HEIGHT, frameSize.height)) << name;
EXPECT_TRUE(cap.set(CAP_PROP_FPS, fps)) << name;
//launch cameras
Mat firstFrame;
EXPECT_TRUE(cap.read(firstFrame));
EXPECT_EQ(frameSize.width, firstFrame.cols);
EXPECT_EQ(frameSize.height, firstFrame.rows);
cameras.push_back(cap);
}
TEST_CYCLE()
{
int counter = 0;
std::vector<int> cameraReady;
do
{
EXPECT_TRUE(VideoCapture::waitAny(cameras, cameraReady, timeoutNS));
EXPECT_FALSE(cameraReady.empty());
for (int idx : cameraReady)
{
VideoCapture& c = cameras[idx];
Mat frame;
ASSERT_TRUE(c.retrieve(frame));
EXPECT_EQ(frameSize.width, frame.cols);
EXPECT_EQ(frameSize.height, frame.rows);
++counter;
}
}
while(counter < totalFrames);
}
SANITY_CHECK_NOTHING();
}
} // namespace


@ -0,0 +1,37 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "perf_precomp.hpp"
#include "perf_camera.impl.hpp"
namespace opencv_test
{
using namespace perf;
typedef perf::TestBaseWithParam<std::string> VideoCapture_Reading;
const string bunny_files[] = {
"highgui/video/big_buck_bunny.avi",
"highgui/video/big_buck_bunny.mov",
"highgui/video/big_buck_bunny.mp4",
#ifndef HAVE_MSMF
// MPEG2 is not supported by Media Foundation yet
// http://social.msdn.microsoft.com/Forums/en-US/mediafoundationdevelopment/thread/39a36231-8c01-40af-9af5-3c105d684429
"highgui/video/big_buck_bunny.mpg",
#endif
"highgui/video/big_buck_bunny.wmv"
};
PERF_TEST_P(VideoCapture_Reading, ReadFile, testing::ValuesIn(bunny_files) )
{
string filename = getDataPath(GetParam());
VideoCapture cap;
TEST_CYCLE() cap.open(filename);
SANITY_CHECK_NOTHING();
}
} // namespace


@ -0,0 +1,10 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "perf_precomp.hpp"
#if defined(HAVE_HPX)
#include <hpx/hpx_main.hpp>
#endif
CV_PERF_TEST_MAIN(videoio)


@ -0,0 +1,47 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "perf_precomp.hpp"
namespace opencv_test
{
using namespace perf;
typedef tuple<std::string, bool> VideoWriter_Writing_t;
typedef perf::TestBaseWithParam<VideoWriter_Writing_t> VideoWriter_Writing;
const string image_files[] = {
"python/images/QCIF_00.bmp",
"python/images/QCIF_01.bmp",
"python/images/QCIF_02.bmp",
"python/images/QCIF_03.bmp",
"python/images/QCIF_04.bmp",
"python/images/QCIF_05.bmp"
};
PERF_TEST_P(VideoWriter_Writing, WriteFrame,
testing::Combine(
testing::ValuesIn(image_files),
testing::Bool()))
{
const string filename = getDataPath(get<0>(GetParam()));
const bool isColor = get<1>(GetParam());
Mat image = imread(filename, isColor ? IMREAD_COLOR : IMREAD_GRAYSCALE );
#if defined(HAVE_MSMF) && !defined(HAVE_FFMPEG)
const string outfile = cv::tempfile(".wmv");
const int fourcc = VideoWriter::fourcc('W', 'M', 'V', '3');
#else
const string outfile = cv::tempfile(".avi");
const int fourcc = VideoWriter::fourcc('X', 'V', 'I', 'D');
#endif
VideoWriter writer(outfile, fourcc, 25, cv::Size(image.cols, image.rows), isColor);
if (!writer.isOpened())
throw SkipTestException("Video file can not be opened");
TEST_CYCLE_N(100) { writer << image; }
SANITY_CHECK_NOTHING();
remove(outfile.c_str());
}
} // namespace


@ -0,0 +1,10 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/ts.hpp"
#include "opencv2/videoio.hpp"
#endif


@ -0,0 +1,65 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef BACKEND_HPP_DEFINED
#define BACKEND_HPP_DEFINED
#include "cap_interface.hpp"
#include "opencv2/videoio/registry.hpp"
namespace cv {
// TODO: move to public interface
// TODO: allow runtime backend registration
class IBackend
{
public:
virtual ~IBackend() {}
virtual Ptr<IVideoCapture> createCapture(int camera, const VideoCaptureParameters& params) const = 0;
virtual Ptr<IVideoCapture> createCapture(const std::string &filename, const VideoCaptureParameters& params) const = 0;
virtual Ptr<IVideoWriter> createWriter(const std::string& filename, int fourcc, double fps, const cv::Size& sz,
const VideoWriterParameters& params) const = 0;
};
class IBackendFactory
{
public:
virtual ~IBackendFactory() {}
virtual Ptr<IBackend> getBackend() const = 0;
virtual bool isBuiltIn() const = 0;
};
//=============================================================================
typedef Ptr<IVideoCapture> (*FN_createCaptureFile)(const std::string & filename);
typedef Ptr<IVideoCapture> (*FN_createCaptureCamera)(int camera);
typedef Ptr<IVideoCapture> (*FN_createCaptureFileWithParams)(const std::string & filename, const VideoCaptureParameters& params);
typedef Ptr<IVideoCapture> (*FN_createCaptureCameraWithParams)(int camera, const VideoCaptureParameters& params);
typedef Ptr<IVideoWriter> (*FN_createWriter)(const std::string& filename, int fourcc, double fps, const Size& sz,
const VideoWriterParameters& params);
Ptr<IBackendFactory> createBackendFactory(FN_createCaptureFile createCaptureFile,
FN_createCaptureCamera createCaptureCamera,
FN_createWriter createWriter);
Ptr<IBackendFactory> createBackendFactory(FN_createCaptureFileWithParams createCaptureFile,
FN_createCaptureCameraWithParams createCaptureCamera,
FN_createWriter createWriter);
Ptr<IBackendFactory> createPluginBackendFactory(VideoCaptureAPIs id, const char* baseName);
void applyParametersFallback(const Ptr<IVideoCapture>& cap, const VideoCaptureParameters& params);
std::string getCapturePluginVersion(
const Ptr<IBackendFactory>& backend_factory,
CV_OUT int& version_ABI,
CV_OUT int& version_API
);
std::string getWriterPluginVersion(
const Ptr<IBackendFactory>& backend_factory,
CV_OUT int& version_ABI,
CV_OUT int& version_API
);
} // namespace cv::
#endif // BACKEND_HPP_DEFINED
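For orientation, a built-in (non-plugin) backend is typically wired through these helpers roughly as follows. This is a minimal sketch; the createMy* callbacks are hypothetical:

static Ptr<IVideoCapture> createMyCaptureFile(const std::string& filename);
static Ptr<IVideoCapture> createMyCaptureCamera(int index);
static Ptr<IVideoWriter> createMyWriter(const std::string& filename, int fourcc,
                                        double fps, const Size& sz,
                                        const VideoWriterParameters& params);

Ptr<IBackendFactory> factory = createBackendFactory(
    createMyCaptureFile, createMyCaptureCamera, createMyWriter);
Ptr<IBackend> backend = factory->getBackend(); // built-ins never load plugins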

View File

@ -0,0 +1,782 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "precomp.hpp"
#include "backend.hpp"
#include "plugin_api.hpp"
#include "plugin_capture_api.hpp"
#include "plugin_writer_api.hpp"
#include "opencv2/core/utils/configuration.private.hpp"
#include "opencv2/core/utils/logger.hpp"
#include "opencv2/core/private.hpp"
#include "videoio_registry.hpp"
//==================================================================================================
// Dynamic backend implementation
#include "opencv2/core/utils/plugin_loader.private.hpp"
#include "backend_plugin_legacy.impl.hpp"
namespace cv { namespace impl {
#if OPENCV_HAVE_FILESYSTEM_SUPPORT && defined(ENABLE_PLUGINS)
using namespace cv::plugin::impl; // plugin_loader.hpp
static Mutex& getInitializationMutex()
{
static Mutex initializationMutex;
return initializationMutex;
}
class PluginBackend: public IBackend
{
protected:
void initCaptureAPI()
{
const char* init_name = "opencv_videoio_capture_plugin_init_v1";
FN_opencv_videoio_capture_plugin_init_t fn_init = reinterpret_cast<FN_opencv_videoio_capture_plugin_init_t>(lib_->getSymbol(init_name));
if (fn_init)
{
CV_LOG_INFO(NULL, "Found entry: '" << init_name << "'");
for (int supported_api_version = CAPTURE_API_VERSION; supported_api_version >= 0; supported_api_version--)
{
capture_api_ = fn_init(CAPTURE_ABI_VERSION, supported_api_version, NULL);
if (capture_api_)
break;
}
if (!capture_api_)
{
CV_LOG_INFO(NULL, "Video I/O: plugin is incompatible (can't be initialized): " << lib_->getName());
return;
}
if (!checkCompatibility(
capture_api_->api_header, CAPTURE_ABI_VERSION, CAPTURE_API_VERSION,
capture_api_->v0.id != CAP_FFMPEG))
{
capture_api_ = NULL;
return;
}
CV_LOG_INFO(NULL, "Video I/O: plugin is ready to use '" << capture_api_->api_header.api_description << "'");
}
else
{
CV_LOG_INFO(NULL, "Video I/O: missing plugin init function: '" << init_name << "', file: " << lib_->getName());
}
}
void initWriterAPI()
{
const char* init_name = "opencv_videoio_writer_plugin_init_v1";
FN_opencv_videoio_writer_plugin_init_t fn_init = reinterpret_cast<FN_opencv_videoio_writer_plugin_init_t>(lib_->getSymbol(init_name));
if (fn_init)
{
CV_LOG_INFO(NULL, "Found entry: '" << init_name << "'");
for (int supported_api_version = WRITER_API_VERSION; supported_api_version >= 0; supported_api_version--)
{
writer_api_ = fn_init(WRITER_ABI_VERSION, supported_api_version, NULL);
if (writer_api_)
break;
}
if (!writer_api_)
{
CV_LOG_INFO(NULL, "Video I/O: plugin is incompatible (can't be initialized): " << lib_->getName());
return;
}
if (!checkCompatibility(
writer_api_->api_header, WRITER_ABI_VERSION, WRITER_API_VERSION,
writer_api_->v0.id != CAP_FFMPEG))
{
writer_api_ = NULL;
return;
}
CV_LOG_INFO(NULL, "Video I/O: plugin is ready to use '" << writer_api_->api_header.api_description << "'");
}
else
{
CV_LOG_INFO(NULL, "Video I/O: missing plugin init function: '" << init_name << "', file: " << lib_->getName());
}
}
void initPluginLegacyAPI()
{
const char* init_name = "opencv_videoio_plugin_init_v0";
FN_opencv_videoio_plugin_init_t fn_init = reinterpret_cast<FN_opencv_videoio_plugin_init_t>(lib_->getSymbol(init_name));
if (fn_init)
{
CV_LOG_INFO(NULL, "Found entry: '" << init_name << "'");
for (int supported_api_version = API_VERSION; supported_api_version >= 0; supported_api_version--)
{
plugin_api_ = fn_init(ABI_VERSION, supported_api_version, NULL);
if (plugin_api_)
break;
}
if (!plugin_api_)
{
CV_LOG_INFO(NULL, "Video I/O: plugin is incompatible (can't be initialized): " << lib_->getName());
return;
}
if (!checkCompatibility(
plugin_api_->api_header, ABI_VERSION, API_VERSION,
plugin_api_->v0.captureAPI != CAP_FFMPEG))
{
plugin_api_ = NULL;
return;
}
CV_LOG_INFO(NULL, "Video I/O: plugin is ready to use '" << plugin_api_->api_header.api_description << "'");
}
else
{
CV_LOG_INFO(NULL, "Video I/O: plugin is incompatible, missing init function: '" << init_name << "', file: " << lib_->getName());
}
}
bool checkCompatibility(const OpenCV_API_Header& api_header, unsigned int abi_version, unsigned int api_version, bool checkMinorOpenCVVersion)
{
if (api_header.opencv_version_major != CV_VERSION_MAJOR)
{
CV_LOG_ERROR(NULL, "Video I/O: wrong OpenCV major version used by plugin '" << api_header.api_description << "': " <<
cv::format("%d.%d, OpenCV version is '" CV_VERSION "'", api_header.opencv_version_major, api_header.opencv_version_minor))
return false;
}
if (!checkMinorOpenCVVersion)
{
// no checks for OpenCV minor version
}
else if (api_header.opencv_version_minor != CV_VERSION_MINOR)
{
CV_LOG_ERROR(NULL, "Video I/O: wrong OpenCV minor version used by plugin '" << api_header.api_description << "': " <<
cv::format("%d.%d, OpenCV version is '" CV_VERSION "'", api_header.opencv_version_major, api_header.opencv_version_minor))
return false;
}
CV_LOG_INFO(NULL, "Video I/O: initialized '" << api_header.api_description << "': built with "
<< cv::format("OpenCV %d.%d (ABI/API = %d/%d)",
api_header.opencv_version_major, api_header.opencv_version_minor,
api_header.min_api_version, api_header.api_version)
<< ", current OpenCV version is '" CV_VERSION "' (ABI/API = " << abi_version << "/" << api_version << ")"
);
if (api_header.min_api_version != abi_version) // future: range can be here
{
// actually this should never happen due to checks in plugin's init() function
CV_LOG_ERROR(NULL, "Video I/O: plugin is not supported due to incompatible ABI = " << api_header.min_api_version);
return false;
}
if (api_header.api_version != api_version)
{
CV_LOG_INFO(NULL, "Video I/O: NOTE: plugin is supported, but there is API version mismath: "
<< cv::format("plugin API level (%d) != OpenCV API level (%d)", api_header.api_version, api_version));
if (api_header.api_version < api_version)
{
CV_LOG_INFO(NULL, "Video I/O: NOTE: some functionality may be unavailable due to lack of support by plugin implementation");
}
}
return true;
}
public:
Ptr<cv::plugin::impl::DynamicLib> lib_;
const OpenCV_VideoIO_Capture_Plugin_API* capture_api_;
const OpenCV_VideoIO_Writer_Plugin_API* writer_api_;
const OpenCV_VideoIO_Plugin_API_preview* plugin_api_; //!< deprecated
PluginBackend(const Ptr<cv::plugin::impl::DynamicLib>& lib)
: lib_(lib)
, capture_api_(NULL), writer_api_(NULL)
, plugin_api_(NULL)
{
initCaptureAPI();
initWriterAPI();
if (capture_api_ == NULL && writer_api_ == NULL)
{
initPluginLegacyAPI();
}
}
Ptr<IVideoCapture> createCapture(int camera) const;
Ptr<IVideoCapture> createCapture(int camera, const VideoCaptureParameters& params) const CV_OVERRIDE;
Ptr<IVideoCapture> createCapture(const std::string &filename) const;
Ptr<IVideoCapture> createCapture(const std::string &filename, const VideoCaptureParameters& params) const CV_OVERRIDE;
Ptr<IVideoWriter> createWriter(const std::string& filename, int fourcc, double fps,
const cv::Size& sz, const VideoWriterParameters& params) const CV_OVERRIDE;
std::string getCapturePluginVersion(CV_OUT int& version_ABI, CV_OUT int& version_API)
{
CV_Assert(capture_api_ || plugin_api_);
const OpenCV_API_Header& api_header = capture_api_ ? capture_api_->api_header : plugin_api_->api_header;
version_ABI = api_header.min_api_version;
version_API = api_header.api_version;
return api_header.api_description;
}
std::string getWriterPluginVersion(CV_OUT int& version_ABI, CV_OUT int& version_API)
{
CV_Assert(writer_api_ || plugin_api_);
const OpenCV_API_Header& api_header = writer_api_ ? writer_api_->api_header : plugin_api_->api_header;
version_ABI = api_header.min_api_version;
version_API = api_header.api_version;
return api_header.api_description;
}
};
class PluginBackendFactory : public IBackendFactory
{
public:
VideoCaptureAPIs id_;
const char* baseName_;
Ptr<PluginBackend> backend;
bool initialized;
public:
PluginBackendFactory(VideoCaptureAPIs id, const char* baseName) :
id_(id), baseName_(baseName),
initialized(false)
{
// nothing, plugins are loaded on demand
}
Ptr<IBackend> getBackend() const CV_OVERRIDE
{
initBackend();
return backend.staticCast<IBackend>();
}
bool isBuiltIn() const CV_OVERRIDE { return false; }
std::string getCapturePluginVersion(
CV_OUT int& version_ABI,
CV_OUT int& version_API) const
{
initBackend();
if (!backend)
CV_Error_(Error::StsNotImplemented, ("Backend '%s' is not available", baseName_));
return backend->getCapturePluginVersion(version_ABI, version_API);
}
std::string getWriterPluginVersion(
CV_OUT int& version_ABI,
CV_OUT int& version_API) const
{
initBackend();
if (!backend)
CV_Error_(Error::StsNotImplemented, ("Backend '%s' is not available", baseName_));
return backend->getWriterPluginVersion(version_ABI, version_API);
}
protected:
inline void initBackend() const
{
if (!initialized)
{
const_cast<PluginBackendFactory*>(this)->initBackend_();
}
}
void initBackend_()
{
AutoLock lock(getInitializationMutex());
try {
if (!initialized)
loadPlugin();
}
catch (...)
{
CV_LOG_INFO(NULL, "Video I/O: exception during plugin loading: " << baseName_ << ". SKIP");
}
initialized = true;
}
void loadPlugin();
};
static
std::vector<FileSystemPath_t> getPluginCandidates(const std::string& baseName)
{
using namespace cv::utils;
using namespace cv::utils::fs;
const std::string baseName_l = toLowerCase(baseName);
const std::string baseName_u = toUpperCase(baseName);
const FileSystemPath_t baseName_l_fs = toFileSystemPath(baseName_l);
std::vector<FileSystemPath_t> paths;
const std::vector<std::string> paths_ = getConfigurationParameterPaths("OPENCV_VIDEOIO_PLUGIN_PATH", std::vector<std::string>());
if (paths_.size() != 0)
{
for (size_t i = 0; i < paths_.size(); i++)
{
paths.push_back(toFileSystemPath(paths_[i]));
}
}
else
{
FileSystemPath_t binaryLocation;
if (getBinLocation(binaryLocation))
{
binaryLocation = getParent(binaryLocation);
#ifndef CV_VIDEOIO_PLUGIN_SUBDIRECTORY
paths.push_back(binaryLocation);
#else
paths.push_back(binaryLocation + toFileSystemPath("/") + toFileSystemPath(CV_VIDEOIO_PLUGIN_SUBDIRECTORY_STR));
#endif
}
}
const std::string default_expr = libraryPrefix() + "opencv_videoio_" + baseName_l + "*" + librarySuffix();
const std::string plugin_expr = getConfigurationParameterString((std::string("OPENCV_VIDEOIO_PLUGIN_") + baseName_u).c_str(), default_expr.c_str());
std::vector<FileSystemPath_t> results;
#ifdef _WIN32
FileSystemPath_t moduleName = toFileSystemPath(libraryPrefix() + "opencv_videoio_" + baseName_l + librarySuffix());
#ifndef WINRT
if (baseName_u == "FFMPEG") // backward compatibility
{
const wchar_t* ffmpeg_env_path = _wgetenv(L"OPENCV_FFMPEG_DLL_DIR");
if (ffmpeg_env_path)
{
results.push_back(FileSystemPath_t(ffmpeg_env_path) + L"\\" + moduleName);
}
}
#endif
if (plugin_expr != default_expr)
{
moduleName = toFileSystemPath(plugin_expr);
results.push_back(moduleName);
}
for (const FileSystemPath_t& path : paths)
{
results.push_back(path + L"\\" + moduleName);
}
results.push_back(moduleName);
#if defined(_DEBUG) && defined(DEBUG_POSTFIX)
if (baseName_u == "FFMPEG") // backward compatibility
{
const FileSystemPath_t templ = toFileSystemPath(CVAUX_STR(DEBUG_POSTFIX) ".dll");
FileSystemPath_t nonDebugName(moduleName);
size_t suf = nonDebugName.rfind(templ);
if (suf != FileSystemPath_t::npos)
{
nonDebugName.replace(suf, templ.size(), L".dll");
results.push_back(nonDebugName);
}
}
#endif // _DEBUG && DEBUG_POSTFIX
#else
CV_LOG_INFO(NULL, "VideoIO plugin (" << baseName << "): glob is '" << plugin_expr << "', " << paths.size() << " location(s)");
for (const std::string& path : paths)
{
if (path.empty())
continue;
std::vector<std::string> candidates;
cv::glob(utils::fs::join(path, plugin_expr), candidates);
// Prefer candidates with higher versions
// TODO: implement an accurate version-based comparator
std::sort(candidates.begin(), candidates.end(), std::greater<std::string>());
CV_LOG_INFO(NULL, " - " << path << ": " << candidates.size());
copy(candidates.begin(), candidates.end(), back_inserter(results));
}
#endif
CV_LOG_INFO(NULL, "Found " << results.size() << " plugin(s) for " << baseName);
return results;
}
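// Note on getPluginCandidates() ordering (hypothetical filenames): std::greater
// sorts the glob results in descending lexicographic order, so a
// version-suffixed build such as "libopencv_videoio_ffmpeg_407_64.so" is
// probed before the plain "libopencv_videoio_ffmpeg.so".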
void PluginBackendFactory::loadPlugin()
{
for (const FileSystemPath_t& plugin : getPluginCandidates(baseName_))
{
auto lib = makePtr<cv::plugin::impl::DynamicLib>(plugin);
if (!lib->isLoaded())
continue;
try
{
Ptr<PluginBackend> pluginBackend = makePtr<PluginBackend>(lib);
if (!pluginBackend)
return;
if (pluginBackend->capture_api_)
{
if (pluginBackend->capture_api_->v0.id != id_)
{
CV_LOG_ERROR(NULL, "Video I/O: plugin '" << pluginBackend->capture_api_->api_header.api_description <<
"': unexpected backend ID: " <<
pluginBackend->capture_api_->v0.id << " vs " << (int)id_ << " (expected)");
return;
}
}
if (pluginBackend->writer_api_)
{
if (pluginBackend->writer_api_->v0.id != id_)
{
CV_LOG_ERROR(NULL, "Video I/O: plugin '" << pluginBackend->writer_api_->api_header.api_description <<
"': unexpected backend ID: " <<
pluginBackend->writer_api_->v0.id << " vs " << (int)id_ << " (expected)");
return;
}
}
if (pluginBackend->plugin_api_)
{
if (pluginBackend->plugin_api_->v0.captureAPI != id_)
{
CV_LOG_ERROR(NULL, "Video I/O: plugin '" << pluginBackend->plugin_api_->api_header.api_description <<
"': unexpected backend ID: " <<
pluginBackend->plugin_api_->v0.captureAPI << " vs " << (int)id_ << " (expected)");
return;
}
}
if (pluginBackend->capture_api_ == NULL && pluginBackend->writer_api_ == NULL
&& pluginBackend->plugin_api_ == NULL)
{
CV_LOG_ERROR(NULL, "Video I/O: no compatible plugin API for backend ID: " << (int)id_);
return;
}
backend = pluginBackend;
return;
}
catch (...)
{
CV_LOG_WARNING(NULL, "Video I/O: exception during plugin initialization: " << toPrintablePath(plugin) << ". SKIP");
}
}
}
//==================================================================================================
class PluginCapture : public cv::IVideoCapture
{
const OpenCV_VideoIO_Capture_Plugin_API* plugin_api_;
CvPluginCapture capture_;
public:
static
Ptr<PluginCapture> create(const OpenCV_VideoIO_Capture_Plugin_API* plugin_api,
const std::string &filename, int camera, const VideoCaptureParameters& params)
{
CV_Assert(plugin_api);
CV_Assert(plugin_api->v0.Capture_release);
CvPluginCapture capture = NULL;
if (plugin_api->api_header.api_version >= 1 && plugin_api->v1.Capture_open_with_params)
{
std::vector<int> vint_params = params.getIntVector();
int* c_params = vint_params.data();
unsigned n_params = (unsigned)(vint_params.size() / 2);
if (CV_ERROR_OK == plugin_api->v1.Capture_open_with_params(
filename.empty() ? 0 : filename.c_str(), camera, c_params, n_params, &capture))
{
CV_Assert(capture);
return makePtr<PluginCapture>(plugin_api, capture);
}
}
else if (plugin_api->v0.Capture_open)
{
if (CV_ERROR_OK == plugin_api->v0.Capture_open(filename.empty() ? 0 : filename.c_str(), camera, &capture))
{
CV_Assert(capture);
Ptr<PluginCapture> cap = makePtr<PluginCapture>(plugin_api, capture);
if (cap && !params.empty())
{
applyParametersFallback(cap, params);
}
return cap;
}
}
return Ptr<PluginCapture>();
}
PluginCapture(const OpenCV_VideoIO_Capture_Plugin_API* plugin_api, CvPluginCapture capture)
: plugin_api_(plugin_api), capture_(capture)
{
CV_Assert(plugin_api_); CV_Assert(capture_);
}
~PluginCapture()
{
CV_DbgAssert(plugin_api_->v0.Capture_release);
if (CV_ERROR_OK != plugin_api_->v0.Capture_release(capture_))
CV_LOG_ERROR(NULL, "Video I/O: Can't release capture by plugin '" << plugin_api_->api_header.api_description << "'");
capture_ = NULL;
}
double getProperty(int prop) const CV_OVERRIDE
{
double val = -1;
if (plugin_api_->v0.Capture_getProperty)
if (CV_ERROR_OK != plugin_api_->v0.Capture_getProperty(capture_, prop, &val))
val = -1;
return val;
}
bool setProperty(int prop, double val) CV_OVERRIDE
{
if (plugin_api_->v0.Capture_setProperty)
if (CV_ERROR_OK == plugin_api_->v0.Capture_setProperty(capture_, prop, val))
return true;
return false;
}
bool grabFrame() CV_OVERRIDE
{
if (plugin_api_->v0.Capture_grab)
if (CV_ERROR_OK == plugin_api_->v0.Capture_grab(capture_))
return true;
return false;
}
static CvResult CV_API_CALL retrieve_callback(int stream_idx, const unsigned char* data, int step, int width, int height, int type, void* userdata)
{
CV_UNUSED(stream_idx);
cv::_OutputArray* dst = static_cast<cv::_OutputArray*>(userdata);
if (!dst)
return CV_ERROR_FAIL;
cv::Mat(cv::Size(width, height), type, (void*)data, step).copyTo(*dst);
return CV_ERROR_OK;
}
bool retrieveFrame(int idx, cv::OutputArray img) CV_OVERRIDE
{
bool res = false;
if (plugin_api_->v0.Capture_retreive)
if (CV_ERROR_OK == plugin_api_->v0.Capture_retreive(capture_, idx, retrieve_callback, (cv::_OutputArray*)&img))
res = true;
return res;
}
bool isOpened() const CV_OVERRIDE
{
return capture_ != NULL; // TODO always true
}
int getCaptureDomain() CV_OVERRIDE
{
return plugin_api_->v0.id;
}
};
//==================================================================================================
class PluginWriter : public cv::IVideoWriter
{
const OpenCV_VideoIO_Writer_Plugin_API* plugin_api_;
CvPluginWriter writer_;
public:
static
Ptr<PluginWriter> create(const OpenCV_VideoIO_Writer_Plugin_API* plugin_api,
const std::string& filename, int fourcc, double fps, const cv::Size& sz,
const VideoWriterParameters& params)
{
CV_Assert(plugin_api);
CV_Assert(plugin_api->v0.Writer_release);
CV_Assert(!filename.empty());
CvPluginWriter writer = NULL;
if (plugin_api->api_header.api_version >= 1 && plugin_api->v1.Writer_open_with_params)
{
std::vector<int> vint_params = params.getIntVector();
int* c_params = &vint_params[0];
unsigned n_params = (unsigned)(vint_params.size() / 2);
if (CV_ERROR_OK == plugin_api->v1.Writer_open_with_params(filename.c_str(), fourcc, fps, sz.width, sz.height, c_params, n_params, &writer))
{
CV_Assert(writer);
return makePtr<PluginWriter>(plugin_api, writer);
}
}
else if (plugin_api->v0.Writer_open)
{
const bool isColor = params.get(VIDEOWRITER_PROP_IS_COLOR, true);
const int depth = params.get(VIDEOWRITER_PROP_DEPTH, CV_8U);
if (depth != CV_8U)
{
CV_LOG_WARNING(NULL, "Video I/O plugin doesn't support (due to lower API level) creation of VideoWriter with depth != CV_8U");
return Ptr<PluginWriter>();
}
if (params.warnUnusedParameters())
{
CV_LOG_ERROR(NULL, "VIDEOIO: unsupported parameters in VideoWriter, see logger INFO channel for details");
return Ptr<PluginWriter>();
}
if (CV_ERROR_OK == plugin_api->v0.Writer_open(filename.c_str(), fourcc, fps, sz.width, sz.height, isColor, &writer))
{
CV_Assert(writer);
return makePtr<PluginWriter>(plugin_api, writer);
}
}
return Ptr<PluginWriter>();
}
PluginWriter(const OpenCV_VideoIO_Writer_Plugin_API* plugin_api, CvPluginWriter writer)
: plugin_api_(plugin_api), writer_(writer)
{
CV_Assert(plugin_api_); CV_Assert(writer_);
}
~PluginWriter()
{
CV_DbgAssert(plugin_api_->v0.Writer_release);
if (CV_ERROR_OK != plugin_api_->v0.Writer_release(writer_))
CV_LOG_ERROR(NULL, "Video I/O: Can't release writer by plugin '" << plugin_api_->api_header.api_description << "'");
writer_ = NULL;
}
double getProperty(int prop) const CV_OVERRIDE
{
double val = -1;
if (plugin_api_->v0.Writer_getProperty)
if (CV_ERROR_OK != plugin_api_->v0.Writer_getProperty(writer_, prop, &val))
val = -1;
return val;
}
bool setProperty(int prop, double val) CV_OVERRIDE
{
if (plugin_api_->v0.Writer_setProperty)
if (CV_ERROR_OK == plugin_api_->v0.Writer_setProperty(writer_, prop, val))
return true;
return false;
}
bool isOpened() const CV_OVERRIDE
{
return writer_ != NULL; // TODO always true
}
void write(cv::InputArray arr) CV_OVERRIDE
{
cv::Mat img = arr.getMat();
CV_DbgAssert(writer_);
CV_Assert(plugin_api_->v0.Writer_write);
if (CV_ERROR_OK != plugin_api_->v0.Writer_write(writer_, img.data, (int)img.step[0], img.cols, img.rows, img.channels()))
{
CV_LOG_DEBUG(NULL, "Video I/O: Can't write frame by plugin '" << plugin_api_->api_header.api_description << "'");
}
// TODO return bool result?
}
int getCaptureDomain() const CV_OVERRIDE
{
return plugin_api_->v0.id;
}
};
Ptr<IVideoCapture> PluginBackend::createCapture(int camera, const VideoCaptureParameters& params) const
{
try
{
if (capture_api_)
return PluginCapture::create(capture_api_, std::string(), camera, params); //.staticCast<IVideoCapture>();
if (plugin_api_)
{
Ptr<IVideoCapture> cap = legacy::PluginCapture::create(plugin_api_, std::string(), camera); //.staticCast<IVideoCapture>();
if (cap && !params.empty())
{
applyParametersFallback(cap, params);
}
return cap;
}
}
catch (...)
{
CV_LOG_DEBUG(NULL, "Video I/O: can't create camera capture: " << camera);
throw;
}
return Ptr<IVideoCapture>();
}
Ptr<IVideoCapture> PluginBackend::createCapture(const std::string &filename, const VideoCaptureParameters& params) const
{
try
{
if (capture_api_)
return PluginCapture::create(capture_api_, filename, 0, params); //.staticCast<IVideoCapture>();
if (plugin_api_)
{
Ptr<IVideoCapture> cap = legacy::PluginCapture::create(plugin_api_, filename, 0); //.staticCast<IVideoCapture>();
if (cap && !params.empty())
{
applyParametersFallback(cap, params);
}
return cap;
}
}
catch (...)
{
CV_LOG_DEBUG(NULL, "Video I/O: can't open file capture: " << filename);
throw;
}
return Ptr<IVideoCapture>();
}
Ptr<IVideoWriter> PluginBackend::createWriter(const std::string& filename, int fourcc, double fps,
const cv::Size& sz, const VideoWriterParameters& params) const
{
try
{
if (writer_api_)
return PluginWriter::create(writer_api_, filename, fourcc, fps, sz, params); //.staticCast<IVideoWriter>();
if (plugin_api_)
return legacy::PluginWriter::create(plugin_api_, filename, fourcc, fps, sz, params); //.staticCast<IVideoWriter>();
}
catch (...)
{
CV_LOG_DEBUG(NULL, "Video I/O: can't open writer: " << filename);
}
return Ptr<IVideoWriter>();
}
#endif // OPENCV_HAVE_FILESYSTEM_SUPPORT && defined(ENABLE_PLUGINS)
} // namespace
Ptr<IBackendFactory> createPluginBackendFactory(VideoCaptureAPIs id, const char* baseName)
{
#if OPENCV_HAVE_FILESYSTEM_SUPPORT && defined(ENABLE_PLUGINS)
return makePtr<impl::PluginBackendFactory>(id, baseName); //.staticCast<IBackendFactory>();
#else
CV_UNUSED(id);
CV_UNUSED(baseName);
return Ptr<IBackendFactory>();
#endif
}
std::string getCapturePluginVersion(
const Ptr<IBackendFactory>& backend_factory,
CV_OUT int& version_ABI,
CV_OUT int& version_API
)
{
#if OPENCV_HAVE_FILESYSTEM_SUPPORT && defined(ENABLE_PLUGINS)
using namespace impl;
CV_Assert(backend_factory);
PluginBackendFactory* plugin_backend_factory = dynamic_cast<PluginBackendFactory*>(backend_factory.get());
CV_Assert(plugin_backend_factory);
return plugin_backend_factory->getCapturePluginVersion(version_ABI, version_API);
#else
CV_UNUSED(backend_factory);
CV_UNUSED(version_ABI);
CV_UNUSED(version_API);
CV_Error(Error::StsBadFunc, "Plugins are not available in this build");
#endif
}
std::string getWriterPluginVersion(
const Ptr<IBackendFactory>& backend_factory,
CV_OUT int& version_ABI,
CV_OUT int& version_API
)
{
#if OPENCV_HAVE_FILESYSTEM_SUPPORT && defined(ENABLE_PLUGINS)
using namespace impl;
CV_Assert(backend_factory);
PluginBackendFactory* plugin_backend_factory = dynamic_cast<PluginBackendFactory*>(backend_factory.get());
CV_Assert(plugin_backend_factory);
return plugin_backend_factory->getWriterPluginVersion(version_ABI, version_API);
#else
CV_UNUSED(backend_factory);
CV_UNUSED(version_ABI);
CV_UNUSED(version_API);
CV_Error(Error::StsBadFunc, "Plugins are not available in this build");
#endif
}
} // namespace
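These two free functions are the public introspection entry points. A minimal usage sketch, assuming a plugin-enabled build and an available FFmpeg plugin:

int version_ABI = 0, version_API = 0;
Ptr<IBackendFactory> factory = createPluginBackendFactory(CAP_FFMPEG, "ffmpeg");
std::string info = getCapturePluginVersion(factory, version_ABI, version_API);
// Throws (StsNotImplemented) when the plugin can't be loaded.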

View File

@ -0,0 +1,199 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Not a standalone header.
//
namespace cv { namespace impl { namespace legacy {
//==================================================================================================
class PluginCapture : public cv::IVideoCapture
{
const OpenCV_VideoIO_Plugin_API_preview* plugin_api_;
CvPluginCapture capture_;
public:
static
Ptr<PluginCapture> create(const OpenCV_VideoIO_Plugin_API_preview* plugin_api,
const std::string &filename, int camera)
{
CV_Assert(plugin_api);
CvPluginCapture capture = NULL;
if (plugin_api->v0.Capture_open)
{
CV_Assert(plugin_api->v0.Capture_release);
if (CV_ERROR_OK == plugin_api->v0.Capture_open(filename.empty() ? 0 : filename.c_str(), camera, &capture))
{
CV_Assert(capture);
return makePtr<PluginCapture>(plugin_api, capture);
}
}
return Ptr<PluginCapture>();
}
PluginCapture(const OpenCV_VideoIO_Plugin_API_preview* plugin_api, CvPluginCapture capture)
: plugin_api_(plugin_api), capture_(capture)
{
CV_Assert(plugin_api_); CV_Assert(capture_);
}
~PluginCapture()
{
CV_DbgAssert(plugin_api_->v0.Capture_release);
if (CV_ERROR_OK != plugin_api_->v0.Capture_release(capture_))
CV_LOG_ERROR(NULL, "Video I/O: Can't release capture by plugin '" << plugin_api_->api_header.api_description << "'");
capture_ = NULL;
}
double getProperty(int prop) const CV_OVERRIDE
{
double val = -1;
if (plugin_api_->v0.Capture_getProperty)
if (CV_ERROR_OK != plugin_api_->v0.Capture_getProperty(capture_, prop, &val))
val = -1;
return val;
}
bool setProperty(int prop, double val) CV_OVERRIDE
{
if (plugin_api_->v0.Capture_setProperty)
if (CV_ERROR_OK == plugin_api_->v0.Capture_setProperty(capture_, prop, val))
return true;
return false;
}
bool grabFrame() CV_OVERRIDE
{
if (plugin_api_->v0.Capture_grab)
if (CV_ERROR_OK == plugin_api_->v0.Capture_grab(capture_))
return true;
return false;
}
static CvResult CV_API_CALL retrieve_callback(int stream_idx, const unsigned char* data, int step, int width, int height, int cn, void* userdata)
{
CV_UNUSED(stream_idx);
cv::_OutputArray* dst = static_cast<cv::_OutputArray*>(userdata);
if (!dst)
return CV_ERROR_FAIL;
cv::Mat(cv::Size(width, height), CV_MAKETYPE(CV_8U, cn), (void*)data, step).copyTo(*dst);
return CV_ERROR_OK;
}
bool retrieveFrame(int idx, cv::OutputArray img) CV_OVERRIDE
{
bool res = false;
if (plugin_api_->v0.Capture_retreive)
if (CV_ERROR_OK == plugin_api_->v0.Capture_retreive(capture_, idx, retrieve_callback, (cv::_OutputArray*)&img))
res = true;
return res;
}
bool isOpened() const CV_OVERRIDE
{
return capture_ != NULL; // TODO always true
}
int getCaptureDomain() CV_OVERRIDE
{
return plugin_api_->v0.captureAPI;
}
};
//==================================================================================================
class PluginWriter : public cv::IVideoWriter
{
const OpenCV_VideoIO_Plugin_API_preview* plugin_api_;
CvPluginWriter writer_;
public:
static
Ptr<PluginWriter> create(const OpenCV_VideoIO_Plugin_API_preview* plugin_api,
const std::string& filename, int fourcc, double fps, const cv::Size& sz,
const VideoWriterParameters& params)
{
CV_Assert(plugin_api);
CvPluginWriter writer = NULL;
if (plugin_api->api_header.api_version >= 1 && plugin_api->v1.Writer_open_with_params)
{
CV_Assert(plugin_api->v0.Writer_release);
CV_Assert(!filename.empty());
std::vector<int> vint_params = params.getIntVector();
int* c_params = &vint_params[0];
unsigned n_params = (unsigned)(vint_params.size() / 2);
if (CV_ERROR_OK == plugin_api->v1.Writer_open_with_params(filename.c_str(), fourcc, fps, sz.width, sz.height, c_params, n_params, &writer))
{
CV_Assert(writer);
return makePtr<PluginWriter>(plugin_api, writer);
}
}
else if (plugin_api->v0.Writer_open)
{
CV_Assert(plugin_api->v0.Writer_release);
CV_Assert(!filename.empty());
const bool isColor = params.get(VIDEOWRITER_PROP_IS_COLOR, true);
const int depth = params.get(VIDEOWRITER_PROP_DEPTH, CV_8U);
if (depth != CV_8U)
{
CV_LOG_WARNING(NULL, "Video I/O plugin doesn't support (due to lower API level) creation of VideoWriter with depth != CV_8U");
return Ptr<PluginWriter>();
}
if (CV_ERROR_OK == plugin_api->v0.Writer_open(filename.c_str(), fourcc, fps, sz.width, sz.height, isColor, &writer))
{
CV_Assert(writer);
return makePtr<PluginWriter>(plugin_api, writer);
}
}
return Ptr<PluginWriter>();
}
PluginWriter(const OpenCV_VideoIO_Plugin_API_preview* plugin_api, CvPluginWriter writer)
: plugin_api_(plugin_api), writer_(writer)
{
CV_Assert(plugin_api_); CV_Assert(writer_);
}
~PluginWriter()
{
CV_DbgAssert(plugin_api_->v0.Writer_release);
if (CV_ERROR_OK != plugin_api_->v0.Writer_release(writer_))
CV_LOG_ERROR(NULL, "Video I/O: Can't release writer by plugin '" << plugin_api_->api_header.api_description << "'");
writer_ = NULL;
}
double getProperty(int prop) const CV_OVERRIDE
{
double val = -1;
if (plugin_api_->v0.Writer_getProperty)
if (CV_ERROR_OK != plugin_api_->v0.Writer_getProperty(writer_, prop, &val))
val = -1;
return val;
}
bool setProperty(int prop, double val) CV_OVERRIDE
{
if (plugin_api_->v0.Writer_setProperty)
if (CV_ERROR_OK == plugin_api_->v0.Writer_setProperty(writer_, prop, val))
return true;
return false;
}
bool isOpened() const CV_OVERRIDE
{
return writer_ != NULL; // TODO always true
}
void write(cv::InputArray arr) CV_OVERRIDE
{
cv::Mat img = arr.getMat();
CV_DbgAssert(writer_);
CV_Assert(plugin_api_->v0.Writer_write);
if (CV_ERROR_OK != plugin_api_->v0.Writer_write(writer_, img.data, (int)img.step[0], img.cols, img.rows, img.channels()))
{
CV_LOG_DEBUG(NULL, "Video I/O: Can't write frame by plugin '" << plugin_api_->api_header.api_description << "'");
}
// TODO return bool result?
}
int getCaptureDomain() const CV_OVERRIDE
{
return plugin_api_->v0.captureAPI;
}
};
}}} // namespace

View File

@ -0,0 +1,183 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "precomp.hpp"
#include "backend.hpp"
namespace cv {
void applyParametersFallback(const Ptr<IVideoCapture>& cap, const VideoCaptureParameters& params)
{
std::vector<int> props = params.getUnused();
CV_LOG_INFO(NULL, "VIDEOIO: Backend '" << videoio_registry::getBackendName((VideoCaptureAPIs)cap->getCaptureDomain()) <<
"' implementation doesn't support parameters in .open(). Applying " <<
props.size() << " properties through .setProperty()");
for (int prop : props)
{
double value = params.get<double>(prop, -1);
CV_LOG_INFO(NULL, "VIDEOIO: apply parameter: [" << prop << "]=" <<
cv::format("%g / %lld / 0x%016llx", value, (long long)value, (long long)value));
if (!cap->setProperty(prop, value))
{
if (prop != CAP_PROP_HW_ACCELERATION && prop != CAP_PROP_HW_DEVICE) { // optional parameters
CV_Error_(cv::Error::StsNotImplemented, ("VIDEOIO: Failed to apply invalid or unsupported parameter: [%d]=%g / %lld / 0x%08llx", prop, value, (long long)value, (long long)value));
}
}
}
// NB: there is no dedicated "commit parameters" event; implementations are expected to commit after each property automatically
}
// Legacy API. Modern API with parameters is below
class StaticBackend: public IBackend
{
public:
FN_createCaptureFile fn_createCaptureFile_;
FN_createCaptureCamera fn_createCaptureCamera_;
FN_createWriter fn_createWriter_;
StaticBackend(FN_createCaptureFile fn_createCaptureFile, FN_createCaptureCamera fn_createCaptureCamera, FN_createWriter fn_createWriter)
: fn_createCaptureFile_(fn_createCaptureFile), fn_createCaptureCamera_(fn_createCaptureCamera), fn_createWriter_(fn_createWriter)
{
// nothing
}
~StaticBackend() CV_OVERRIDE {}
Ptr<IVideoCapture> createCapture(int camera, const VideoCaptureParameters& params) const CV_OVERRIDE
{
if (fn_createCaptureCamera_)
{
Ptr<IVideoCapture> cap = fn_createCaptureCamera_(camera);
if (cap && !params.empty())
{
applyParametersFallback(cap, params);
}
return cap;
}
return Ptr<IVideoCapture>();
}
Ptr<IVideoCapture> createCapture(const std::string &filename, const VideoCaptureParameters& params) const CV_OVERRIDE
{
if (fn_createCaptureFile_)
{
Ptr<IVideoCapture> cap = fn_createCaptureFile_(filename);
if (cap && !params.empty())
{
applyParametersFallback(cap, params);
}
return cap;
}
return Ptr<IVideoCapture>();
}
Ptr<IVideoWriter> createWriter(const std::string& filename, int fourcc, double fps,
const cv::Size& sz, const VideoWriterParameters& params) const CV_OVERRIDE
{
if (fn_createWriter_)
return fn_createWriter_(filename, fourcc, fps, sz, params);
return Ptr<IVideoWriter>();
}
}; // StaticBackend
class StaticBackendFactory : public IBackendFactory
{
protected:
Ptr<StaticBackend> backend;
public:
StaticBackendFactory(FN_createCaptureFile createCaptureFile, FN_createCaptureCamera createCaptureCamera, FN_createWriter createWriter)
: backend(makePtr<StaticBackend>(createCaptureFile, createCaptureCamera, createWriter))
{
// nothing
}
~StaticBackendFactory() CV_OVERRIDE {}
Ptr<IBackend> getBackend() const CV_OVERRIDE
{
return backend.staticCast<IBackend>();
}
bool isBuiltIn() const CV_OVERRIDE { return true; }
};
Ptr<IBackendFactory> createBackendFactory(FN_createCaptureFile createCaptureFile,
FN_createCaptureCamera createCaptureCamera,
FN_createWriter createWriter)
{
return makePtr<StaticBackendFactory>(createCaptureFile, createCaptureCamera, createWriter).staticCast<IBackendFactory>();
}
class StaticBackendWithParams: public IBackend
{
public:
FN_createCaptureFileWithParams fn_createCaptureFile_;
FN_createCaptureCameraWithParams fn_createCaptureCamera_;
FN_createWriter fn_createWriter_;
StaticBackendWithParams(FN_createCaptureFileWithParams fn_createCaptureFile, FN_createCaptureCameraWithParams fn_createCaptureCamera, FN_createWriter fn_createWriter)
: fn_createCaptureFile_(fn_createCaptureFile), fn_createCaptureCamera_(fn_createCaptureCamera), fn_createWriter_(fn_createWriter)
{
// nothing
}
~StaticBackendWithParams() CV_OVERRIDE {}
Ptr<IVideoCapture> createCapture(int camera, const VideoCaptureParameters& params) const CV_OVERRIDE
{
if (fn_createCaptureCamera_)
return fn_createCaptureCamera_(camera, params);
return Ptr<IVideoCapture>();
}
Ptr<IVideoCapture> createCapture(const std::string &filename, const VideoCaptureParameters& params) const CV_OVERRIDE
{
if (fn_createCaptureFile_)
return fn_createCaptureFile_(filename, params);
return Ptr<IVideoCapture>();
}
Ptr<IVideoWriter> createWriter(const std::string& filename, int fourcc, double fps,
const cv::Size& sz, const VideoWriterParameters& params) const CV_OVERRIDE
{
if (fn_createWriter_)
return fn_createWriter_(filename, fourcc, fps, sz, params);
return Ptr<IVideoWriter>();
}
}; // StaticBackendWithParams
class StaticBackendWithParamsFactory : public IBackendFactory
{
protected:
Ptr<StaticBackendWithParams> backend;
public:
StaticBackendWithParamsFactory(FN_createCaptureFileWithParams createCaptureFile, FN_createCaptureCameraWithParams createCaptureCamera, FN_createWriter createWriter)
: backend(makePtr<StaticBackendWithParams>(createCaptureFile, createCaptureCamera, createWriter))
{
// nothing
}
~StaticBackendWithParamsFactory() CV_OVERRIDE {}
Ptr<IBackend> getBackend() const CV_OVERRIDE
{
return backend.staticCast<IBackend>();
}
bool isBuiltIn() const CV_OVERRIDE { return true; }
};
Ptr<IBackendFactory> createBackendFactory(FN_createCaptureFileWithParams createCaptureFile,
FN_createCaptureCameraWithParams createCaptureCamera,
FN_createWriter createWriter)
{
return makePtr<StaticBackendWithParamsFactory>(createCaptureFile, createCaptureCamera, createWriter).staticCast<IBackendFactory>();
}
} // namespace
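End to end, the fallback means parameters passed to open() still take effect with legacy backends. A hedged sketch:

std::vector<int> params = {
    CAP_PROP_FRAME_WIDTH, 1280,
    CAP_PROP_FRAME_HEIGHT, 720
};
VideoCapture cap(0, CAP_ANY, params);
// With a StaticBackend the pairs above are replayed one by one through
// setProperty() by applyParametersFallback(); a rejected non-optional
// property raises StsNotImplemented.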

View File

@ -0,0 +1,701 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/videoio/registry.hpp"
#include "videoio_registry.hpp"
namespace cv {
static bool param_VIDEOIO_DEBUG = utils::getConfigurationParameterBool("OPENCV_VIDEOIO_DEBUG", false);
static bool param_VIDEOCAPTURE_DEBUG = utils::getConfigurationParameterBool("OPENCV_VIDEOCAPTURE_DEBUG", false);
static bool param_VIDEOWRITER_DEBUG = utils::getConfigurationParameterBool("OPENCV_VIDEOWRITER_DEBUG", false);
#define CV_CAPTURE_LOG_DEBUG(tag, ...) \
if (param_VIDEOIO_DEBUG || param_VIDEOCAPTURE_DEBUG) \
{ \
CV_LOG_WARNING(nullptr, __VA_ARGS__); \
}
#define CV_WRITER_LOG_DEBUG(tag, ...) \
if (param_VIDEOIO_DEBUG || param_VIDEOWRITER_DEBUG) \
{ \
CV_LOG_WARNING(nullptr, __VA_ARGS__) \
}
void DefaultDeleter<CvCapture>::operator ()(CvCapture* obj) const { cvReleaseCapture(&obj); }
void DefaultDeleter<CvVideoWriter>::operator ()(CvVideoWriter* obj) const { cvReleaseVideoWriter(&obj); }
VideoCapture::VideoCapture() : throwOnFail(false)
{}
VideoCapture::VideoCapture(const String& filename, int apiPreference) : throwOnFail(false)
{
CV_TRACE_FUNCTION();
open(filename, apiPreference);
}
VideoCapture::VideoCapture(const String& filename, int apiPreference, const std::vector<int>& params)
: throwOnFail(false)
{
CV_TRACE_FUNCTION();
open(filename, apiPreference, params);
}
VideoCapture::VideoCapture(int index, int apiPreference) : throwOnFail(false)
{
CV_TRACE_FUNCTION();
open(index, apiPreference);
}
VideoCapture::VideoCapture(int index, int apiPreference, const std::vector<int>& params)
: throwOnFail(false)
{
CV_TRACE_FUNCTION();
open(index, apiPreference, params);
}
VideoCapture::~VideoCapture()
{
CV_TRACE_FUNCTION();
icap.release();
}
bool VideoCapture::open(const String& filename, int apiPreference)
{
return open(filename, apiPreference, std::vector<int>());
}
bool VideoCapture::open(const String& filename, int apiPreference, const std::vector<int>& params)
{
CV_INSTRUMENT_REGION();
if (isOpened())
{
release();
}
const VideoCaptureParameters parameters(params);
const std::vector<VideoBackendInfo> backends = cv::videoio_registry::getAvailableBackends_CaptureByFilename();
for (size_t i = 0; i < backends.size(); i++)
{
const VideoBackendInfo& info = backends[i];
if (apiPreference == CAP_ANY || apiPreference == info.id)
{
if (!info.backendFactory)
{
CV_LOG_DEBUG(NULL, "VIDEOIO(" << info.name << "): factory is not available (plugins require filesystem support)");
continue;
}
CV_CAPTURE_LOG_DEBUG(NULL,
cv::format("VIDEOIO(%s): trying capture filename='%s' ...",
info.name, filename.c_str()));
CV_Assert(!info.backendFactory.empty());
const Ptr<IBackend> backend = info.backendFactory->getBackend();
if (!backend.empty())
{
try
{
icap = backend->createCapture(filename, parameters);
if (!icap.empty())
{
CV_CAPTURE_LOG_DEBUG(NULL,
cv::format("VIDEOIO(%s): created, isOpened=%d",
info.name, icap->isOpened()));
if (icap->isOpened())
{
return true;
}
icap.release();
}
else
{
CV_CAPTURE_LOG_DEBUG(NULL,
cv::format("VIDEOIO(%s): can't create capture",
info.name));
}
}
catch (const cv::Exception& e)
{
if (throwOnFail && apiPreference != CAP_ANY)
{
throw;
}
CV_LOG_ERROR(NULL,
cv::format("VIDEOIO(%s): raised OpenCV exception:\n\n%s\n",
info.name, e.what()));
}
catch (const std::exception& e)
{
if (throwOnFail && apiPreference != CAP_ANY)
{
throw;
}
CV_LOG_ERROR(NULL, cv::format("VIDEOIO(%s): raised C++ exception:\n\n%s\n",
info.name, e.what()));
}
catch (...)
{
if (throwOnFail && apiPreference != CAP_ANY)
{
throw;
}
CV_LOG_ERROR(NULL,
cv::format("VIDEOIO(%s): raised unknown C++ exception!\n\n",
info.name));
}
}
else
{
CV_CAPTURE_LOG_DEBUG(NULL,
cv::format("VIDEOIO(%s): backend is not available "
"(plugin is missing, or can't be loaded due "
"dependencies or it is not compatible)",
info.name));
}
}
}
if (throwOnFail)
{
CV_Error_(Error::StsError, ("could not open '%s'", filename.c_str()));
}
return false;
}
bool VideoCapture::open(int cameraNum, int apiPreference)
{
return open(cameraNum, apiPreference, std::vector<int>());
}
bool VideoCapture::open(int cameraNum, int apiPreference, const std::vector<int>& params)
{
CV_TRACE_FUNCTION();
if (isOpened())
{
release();
}
if (apiPreference == CAP_ANY)
{
// interpret preferred interface (0 = autodetect)
int backendID = (cameraNum / 100) * 100;
if (backendID)
{
cameraNum %= 100;
apiPreference = backendID;
}
}
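// Illustration of the legacy encoding above: index 702 selects backend 700
// (CAP_DSHOW) with camera 2; plain indices below 100 keep CAP_ANY autodetection.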
const VideoCaptureParameters parameters(params);
const std::vector<VideoBackendInfo> backends = cv::videoio_registry::getAvailableBackends_CaptureByIndex();
for (size_t i = 0; i < backends.size(); i++)
{
const VideoBackendInfo& info = backends[i];
if (apiPreference == CAP_ANY || apiPreference == info.id)
{
if (!info.backendFactory)
{
CV_LOG_DEBUG(NULL, "VIDEOIO(" << info.name << "): factory is not available (plugins require filesystem support)");
continue;
}
CV_CAPTURE_LOG_DEBUG(NULL,
cv::format("VIDEOIO(%s): trying capture cameraNum=%d ...",
info.name, cameraNum));
CV_Assert(!info.backendFactory.empty());
const Ptr<IBackend> backend = info.backendFactory->getBackend();
if (!backend.empty())
{
try
{
icap = backend->createCapture(cameraNum, parameters);
if (!icap.empty())
{
CV_CAPTURE_LOG_DEBUG(NULL,
cv::format("VIDEOIO(%s): created, isOpened=%d",
info.name, icap->isOpened()));
if (icap->isOpened())
{
return true;
}
icap.release();
}
else
{
CV_CAPTURE_LOG_DEBUG(NULL,
cv::format("VIDEOIO(%s): can't create capture",
info.name));
}
}
catch (const cv::Exception& e)
{
if (throwOnFail && apiPreference != CAP_ANY)
{
throw;
}
CV_LOG_ERROR(NULL,
cv::format("VIDEOIO(%s): raised OpenCV exception:\n\n%s\n",
info.name, e.what()));
}
catch (const std::exception& e)
{
if (throwOnFail && apiPreference != CAP_ANY)
{
throw;
}
CV_LOG_ERROR(NULL, cv::format("VIDEOIO(%s): raised C++ exception:\n\n%s\n",
info.name, e.what()));
}
catch (...)
{
if (throwOnFail && apiPreference != CAP_ANY)
{
throw;
}
CV_LOG_ERROR(NULL,
cv::format("VIDEOIO(%s): raised unknown C++ exception!\n\n",
info.name));
}
}
else
{
CV_CAPTURE_LOG_DEBUG(NULL,
cv::format("VIDEOIO(%s): backend is not available "
"(plugin is missing, or can't be loaded due "
"dependencies or it is not compatible)",
info.name));
}
}
}
if (throwOnFail)
{
CV_Error_(Error::StsError, ("could not open camera %d", cameraNum));
}
return false;
}
bool VideoCapture::isOpened() const
{
return !icap.empty() ? icap->isOpened() : false;
}
String VideoCapture::getBackendName() const
{
int api = 0;
if (icap)
{
api = icap->isOpened() ? icap->getCaptureDomain() : 0;
}
CV_Assert(api != 0);
return cv::videoio_registry::getBackendName(static_cast<VideoCaptureAPIs>(api));
}
void VideoCapture::release()
{
CV_TRACE_FUNCTION();
icap.release();
}
bool VideoCapture::grab()
{
CV_INSTRUMENT_REGION();
bool ret = !icap.empty() ? icap->grabFrame() : false;
if (!ret && throwOnFail)
{
CV_Error(Error::StsError, "");
}
return ret;
}
bool VideoCapture::retrieve(OutputArray image, int channel)
{
CV_INSTRUMENT_REGION();
bool ret = false;
if (!icap.empty())
{
ret = icap->retrieveFrame(channel, image);
}
if (!ret && throwOnFail)
{
CV_Error_(Error::StsError, ("could not retrieve channel %d", channel));
}
return ret;
}
bool VideoCapture::read(OutputArray image)
{
CV_INSTRUMENT_REGION();
if (grab())
{
retrieve(image);
} else {
image.release();
}
return !image.empty();
}
VideoCapture& VideoCapture::operator >> (Mat& image)
{
#ifdef WINRT_VIDEO
// FIXIT: grab()/retrieve() methods should work too
if (grab())
{
if (retrieve(image))
{
std::lock_guard<std::mutex> lock(VideoioBridge::getInstance().inputBufferMutex);
VideoioBridge& bridge = VideoioBridge::getInstance();
// double buffering
bridge.swapInputBuffers();
auto p = bridge.frontInputPtr;
bridge.bIsFrameNew = false;
// needed here because setting Mat 'image' is not allowed by OutputArray in read()
Mat m(bridge.getHeight(), bridge.getWidth(), CV_8UC3, p);
image = m;
}
}
#else
read(image);
#endif
return *this;
}
VideoCapture& VideoCapture::operator >> (UMat& image)
{
CV_INSTRUMENT_REGION();
read(image);
return *this;
}
bool VideoCapture::set(int propId, double value)
{
CV_CheckNE(propId, (int)CAP_PROP_BACKEND, "Can't set read-only property");
bool ret = !icap.empty() ? icap->setProperty(propId, value) : false;
if (!ret && throwOnFail)
{
CV_Error_(Error::StsError, ("could not set prop %d = %f", propId, value));
}
return ret;
}
double VideoCapture::get(int propId) const
{
if (propId == CAP_PROP_BACKEND)
{
int api = 0;
if (icap && icap->isOpened())
{
api = icap->getCaptureDomain();
}
if (api <= 0)
{
return -1.0;
}
return static_cast<double>(api);
}
return !icap.empty() ? icap->getProperty(propId) : 0;
}
bool VideoCapture::waitAny(const std::vector<VideoCapture>& streams,
CV_OUT std::vector<int>& readyIndex, int64 timeoutNs)
{
CV_Assert(!streams.empty());
VideoCaptureAPIs backend = (VideoCaptureAPIs)streams[0].icap->getCaptureDomain();
for (size_t i = 1; i < streams.size(); ++i)
{
VideoCaptureAPIs backend_i = (VideoCaptureAPIs)streams[i].icap->getCaptureDomain();
CV_CheckEQ((int)backend, (int)backend_i, "All captures must have the same backend");
}
#if (defined HAVE_CAMV4L2 || defined HAVE_VIDEOIO) // see cap_v4l.cpp guard
if (backend == CAP_V4L2)
{
return VideoCapture_V4L_waitAny(streams, readyIndex, timeoutNs);
}
#else
CV_UNUSED(readyIndex);
CV_UNUSED(timeoutNs);
#endif
CV_Error(Error::StsNotImplemented, "VideoCapture::waitAny() is supported by V4L backend only");
}
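// Usage sketch (currently V4L2-only; device indices are hypothetical):
//   std::vector<VideoCapture> cams;
//   cams.emplace_back(0, CAP_V4L2);
//   cams.emplace_back(1, CAP_V4L2);
//   std::vector<int> ready;
//   if (VideoCapture::waitAny(cams, ready, 100 * 1000000 /* 100 ms in ns */))
//       for (int i : ready) { Mat f; cams[i].retrieve(f); }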
//=================================================================================================
VideoWriter::VideoWriter()
{}
VideoWriter::VideoWriter(const String& filename, int _fourcc, double fps, Size frameSize,
bool isColor)
{
open(filename, _fourcc, fps, frameSize, isColor);
}
VideoWriter::VideoWriter(const String& filename, int apiPreference, int _fourcc, double fps,
Size frameSize, bool isColor)
{
open(filename, apiPreference, _fourcc, fps, frameSize, isColor);
}
VideoWriter::VideoWriter(const cv::String& filename, int fourcc, double fps,
const cv::Size& frameSize, const std::vector<int>& params)
{
open(filename, fourcc, fps, frameSize, params);
}
VideoWriter::VideoWriter(const cv::String& filename, int apiPreference, int fourcc, double fps,
const cv::Size& frameSize, const std::vector<int>& params)
{
open(filename, apiPreference, fourcc, fps, frameSize, params);
}
void VideoWriter::release()
{
iwriter.release();
}
VideoWriter::~VideoWriter()
{
release();
}
bool VideoWriter::open(const String& filename, int _fourcc, double fps, Size frameSize,
bool isColor)
{
return open(filename, CAP_ANY, _fourcc, fps, frameSize,
std::vector<int> { VIDEOWRITER_PROP_IS_COLOR, static_cast<int>(isColor) });
}
bool VideoWriter::open(const String& filename, int apiPreference, int _fourcc, double fps,
Size frameSize, bool isColor)
{
return open(filename, apiPreference, _fourcc, fps, frameSize,
std::vector<int> { VIDEOWRITER_PROP_IS_COLOR, static_cast<int>(isColor) });
}
bool VideoWriter::open(const String& filename, int fourcc, double fps, const Size& frameSize,
const std::vector<int>& params)
{
return open(filename, CAP_ANY, fourcc, fps, frameSize, params);
}
bool VideoWriter::open(const String& filename, int apiPreference, int fourcc, double fps,
const Size& frameSize, const std::vector<int>& params)
{
CV_INSTRUMENT_REGION();
if (isOpened())
{
release();
}
const VideoWriterParameters parameters(params);
for (const auto& info : videoio_registry::getAvailableBackends_Writer())
{
if (apiPreference == CAP_ANY || apiPreference == info.id)
{
CV_WRITER_LOG_DEBUG(NULL,
cv::format("VIDEOIO(%s): trying writer with filename='%s' "
"fourcc=0x%08x fps=%g sz=%dx%d isColor=%d...",
info.name, filename.c_str(), (unsigned)fourcc, fps,
frameSize.width, frameSize.height,
parameters.get(VIDEOWRITER_PROP_IS_COLOR, true)));
CV_Assert(!info.backendFactory.empty());
const Ptr<IBackend> backend = info.backendFactory->getBackend();
if (!backend.empty())
{
try
{
iwriter = backend->createWriter(filename, fourcc, fps, frameSize, parameters);
if (!iwriter.empty())
{
CV_WRITER_LOG_DEBUG(NULL,
cv::format("VIDEOIO(%s): created, isOpened=%d",
info.name, iwriter->isOpened()));
if (param_VIDEOIO_DEBUG || param_VIDEOWRITER_DEBUG)
{
for (int key: parameters.getUnused())
{
CV_LOG_WARNING(NULL,
cv::format("VIDEOIO(%s): parameter with key '%d' was unused",
info.name, key));
}
}
if (iwriter->isOpened())
{
return true;
}
iwriter.release();
}
else
{
CV_WRITER_LOG_DEBUG(NULL, cv::format("VIDEOIO(%s): can't create writer",
info.name));
}
}
catch (const cv::Exception& e)
{
CV_LOG_ERROR(NULL,
cv::format("VIDEOIO(%s): raised OpenCV exception:\n\n%s\n",
info.name, e.what()));
}
catch (const std::exception& e)
{
CV_LOG_ERROR(NULL, cv::format("VIDEOIO(%s): raised C++ exception:\n\n%s\n",
info.name, e.what()));
}
catch (...)
{
CV_LOG_ERROR(NULL,
cv::format("VIDEOIO(%s): raised unknown C++ exception!\n\n",
info.name));
}
}
else
{
CV_WRITER_LOG_DEBUG(NULL,
cv::format("VIDEOIO(%s): backend is not available "
"(plugin is missing, or can't be loaded due "
"dependencies or it is not compatible)",
info.name));
}
}
}
return false;
}
bool VideoWriter::isOpened() const
{
return !iwriter.empty();
}
bool VideoWriter::set(int propId, double value)
{
CV_CheckNE(propId, (int)CAP_PROP_BACKEND, "Can't set read-only property");
if (!iwriter.empty())
{
return iwriter->setProperty(propId, value);
}
return false;
}
double VideoWriter::get(int propId) const
{
if (propId == CAP_PROP_BACKEND)
{
int api = 0;
if (iwriter)
{
api = iwriter->getCaptureDomain();
}
return (api <= 0) ? -1. : static_cast<double>(api);
}
if (!iwriter.empty())
{
return iwriter->getProperty(propId);
}
return 0.;
}
String VideoWriter::getBackendName() const
{
int api = 0;
if (iwriter)
{
api = iwriter->getCaptureDomain();
}
CV_Assert(api != 0);
return cv::videoio_registry::getBackendName(static_cast<VideoCaptureAPIs>(api));
}
void VideoWriter::write(InputArray image)
{
CV_INSTRUMENT_REGION();
if (iwriter)
{
iwriter->write(image);
}
}
VideoWriter& VideoWriter::operator << (const Mat& image)
{
CV_INSTRUMENT_REGION();
write(image);
return *this;
}
VideoWriter& VideoWriter::operator << (const UMat& image)
{
CV_INSTRUMENT_REGION();
write(image);
return *this;
}
// FIXIT OpenCV 4.0: make inline
int VideoWriter::fourcc(char c1, char c2, char c3, char c4)
{
return (c1 & 255) + ((c2 & 255) << 8) + ((c3 & 255) << 16) + ((c4 & 255) << 24);
}
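// Example: fourcc('M','J','P','G') == 0x47504A4D; the four characters are
// packed little-endian, lowest byte first.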
} // namespace cv

View File

@ -0,0 +1,770 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
// Contributed by Giles Payne
#include "precomp.hpp"
#include <memory>
#include <condition_variable>
#include <mutex>
#include <thread>
#include <chrono>
#include <android/log.h>
#include <camera/NdkCameraManager.h>
#include <camera/NdkCameraError.h>
#include <camera/NdkCameraDevice.h>
#include <camera/NdkCameraMetadataTags.h>
#include <media/NdkImageReader.h>
using namespace cv;
#define TAG "NativeCamera"
#define LOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
#define LOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
#define MAX_BUF_COUNT 4
#define COLOR_FormatUnknown -1
#define COLOR_FormatYUV420Planar 19
#define COLOR_FormatYUV420SemiPlanar 21
#define FOURCC_BGR CV_FOURCC_MACRO('B','G','R','3')
#define FOURCC_RGB CV_FOURCC_MACRO('R','G','B','3')
#define FOURCC_GRAY CV_FOURCC_MACRO('G','R','E','Y')
#define FOURCC_NV21 CV_FOURCC_MACRO('N','V','2','1')
#define FOURCC_YV12 CV_FOURCC_MACRO('Y','V','1','2')
#define FOURCC_UNKNOWN 0xFFFFFFFF
template <typename T> class RangeValue {
public:
T min, max;
/**
* Return an absolute value computed from a relative one.
* @param percent relative value in percent (50 for 50%)
*/
T value(int percent) {
return static_cast<T>(min + (max - min) * percent / 100);
}
RangeValue() { min = max = static_cast<T>(0); }
bool Supported(void) const { return (min != max); }
};
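// Example: with min = 100 and max = 300, value(50) yields 200.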
static inline void deleter_ACameraManager(ACameraManager *cameraManager) {
ACameraManager_delete(cameraManager);
}
static inline void deleter_ACameraIdList(ACameraIdList *cameraIdList) {
ACameraManager_deleteCameraIdList(cameraIdList);
}
static inline void deleter_ACameraDevice(ACameraDevice *cameraDevice) {
ACameraDevice_close(cameraDevice);
}
static inline void deleter_ACameraMetadata(ACameraMetadata *cameraMetadata) {
ACameraMetadata_free(cameraMetadata);
}
static inline void deleter_AImageReader(AImageReader *imageReader) {
AImageReader_delete(imageReader);
}
static inline void deleter_ACaptureSessionOutputContainer(ACaptureSessionOutputContainer *outputContainer) {
ACaptureSessionOutputContainer_free(outputContainer);
}
static inline void deleter_ACameraCaptureSession(ACameraCaptureSession *captureSession) {
ACameraCaptureSession_close(captureSession);
}
static inline void deleter_AImage(AImage *image) {
AImage_delete(image);
}
static inline void deleter_ANativeWindow(ANativeWindow *nativeWindow) {
ANativeWindow_release(nativeWindow);
}
static inline void deleter_ACaptureSessionOutput(ACaptureSessionOutput *sessionOutput) {
ACaptureSessionOutput_free(sessionOutput);
}
static inline void deleter_ACameraOutputTarget(ACameraOutputTarget *outputTarget) {
ACameraOutputTarget_free(outputTarget);
}
static inline void deleter_ACaptureRequest(ACaptureRequest *captureRequest) {
ACaptureRequest_free(captureRequest);
}
/*
* CameraDevice callbacks
*/
static void OnDeviceDisconnect(void* /* ctx */, ACameraDevice* dev) {
std::string id(ACameraDevice_getId(dev));
LOGW("Device %s disconnected", id.c_str());
}
static void OnDeviceError(void* /* ctx */, ACameraDevice* dev, int err) {
std::string id(ACameraDevice_getId(dev));
LOGI("Camera Device Error: %#x, Device %s", err, id.c_str());
switch (err) {
case ERROR_CAMERA_IN_USE:
LOGI("Camera in use");
break;
case ERROR_CAMERA_SERVICE:
LOGI("Fatal Error occured in Camera Service");
break;
case ERROR_CAMERA_DEVICE:
LOGI("Fatal Error occured in Camera Device");
break;
case ERROR_CAMERA_DISABLED:
LOGI("Camera disabled");
break;
case ERROR_MAX_CAMERAS_IN_USE:
LOGI("System limit for maximum concurrent cameras used was exceeded");
break;
default:
LOGI("Unknown Camera Device Error: %#x", err);
}
}
enum class CaptureSessionState {
INITIALIZING, // session is being initialized
READY, // session is ready
ACTIVE, // session is busy
CLOSED // session was closed
};
void OnSessionClosed(void* context, ACameraCaptureSession* session);
void OnSessionReady(void* context, ACameraCaptureSession* session);
void OnSessionActive(void* context, ACameraCaptureSession* session);
void OnCaptureCompleted(void* context,
ACameraCaptureSession* session,
ACaptureRequest* request,
const ACameraMetadata* result);
void OnCaptureFailed(void* context,
ACameraCaptureSession* session,
ACaptureRequest* request,
ACameraCaptureFailure* failure);
#define CAPTURE_TIMEOUT_SECONDS 2
/**
 * Range of Camera Exposure Time:
 * The camera's reported capability range can be very wide, which may be
 * disruptive during preview. For this purpose, clamp to a range that
 * yields visible video on preview: 1000000ns ~ 250000000ns
 */
static const long kMinExposureTime = 1000000L;
static const long kMaxExposureTime = 250000000L;
class AndroidCameraCapture : public IVideoCapture
{
int cachedIndex;
std::shared_ptr<ACameraManager> cameraManager;
std::shared_ptr<ACameraDevice> cameraDevice;
std::shared_ptr<AImageReader> imageReader;
std::shared_ptr<ACaptureSessionOutputContainer> outputContainer;
std::shared_ptr<ACaptureSessionOutput> sessionOutput;
std::shared_ptr<ANativeWindow> nativeWindow;
std::shared_ptr<ACameraOutputTarget> outputTarget;
std::shared_ptr<ACaptureRequest> captureRequest;
std::shared_ptr<ACameraCaptureSession> captureSession;
CaptureSessionState sessionState = CaptureSessionState::INITIALIZING;
int32_t frameWidth = 0;
int32_t frameHeight = 0;
int32_t colorFormat;
std::vector<uint8_t> buffer;
bool sessionOutputAdded = false;
bool targetAdded = false;
// properties
uint32_t fourCC = FOURCC_UNKNOWN;
bool settingWidth = false;
bool settingHeight = false;
int desiredWidth = 640;
int desiredHeight = 480;
bool autoExposure = true;
int64_t exposureTime = 0L;
RangeValue<int64_t> exposureRange;
int32_t sensitivity = 0;
RangeValue<int32_t> sensitivityRange;
public:
// for synchronization with NDK capture callback
bool waitingCapture = false;
bool captureSuccess = false;
std::mutex mtx;
std::condition_variable condition;
public:
AndroidCameraCapture() {}
~AndroidCameraCapture() { cleanUp(); }
ACameraDevice_stateCallbacks* GetDeviceListener() {
static ACameraDevice_stateCallbacks cameraDeviceListener = {
.onDisconnected = ::OnDeviceDisconnect,
.onError = ::OnDeviceError,
};
return &cameraDeviceListener;
}
ACameraCaptureSession_stateCallbacks sessionListener;
ACameraCaptureSession_stateCallbacks* GetSessionListener() {
sessionListener = {
.context = this,
.onClosed = ::OnSessionClosed,
.onReady = ::OnSessionReady,
.onActive = ::OnSessionActive,
};
return &sessionListener;
}
ACameraCaptureSession_captureCallbacks captureListener;
ACameraCaptureSession_captureCallbacks* GetCaptureCallback() {
captureListener = {
.context = this,
.onCaptureStarted = nullptr,
.onCaptureProgressed = nullptr,
.onCaptureCompleted = ::OnCaptureCompleted,
.onCaptureFailed = ::OnCaptureFailed,
.onCaptureSequenceCompleted = nullptr,
.onCaptureSequenceAborted = nullptr,
.onCaptureBufferLost = nullptr,
};
return &captureListener;
}
void setSessionState(CaptureSessionState newSessionState) {
this->sessionState = newSessionState;
}
bool isOpened() const CV_OVERRIDE { return imageReader.get() != nullptr && captureSession.get() != nullptr; }
int getCaptureDomain() CV_OVERRIDE { return CAP_ANDROID; }
bool grabFrame() CV_OVERRIDE
{
AImage* img;
{
std::unique_lock<std::mutex> lock(mtx);
media_status_t mStatus = AImageReader_acquireLatestImage(imageReader.get(), &img);
if (mStatus != AMEDIA_OK) {
if (mStatus == AMEDIA_IMGREADER_NO_BUFFER_AVAILABLE) {
// this error is not fatal - we just need to wait for a buffer to become available
LOGW("No Buffer Available error occured - waiting for callback");
waitingCapture = true;
captureSuccess = false;
bool captured = condition.wait_for(lock, std::chrono::seconds(CAPTURE_TIMEOUT_SECONDS), [this]{ return captureSuccess; });
waitingCapture = false;
if (captured) {
mStatus = AImageReader_acquireLatestImage(imageReader.get(), &img);
if (mStatus != AMEDIA_OK) {
LOGE("Acquire image failed with error code: %d", mStatus);
return false;
}
} else {
LOGE("Capture failed or callback timed out");
return false;
}
} else {
LOGE("Acquire image failed with error code: %d", mStatus);
return false;
}
}
}
std::shared_ptr<AImage> image = std::shared_ptr<AImage>(img, deleter_AImage);
int32_t srcFormat = -1;
AImage_getFormat(image.get(), &srcFormat);
if (srcFormat != AIMAGE_FORMAT_YUV_420_888) {
LOGE("Incorrect image format");
return false;
}
int32_t srcPlanes = 0;
AImage_getNumberOfPlanes(image.get(), &srcPlanes);
if (srcPlanes != 3) {
LOGE("Incorrect number of planes in image data");
return false;
}
int32_t yStride, uvStride;
uint8_t *yPixel, *uPixel, *vPixel;
int32_t yLen, uLen, vLen;
int32_t uvPixelStride;
AImage_getPlaneRowStride(image.get(), 0, &yStride);
AImage_getPlaneRowStride(image.get(), 1, &uvStride);
AImage_getPlaneData(image.get(), 0, &yPixel, &yLen);
AImage_getPlaneData(image.get(), 1, &uPixel, &uLen);
AImage_getPlaneData(image.get(), 2, &vPixel, &vLen);
AImage_getPlanePixelStride(image.get(), 1, &uvPixelStride);
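// Distinguish the two YUV420 memory layouts: a UV pixel stride of 2 with
// V immediately following U indicates interleaved chroma (semi-planar, NV21);
// a stride of 1 with V starting a separate quarter-size plane after U
// indicates planar chroma (YV12-compatible).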
if ( (uvPixelStride == 2) && (vPixel == uPixel + 1) && (yLen == frameWidth * frameHeight) && (uLen == ((yLen / 2) - 1)) && (vLen == uLen) ) {
colorFormat = COLOR_FormatYUV420SemiPlanar;
if (fourCC == FOURCC_UNKNOWN) {
fourCC = FOURCC_NV21;
}
} else if ( (uvPixelStride == 1) && (vPixel == uPixel + uLen) && (yLen == frameWidth * frameHeight) && (uLen == yLen / 4) && (vLen == uLen) ) {
colorFormat = COLOR_FormatYUV420Planar;
if (fourCC == FOURCC_UNKNOWN) {
fourCC = FOURCC_YV12;
}
} else {
colorFormat = COLOR_FormatUnknown;
fourCC = FOURCC_UNKNOWN;
LOGE("Unsupported format");
return false;
}
buffer.clear();
buffer.insert(buffer.end(), yPixel, yPixel + yLen);
buffer.insert(buffer.end(), uPixel, uPixel + yLen / 2);
return true;
}
bool retrieveFrame(int, OutputArray out) CV_OVERRIDE
{
if (buffer.empty()) {
return false;
}
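// YUV420 stores a full-resolution Y plane followed by quarter-resolution
// chroma, so the packed buffer holds frameHeight * 3/2 rows of frameWidth bytes.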
Mat yuv(frameHeight + frameHeight/2, frameWidth, CV_8UC1, buffer.data());
if (colorFormat == COLOR_FormatYUV420Planar) {
switch (fourCC) {
case FOURCC_BGR:
cv::cvtColor(yuv, out, cv::COLOR_YUV2BGR_YV12);
break;
case FOURCC_RGB:
cv::cvtColor(yuv, out, cv::COLOR_YUV2RGB_YV12);
break;
case FOURCC_GRAY:
cv::cvtColor(yuv, out, cv::COLOR_YUV2GRAY_YV12);
break;
case FOURCC_YV12:
yuv.copyTo(out);
break;
default:
LOGE("Unexpected FOURCC value: %d", fourCC);
break;
}
} else if (colorFormat == COLOR_FormatYUV420SemiPlanar) {
switch (fourCC) {
case FOURCC_BGR:
cv::cvtColor(yuv, out, cv::COLOR_YUV2BGR_NV21);
break;
case FOURCC_RGB:
cv::cvtColor(yuv, out, cv::COLOR_YUV2RGB_NV21);
break;
case FOURCC_GRAY:
cv::cvtColor(yuv, out, cv::COLOR_YUV2GRAY_NV21);
break;
case FOURCC_NV21:
yuv.copyTo(out);
break;
default:
LOGE("Unexpected FOURCC value: %d", fourCC);
break;
}
} else {
LOGE("Unsupported video format: %d", colorFormat);
return false;
}
return true;
}
double getProperty(int property_id) const CV_OVERRIDE
{
switch (property_id) {
case CV_CAP_PROP_FRAME_WIDTH:
return isOpened() ? frameWidth : desiredWidth;
case CV_CAP_PROP_FRAME_HEIGHT:
return isOpened() ? frameHeight : desiredHeight;
case CAP_PROP_AUTO_EXPOSURE:
return autoExposure ? 1 : 0;
case CV_CAP_PROP_EXPOSURE:
return exposureTime;
case CV_CAP_PROP_ISO_SPEED:
return sensitivity;
case CV_CAP_PROP_FOURCC:
return fourCC;
default:
break;
}
// unknown parameter or value not available
return -1;
}
bool setProperty(int property_id, double value) CV_OVERRIDE
{
switch (property_id) {
case CV_CAP_PROP_FRAME_WIDTH:
desiredWidth = value;
settingWidth = true;
if (settingWidth && settingHeight) {
setWidthHeight();
settingWidth = false;
settingHeight = false;
}
return true;
case CV_CAP_PROP_FRAME_HEIGHT:
desiredHeight = value;
settingHeight = true;
if (settingWidth && settingHeight) {
setWidthHeight();
settingWidth = false;
settingHeight = false;
}
return true;
case CV_CAP_PROP_FOURCC:
{
uint32_t newFourCC = cvRound(value);
if (fourCC == newFourCC) {
return true;
} else {
switch (newFourCC) {
case FOURCC_BGR:
case FOURCC_RGB:
case FOURCC_GRAY:
fourCC = newFourCC;
return true;
case FOURCC_YV12:
if (colorFormat == COLOR_FormatYUV420Planar) {
fourCC = newFourCC;
return true;
} else {
LOGE("Unsupported FOURCC conversion COLOR_FormatYUV420SemiPlanar -> COLOR_FormatYUV420Planar");
return false;
}
case FOURCC_NV21:
if (colorFormat == COLOR_FormatYUV420SemiPlanar) {
fourCC = newFourCC;
return true;
} else {
LOGE("Unsupported FOURCC conversion COLOR_FormatYUV420Planar -> COLOR_FormatYUV420SemiPlanar");
return false;
}
default:
LOGE("Unsupported FOURCC value: %d\n", fourCC);
return false;
}
}
}
case CAP_PROP_AUTO_EXPOSURE:
autoExposure = (value != 0);
if (isOpened()) {
uint8_t aeMode = autoExposure ? ACAMERA_CONTROL_AE_MODE_ON : ACAMERA_CONTROL_AE_MODE_OFF;
camera_status_t status = ACaptureRequest_setEntry_u8(captureRequest.get(), ACAMERA_CONTROL_AE_MODE, 1, &aeMode);
return status == ACAMERA_OK;
}
return true;
case CV_CAP_PROP_EXPOSURE:
if (isOpened() && exposureRange.Supported()) {
exposureTime = (int64_t)value;
LOGI("Setting CV_CAP_PROP_EXPOSURE will have no effect unless CAP_PROP_AUTO_EXPOSURE is off");
camera_status_t status = ACaptureRequest_setEntry_i64(captureRequest.get(), ACAMERA_SENSOR_EXPOSURE_TIME, 1, &exposureTime);
return status == ACAMERA_OK;
}
return false;
case CV_CAP_PROP_ISO_SPEED:
if (isOpened() && sensitivityRange.Supported()) {
sensitivity = (int32_t)value;
LOGI("Setting CV_CAP_PROP_ISO_SPEED will have no effect unless CAP_PROP_AUTO_EXPOSURE is off");
camera_status_t status = ACaptureRequest_setEntry_i32(captureRequest.get(), ACAMERA_SENSOR_SENSITIVITY, 1, &sensitivity);
return status == ACAMERA_OK;
}
return false;
default:
break;
}
return false;
}
void setWidthHeight() {
cleanUp();
initCapture(cachedIndex);
}
// calculate a score based on how well the width and height match the desired width and height
// basically draw the 2 rectangle on top of each other and take the ratio of the non-overlapping
// area to the overlapping area
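// e.g. desired 640x480 vs candidate 1280x720 (both dimensions larger):
// score = (1280*720 - 640*480) / (640*480) = 2.0; a perfect match scores 0.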
double getScore(int32_t width, int32_t height) {
double area1 = width * height;
double area2 = desiredWidth * desiredHeight;
if ((width < desiredWidth) == (height < desiredHeight)) {
return (width < desiredWidth) ? (area2 - area1)/area1 : (area1 - area2)/area2;
} else {
int32_t overlappedWidth = std::min(width, desiredWidth);
int32_t overlappedHeight = std::min(height, desiredHeight);
double overlappedArea = overlappedWidth * overlappedHeight;
return (area1 + area2 - overlappedArea)/overlappedArea;
}
}
bool initCapture(int index)
{
cachedIndex = index;
cameraManager = std::shared_ptr<ACameraManager>(ACameraManager_create(), deleter_ACameraManager);
if (!cameraManager) {
return false;
}
ACameraIdList* cameraIds = nullptr;
camera_status_t cStatus = ACameraManager_getCameraIdList(cameraManager.get(), &cameraIds);
if (cStatus != ACAMERA_OK) {
LOGE("Get camera list failed with error code: %d", cStatus);
return false;
}
std::shared_ptr<ACameraIdList> cameraIdList = std::shared_ptr<ACameraIdList>(cameraIds, deleter_ACameraIdList);
if (index < 0 || index >= cameraIds->numCameras) {
LOGE("Camera index out of range %d (Number of cameras: %d)", index, cameraIds->numCameras);
return false;
}
ACameraDevice* camera = nullptr;
cStatus = ACameraManager_openCamera(cameraManager.get(), cameraIdList.get()->cameraIds[index], GetDeviceListener(), &camera);
if (cStatus != ACAMERA_OK) {
LOGE("Open camera failed with error code: %d", cStatus);
return false;
}
cameraDevice = std::shared_ptr<ACameraDevice>(camera, deleter_ACameraDevice);
ACameraMetadata* metadata;
cStatus = ACameraManager_getCameraCharacteristics(cameraManager.get(), cameraIdList.get()->cameraIds[index], &metadata);
if (cStatus != ACAMERA_OK) {
LOGE("Get camera characteristics failed with error code: %d", cStatus);
return false;
}
std::shared_ptr<ACameraMetadata> cameraMetadata = std::shared_ptr<ACameraMetadata>(metadata, deleter_ACameraMetadata);
ACameraMetadata_const_entry entry;
ACameraMetadata_getConstEntry(cameraMetadata.get(), ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, &entry);
double bestScore = std::numeric_limits<double>::max();
int32_t bestMatchWidth = 0;
int32_t bestMatchHeight = 0;
for (uint32_t i = 0; i < entry.count; i += 4) {
int32_t input = entry.data.i32[i + 3];
int32_t format = entry.data.i32[i + 0];
if (input) {
continue;
}
if (format == AIMAGE_FORMAT_YUV_420_888) {
int32_t width = entry.data.i32[i + 1];
int32_t height = entry.data.i32[i + 2];
if (width == desiredWidth && height == desiredHeight) {
bestMatchWidth = width;
bestMatchHeight = height;
bestScore = 0;
break;
} else {
double score = getScore(width, height);
if (score < bestScore) {
bestMatchWidth = width;
bestMatchHeight = height;
bestScore = score;
}
}
}
}
ACameraMetadata_const_entry val = { 0, };
camera_status_t status = ACameraMetadata_getConstEntry(cameraMetadata.get(), ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE, &val);
if (status == ACAMERA_OK) {
exposureRange.min = val.data.i64[0];
if (exposureRange.min < kMinExposureTime) {
exposureRange.min = kMinExposureTime;
}
exposureRange.max = val.data.i64[1];
if (exposureRange.max > kMaxExposureTime) {
exposureRange.max = kMaxExposureTime;
}
exposureTime = exposureRange.value(2);
} else {
LOGW("Unsupported ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE");
exposureRange.min = exposureRange.max = 0L;
exposureTime = 0L;
}
status = ACameraMetadata_getConstEntry(cameraMetadata.get(), ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE, &val);
if (status == ACAMERA_OK){
sensitivityRange.min = val.data.i32[0];
sensitivityRange.max = val.data.i32[1];
sensitivity = sensitivityRange.value(2);
} else {
LOGW("Unsupported ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE");
sensitivityRange.min = sensitivityRange.max = 0;
sensitivity = 0;
}
AImageReader* reader;
media_status_t mStatus = AImageReader_new(bestMatchWidth, bestMatchHeight, AIMAGE_FORMAT_YUV_420_888, MAX_BUF_COUNT, &reader);
if (mStatus != AMEDIA_OK) {
LOGE("ImageReader creation failed with error code: %d", mStatus);
return false;
}
frameWidth = bestMatchWidth;
frameHeight = bestMatchHeight;
imageReader = std::shared_ptr<AImageReader>(reader, deleter_AImageReader);
ANativeWindow *window;
mStatus = AImageReader_getWindow(imageReader.get(), &window);
if (mStatus != AMEDIA_OK) {
LOGE("Could not get ANativeWindow: %d", mStatus);
return false;
}
nativeWindow = std::shared_ptr<ANativeWindow>(window, deleter_ANativeWindow);
ACaptureSessionOutputContainer* container;
cStatus = ACaptureSessionOutputContainer_create(&container);
if (cStatus != ACAMERA_OK) {
LOGE("CaptureSessionOutputContainer creation failed with error code: %d", cStatus);
return false;
}
outputContainer = std::shared_ptr<ACaptureSessionOutputContainer>(container, deleter_ACaptureSessionOutputContainer);
ANativeWindow_acquire(nativeWindow.get());
ACaptureSessionOutput* output;
cStatus = ACaptureSessionOutput_create(nativeWindow.get(), &output);
if (cStatus != ACAMERA_OK) {
LOGE("CaptureSessionOutput creation failed with error code: %d", cStatus);
return false;
}
sessionOutput = std::shared_ptr<ACaptureSessionOutput>(output, deleter_ACaptureSessionOutput);
ACaptureSessionOutputContainer_add(outputContainer.get(), sessionOutput.get());
sessionOutputAdded = true;
ACameraOutputTarget* target;
cStatus = ACameraOutputTarget_create(nativeWindow.get(), &target);
if (cStatus != ACAMERA_OK) {
LOGE("CameraOutputTarget creation failed with error code: %d", cStatus);
return false;
}
outputTarget = std::shared_ptr<ACameraOutputTarget>(target, deleter_ACameraOutputTarget);
ACaptureRequest * request;
cStatus = ACameraDevice_createCaptureRequest(cameraDevice.get(), TEMPLATE_PREVIEW, &request);
if (cStatus != ACAMERA_OK) {
LOGE("CaptureRequest creation failed with error code: %d", cStatus);
return false;
}
captureRequest = std::shared_ptr<ACaptureRequest>(request, deleter_ACaptureRequest);
cStatus = ACaptureRequest_addTarget(captureRequest.get(), outputTarget.get());
if (cStatus != ACAMERA_OK) {
LOGE("Add target to CaptureRequest failed with error code: %d", cStatus);
return false;
}
targetAdded = true;
ACameraCaptureSession *session;
cStatus = ACameraDevice_createCaptureSession(cameraDevice.get(), outputContainer.get(), GetSessionListener(), &session);
if (cStatus != ACAMERA_OK) {
LOGE("CaptureSession creation failed with error code: %d", cStatus);
return false;
}
captureSession = std::shared_ptr<ACameraCaptureSession>(session, deleter_ACameraCaptureSession);
uint8_t aeMode = autoExposure ? ACAMERA_CONTROL_AE_MODE_ON : ACAMERA_CONTROL_AE_MODE_OFF;
ACaptureRequest_setEntry_u8(captureRequest.get(), ACAMERA_CONTROL_AE_MODE, 1, &aeMode);
ACaptureRequest_setEntry_i32(captureRequest.get(), ACAMERA_SENSOR_SENSITIVITY, 1, &sensitivity);
if (!autoExposure) {
ACaptureRequest_setEntry_i64(captureRequest.get(), ACAMERA_SENSOR_EXPOSURE_TIME, 1, &exposureTime);
}
cStatus = ACameraCaptureSession_setRepeatingRequest(captureSession.get(), GetCaptureCallback(), 1, &request, nullptr);
if (cStatus != ACAMERA_OK) {
LOGE("CameraCaptureSession set repeating request failed with error code: %d", cStatus);
return false;
}
return true;
}
void cleanUp() {
captureListener.context = nullptr;
sessionListener.context = nullptr;
if (sessionState == CaptureSessionState::ACTIVE) {
ACameraCaptureSession_stopRepeating(captureSession.get());
}
captureSession = nullptr;
if (targetAdded) {
ACaptureRequest_removeTarget(captureRequest.get(), outputTarget.get());
targetAdded = false;
}
captureRequest = nullptr;
outputTarget = nullptr;
if (sessionOutputAdded) {
ACaptureSessionOutputContainer_remove(outputContainer.get(), sessionOutput.get());
sessionOutputAdded = false;
}
sessionOutput = nullptr;
nativeWindow = nullptr;
outputContainer = nullptr;
cameraDevice = nullptr;
cameraManager = nullptr;
imageReader = nullptr;
}
};
/******************************** Session management *******************************/
void OnSessionClosed(void* context, ACameraCaptureSession* session) {
if (context == nullptr) return;
LOGW("session %p closed", session);
reinterpret_cast<AndroidCameraCapture*>(context)->setSessionState(CaptureSessionState::CLOSED);
}
void OnSessionReady(void* context, ACameraCaptureSession* session) {
if (context == nullptr) return;
LOGW("session %p ready", session);
reinterpret_cast<AndroidCameraCapture*>(context)->setSessionState(CaptureSessionState::READY);
}
void OnSessionActive(void* context, ACameraCaptureSession* session) {
if (context == nullptr) return;
LOGW("session %p active", session);
reinterpret_cast<AndroidCameraCapture*>(context)->setSessionState(CaptureSessionState::ACTIVE);
}
void OnCaptureCompleted(void* context,
ACameraCaptureSession* session,
ACaptureRequest* /* request */,
const ACameraMetadata* /* result */) {
if (context == nullptr) return;
LOGV("session %p capture completed", session);
AndroidCameraCapture* cameraCapture = reinterpret_cast<AndroidCameraCapture*>(context);
std::unique_lock<std::mutex> lock(cameraCapture->mtx);
if (cameraCapture->waitingCapture) {
cameraCapture->waitingCapture = false;
cameraCapture->captureSuccess = true;
cameraCapture->condition.notify_one();
}
}
void OnCaptureFailed(void* context,
ACameraCaptureSession* session,
ACaptureRequest* /* request */,
ACameraCaptureFailure* /* failure */) {
if (context == nullptr) return;
LOGV("session %p capture failed", session);
AndroidCameraCapture* cameraCapture = reinterpret_cast<AndroidCameraCapture*>(context);
std::unique_lock<std::mutex> lock(cameraCapture->mtx);
if (cameraCapture->waitingCapture) {
cameraCapture->waitingCapture = false;
cameraCapture->captureSuccess = false;
cameraCapture->condition.notify_one();
}
}
/****************** Implementation of interface functions ********************/
Ptr<IVideoCapture> cv::createAndroidCapture_cam( int index ) {
Ptr<AndroidCameraCapture> res = makePtr<AndroidCameraCapture>();
if (res && res->initCapture(index))
return res;
return Ptr<IVideoCapture>();
}
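// A minimal usage sketch (assuming OpenCV was built with this backend enabled):
//   cv::VideoCapture cap(0, cv::CAP_ANDROID);
//   cv::Mat frame;
//   if (cap.isOpened() && cap.read(frame)) { /* process frame */ }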

View File

@ -0,0 +1,248 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "precomp.hpp"
#include <stdio.h>
#include <string.h>
#include <fstream>
#include <iostream>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>
#include <android/log.h>
#include "media/NdkMediaCodec.h"
#include "media/NdkMediaExtractor.h"
#define INPUT_TIMEOUT_MS 2000
#define COLOR_FormatYUV420Planar 19
#define COLOR_FormatYUV420SemiPlanar 21
using namespace cv;
#define TAG "NativeCodec"
#define LOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
static inline void deleter_AMediaExtractor(AMediaExtractor *extractor) {
AMediaExtractor_delete(extractor);
}
static inline void deleter_AMediaCodec(AMediaCodec *codec) {
AMediaCodec_stop(codec);
AMediaCodec_delete(codec);
}
static inline void deleter_AMediaFormat(AMediaFormat *format) {
AMediaFormat_delete(format);
}
class AndroidMediaNdkCapture : public IVideoCapture
{
public:
AndroidMediaNdkCapture():
sawInputEOS(false), sawOutputEOS(false),
frameWidth(0), frameHeight(0), colorFormat(0) {}
std::shared_ptr<AMediaExtractor> mediaExtractor;
std::shared_ptr<AMediaCodec> mediaCodec;
bool sawInputEOS;
bool sawOutputEOS;
int32_t frameWidth;
int32_t frameHeight;
int32_t colorFormat;
std::vector<uint8_t> buffer;
~AndroidMediaNdkCapture() { cleanUp(); }
bool decodeFrame() {
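// Standard MediaCodec decode loop: feed compressed samples from the
// extractor into input buffers, then drain decoded frames from output
// buffers until one frame (or EOS) is obtained.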
while (!sawInputEOS || !sawOutputEOS) {
if (!sawInputEOS) {
auto bufferIndex = AMediaCodec_dequeueInputBuffer(mediaCodec.get(), INPUT_TIMEOUT_MS);
LOGV("input buffer %zd", bufferIndex);
if (bufferIndex >= 0) {
size_t bufferSize;
auto inputBuffer = AMediaCodec_getInputBuffer(mediaCodec.get(), bufferIndex, &bufferSize);
auto sampleSize = AMediaExtractor_readSampleData(mediaExtractor.get(), inputBuffer, bufferSize);
if (sampleSize < 0) {
sampleSize = 0;
sawInputEOS = true;
LOGV("EOS");
}
auto presentationTimeUs = AMediaExtractor_getSampleTime(mediaExtractor.get());
AMediaCodec_queueInputBuffer(mediaCodec.get(), bufferIndex, 0, sampleSize,
presentationTimeUs, sawInputEOS ? AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM : 0);
AMediaExtractor_advance(mediaExtractor.get());
}
}
if (!sawOutputEOS) {
AMediaCodecBufferInfo info;
auto bufferIndex = AMediaCodec_dequeueOutputBuffer(mediaCodec.get(), &info, 0);
if (bufferIndex >= 0) {
size_t bufferSize = 0;
auto mediaFormat = std::shared_ptr<AMediaFormat>(AMediaCodec_getOutputFormat(mediaCodec.get()), deleter_AMediaFormat);
AMediaFormat_getInt32(mediaFormat.get(), AMEDIAFORMAT_KEY_WIDTH, &frameWidth);
AMediaFormat_getInt32(mediaFormat.get(), AMEDIAFORMAT_KEY_HEIGHT, &frameHeight);
AMediaFormat_getInt32(mediaFormat.get(), AMEDIAFORMAT_KEY_COLOR_FORMAT, &colorFormat);
uint8_t* codecBuffer = AMediaCodec_getOutputBuffer(mediaCodec.get(), bufferIndex, &bufferSize);
buffer = std::vector<uint8_t>(codecBuffer + info.offset, codecBuffer + bufferSize);
LOGV("colorFormat: %d", colorFormat);
LOGV("buffer size: %zu", bufferSize);
LOGV("width (frame): %d", frameWidth);
LOGV("height (frame): %d", frameHeight);
if (info.flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) {
LOGV("output EOS");
sawOutputEOS = true;
}
AMediaCodec_releaseOutputBuffer(mediaCodec.get(), bufferIndex, info.size != 0);
return true;
} else if (bufferIndex == AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED) {
LOGV("output buffers changed");
} else if (bufferIndex == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
auto format = AMediaCodec_getOutputFormat(mediaCodec.get());
LOGV("format changed to: %s", AMediaFormat_toString(format));
AMediaFormat_delete(format);
} else if (bufferIndex == AMEDIACODEC_INFO_TRY_AGAIN_LATER) {
LOGV("no output buffer right now");
} else {
LOGV("unexpected info code: %zd", bufferIndex);
}
}
}
return false;
}
bool isOpened() const CV_OVERRIDE { return mediaCodec.get() != nullptr; }
int getCaptureDomain() CV_OVERRIDE { return CAP_ANDROID; }
bool grabFrame() CV_OVERRIDE
{
// clear the previous frame
buffer.clear();
return decodeFrame();
}
bool retrieveFrame(int, OutputArray out) CV_OVERRIDE
{
if (buffer.empty()) {
return false;
}
Mat yuv(frameHeight + frameHeight/2, frameWidth, CV_8UC1, buffer.data());
if (colorFormat == COLOR_FormatYUV420Planar) {
cv::cvtColor(yuv, out, cv::COLOR_YUV2BGR_YV12);
} else if (colorFormat == COLOR_FormatYUV420SemiPlanar) {
cv::cvtColor(yuv, out, cv::COLOR_YUV2BGR_NV21);
} else {
LOGE("Unsupported video format: %d", colorFormat);
return false;
}
return true;
}
double getProperty(int property_id) const CV_OVERRIDE
{
switch (property_id)
{
case CV_CAP_PROP_FRAME_WIDTH: return frameWidth;
case CV_CAP_PROP_FRAME_HEIGHT: return frameHeight;
}
return 0;
}
bool setProperty(int /* property_id */, double /* value */) CV_OVERRIDE
{
return false;
}
bool initCapture(const char * filename)
{
struct stat statBuffer;
if (stat(filename, &statBuffer) != 0) {
LOGE("failed to stat file: %s (%s)", filename, strerror(errno));
return false;
}
int fd = open(filename, O_RDONLY);
if (fd < 0) {
LOGE("failed to open file: %s %d (%s)", filename, fd, strerror(errno));
return false;
}
mediaExtractor = std::shared_ptr<AMediaExtractor>(AMediaExtractor_new(), deleter_AMediaExtractor);
if (!mediaExtractor) {
return false;
}
media_status_t err = AMediaExtractor_setDataSourceFd(mediaExtractor.get(), fd, 0, statBuffer.st_size);
close(fd);
if (err != AMEDIA_OK) {
LOGV("setDataSource error: %d", err);
return false;
}
int numtracks = AMediaExtractor_getTrackCount(mediaExtractor.get());
LOGV("input has %d tracks", numtracks);
for (int i = 0; i < numtracks; i++) {
auto format = std::shared_ptr<AMediaFormat>(AMediaExtractor_getTrackFormat(mediaExtractor.get(), i), deleter_AMediaFormat);
if (!format) {
continue;
}
const char *s = AMediaFormat_toString(format.get());
LOGV("track %d format: %s", i, s);
const char *mime;
if (!AMediaFormat_getString(format.get(), AMEDIAFORMAT_KEY_MIME, &mime)) {
LOGV("no mime type");
} else if (!strncmp(mime, "video/", 6)) {
int32_t trackWidth, trackHeight;
AMediaFormat_getInt32(format.get(), AMEDIAFORMAT_KEY_WIDTH, &trackWidth);
AMediaFormat_getInt32(format.get(), AMEDIAFORMAT_KEY_HEIGHT, &trackHeight);
LOGV("width (track): %d", trackWidth);
LOGV("height (track): %d", trackHeight);
if (AMediaExtractor_selectTrack(mediaExtractor.get(), i) != AMEDIA_OK) {
continue;
}
mediaCodec = std::shared_ptr<AMediaCodec>(AMediaCodec_createDecoderByType(mime), deleter_AMediaCodec);
if (!mediaCodec) {
continue;
}
if (AMediaCodec_configure(mediaCodec.get(), format.get(), NULL, NULL, 0) != AMEDIA_OK) {
continue;
}
sawInputEOS = false;
sawOutputEOS = false;
if (AMediaCodec_start(mediaCodec.get()) != AMEDIA_OK) {
continue;
}
return true;
}
}
return false;
}
void cleanUp() {
sawInputEOS = true;
sawOutputEOS = true;
frameWidth = 0;
frameHeight = 0;
colorFormat = 0;
}
};
/****************** Implementation of interface functions ********************/
Ptr<IVideoCapture> cv::createAndroidCapture_file(const std::string &filename) {
Ptr<AndroidMediaNdkCapture> res = makePtr<AndroidMediaNdkCapture>();
if (res && res->initCapture(filename.c_str()))
return res;
return Ptr<IVideoCapture>();
}

View File

@ -0,0 +1,639 @@
////////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//
//
// The code has been contributed by Arkadiusz Raj in October 2016
//
#include "precomp.hpp"
#include "cap_interface.hpp"
#ifdef HAVE_ARAVIS_API
#include <arv.h>
//
// This file provides a wrapper for using the Aravis SDK to access GigE Vision cameras.
// The Aravis library (version 0.4 or 0.6) must be installed, otherwise this code is not included in the build.
//
// To include this module, invoke cmake with -DWITH_ARAVIS=ON
//
// Please observe that jumbo frames are required when high fps & 16-bit data are selected.
// (camera, switches/routers and the computer this software is running on)
//
// Basic usage: VideoCapture cap(<camera id>, CAP_ARAVIS);
//
// Supported properties:
// read/write
// CAP_PROP_AUTO_EXPOSURE(0|1)
// CAP_PROP_EXPOSURE(t), t in seconds
// CAP_PROP_BRIGHTNESS (ev), exposure compensation in EV for auto exposure algorithm
// CAP_PROP_GAIN(g), g >=0 or -1 for automatic control if CAP_PROP_AUTO_EXPOSURE is true
// CAP_PROP_FPS(f)
// CAP_PROP_FOURCC(type)
// CAP_PROP_BUFFERSIZE(n)
// read only:
// CAP_PROP_POS_MSEC
// CAP_PROP_FRAME_WIDTH
// CAP_PROP_FRAME_HEIGHT
//
// Supported types of data:
// video/x-raw, fourcc:'GREY' -> 8bit, 1 channel
// video/x-raw, fourcc:'Y800' -> 8bit, 1 channel
// video/x-raw, fourcc:'Y12 ' -> 12bit, 1 channel
// video/x-raw, fourcc:'Y16 ' -> 16bit, 1 channel
// video/x-raw, fourcc:'GRBG' -> 8bit, 1 channel
//
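// A minimal usage sketch (assuming OpenCV was built with -DWITH_ARAVIS=ON):
//   cv::VideoCapture cap(0, cv::CAP_ARAVIS);
//   cap.set(cv::CAP_PROP_EXPOSURE, 0.01);   // 1/100 s, per the property list above
//   cv::Mat frame;
//   while (cap.read(frame)) { /* process frame */ }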
#define MODE_GREY CV_FOURCC_MACRO('G','R','E','Y')
#define MODE_Y800 CV_FOURCC_MACRO('Y','8','0','0')
#define MODE_Y12 CV_FOURCC_MACRO('Y','1','2',' ')
#define MODE_Y16 CV_FOURCC_MACRO('Y','1','6',' ')
#define MODE_GRBG CV_FOURCC_MACRO('G','R','B','G')
#define CLIP(a,b,c) (cv::max(cv::min((a),(c)),(b)))
/********************* Capturing video from camera via Aravis *********************/
class CvCaptureCAM_Aravis : public CvCapture
{
public:
CvCaptureCAM_Aravis();
virtual ~CvCaptureCAM_Aravis()
{
close();
}
virtual bool open(int);
virtual void close();
virtual double getProperty(int) const CV_OVERRIDE;
virtual bool setProperty(int, double) CV_OVERRIDE;
virtual bool grabFrame() CV_OVERRIDE;
virtual IplImage* retrieveFrame(int) CV_OVERRIDE;
virtual int getCaptureDomain() CV_OVERRIDE
{
return cv::CAP_ARAVIS;
}
protected:
bool create(int);
bool init_buffers();
void stopCapture();
bool startCapture();
bool getDeviceNameById(int id, std::string &device);
void autoExposureControl(IplImage*);
ArvCamera *camera; // Camera to control.
ArvStream *stream; // Object for video stream reception.
void *framebuffer; // Data pointer of the most recently popped Aravis buffer.
unsigned int payload; // Width x height x Pixel width.
int widthMin; // Camera sensor minimum width.
int widthMax; // Camera sensor maximum width.
int heightMin; // Camera sensor minimum height.
int heightMax; // Camera sensor maximum height.
bool fpsAvailable;
double fpsMin; // Camera minimum fps.
double fpsMax; // Camera maximum fps.
bool gainAvailable;
double gainMin; // Camera minimum gain.
double gainMax; // Camera maximum gain.
bool exposureAvailable;
double exposureMin; // Camera's minimum exposure time.
double exposureMax; // Camera's maximum exposure time.
bool controlExposure; // Flag if automatic exposure shall be done by this SW
double exposureCompensation;
bool autoGain;
double targetGrey; // Target grey value (mid grey)
bool softwareTriggered; // Flag if the camera is software triggered
bool allowAutoTrigger; // Flag that user allowed to trigger software triggered cameras automatically
gint64 *pixelFormats;
guint pixelFormatsCnt;
int num_buffers; // number of payload transmission buffers
ArvPixelFormat pixelFormat; // pixel format
int xoffset; // current frame region x offset
int yoffset; // current frame region y offset
int width; // current frame width of frame
int height; // current frame height of image
double fps; // current value of fps
double exposure; // current value of exposure time
double gain; // current value of gain
double midGrey; // current value of mid grey (brightness)
unsigned frameID; // current frame id
unsigned prevFrameID;
IplImage *frame; // local frame copy
};
CvCaptureCAM_Aravis::CvCaptureCAM_Aravis()
{
camera = NULL;
stream = NULL;
framebuffer = NULL;
payload = 0;
widthMin = widthMax = heightMin = heightMax = 0;
xoffset = yoffset = width = height = 0;
fpsMin = fpsMax = gainMin = gainMax = exposureMin = exposureMax = 0;
controlExposure = false;
exposureCompensation = 0;
targetGrey = 0;
frameID = prevFrameID = 0;
allowAutoTrigger = false;
num_buffers = 10;
frame = NULL;
}
void CvCaptureCAM_Aravis::close()
{
if(camera) {
stopCapture();
g_object_unref(camera);
camera = NULL;
}
}
bool CvCaptureCAM_Aravis::getDeviceNameById(int id, std::string &device)
{
arv_update_device_list();
if((id >= 0) && (id < (int)arv_get_n_devices())) {
device = arv_get_device_id(id);
return true;
}
return false;
}
bool CvCaptureCAM_Aravis::create( int index )
{
std::string deviceName;
if(!getDeviceNameById(index, deviceName))
return false;
return NULL != (camera = arv_camera_new(deviceName.c_str()));
}
bool CvCaptureCAM_Aravis::init_buffers()
{
if(stream) {
g_object_unref(stream);
stream = NULL;
}
if( (stream = arv_camera_create_stream(camera, NULL, NULL)) ) {
if( arv_camera_is_gv_device(camera) ) {
g_object_set(stream,
"socket-buffer", ARV_GV_STREAM_SOCKET_BUFFER_AUTO,
"socket-buffer-size", 0, NULL);
g_object_set(stream,
"packet-resend", ARV_GV_STREAM_PACKET_RESEND_NEVER, NULL);
g_object_set(stream,
"packet-timeout", (unsigned) 40000,
"frame-retention", (unsigned) 200000, NULL);
}
payload = arv_camera_get_payload (camera);
for (int i = 0; i < num_buffers; i++)
arv_stream_push_buffer(stream, arv_buffer_new(payload, NULL));
return true;
}
return false;
}
bool CvCaptureCAM_Aravis::open( int index )
{
if(create(index)) {
// fetch properties bounds
pixelFormats = arv_camera_get_available_pixel_formats(camera, &pixelFormatsCnt);
arv_camera_get_width_bounds(camera, &widthMin, &widthMax);
arv_camera_get_height_bounds(camera, &heightMin, &heightMax);
arv_camera_set_region(camera, 0, 0, widthMax, heightMax);
if( (fpsAvailable = arv_camera_is_frame_rate_available(camera)) )
arv_camera_get_frame_rate_bounds(camera, &fpsMin, &fpsMax);
if( (gainAvailable = arv_camera_is_gain_available(camera)) )
arv_camera_get_gain_bounds (camera, &gainMin, &gainMax);
if( (exposureAvailable = arv_camera_is_exposure_time_available(camera)) )
arv_camera_get_exposure_time_bounds (camera, &exposureMin, &exposureMax);
// get initial values
pixelFormat = arv_camera_get_pixel_format(camera);
exposure = exposureAvailable ? arv_camera_get_exposure_time(camera) : 0;
gain = gainAvailable ? arv_camera_get_gain(camera) : 0;
fps = arv_camera_get_frame_rate(camera);
softwareTriggered = (strcmp(arv_camera_get_trigger_source(camera), "Software") == 0);
return startCapture();
}
return false;
}
bool CvCaptureCAM_Aravis::grabFrame()
{
// remove content of previous frame
framebuffer = NULL;
if(stream) {
ArvBuffer *arv_buffer = NULL;
int max_tries = 10;
int tries = 0;
if (softwareTriggered && allowAutoTrigger) {
arv_camera_software_trigger (camera);
}
for(; tries < max_tries; tries ++) {
arv_buffer = arv_stream_timeout_pop_buffer (stream, 200000);
if (arv_buffer != NULL && arv_buffer_get_status (arv_buffer) != ARV_BUFFER_STATUS_SUCCESS) {
arv_stream_push_buffer (stream, arv_buffer);
} else break;
}
if(arv_buffer != NULL && tries < max_tries) {
size_t buffer_size;
framebuffer = (void*)arv_buffer_get_data (arv_buffer, &buffer_size);
// retrieve image size properties
arv_buffer_get_image_region (arv_buffer, &xoffset, &yoffset, &width, &height);
// retrieve image ID set by camera
frameID = arv_buffer_get_frame_id(arv_buffer);
arv_stream_push_buffer(stream, arv_buffer);
return true;
}
}
return false;
}
IplImage* CvCaptureCAM_Aravis::retrieveFrame(int)
{
if(framebuffer) {
int depth = 0, channels = 0;
switch(pixelFormat) {
case ARV_PIXEL_FORMAT_MONO_8:
case ARV_PIXEL_FORMAT_BAYER_GR_8:
depth = IPL_DEPTH_8U;
channels = 1;
break;
case ARV_PIXEL_FORMAT_MONO_12:
case ARV_PIXEL_FORMAT_MONO_16:
depth = IPL_DEPTH_16U;
channels = 1;
break;
}
if(depth && channels) {
IplImage src;
cvInitImageHeader( &src, cvSize( width, height ), depth, channels, IPL_ORIGIN_TL, 4 );
cvSetData( &src, framebuffer, src.widthStep );
if( !frame ||
frame->width != src.width ||
frame->height != src.height ||
frame->depth != src.depth ||
frame->nChannels != src.nChannels) {
cvReleaseImage( &frame );
frame = cvCreateImage( cvGetSize(&src), src.depth, channels );
}
cvCopy(&src, frame);
if(controlExposure && ((frameID - prevFrameID) >= 3)) {
// control exposure every third frame
// i.e. skip frame taken with previous exposure setup
autoExposureControl(frame);
}
return frame;
}
}
return NULL;
}
void CvCaptureCAM_Aravis::autoExposureControl(IplImage* image)
{
// Software control of exposure parameters utilizing
// automatic change of exposure time & gain
// Priority is set as follows:
// - to increase brightness, first increase time then gain
// - to decrease brightness, first decrease gain then time
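// Worked example: with targetGrey = 128 and measured brightness = 64,
// d = (128 * 100) / 64 = 200; since d >= dmid it is softened to
// (200 + 200) / 3 = 133, i.e. roughly a 1.33x brightness increase is requested.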
cv::Mat m = cv::cvarrToMat(image);
// calc mean value for luminance or green channel
double brightness = cv::mean(m)[image->nChannels > 1 ? 1 : 0];
if(brightness < 1) brightness = 1;
// mid point - 100 % means no change
static const double dmid = 100;
// distance from optimal value as a percentage
double d = (targetGrey * dmid) / brightness;
if(d >= dmid) d = ( d + (dmid * 2) ) / 3;
prevFrameID = frameID;
midGrey = brightness;
double maxe = 1e6 / fps;
double ne = CLIP( ( exposure * d ) / ( dmid * pow(sqrt(2), -2 * exposureCompensation) ), exposureMin, maxe);
// if change of value requires intervention
if(std::fabs(d-dmid) > 5) {
double ev, ng = 0;
if(gainAvailable && autoGain) {
ev = log( d / dmid ) / log(2);
ng = CLIP( gain + ev + exposureCompensation, gainMin, gainMax);
if( ng < gain ) {
// priority 1 - reduce gain
arv_camera_set_gain(camera, (gain = ng));
return;
}
}
if(exposureAvailable) {
// priority 2 - control of exposure time
if(std::fabs(exposure - ne) > 2) {
// we have not yet reached the max exposure level
arv_camera_set_exposure_time(camera, (exposure = ne) );
return;
}
}
if(gainAvailable && autoGain) {
if(exposureAvailable) {
// exposure at maximum - increase gain if possible
if(ng > gain && ng < gainMax && ne >= maxe) {
arv_camera_set_gain(camera, (gain = ng));
return;
}
} else {
// priority 3 - increase gain
arv_camera_set_gain(camera, (gain = ng));
return;
}
}
}
// if gain can be reduced - do it
if(gainAvailable && autoGain && exposureAvailable) {
if(gain > gainMin && exposure < maxe) {
exposure = CLIP( ne * 1.05, exposureMin, maxe);
arv_camera_set_exposure_time(camera, exposure );
}
}
}
double CvCaptureCAM_Aravis::getProperty( int property_id ) const
{
switch(property_id) {
case CV_CAP_PROP_POS_MSEC:
return (double)frameID/fps;
case CV_CAP_PROP_FRAME_WIDTH:
return width;
case CV_CAP_PROP_FRAME_HEIGHT:
return height;
case CV_CAP_PROP_AUTO_EXPOSURE:
return (controlExposure ? 1 : 0);
case CV_CAP_PROP_BRIGHTNESS:
return exposureCompensation;
case CV_CAP_PROP_EXPOSURE:
if(exposureAvailable) {
/* exposure time in seconds, like 1/100 s */
return arv_camera_get_exposure_time(camera) / 1e6;
}
break;
case CV_CAP_PROP_FPS:
if(fpsAvailable) {
return arv_camera_get_frame_rate(camera);
}
break;
case CV_CAP_PROP_GAIN:
if(gainAvailable) {
return arv_camera_get_gain(camera);
}
break;
case CV_CAP_PROP_FOURCC:
{
ArvPixelFormat currFormat = arv_camera_get_pixel_format(camera);
switch( currFormat ) {
case ARV_PIXEL_FORMAT_MONO_8:
return MODE_Y800;
case ARV_PIXEL_FORMAT_MONO_12:
return MODE_Y12;
case ARV_PIXEL_FORMAT_MONO_16:
return MODE_Y16;
case ARV_PIXEL_FORMAT_BAYER_GR_8:
return MODE_GRBG;
}
}
break;
case CV_CAP_PROP_BUFFERSIZE:
if(stream) {
int in, out;
arv_stream_get_n_buffers(stream, &in, &out);
// return number of available buffers in Aravis output queue
return out;
}
break;
case cv::CAP_PROP_ARAVIS_AUTOTRIGGER:
{
return allowAutoTrigger ? 1. : 0.;
}
break;
}
return -1.0;
}
bool CvCaptureCAM_Aravis::setProperty( int property_id, double value )
{
switch(property_id) {
case CV_CAP_PROP_AUTO_EXPOSURE:
if(exposureAvailable || gainAvailable) {
if( (controlExposure = (bool)(int)value) ) {
exposure = exposureAvailable ? arv_camera_get_exposure_time(camera) : 0;
gain = gainAvailable ? arv_camera_get_gain(camera) : 0;
}
}
break;
case CV_CAP_PROP_BRIGHTNESS:
exposureCompensation = CLIP(value, -3., 3.);
break;
case CV_CAP_PROP_EXPOSURE:
if(exposureAvailable) {
/* exposure time in seconds, like 1/100 s */
value *= 1e6; // -> from s to us
arv_camera_set_exposure_time(camera, exposure = CLIP(value, exposureMin, exposureMax));
break;
} else return false;
case CV_CAP_PROP_FPS:
if(fpsAvailable) {
arv_camera_set_frame_rate(camera, fps = CLIP(value, fpsMin, fpsMax));
break;
} else return false;
case CV_CAP_PROP_GAIN:
if(gainAvailable) {
if ( (autoGain = (-1 == value) ) )
break;
arv_camera_set_gain(camera, gain = CLIP(value, gainMin, gainMax));
break;
} else return false;
case CV_CAP_PROP_FOURCC:
{
ArvPixelFormat newFormat = pixelFormat;
switch((int)value) {
case MODE_GREY:
case MODE_Y800:
newFormat = ARV_PIXEL_FORMAT_MONO_8;
targetGrey = 128;
break;
case MODE_Y12:
newFormat = ARV_PIXEL_FORMAT_MONO_12;
targetGrey = 2048;
break;
case MODE_Y16:
newFormat = ARV_PIXEL_FORMAT_MONO_16;
targetGrey = 32768;
break;
case MODE_GRBG:
newFormat = ARV_PIXEL_FORMAT_BAYER_GR_8;
targetGrey = 128;
break;
}
if(newFormat != pixelFormat) {
stopCapture();
arv_camera_set_pixel_format(camera, pixelFormat = newFormat);
startCapture();
}
}
break;
case CV_CAP_PROP_BUFFERSIZE:
{
int x = (int)value;
if((x > 0) && (x != num_buffers)) {
stopCapture();
num_buffers = x;
startCapture();
}
}
break;
case cv::CAP_PROP_ARAVIS_AUTOTRIGGER:
{
allowAutoTrigger = (bool) value;
}
break;
default:
return false;
}
return true;
}
void CvCaptureCAM_Aravis::stopCapture()
{
arv_camera_stop_acquisition(camera);
if(stream) {
g_object_unref(stream);
stream = NULL;
}
}
bool CvCaptureCAM_Aravis::startCapture()
{
if(init_buffers() ) {
arv_camera_set_acquisition_mode(camera, ARV_ACQUISITION_MODE_CONTINUOUS);
arv_camera_start_acquisition(camera);
return true;
}
return false;
}
cv::Ptr<cv::IVideoCapture> cv::create_Aravis_capture( int index )
{
CvCaptureCAM_Aravis* capture = new CvCaptureCAM_Aravis;
if(capture->open(index)) {
return cv::makePtr<cv::LegacyCapture>(capture);
}
delete capture;
return NULL;
}
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,702 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "cap_interface.hpp"
#ifdef HAVE_DC1394_2
#include <unistd.h>
#include <stdint.h>
#ifdef _WIN32
// On Windows, we have no sys/select.h, but we need to pick up
// select() which is in winsock2.
#ifndef __SYS_SELECT_H__
#define __SYS_SELECT_H__ 1
#include <winsock2.h>
#endif
#else
#include <sys/select.h>
#endif /*_WIN32*/
#include <dc1394/dc1394.h>
#include <stdlib.h>
#include <string.h>
struct CvDC1394
{
CvDC1394();
~CvDC1394();
dc1394_t* dc;
fd_set camFds;
};
CvDC1394::CvDC1394()
{
dc = dc1394_new();
FD_ZERO(&camFds);
}
CvDC1394::~CvDC1394()
{
if (dc)
dc1394_free(dc);
dc = 0;
}
static CvDC1394& getDC1394()
{
static CvDC1394 dc1394;
return dc1394;
}
class CvCaptureCAM_DC1394_v2_CPP : public CvCapture
{
public:
static int dc1394properties[CV_CAP_PROP_MAX_DC1394];
CvCaptureCAM_DC1394_v2_CPP();
virtual ~CvCaptureCAM_DC1394_v2_CPP()
{
close();
}
virtual bool open(int index);
virtual void close();
virtual double getProperty(int) const CV_OVERRIDE;
virtual bool setProperty(int, double) CV_OVERRIDE;
virtual bool grabFrame() CV_OVERRIDE;
virtual IplImage* retrieveFrame(int) CV_OVERRIDE;
virtual int getCaptureDomain() CV_OVERRIDE { return CV_CAP_DC1394; }
protected:
virtual bool startCapture();
uint64_t guid;
dc1394camera_t* dcCam;
int isoSpeed;
int videoMode;
int frameWidth, frameHeight;
double fps;
int nDMABufs;
bool started;
int userMode;
enum { VIDERE = 0x5505 };
int cameraId;
bool colorStereo;
dc1394bayer_method_t bayer;
dc1394color_filter_t bayerFilter;
enum { NIMG = 2 };
IplImage *img[NIMG];
dc1394video_frame_t* frameC;
int nimages;
dc1394featureset_t feature_set;
};
//mapping CV_CAP_PROP_ to DC1394_FEATUREs
int CvCaptureCAM_DC1394_v2_CPP::dc1394properties[CV_CAP_PROP_MAX_DC1394] = {
-1, //no corresponding feature for CV_CAP_PROP_POS_MSEC
-1,-1,-1,-1,
DC1394_FEATURE_FRAME_RATE, //CV_CAP_PROP_FPS - fps can be set for format 7 only!
-1,-1,-1,-1,
DC1394_FEATURE_BRIGHTNESS, //CV_CAP_PROP_BRIGHTNESS 10
-1,
DC1394_FEATURE_SATURATION, //CV_CAP_PROP_SATURATION
DC1394_FEATURE_HUE,
DC1394_FEATURE_GAIN,
DC1394_FEATURE_SHUTTER, //CV_CAP_PROP_EXPOSURE
-1, //CV_CAP_PROP_CONVERT_RGB
DC1394_FEATURE_WHITE_BALANCE, //corresponds to CV_CAP_PROP_WHITE_BALANCE_BLUE_U and CV_CAP_PROP_WHITE_BALANCE_RED_V, see set function to check these props are set
-1,-1,
DC1394_FEATURE_SHARPNESS, //20
DC1394_FEATURE_EXPOSURE, //CV_CAP_PROP_AUTO_EXPOSURE - this is auto exposure according to the IIDC standard
DC1394_FEATURE_GAMMA, //CV_CAP_PROP_GAMMA
DC1394_FEATURE_TEMPERATURE, //CV_CAP_PROP_TEMPERATURE
DC1394_FEATURE_TRIGGER, //CV_CAP_PROP_TRIGGER
DC1394_FEATURE_TRIGGER_DELAY, //CV_CAP_PROP_TRIGGER_DELAY
DC1394_FEATURE_WHITE_BALANCE, //CV_CAP_PROP_WHITE_BALANCE_RED_V
DC1394_FEATURE_ZOOM, //CV_CAP_PROP_ZOOM
DC1394_FEATURE_FOCUS, //CV_CAP_PROP_FOCUS
-1 //CV_CAP_PROP_GUID
};
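// e.g. dc1394properties[CV_CAP_PROP_BRIGHTNESS] maps to DC1394_FEATURE_BRIGHTNESS,
// so the generic get/setProperty paths can address IIDC features by OpenCV property id.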
CvCaptureCAM_DC1394_v2_CPP::CvCaptureCAM_DC1394_v2_CPP()
{
guid = 0;
dcCam = 0;
isoSpeed = 400;
fps = 15;
// Number of DMA buffers; reduce to 1 to ensure only a single frame is stored in the buffer
nDMABufs = 8;
started = false;
cameraId = 0;
colorStereo = false;
bayer = DC1394_BAYER_METHOD_BILINEAR;
bayerFilter = DC1394_COLOR_FILTER_GRBG;
frameWidth = 640;
frameHeight = 480;
for (int i = 0; i < NIMG; i++)
img[i] = 0;
frameC = 0;
nimages = 1;
userMode = -1;
}
bool CvCaptureCAM_DC1394_v2_CPP::startCapture()
{
int i;
int code = 0;
if (!dcCam)
return false;
if (isoSpeed > 0)
{
// if capable set operation mode to 1394b for iso speeds above 400
if (isoSpeed > 400 && dcCam->bmode_capable == DC1394_TRUE)
{
dc1394_video_set_operation_mode(dcCam, DC1394_OPERATION_MODE_1394B);
}
code = dc1394_video_set_iso_speed(dcCam,
isoSpeed <= 100 ? DC1394_ISO_SPEED_100 :
isoSpeed <= 200 ? DC1394_ISO_SPEED_200 :
isoSpeed <= 400 ? DC1394_ISO_SPEED_400 :
isoSpeed <= 800 ? DC1394_ISO_SPEED_800 :
isoSpeed == 1600 ? DC1394_ISO_SPEED_1600 :
DC1394_ISO_SPEED_3200);
}
dc1394video_modes_t videoModes;
dc1394_video_get_supported_modes(dcCam, &videoModes);
// should a specific mode be used
while (userMode >= 0) // 'if' semantic, no real loop here
{
dc1394video_mode_t wantedMode;
if (userMode < (int)videoModes.num)
{
// set mode from number, e.g. the second supported mode, i.e. userMode = 1
wantedMode = videoModes.modes[userMode];
}
else if ((userMode >= DC1394_VIDEO_MODE_MIN) && (userMode <= DC1394_VIDEO_MODE_MAX))
{
// set modes directly from DC1394 constants (from dc1394video_mode_t)
//search for wanted mode, to check if camera supports it
int j = 0;
while ((j < (int)videoModes.num) && videoModes.modes[j] != userMode)
{
j++;
}
if (!(j < (int)videoModes.num))
{
userMode = -1; // wanted mode not supported, search for best mode
break;
}
wantedMode = videoModes.modes[j];
}
else
{
userMode = -1; // wanted mode not supported, search for best mode
break;
}
//if userMode is available: set it and update size
{
code = dc1394_video_set_mode(dcCam, wantedMode);
uint32_t width = 0, height = 0;
dc1394_get_image_size_from_video_mode(dcCam, wantedMode, &width, &height);
frameWidth = (int)width;
frameHeight = (int)height;
}
break;
}
if (userMode == -1 && (frameWidth > 0 || frameHeight > 0))
{
dc1394video_mode_t bestMode = (dc1394video_mode_t)(-1);
for (i = 0; i < (int)videoModes.num; i++)
{
dc1394video_mode_t mode = videoModes.modes[i];
if (mode >= DC1394_VIDEO_MODE_FORMAT7_MIN && mode <= DC1394_VIDEO_MODE_FORMAT7_MAX)
continue;
int pref = -1;
dc1394color_coding_t colorCoding;
dc1394_get_color_coding_from_video_mode(dcCam, mode, &colorCoding);
uint32_t width, height;
dc1394_get_image_size_from_video_mode(dcCam, mode, &width, &height);
if ((int)width == frameWidth || (int)height == frameHeight)
{
if (colorCoding == DC1394_COLOR_CODING_RGB8 ||
colorCoding == DC1394_COLOR_CODING_RAW8)
{
bestMode = mode;
break;
}
if (colorCoding == DC1394_COLOR_CODING_YUV411 ||
colorCoding == DC1394_COLOR_CODING_YUV422 ||
(colorCoding == DC1394_COLOR_CODING_YUV444 &&
pref < 1))
{
bestMode = mode;
pref = 1;
break;
}
if (colorCoding == DC1394_COLOR_CODING_MONO8)
{
bestMode = mode;
pref = 0;
}
}
}
if ((int)bestMode >= 0)
code = dc1394_video_set_mode(dcCam, bestMode);
}
if (fps > 0)
{
dc1394video_mode_t mode;
dc1394framerates_t framerates;
double minDiff = DBL_MAX;
dc1394framerate_t bestFps = (dc1394framerate_t)-1;
dc1394_video_get_mode(dcCam, &mode);
dc1394_video_get_supported_framerates(dcCam, mode, &framerates);
for (i = 0; i < (int)framerates.num; i++)
{
dc1394framerate_t ifps = framerates.framerates[i];
double fps1 = (1 << (ifps - DC1394_FRAMERATE_1_875)) * 1.875;
double diff = fabs(fps1 - fps);
if (diff < minDiff)
{
minDiff = diff;
bestFps = ifps;
}
}
if ((int)bestFps >= 0)
code = dc1394_video_set_framerate(dcCam, bestFps);
}
if (cameraId == VIDERE)
{
bayerFilter = DC1394_COLOR_FILTER_GBRG;
nimages = 2;
uint32_t value = 0;
dc1394_get_control_register(dcCam, 0x50c, &value);
colorStereo = (value & 0x80000000) != 0;
}
code = dc1394_capture_setup(dcCam, nDMABufs, DC1394_CAPTURE_FLAGS_DEFAULT);
if (code >= 0)
{
FD_SET(dc1394_capture_get_fileno(dcCam), &getDC1394().camFds);
dc1394_video_set_transmission(dcCam, DC1394_ON);
started = true;
}
return code >= 0;
}
bool CvCaptureCAM_DC1394_v2_CPP::open(int index)
{
bool result = false;
dc1394camera_list_t* cameraList = 0;
dc1394error_t err;
close();
if (!getDC1394().dc)
goto _exit_;
err = dc1394_camera_enumerate(getDC1394().dc, &cameraList);
if (err < 0 || !cameraList || (unsigned)index >= (unsigned)cameraList->num)
goto _exit_;
guid = cameraList->ids[index].guid;
dcCam = dc1394_camera_new(getDC1394().dc, guid);
if (!dcCam)
goto _exit_;
cameraId = dcCam->vendor_id;
//get all features
if (dc1394_feature_get_all(dcCam,&feature_set) == DC1394_SUCCESS)
result = true;
else
result = false;
_exit_:
if (cameraList)
dc1394_camera_free_list(cameraList);
return result;
}
void CvCaptureCAM_DC1394_v2_CPP::close()
{
if (dcCam)
{
// check for fileno valid before using
int fileno=dc1394_capture_get_fileno(dcCam);
if (fileno>=0 && FD_ISSET(fileno, &getDC1394().camFds))
FD_CLR(fileno, &getDC1394().camFds);
dc1394_video_set_transmission(dcCam, DC1394_OFF);
dc1394_capture_stop(dcCam);
dc1394_camera_free(dcCam);
dcCam = 0;
started = false;
}
for (int i = 0; i < NIMG; i++)
{
cvReleaseImage(&img[i]);
}
if (frameC)
{
if (frameC->image)
free(frameC->image);
free(frameC);
frameC = 0;
}
}
bool CvCaptureCAM_DC1394_v2_CPP::grabFrame()
{
dc1394capture_policy_t policy = DC1394_CAPTURE_POLICY_WAIT;
bool code = false, isColor;
dc1394video_frame_t *dcFrame = 0, *fs = 0;
int i, nch;
if (!dcCam || (!started && !startCapture()))
return false;
dc1394_capture_dequeue(dcCam, policy, &dcFrame);
if (!dcFrame)
return false;
if (/*dcFrame->frames_behind > 1 ||*/ dc1394_capture_is_frame_corrupt(dcCam, dcFrame) == DC1394_TRUE)
{
goto _exit_;
}
isColor = dcFrame->color_coding != DC1394_COLOR_CODING_MONO8 &&
dcFrame->color_coding != DC1394_COLOR_CODING_MONO16 &&
dcFrame->color_coding != DC1394_COLOR_CODING_MONO16S;
if (nimages == 2)
{
fs = (dc1394video_frame_t*)calloc(1, sizeof(*fs));
dc1394_deinterlace_stereo_frames(dcFrame, fs, DC1394_STEREO_METHOD_INTERLACED);
dc1394_capture_enqueue(dcCam, dcFrame); // release the captured frame as soon as possible
dcFrame = 0;
if (!fs->image)
goto _exit_;
isColor = colorStereo;
}
nch = isColor ? 3 : 1;
for (i = 0; i < nimages; i++)
{
IplImage fhdr;
dc1394video_frame_t f = fs ? *fs : *dcFrame, *fc = &f;
f.size[1] /= nimages;
f.image += f.size[0] * f.size[1] * i; // TODO: make it more universal
if (isColor)
{
if (!frameC)
frameC = (dc1394video_frame_t*)calloc(1, sizeof(*frameC));
frameC->color_coding = nch == 3 ? DC1394_COLOR_CODING_RGB8 : DC1394_COLOR_CODING_MONO8;
if (nimages == 1)
{
dc1394_convert_frames(&f, frameC);
dc1394_capture_enqueue(dcCam, dcFrame);
dcFrame = 0;
}
else
{
f.color_filter = bayerFilter;
dc1394_debayer_frames(&f, frameC, bayer);
}
fc = frameC;
}
if (!img[i])
img[i] = cvCreateImage(cvSize(fc->size[0], fc->size[1]), 8, nch);
cvInitImageHeader(&fhdr, cvSize(fc->size[0], fc->size[1]), 8, nch);
cvSetData(&fhdr, fc->image, fc->size[0]*nch);
// Swap R&B channels:
if (nch==3)
{
cv::Mat tmp = cv::cvarrToMat(&fhdr);
cv::cvtColor(tmp, tmp, cv::COLOR_RGB2BGR, tmp.channels());
}
cvCopy(&fhdr, img[i]);
}
code = true;
_exit_:
if (dcFrame)
dc1394_capture_enqueue(dcCam, dcFrame);
if (fs)
{
if (fs->image)
free(fs->image);
free(fs);
}
return code;
}
IplImage* CvCaptureCAM_DC1394_v2_CPP::retrieveFrame(int idx)
{
return 0 <= idx && idx < nimages ? img[idx] : 0;
}
double CvCaptureCAM_DC1394_v2_CPP::getProperty(int propId) const
{
// Cast away constness so cached feature values can be refreshed from this const getter
dc1394featureset_t& fs = const_cast<dc1394featureset_t&>(feature_set);
switch (propId)
{
case CV_CAP_PROP_FRAME_WIDTH:
return frameWidth ? frameWidth : frameHeight*4 / 3;
case CV_CAP_PROP_FRAME_HEIGHT:
return frameHeight ? frameHeight : frameWidth*3 / 4;
case CV_CAP_PROP_FPS:
return fps;
case CV_CAP_PROP_RECTIFICATION:
CV_LOG_WARNING(NULL, "cap_dc1394: rectification support has been removed from videoio module");
return 0;
case CV_CAP_PROP_WHITE_BALANCE_BLUE_U:
if (dc1394_feature_whitebalance_get_value(dcCam,
&fs.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].BU_value,
&fs.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].RV_value) == DC1394_SUCCESS)
return feature_set.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].BU_value;
break;
case CV_CAP_PROP_WHITE_BALANCE_RED_V:
if (dc1394_feature_whitebalance_get_value(dcCam,
&fs.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].BU_value,
&fs.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].RV_value) == DC1394_SUCCESS)
return feature_set.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].RV_value;
break;
case CV_CAP_PROP_GUID:
//the lowest 32 bits are enough to identify the camera
return (double) (guid & 0x00000000FFFFFFFF);
case CV_CAP_PROP_MODE:
return (double) userMode;
case CV_CAP_PROP_ISO_SPEED:
return (double) isoSpeed;
case CV_CAP_PROP_BUFFERSIZE:
return (double) nDMABufs;
default:
if (propId<CV_CAP_PROP_MAX_DC1394 && dc1394properties[propId]!=-1
&& dcCam)
//&& feature_set.feature[dc1394properties[propId]-DC1394_FEATURE_MIN].on_off_capable)
if (dc1394_feature_get_value(dcCam,(dc1394feature_t)dc1394properties[propId],
&fs.feature[dc1394properties[propId]-DC1394_FEATURE_MIN].value) == DC1394_SUCCESS)
return feature_set.feature[dc1394properties[propId]-DC1394_FEATURE_MIN].value;
}
return -1; // the value of the feature can be 0, so returning 0 as an error is wrong
}
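// Usage sketch (given an opened FireWire capture 'cap' on the public API side):
//   double guid_lo = cap.get(cv::CAP_PROP_GUID); // lower 32 bits of the camera GUID
//   double fps = cap.get(cv::CAP_PROP_FPS);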
bool CvCaptureCAM_DC1394_v2_CPP::setProperty(int propId, double value)
{
switch (propId)
{
case CV_CAP_PROP_FRAME_WIDTH:
if(started)
return false;
frameWidth = cvRound(value);
frameHeight = 0;
break;
case CV_CAP_PROP_FRAME_HEIGHT:
if(started)
return false;
frameWidth = 0;
frameHeight = cvRound(value);
break;
case CV_CAP_PROP_FPS:
if(started)
return false;
fps = value;
break;
case CV_CAP_PROP_RECTIFICATION:
CV_LOG_WARNING(NULL, "cap_dc1394: rectification support has been removed from videoio module");
return false;
case CV_CAP_PROP_MODE:
if(started)
return false;
userMode = cvRound(value);
break;
case CV_CAP_PROP_ISO_SPEED:
if(started)
return false;
isoSpeed = cvRound(value);
break;
case CV_CAP_PROP_BUFFERSIZE:
if(started)
return false;
nDMABufs = value;
break;
//The code below is based on coriander, callbacks.c:795; refer to case RANGE_MENU_MAN:
default:
if (propId<CV_CAP_PROP_MAX_DC1394 && dc1394properties[propId]!=-1
&& dcCam)
{
//get the corresponding feature from property-id
dc1394feature_info_t *act_feature = &feature_set.feature[dc1394properties[propId]-DC1394_FEATURE_MIN];
if (cvRound(value) == CV_CAP_PROP_DC1394_OFF)
{
if ( (act_feature->on_off_capable)
&& (dc1394_feature_set_power(dcCam, act_feature->id, DC1394_OFF) == DC1394_SUCCESS))
{
act_feature->is_on=DC1394_OFF;
return true;
}
return false;
}
//try to turn the feature ON; a feature can be ON yet not support switching to OFF
if ( (act_feature->is_on == DC1394_OFF) && (act_feature->on_off_capable == DC1394_TRUE))
{
if (dc1394_feature_set_power(dcCam, act_feature->id, DC1394_ON) == DC1394_SUCCESS)
feature_set.feature[dc1394properties[propId]-DC1394_FEATURE_MIN].is_on=DC1394_ON;
}
//turn off absolute mode - the actual value will then be stored in the value field;
//otherwise it would be written to the CSR (control and status register) as an absolute value
if (act_feature->absolute_capable
&& dc1394_feature_set_absolute_control(dcCam, act_feature->id, DC1394_OFF) !=DC1394_SUCCESS)
return false;
else
act_feature->abs_control=DC1394_OFF;
//set AUTO
if (cvRound(value) == CV_CAP_PROP_DC1394_MODE_AUTO)
{
if (dc1394_feature_set_mode(dcCam, act_feature->id, DC1394_FEATURE_MODE_AUTO)!=DC1394_SUCCESS)
return false;
act_feature->current_mode=DC1394_FEATURE_MODE_AUTO;
return true;
}
//set ONE PUSH
if (cvRound(value) == CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO)
{
//have to set to manual first, otherwise one push will be ignored (AVT manual 4.3.0 p. 115)
if (dc1394_feature_set_mode(dcCam, act_feature->id, DC1394_FEATURE_MODE_ONE_PUSH_AUTO)!=DC1394_SUCCESS)
return false;
//the camera performs one automatic adjustment, then the mode reverts to MANUAL
act_feature->current_mode=DC1394_FEATURE_MODE_ONE_PUSH_AUTO;
return true;
}
//set the feature to MANUAL mode,
if (dc1394_feature_set_mode(dcCam, act_feature->id, DC1394_FEATURE_MODE_MANUAL)!=DC1394_SUCCESS)
return false;
else
act_feature->current_mode=DC1394_FEATURE_MODE_MANUAL;
// if property is one of the white balance features treat it in different way
if (propId == CV_CAP_PROP_WHITE_BALANCE_BLUE_U)
{
if (dc1394_feature_whitebalance_set_value(dcCam,cvRound(value), act_feature->RV_value)!=DC1394_SUCCESS)
return false;
else
{
act_feature->BU_value = cvRound(value);
return true;
}
}
if (propId == CV_CAP_PROP_WHITE_BALANCE_RED_V)
{
if (dc1394_feature_whitebalance_set_value(dcCam, act_feature->BU_value, cvRound(value))!=DC1394_SUCCESS)
return false;
else
{
act_feature->RV_value = cvRound(value);
return true;
}
}
//first: check boundaries
if (value < act_feature->min)
{
value = act_feature->min;
}
else if (value > act_feature->max)
{
value = act_feature->max;
}
if (dc1394_feature_set_value(dcCam, act_feature->id, cvRound(value)) == DC1394_SUCCESS)
{
act_feature->value = value;
return true;
}
}
return false;
}
return true;
}
cv::Ptr<cv::IVideoCapture> cv::create_DC1394_capture(int index)
{
CvCaptureCAM_DC1394_v2_CPP* capture = new CvCaptureCAM_DC1394_v2_CPP;
if (capture->open(index))
return cv::makePtr<cv::LegacyCapture>(capture);
delete capture;
return 0;
}
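// Usage sketch: this factory is normally reached through the public API with an
// explicit backend preference, e.g.
//   cv::VideoCapture cap(0, cv::CAP_FIREWIRE);
//   cv::Mat frame;
//   if (cap.isOpened() && cap.read(frame)) { /* process frame */ }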
#endif

File diff suppressed because it is too large


@ -0,0 +1,47 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2014, Itseez, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
//M*/
#ifndef _CAP_DSHOW_HPP_
#define _CAP_DSHOW_HPP_
#ifdef HAVE_DSHOW
class videoInput;
namespace cv
{
class VideoCapture_DShow : public IVideoCapture
{
public:
VideoCapture_DShow(int index);
virtual ~VideoCapture_DShow();
virtual double getProperty(int propIdx) const CV_OVERRIDE;
virtual bool setProperty(int propIdx, double propVal) CV_OVERRIDE;
virtual bool grabFrame() CV_OVERRIDE;
virtual bool retrieveFrame(int outputType, OutputArray frame) CV_OVERRIDE;
virtual int getCaptureDomain() CV_OVERRIDE;
virtual bool isOpened() const;
protected:
void open(int index);
void close();
int m_index, m_width, m_height, m_fourcc;
int m_widthSet, m_heightSet;
bool m_convertRGBSet;
static videoInput g_VI;
};
}
#endif //HAVE_DSHOW
#endif //_CAP_DSHOW_HPP_
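// Usage sketch: this backend is selected from the public API on Windows builds
// configured with HAVE_DSHOW, e.g.
//   cv::VideoCapture cap(0, cv::CAP_DSHOW);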


@ -0,0 +1,656 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#if !defined(HAVE_FFMPEG)
#error "Build configuration error"
#endif
#include <string>
#include "cap_ffmpeg_impl.hpp"
// TODO drop legacy code
//#define icvCreateFileCapture_FFMPEG_p cvCreateFileCapture_FFMPEG
#define icvReleaseCapture_FFMPEG_p cvReleaseCapture_FFMPEG
#define icvGrabFrame_FFMPEG_p cvGrabFrame_FFMPEG
#define icvRetrieveFrame_FFMPEG_p cvRetrieveFrame_FFMPEG
#define icvSetCaptureProperty_FFMPEG_p cvSetCaptureProperty_FFMPEG
#define icvGetCaptureProperty_FFMPEG_p cvGetCaptureProperty_FFMPEG
#define icvCreateVideoWriter_FFMPEG_p cvCreateVideoWriter_FFMPEG
#define icvReleaseVideoWriter_FFMPEG_p cvReleaseVideoWriter_FFMPEG
#define icvWriteFrame_FFMPEG_p cvWriteFrame_FFMPEG
namespace cv {
namespace {
class CvCapture_FFMPEG_proxy CV_FINAL : public cv::IVideoCapture
{
public:
CvCapture_FFMPEG_proxy() { ffmpegCapture = 0; }
CvCapture_FFMPEG_proxy(const cv::String& filename, const cv::VideoCaptureParameters& params)
: ffmpegCapture(NULL)
{
open(filename, params);
}
virtual ~CvCapture_FFMPEG_proxy() { close(); }
virtual double getProperty(int propId) const CV_OVERRIDE
{
return ffmpegCapture ? icvGetCaptureProperty_FFMPEG_p(ffmpegCapture, propId) : 0;
}
virtual bool setProperty(int propId, double value) CV_OVERRIDE
{
return ffmpegCapture ? icvSetCaptureProperty_FFMPEG_p(ffmpegCapture, propId, value)!=0 : false;
}
virtual bool grabFrame() CV_OVERRIDE
{
return ffmpegCapture ? icvGrabFrame_FFMPEG_p(ffmpegCapture)!=0 : false;
}
virtual bool retrieveFrame(int, cv::OutputArray frame) CV_OVERRIDE
{
unsigned char* data = 0;
int step=0, width=0, height=0, cn=0;
if (!ffmpegCapture)
return false;
// if UMat, try GPU to GPU copy using OpenCL extensions
if (frame.isUMat()) {
if (ffmpegCapture->retrieveHWFrame(frame)) {
return true;
}
}
if (!icvRetrieveFrame_FFMPEG_p(ffmpegCapture, &data, &step, &width, &height, &cn))
return false;
cv::Mat tmp(height, width, CV_MAKETYPE(CV_8U, cn), data, step);
this->rotateFrame(tmp);
tmp.copyTo(frame);
return true;
}
bool open(const cv::String& filename, const cv::VideoCaptureParameters& params)
{
close();
ffmpegCapture = cvCreateFileCaptureWithParams_FFMPEG(filename.c_str(), params);
return ffmpegCapture != 0;
}
void close()
{
if (ffmpegCapture)
icvReleaseCapture_FFMPEG_p( &ffmpegCapture );
CV_Assert(ffmpegCapture == 0);
ffmpegCapture = 0;
}
virtual bool isOpened() const CV_OVERRIDE { return ffmpegCapture != 0; }
virtual int getCaptureDomain() CV_OVERRIDE { return CV_CAP_FFMPEG; }
protected:
CvCapture_FFMPEG* ffmpegCapture;
void rotateFrame(cv::Mat &mat) const
{
bool rotation_auto = 0 != getProperty(CAP_PROP_ORIENTATION_AUTO);
int rotation_angle = static_cast<int>(getProperty(CAP_PROP_ORIENTATION_META));
if(!rotation_auto || rotation_angle%360 == 0)
{
return;
}
cv::RotateFlags flag;
if(rotation_angle == 90 || rotation_angle == -270) { // Rotate clockwise 90 degrees
flag = cv::ROTATE_90_CLOCKWISE;
} else if(rotation_angle == 270 || rotation_angle == -90) { // Rotate clockwise 270 degrees
flag = cv::ROTATE_90_COUNTERCLOCKWISE;
} else if(rotation_angle == 180 || rotation_angle == -180) { // Rotate clockwise 180 degrees
flag = cv::ROTATE_180;
} else { // Unsupported rotation
return;
}
cv::rotate(mat, mat, flag);
}
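// Auto-rotation can be disabled from the public API; a minimal sketch (file name illustrative):
//   cv::VideoCapture cap("clip.mp4", cv::CAP_FFMPEG);
//   cap.set(cv::CAP_PROP_ORIENTATION_AUTO, 0); // keep frames as stored in the file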
};
} // namespace
cv::Ptr<cv::IVideoCapture> cvCreateFileCapture_FFMPEG_proxy(const std::string &filename, const cv::VideoCaptureParameters& params)
{
cv::Ptr<CvCapture_FFMPEG_proxy> capture = cv::makePtr<CvCapture_FFMPEG_proxy>(filename, params);
if (capture && capture->isOpened())
return capture;
return cv::Ptr<cv::IVideoCapture>();
}
namespace {
class CvVideoWriter_FFMPEG_proxy CV_FINAL :
public cv::IVideoWriter
{
public:
CvVideoWriter_FFMPEG_proxy() { ffmpegWriter = 0; }
CvVideoWriter_FFMPEG_proxy(const cv::String& filename, int fourcc, double fps, cv::Size frameSize, const VideoWriterParameters& params) { ffmpegWriter = 0; open(filename, fourcc, fps, frameSize, params); }
virtual ~CvVideoWriter_FFMPEG_proxy() { close(); }
int getCaptureDomain() const CV_OVERRIDE { return cv::CAP_FFMPEG; }
virtual void write(cv::InputArray image ) CV_OVERRIDE
{
if(!ffmpegWriter)
return;
CV_Assert(image.depth() == CV_8U);
// if UMat, try GPU to GPU copy using OpenCL extensions
if (image.isUMat()) {
if (ffmpegWriter->writeHWFrame(image)) {
return;
}
}
icvWriteFrame_FFMPEG_p(ffmpegWriter, (const uchar*)image.getMat().ptr(), (int)image.step(), image.cols(), image.rows(), image.channels(), 0);
}
virtual bool open( const cv::String& filename, int fourcc, double fps, cv::Size frameSize, const VideoWriterParameters& params )
{
close();
ffmpegWriter = cvCreateVideoWriterWithParams_FFMPEG( filename.c_str(), fourcc, fps, frameSize.width, frameSize.height, params );
return ffmpegWriter != 0;
}
virtual void close()
{
if (ffmpegWriter)
icvReleaseVideoWriter_FFMPEG_p( &ffmpegWriter );
CV_Assert(ffmpegWriter == 0);
ffmpegWriter = 0;
}
virtual double getProperty(int propId) const CV_OVERRIDE {
if(!ffmpegWriter)
return 0;
return ffmpegWriter->getProperty(propId);
}
virtual bool setProperty(int, double) CV_OVERRIDE { return false; }
virtual bool isOpened() const CV_OVERRIDE { return ffmpegWriter != 0; }
protected:
CvVideoWriter_FFMPEG* ffmpegWriter;
};
} // namespace
cv::Ptr<cv::IVideoWriter> cvCreateVideoWriter_FFMPEG_proxy(const std::string& filename, int fourcc,
double fps, const cv::Size& frameSize,
const VideoWriterParameters& params)
{
cv::Ptr<CvVideoWriter_FFMPEG_proxy> writer = cv::makePtr<CvVideoWriter_FFMPEG_proxy>(filename, fourcc, fps, frameSize, params);
if (writer && writer->isOpened())
return writer;
return cv::Ptr<cv::IVideoWriter>();
}
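// Usage sketch from the public API (file name, codec and size are illustrative):
//   cv::VideoWriter writer("out.mp4", cv::CAP_FFMPEG,
//                          cv::VideoWriter::fourcc('a','v','c','1'), 30.0,
//                          cv::Size(640, 480));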
} // namespace
//==================================================================================================
#if defined(BUILD_PLUGIN)
#define NEW_PLUGIN
#ifndef NEW_PLUGIN
#define ABI_VERSION 0
#define API_VERSION 0
#include "plugin_api.hpp"
#else
#define CAPTURE_ABI_VERSION 1
#define CAPTURE_API_VERSION 1
#include "plugin_capture_api.hpp"
#define WRITER_ABI_VERSION 1
#define WRITER_API_VERSION 1
#include "plugin_writer_api.hpp"
#endif
namespace cv {
static
CvResult CV_API_CALL cv_capture_open(const char* filename, int camera_index, CV_OUT CvPluginCapture* handle)
{
if (!handle)
return CV_ERROR_FAIL;
*handle = NULL;
if (!filename)
return CV_ERROR_FAIL;
CV_UNUSED(camera_index);
CvCapture_FFMPEG_proxy *cap = 0;
try
{
cap = new CvCapture_FFMPEG_proxy(filename, cv::VideoCaptureParameters());
if (cap->isOpened())
{
*handle = (CvPluginCapture)cap;
return CV_ERROR_OK;
}
}
catch (const std::exception& e)
{
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
}
catch (...)
{
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
}
if (cap)
delete cap;
return CV_ERROR_FAIL;
}
static
CvResult CV_API_CALL cv_capture_open_with_params(
const char* filename, int camera_index,
int* params, unsigned n_params,
CV_OUT CvPluginCapture* handle
)
{
if (!handle)
return CV_ERROR_FAIL;
*handle = NULL;
if (!filename)
return CV_ERROR_FAIL;
CV_UNUSED(camera_index);
CvCapture_FFMPEG_proxy *cap = 0;
try
{
cv::VideoCaptureParameters parameters(params, n_params);
cap = new CvCapture_FFMPEG_proxy(filename, parameters);
if (cap->isOpened())
{
*handle = (CvPluginCapture)cap;
return CV_ERROR_OK;
}
}
catch (const std::exception& e)
{
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
}
catch (...)
{
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
}
if (cap)
delete cap;
return CV_ERROR_FAIL;
}
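// Note: 'params' is an array of interleaved (property id, value) pairs and
// 'n_params' counts the pairs; see cv_writer_open() below, which passes
// { VIDEOWRITER_PROP_IS_COLOR, isColor } with n_params == 1.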
static
CvResult CV_API_CALL cv_capture_release(CvPluginCapture handle)
{
if (!handle)
return CV_ERROR_FAIL;
CvCapture_FFMPEG_proxy* instance = (CvCapture_FFMPEG_proxy*)handle;
delete instance;
return CV_ERROR_OK;
}
static
CvResult CV_API_CALL cv_capture_get_prop(CvPluginCapture handle, int prop, CV_OUT double* val)
{
if (!handle)
return CV_ERROR_FAIL;
if (!val)
return CV_ERROR_FAIL;
try
{
CvCapture_FFMPEG_proxy* instance = (CvCapture_FFMPEG_proxy*)handle;
*val = instance->getProperty(prop);
return CV_ERROR_OK;
}
catch (const std::exception& e)
{
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
return CV_ERROR_FAIL;
}
catch (...)
{
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
return CV_ERROR_FAIL;
}
}
static
CvResult CV_API_CALL cv_capture_set_prop(CvPluginCapture handle, int prop, double val)
{
if (!handle)
return CV_ERROR_FAIL;
try
{
CvCapture_FFMPEG_proxy* instance = (CvCapture_FFMPEG_proxy*)handle;
return instance->setProperty(prop, val) ? CV_ERROR_OK : CV_ERROR_FAIL;
}
catch (const std::exception& e)
{
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
return CV_ERROR_FAIL;
}
catch (...)
{
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
return CV_ERROR_FAIL;
}
}
static
CvResult CV_API_CALL cv_capture_grab(CvPluginCapture handle)
{
if (!handle)
return CV_ERROR_FAIL;
try
{
CvCapture_FFMPEG_proxy* instance = (CvCapture_FFMPEG_proxy*)handle;
return instance->grabFrame() ? CV_ERROR_OK : CV_ERROR_FAIL;
}
catch (const std::exception& e)
{
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
return CV_ERROR_FAIL;
}
catch (...)
{
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
return CV_ERROR_FAIL;
}
}
#ifndef NEW_PLUGIN
static
CvResult CV_API_CALL cv_capture_retrieve(CvPluginCapture handle, int stream_idx, cv_videoio_retrieve_cb_t callback, void* userdata)
{
if (!handle)
return CV_ERROR_FAIL;
try
{
CvCapture_FFMPEG_proxy* instance = (CvCapture_FFMPEG_proxy*)handle;
Mat img;
// TODO: avoid unnecessary copying
if (instance->retrieveFrame(stream_idx, img))
return callback(stream_idx, img.data, (int)img.step, img.cols, img.rows, img.channels(), userdata);
return CV_ERROR_FAIL;
}
catch (const std::exception& e)
{
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
return CV_ERROR_FAIL;
}
catch (...)
{
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
return CV_ERROR_FAIL;
}
}
#else
static
CvResult CV_API_CALL cv_capture_retrieve(CvPluginCapture handle, int stream_idx, cv_videoio_capture_retrieve_cb_t callback, void* userdata)
{
if (!handle)
return CV_ERROR_FAIL;
try
{
CvCapture_FFMPEG_proxy* instance = (CvCapture_FFMPEG_proxy*)handle;
Mat img;
// TODO: avoid unnecessary copying
if (instance->retrieveFrame(stream_idx, img))
return callback(stream_idx, img.data, (int)img.step, img.cols, img.rows, img.type(), userdata);
return CV_ERROR_FAIL;
}
catch (const std::exception& e)
{
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
return CV_ERROR_FAIL;
}
catch (...)
{
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
return CV_ERROR_FAIL;
}
}
#endif
static
CvResult CV_API_CALL cv_writer_open_with_params(
const char* filename, int fourcc, double fps, int width, int height,
int* params, unsigned n_params,
CV_OUT CvPluginWriter* handle)
{
Size sz(width, height);
CvVideoWriter_FFMPEG_proxy* wrt = 0;
try
{
VideoWriterParameters parameters(params, n_params);
wrt = new CvVideoWriter_FFMPEG_proxy(filename, fourcc, fps, sz, parameters);
if(wrt && wrt->isOpened())
{
*handle = (CvPluginWriter)wrt;
return CV_ERROR_OK;
}
}
catch (const std::exception& e)
{
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
}
catch (...)
{
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
}
if (wrt)
delete wrt;
return CV_ERROR_FAIL;
}
static
CvResult CV_API_CALL cv_writer_open(const char* filename, int fourcc, double fps, int width, int height, int isColor,
CV_OUT CvPluginWriter* handle)
{
int params[2] = { VIDEOWRITER_PROP_IS_COLOR, isColor };
return cv_writer_open_with_params(filename, fourcc, fps, width, height, params, 1, handle);
}
static
CvResult CV_API_CALL cv_writer_release(CvPluginWriter handle)
{
if (!handle)
return CV_ERROR_FAIL;
CvVideoWriter_FFMPEG_proxy* instance = (CvVideoWriter_FFMPEG_proxy*)handle;
delete instance;
return CV_ERROR_OK;
}
static
CvResult CV_API_CALL cv_writer_get_prop(CvPluginWriter handle, int prop, CV_OUT double* val)
{
if (!handle)
return CV_ERROR_FAIL;
if (!val)
return CV_ERROR_FAIL;
try
{
CvVideoWriter_FFMPEG_proxy* instance = (CvVideoWriter_FFMPEG_proxy*)handle;
*val = instance->getProperty(prop);
return CV_ERROR_OK;
}
catch (...)
{
return CV_ERROR_FAIL;
}
}
static
CvResult CV_API_CALL cv_writer_set_prop(CvPluginWriter /*handle*/, int /*prop*/, double /*val*/)
{
return CV_ERROR_FAIL;
}
static
CvResult CV_API_CALL cv_writer_write(CvPluginWriter handle, const unsigned char *data, int step, int width, int height, int cn)
{
if (!handle)
return CV_ERROR_FAIL;
try
{
CvVideoWriter_FFMPEG_proxy* instance = (CvVideoWriter_FFMPEG_proxy*)handle;
Mat img(Size(width, height), CV_MAKETYPE(CV_8U, cn), const_cast<uchar*>(data), step);
instance->write(img);
return CV_ERROR_OK;
}
catch (const std::exception& e)
{
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
return CV_ERROR_FAIL;
}
catch (...)
{
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
return CV_ERROR_FAIL;
}
}
} // namespace
#ifndef NEW_PLUGIN
static const OpenCV_VideoIO_Plugin_API_preview plugin_api =
{
{
sizeof(OpenCV_VideoIO_Plugin_API_preview), ABI_VERSION, API_VERSION,
CV_VERSION_MAJOR, CV_VERSION_MINOR, CV_VERSION_REVISION, CV_VERSION_STATUS,
"FFmpeg OpenCV Video I/O plugin"
},
{
/* 1*/CAP_FFMPEG,
/* 2*/cv_capture_open,
/* 3*/cv_capture_release,
/* 4*/cv_capture_get_prop,
/* 5*/cv_capture_set_prop,
/* 6*/cv_capture_grab,
/* 7*/cv_capture_retrieve,
/* 8*/cv_writer_open,
/* 9*/cv_writer_release,
/* 10*/cv_writer_get_prop,
/* 11*/cv_writer_set_prop,
/* 12*/cv_writer_write
}
};
const OpenCV_VideoIO_Plugin_API_preview* opencv_videoio_plugin_init_v0(int requested_abi_version, int requested_api_version, void* /*reserved=NULL*/) CV_NOEXCEPT
{
if (requested_abi_version == ABI_VERSION && requested_api_version <= API_VERSION)
return &plugin_api;
return NULL;
}
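// The videoio plugin loader resolves this exported symbol at runtime; the plugin
// is accepted only if the ABI matches exactly and the requested API version does
// not exceed the one this plugin was built with, as checked above.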
#else // NEW_PLUGIN
static const OpenCV_VideoIO_Capture_Plugin_API capture_plugin_api =
{
{
sizeof(OpenCV_VideoIO_Capture_Plugin_API), CAPTURE_ABI_VERSION, CAPTURE_API_VERSION,
CV_VERSION_MAJOR, CV_VERSION_MINOR, CV_VERSION_REVISION, CV_VERSION_STATUS,
"FFmpeg OpenCV Video I/O Capture plugin"
},
{
/* 1*/CAP_FFMPEG,
/* 2*/cv_capture_open,
/* 3*/cv_capture_release,
/* 4*/cv_capture_get_prop,
/* 5*/cv_capture_set_prop,
/* 6*/cv_capture_grab,
/* 7*/cv_capture_retrieve,
},
{
/* 8*/cv_capture_open_with_params,
}
};
const OpenCV_VideoIO_Capture_Plugin_API* opencv_videoio_capture_plugin_init_v1(int requested_abi_version, int requested_api_version, void* /*reserved=NULL*/) CV_NOEXCEPT
{
if (requested_abi_version == CAPTURE_ABI_VERSION && requested_api_version <= CAPTURE_API_VERSION)
return &capture_plugin_api;
return NULL;
}
static const OpenCV_VideoIO_Writer_Plugin_API writer_plugin_api =
{
{
sizeof(OpenCV_VideoIO_Writer_Plugin_API), WRITER_ABI_VERSION, WRITER_API_VERSION,
CV_VERSION_MAJOR, CV_VERSION_MINOR, CV_VERSION_REVISION, CV_VERSION_STATUS,
"FFmpeg OpenCV Video I/O Writer plugin"
},
{
/* 1*/CAP_FFMPEG,
/* 2*/cv_writer_open,
/* 3*/cv_writer_release,
/* 4*/cv_writer_get_prop,
/* 5*/cv_writer_set_prop,
/* 6*/cv_writer_write
},
{
/* 7*/cv_writer_open_with_params
}
};
const OpenCV_VideoIO_Writer_Plugin_API* opencv_videoio_writer_plugin_init_v1(int requested_abi_version, int requested_api_version, void* /*reserved=NULL*/) CV_NOEXCEPT
{
if (requested_abi_version == WRITER_ABI_VERSION && requested_api_version <= WRITER_API_VERSION)
return &writer_plugin_api;
return NULL;
}
#endif // NEW_PLUGIN
#endif // BUILD_PLUGIN


@ -0,0 +1,993 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020-2021 Intel Corporation
#include "opencv2/videoio.hpp"
#ifdef HAVE_OPENCL
#include "opencv2/core/ocl.hpp"
#endif
#if defined(__OPENCV_BUILD) && !defined(BUILD_PLUGIN) // TODO Properly detect and add D3D11 / LIBVA dependencies for standalone plugins
#include "cvconfig.h"
#endif
#include <sstream>
#ifdef HAVE_D3D11
#define D3D11_NO_HELPERS
#include <d3d11.h>
#include <codecvt>
#include "opencv2/core/directx.hpp"
#ifdef HAVE_OPENCL
#include <CL/cl_d3d11.h>
#endif
#endif // HAVE_D3D11
#ifdef HAVE_VA
#include <va/va_backend.h>
#ifdef HAVE_VA_INTEL
#include "opencv2/core/va_intel.hpp"
#ifndef CL_TARGET_OPENCL_VERSION
#define CL_TARGET_OPENCL_VERSION 120
#endif
#ifdef HAVE_VA_INTEL_OLD_HEADER
#include <CL/va_ext.h>
#else
#include <CL/cl_va_api_media_sharing_intel.h>
#endif
#endif
#endif // HAVE_VA
// FFMPEG "C" headers
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/hwcontext.h>
#ifdef HAVE_D3D11
#include <libavutil/hwcontext_d3d11va.h>
#endif
#ifdef HAVE_VA
#include <libavutil/hwcontext_vaapi.h>
#endif
#ifdef HAVE_MFX // dependency only on MFX header files, no linkage dependency
#include <libavutil/hwcontext_qsv.h>
#endif
}
#define HW_DEFAULT_POOL_SIZE 32
#define HW_DEFAULT_SW_FORMAT AV_PIX_FMT_NV12
using namespace cv;
static AVCodec *hw_find_codec(AVCodecID id, AVHWDeviceType hw_type, int (*check_category)(const AVCodec *),
const char *disabled_codecs, AVPixelFormat *hw_pix_fmt);
static AVBufferRef* hw_create_device(AVHWDeviceType hw_type, int hw_device, const std::string& device_subname, bool use_opencl);
static AVBufferRef* hw_create_frames(struct AVCodecContext* ctx, AVBufferRef *hw_device_ctx, int width, int height, AVPixelFormat hw_format);
static AVPixelFormat hw_get_format_callback(struct AVCodecContext *ctx, const enum AVPixelFormat * fmt);
static VideoAccelerationType hw_type_to_va_type(AVHWDeviceType hw_type);
static
const char* getVideoAccelerationName(VideoAccelerationType va_type)
{
switch (va_type)
{
case VIDEO_ACCELERATION_NONE: return "none";
case VIDEO_ACCELERATION_ANY: return "any";
case VIDEO_ACCELERATION_D3D11: return "d3d11";
case VIDEO_ACCELERATION_VAAPI: return "vaapi";
case VIDEO_ACCELERATION_MFX: return "mfx";
}
return "unknown";
}
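// Acceleration is requested from the public API; a minimal sketch (file name illustrative):
//   cv::VideoCapture cap("clip.mp4", cv::CAP_FFMPEG,
//       { cv::CAP_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_ANY });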
static
std::string getDecoderConfiguration(VideoAccelerationType va_type, AVDictionary *dict)
{
std::string va_name = getVideoAccelerationName(va_type);
std::string key_name = std::string("hw_decoders_") + va_name;
const char *hw_acceleration = NULL;
if (dict)
{
AVDictionaryEntry* entry = av_dict_get(dict, key_name.c_str(), NULL, 0);
if (entry)
hw_acceleration = entry->value;
}
if (hw_acceleration)
return hw_acceleration;
// some default values (FFMPEG_DECODE_ACCELERATION_TYPES)
#ifdef _WIN32
switch (va_type)
{
case VIDEO_ACCELERATION_NONE: return "";
case VIDEO_ACCELERATION_ANY: return "d3d11va";
case VIDEO_ACCELERATION_D3D11: return "d3d11va";
case VIDEO_ACCELERATION_VAAPI: return "";
case VIDEO_ACCELERATION_MFX: return ""; // "qsv" fails if non-Intel D3D11 device
}
return "";
#else
switch (va_type)
{
case VIDEO_ACCELERATION_NONE: return "";
case VIDEO_ACCELERATION_ANY: return "vaapi.iHD";
case VIDEO_ACCELERATION_D3D11: return "";
case VIDEO_ACCELERATION_VAAPI: return "vaapi.iHD";
case VIDEO_ACCELERATION_MFX: return "qsv.iHD";
}
return "";
#endif
}
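// Sketch of overriding the defaults above, assuming the AVDictionary is filled
// from the OPENCV_FFMPEG_CAPTURE_OPTIONS environment variable ("key;value"
// entries separated by '|'):
//   export OPENCV_FFMPEG_CAPTURE_OPTIONS="hw_decoders_any;vaapi,qsv"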
static
std::string getEncoderConfiguration(VideoAccelerationType va_type, AVDictionary *dict)
{
std::string va_name = getVideoAccelerationName(va_type);
std::string key_name = std::string("hw_encoders_") + va_name;
const char *hw_acceleration = NULL;
if (dict)
{
AVDictionaryEntry* entry = av_dict_get(dict, key_name.c_str(), NULL, 0);
if (entry)
hw_acceleration = entry->value;
}
if (hw_acceleration)
return hw_acceleration;
// some default values (FFMPEG_ENCODE_ACCELERATION_TYPES)
#ifdef _WIN32
switch (va_type)
{
case VIDEO_ACCELERATION_NONE: return "";
case VIDEO_ACCELERATION_ANY: return "qsv";
case VIDEO_ACCELERATION_D3D11: return "";
case VIDEO_ACCELERATION_VAAPI: return "";
case VIDEO_ACCELERATION_MFX: return "qsv";
}
return "";
#else
switch (va_type)
{
case VIDEO_ACCELERATION_NONE: return "";
case VIDEO_ACCELERATION_ANY: return "qsv.iHD,vaapi.iHD";
case VIDEO_ACCELERATION_D3D11: return "";
case VIDEO_ACCELERATION_VAAPI: return "vaapi.iHD";
case VIDEO_ACCELERATION_MFX: return "qsv.iHD";
}
return "unknown";
#endif
}
static
std::string getDecoderDisabledCodecs(AVDictionary *dict)
{
std::string key_name = std::string("hw_disable_decoders");
const char *disabled_codecs = NULL;
if (dict)
{
AVDictionaryEntry* entry = av_dict_get(dict, key_name.c_str(), NULL, 0);
if (entry)
disabled_codecs = entry->value;
}
if (disabled_codecs)
return disabled_codecs;
// some default values (FFMPEG_DECODE_DISABLE_CODECS)
#ifdef _WIN32
return "none";
#else
return "av1.vaapi,av1_qsv,vp8.vaapi,vp8_qsv"; // "vp9_qsv"
#endif
}
static
std::string getEncoderDisabledCodecs(AVDictionary *dict)
{
std::string key_name = std::string("hw_disabled_encoders");
const char *disabled_codecs = NULL;
if (dict)
{
AVDictionaryEntry* entry = av_dict_get(dict, key_name.c_str(), NULL, 0);
if (entry)
disabled_codecs = entry->value;
}
if (disabled_codecs)
return disabled_codecs;
// some default values (FFMPEG_ENCODE_DISABLE_CODECS)
#ifdef _WIN32
return "mjpeg_qsv";
#else
return "mjpeg_vaapi,mjpeg_qsv,vp8_vaapi";
#endif
}
static
bool hw_check_device(AVBufferRef* ctx, AVHWDeviceType hw_type, const std::string& device_subname) {
if (!ctx)
return false;
AVHWDeviceContext* hw_device_ctx = (AVHWDeviceContext*)ctx->data;
if (!hw_device_ctx->hwctx)
return false;
const char *hw_name = av_hwdevice_get_type_name(hw_type);
if (hw_type == AV_HWDEVICE_TYPE_QSV)
hw_name = "MFX";
bool ret = true;
std::string device_name;
#if defined(HAVE_D3D11)
if (hw_device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
ID3D11Device* device = ((AVD3D11VADeviceContext*)hw_device_ctx->hwctx)->device;
IDXGIDevice* dxgiDevice = nullptr;
if (device && SUCCEEDED(device->QueryInterface(__uuidof(IDXGIDevice), reinterpret_cast<void**>(&dxgiDevice)))) {
IDXGIAdapter* adapter = nullptr;
if (SUCCEEDED(dxgiDevice->GetAdapter(&adapter))) {
DXGI_ADAPTER_DESC desc;
if (SUCCEEDED(adapter->GetDesc(&desc))) {
std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> conv;
device_name = conv.to_bytes(desc.Description);
}
adapter->Release();
}
dxgiDevice->Release();
}
}
#endif
if (hw_device_ctx->type == AV_HWDEVICE_TYPE_VAAPI) {
#if defined(HAVE_VA) && (VA_MAJOR_VERSION >= 1)
VADisplay display = ((AVVAAPIDeviceContext *) hw_device_ctx->hwctx)->display;
if (display) {
VADriverContext *va_ctx = ((VADisplayContext *) display)->pDriverContext;
device_name = va_ctx->str_vendor;
if (hw_type == AV_HWDEVICE_TYPE_QSV) {
// Workaround for issue fixed in MediaSDK 21.x https://github.com/Intel-Media-SDK/MediaSDK/issues/2595
// Checks VAAPI driver for support of VideoProc operation required by MediaSDK
ret = false;
int n_entrypoints = va_ctx->max_entrypoints;
std::vector<VAEntrypoint> entrypoints(n_entrypoints);
if (va_ctx->vtable->vaQueryConfigEntrypoints(va_ctx, VAProfileNone, entrypoints.data(), &n_entrypoints) == VA_STATUS_SUCCESS) {
for (int i = 0; i < n_entrypoints; i++) {
if (entrypoints[i] == VAEntrypointVideoProc) {
ret = true;
break;
}
}
}
if (!ret)
CV_LOG_INFO(NULL, "FFMPEG: Skipping MFX video acceleration as entrypoint VideoProc not found in: " << device_name);
}
}
#else
ret = (hw_type != AV_HWDEVICE_TYPE_QSV); // disable MFX if we can't check VAAPI for VideoProc entrypoint
#endif
}
if (ret && !device_subname.empty() && device_name.find(device_subname) == std::string::npos)
{
CV_LOG_INFO(NULL, "FFMPEG: Skipping '" << hw_name <<
"' video acceleration on the following device name as not matching substring '" << device_subname << "': " << device_name);
ret = false; // reject configuration
}
if (ret)
{
if (!device_name.empty()) {
CV_LOG_INFO(NULL, "FFMPEG: Using " << hw_name << " video acceleration on device: " << device_name);
} else {
CV_LOG_INFO(NULL, "FFMPEG: Using " << hw_name << " video acceleration");
}
}
return ret;
}
static
AVBufferRef* hw_create_derived_context(AVHWDeviceType hw_type, AVBufferRef* hw_device_ctx) {
AVBufferRef* derived_ctx = NULL;
const char* hw_name = av_hwdevice_get_type_name(hw_type);
int err = av_hwdevice_ctx_create_derived(&derived_ctx, hw_type, hw_device_ctx, 0);
if (!derived_ctx || err < 0)
{
if (derived_ctx)
av_buffer_unref(&derived_ctx);
CV_LOG_INFO(NULL, "FFMPEG: Failed to create derived video acceleration (av_hwdevice_ctx_create_derived) for " << hw_name << ". Error=" << err);
return NULL;
}
else
{
// Store child context in 'user_opaque' field of parent context.
struct FreeChildContext {
static void free(struct AVHWDeviceContext* ctx) {
AVBufferRef* child_ctx = (AVBufferRef*)ctx->user_opaque;
if (child_ctx)
av_buffer_unref(&child_ctx);
}
};
AVHWDeviceContext* ctx = (AVHWDeviceContext*)derived_ctx->data;
ctx->user_opaque = av_buffer_ref(hw_device_ctx);
ctx->free = FreeChildContext::free;
CV_LOG_INFO(NULL, "FFMPEG: Created derived video acceleration context (av_hwdevice_ctx_create_derived) for " << hw_name);
return derived_ctx;
}
}
#ifdef HAVE_OPENCL // GPU buffer interop with cv::UMat
// FFmpeg context attached to OpenCL context
class OpenCL_FFMPEG_Context : public ocl::Context::UserContext {
public:
OpenCL_FFMPEG_Context(AVBufferRef* ctx) {
ctx_ = av_buffer_ref(ctx);
}
virtual ~OpenCL_FFMPEG_Context() {
av_buffer_unref(&ctx_);
}
AVBufferRef* GetAVHWDevice() {
return ctx_;
}
private:
AVBufferRef* ctx_;
};
#ifdef HAVE_MFX
static
int hw_find_qsv_surface_index(AVFrame* hw_frame)
{
if (AV_PIX_FMT_QSV != hw_frame->format)
return -1;
mfxFrameSurface1* surface = (mfxFrameSurface1*)hw_frame->data[3]; // As defined by AV_PIX_FMT_QSV
AVHWFramesContext* frames_ctx = (AVHWFramesContext*)hw_frame->hw_frames_ctx->data;
AVQSVFramesContext* qsv_ctx = (AVQSVFramesContext*)frames_ctx->hwctx;
for (int i = 0; i < qsv_ctx->nb_surfaces; i++) {
if (surface == qsv_ctx->surfaces + i) {
return i;
}
}
return -1;
}
#endif
#ifdef HAVE_VA
static
VADisplay hw_get_va_display(AVHWDeviceContext* hw_device_ctx)
{
if (hw_device_ctx->type == AV_HWDEVICE_TYPE_QSV) { // we stored pointer to child context in 'user_opaque' field
AVBufferRef* ctx = (AVBufferRef*)hw_device_ctx->user_opaque;
hw_device_ctx = (AVHWDeviceContext*)ctx->data;
}
if (hw_device_ctx && hw_device_ctx->type == AV_HWDEVICE_TYPE_VAAPI) {
return ((AVVAAPIDeviceContext*)hw_device_ctx->hwctx)->display;
}
return NULL;
}
#endif // HAVE_VA
#ifdef HAVE_VA_INTEL
static
VASurfaceID hw_get_va_surface(AVFrame* hw_frame) {
if (AV_PIX_FMT_VAAPI == hw_frame->format) {
return (VASurfaceID)(size_t)hw_frame->data[3]; // As defined by AV_PIX_FMT_VAAPI
}
#ifdef HAVE_MFX
else if (AV_PIX_FMT_QSV == hw_frame->format) {
int frame_idx = hw_find_qsv_surface_index(hw_frame);
if (frame_idx >= 0) { // frame index is same in parent (QSV) and child (VAAPI) frame context
AVHWFramesContext *frames_ctx = (AVHWFramesContext *) hw_frame->hw_frames_ctx->data;
AVHWFramesContext *child_ctx = (AVHWFramesContext *) frames_ctx->user_opaque;
if (child_ctx && AV_HWDEVICE_TYPE_VAAPI == child_ctx->device_ctx->type) {
AVVAAPIFramesContext *vaapi_ctx = (AVVAAPIFramesContext *) child_ctx->hwctx;
CV_Assert(frame_idx < vaapi_ctx->nb_surfaces);
return vaapi_ctx->surface_ids[frame_idx];
}
}
}
#endif // HAVE_MFX
return VA_INVALID_SURFACE;
}
#endif // HAVE_VA_INTEL
#ifdef HAVE_D3D11
static
AVD3D11VADeviceContext* hw_get_d3d11_device_ctx(AVHWDeviceContext* hw_device_ctx) {
if (AV_HWDEVICE_TYPE_QSV == hw_device_ctx->type) { // we stored pointer to child context in 'user_opaque' field
AVBufferRef* ctx = (AVBufferRef*)hw_device_ctx->user_opaque;
hw_device_ctx = (AVHWDeviceContext*)ctx->data;
}
if (AV_HWDEVICE_TYPE_D3D11VA == hw_device_ctx->type) {
return (AVD3D11VADeviceContext*)hw_device_ctx->hwctx;
}
return NULL;
}
ID3D11Texture2D* hw_get_d3d11_texture(AVFrame* hw_frame, int* subresource) {
ID3D11Texture2D* texture = NULL;
if (AV_PIX_FMT_D3D11 == hw_frame->format) {
texture = (ID3D11Texture2D*)hw_frame->data[0]; // As defined by AV_PIX_FMT_D3D11
*subresource = (intptr_t)hw_frame->data[1]; // As defined by AV_PIX_FMT_D3D11
}
#ifdef HAVE_MFX
else if (AV_PIX_FMT_QSV == hw_frame->format) {
AVHWFramesContext *frames_ctx = (AVHWFramesContext *) hw_frame->hw_frames_ctx->data;
AVHWFramesContext *child_ctx = (AVHWFramesContext *) frames_ctx->user_opaque;
if (child_ctx && AV_HWDEVICE_TYPE_D3D11VA == child_ctx->device_ctx->type) {
texture = ((AVD3D11VAFramesContext*)child_ctx->hwctx)->texture;
}
*subresource = hw_find_qsv_surface_index(hw_frame);
CV_Assert(*subresource >= 0);
}
#endif
return texture;
}
// In D3D11 case we allocate additional texture as single texture (not texture array) because
// OpenCL interop with D3D11 doesn't support/work with NV12 sub-texture of texture array.
ID3D11Texture2D* hw_get_d3d11_single_texture(AVFrame* hw_frame, AVD3D11VADeviceContext* d3d11_device_ctx, ID3D11Texture2D* texture) {
AVHWFramesContext* frames_ctx = (AVHWFramesContext*)hw_frame->hw_frames_ctx->data;
if (AV_HWDEVICE_TYPE_QSV == frames_ctx->device_ctx->type) {
frames_ctx = (AVHWFramesContext*)frames_ctx->user_opaque; // we stored pointer to child context in 'user_opaque' field
}
if (!frames_ctx || AV_HWDEVICE_TYPE_D3D11VA != frames_ctx->device_ctx->type) {
return NULL;
}
ID3D11Texture2D* singleTexture = (ID3D11Texture2D*)frames_ctx->user_opaque;
if (!singleTexture && d3d11_device_ctx && texture) {
D3D11_TEXTURE2D_DESC desc = {};
texture->GetDesc(&desc);
desc.ArraySize = 1;
desc.BindFlags |= D3D11_BIND_SHADER_RESOURCE;
desc.MiscFlags |= D3D11_RESOURCE_MISC_SHARED;
if (SUCCEEDED(d3d11_device_ctx->device->CreateTexture2D(&desc, NULL, &singleTexture))) {
frames_ctx->user_opaque = singleTexture;
}
}
return singleTexture;
}
#endif // HAVE_D3D11
static
AVHWDeviceType hw_check_opencl_context(AVHWDeviceContext* ctx) {
ocl::OpenCLExecutionContext& ocl_context = ocl::OpenCLExecutionContext::getCurrentRef();
if (!ctx || ocl_context.empty())
return AV_HWDEVICE_TYPE_NONE;
#ifdef HAVE_VA_INTEL
VADisplay vadisplay_ocl = ocl_context.getContext().getOpenCLContextProperty(CL_CONTEXT_VA_API_DISPLAY_INTEL);
VADisplay vadisplay_ctx = hw_get_va_display(ctx);
if (vadisplay_ocl && vadisplay_ocl == vadisplay_ctx)
return AV_HWDEVICE_TYPE_VAAPI;
#endif
#ifdef HAVE_D3D11
ID3D11Device* d3d11device_ocl = (ID3D11Device*)ocl_context.getContext().getOpenCLContextProperty(CL_CONTEXT_D3D11_DEVICE_KHR);
AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(ctx);
if (d3d11_device_ctx && d3d11device_ocl && d3d11_device_ctx->device == d3d11device_ocl)
return AV_HWDEVICE_TYPE_D3D11VA;
#endif
return AV_HWDEVICE_TYPE_NONE;
}
static
void hw_init_opencl(AVBufferRef* ctx) {
if (!ctx)
return;
AVHWDeviceContext* hw_device_ctx = (AVHWDeviceContext*)ctx->data;
if (!hw_device_ctx)
return;
#ifdef HAVE_VA_INTEL
VADisplay va_display = hw_get_va_display(hw_device_ctx);
if (va_display) {
va_intel::ocl::initializeContextFromVA(va_display);
}
#endif
#ifdef HAVE_D3D11
AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(hw_device_ctx);
if (d3d11_device_ctx) {
directx::ocl::initializeContextFromD3D11Device(d3d11_device_ctx->device);
}
#endif
if (hw_check_opencl_context(hw_device_ctx) != AV_HWDEVICE_TYPE_NONE) {
// Attach AVHWDeviceContext to OpenCL context
ocl::Context &ocl_context = ocl::OpenCLExecutionContext::getCurrent().getContext();
ocl_context.setUserContext(std::make_shared<OpenCL_FFMPEG_Context>(ctx));
}
}
static
AVBufferRef* hw_create_context_from_opencl(ocl::OpenCLExecutionContext& ocl_context, AVHWDeviceType hw_type) {
if (ocl_context.empty())
return NULL;
auto ocl_ffmpeg_context = ocl_context.getContext().getUserContext<OpenCL_FFMPEG_Context>();
if (!ocl_ffmpeg_context)
return NULL;
AVBufferRef* ctx = ocl_ffmpeg_context->GetAVHWDevice();
if (hw_type != ((AVHWDeviceContext*)ctx->data)->type) {
ctx = hw_create_derived_context(hw_type, ctx);
}
else {
ctx = av_buffer_ref(ctx);
}
if (ctx)
CV_LOG_INFO(NULL, "FFMPEG: Using " << av_hwdevice_get_type_name(hw_type) << " video acceleration context attached to OpenCL context");
return ctx;
}
#endif // HAVE_OPENCL
static
AVBufferRef* hw_create_device(AVHWDeviceType hw_type, int hw_device, const std::string& device_subname, bool use_opencl) {
AVBufferRef* hw_device_ctx = NULL;
if (AV_HWDEVICE_TYPE_NONE == hw_type)
return NULL;
#ifdef HAVE_OPENCL
// Check if OpenCL context has AVHWDeviceContext attached to it
ocl::OpenCLExecutionContext& ocl_context = ocl::OpenCLExecutionContext::getCurrentRef();
try {
hw_device_ctx = hw_create_context_from_opencl(ocl_context, hw_type);
if (hw_device_ctx) {
if (hw_device >= 0)
CV_LOG_ERROR(NULL, "VIDEOIO/FFMPEG: ignoring property HW_DEVICE as device context already created and attached to OpenCL context");
return hw_device_ctx;
}
}
catch (...) {
CV_LOG_INFO(NULL, "FFMPEG: Exception creating Video Acceleration context using current OpenCL context");
}
#endif
// Create new media context. In QSV case, first create 'child' context.
std::vector<AVHWDeviceType> child_types = { hw_type };
if (hw_type == AV_HWDEVICE_TYPE_QSV) {
#ifdef _WIN32
child_types = { AV_HWDEVICE_TYPE_D3D11VA, AV_HWDEVICE_TYPE_DXVA2 };
#else
child_types = { AV_HWDEVICE_TYPE_VAAPI };
#endif
}
for (AVHWDeviceType child_type : child_types) {
char device[128] = "";
char* pdevice = NULL;
if (hw_device >= 0 && hw_device < 100000) {
if (child_type == AV_HWDEVICE_TYPE_VAAPI) {
snprintf(device, sizeof(device), "/dev/dri/renderD%d", 128 + hw_device);
}
else {
snprintf(device, sizeof(device), "%d", hw_device);
}
pdevice = device;
}
const char* hw_child_name = av_hwdevice_get_type_name(child_type);
const char* device_name = pdevice ? pdevice : "'default'";
int err = av_hwdevice_ctx_create(&hw_device_ctx, child_type, pdevice, NULL, 0);
if (hw_device_ctx && err >= 0)
{
if (!hw_check_device(hw_device_ctx, hw_type, device_subname)) {
av_buffer_unref(&hw_device_ctx);
continue;
}
CV_LOG_INFO(NULL, "FFMPEG: Created video acceleration context (av_hwdevice_ctx_create) for " << hw_child_name << " on device " << device_name);
#ifdef HAVE_OPENCL
// if OpenCL context not created yet or property HW_ACCELERATION_USE_OPENCL set, create OpenCL context with binding to video acceleration context
if (ocl::haveOpenCL()) {
if (ocl_context.empty() || use_opencl) {
try {
hw_init_opencl(hw_device_ctx);
ocl_context = ocl::OpenCLExecutionContext::getCurrentRef();
if (!ocl_context.empty()) {
CV_LOG_INFO(NULL, "FFMPEG: Created OpenCL context with " << hw_child_name <<
" video acceleration on OpenCL device: " << ocl_context.getDevice().name());
}
} catch (...) {
CV_LOG_INFO(NULL, "FFMPEG: Exception creating OpenCL context with " << hw_child_name << " video acceleration");
}
}
else {
CV_LOG_INFO(NULL, "FFMPEG: Can't bind " << hw_child_name << " video acceleration context to already created OpenCL context");
}
}
#else
CV_UNUSED(use_opencl);
#endif
if (hw_type != child_type) {
AVBufferRef* derived_ctx = hw_create_derived_context(hw_type, hw_device_ctx);
av_buffer_unref(&hw_device_ctx);
return derived_ctx;
} else {
return hw_device_ctx;
}
}
else
{
const char* hw_name = hw_child_name;
CV_LOG_INFO(NULL, "FFMPEG: Failed to create " << hw_name << " video acceleration (av_hwdevice_ctx_create) on device " << device_name);
}
}
return NULL;
}
static
AVBufferRef* hw_create_frames(struct AVCodecContext* codec_ctx, AVBufferRef *hw_device_ctx, int width, int height, AVPixelFormat hw_format)
{
AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)hw_device_ctx->data;
AVBufferRef* child_ctx = hw_device_ctx;
// In QSV case we first allocate child D3D11/VAAPI frames (except DXVA2 as no OpenCL interop), then derive to parent QSV frames
if (AV_HWDEVICE_TYPE_QSV == device_ctx->type) {
AVBufferRef *ctx = (AVBufferRef *) device_ctx->user_opaque; // child context stored during creation of derived context
if (ctx && AV_HWDEVICE_TYPE_DXVA2 != ((AVHWDeviceContext *) ctx->data)->type) {
child_ctx = ctx;
}
}
AVBufferRef *hw_frames_ref = nullptr;
if (codec_ctx)
{
int res = avcodec_get_hw_frames_parameters(codec_ctx, child_ctx, hw_format, &hw_frames_ref);
if (res < 0)
{
CV_LOG_DEBUG(NULL, "FFMPEG: avcodec_get_hw_frames_parameters() call failed: " << res)
}
}
if (!hw_frames_ref)
{
hw_frames_ref = av_hwframe_ctx_alloc(child_ctx);
}
if (!hw_frames_ref)
{
CV_LOG_INFO(NULL, "FFMPEG: Failed to create HW frame context (av_hwframe_ctx_alloc)");
return NULL;
}
AVHWFramesContext *frames_ctx = (AVHWFramesContext *)(hw_frames_ref->data);
frames_ctx->width = width;
frames_ctx->height = height;
if (frames_ctx->format == AV_PIX_FMT_NONE) {
if (child_ctx == hw_device_ctx) {
frames_ctx->format = hw_format;
}
else {
AVHWFramesConstraints* constraints = av_hwdevice_get_hwframe_constraints(child_ctx, NULL);
if (constraints) {
frames_ctx->format = constraints->valid_hw_formats[0];
av_hwframe_constraints_free(&constraints);
}
}
}
if (frames_ctx->sw_format == AV_PIX_FMT_NONE)
frames_ctx->sw_format = HW_DEFAULT_SW_FORMAT;
if (frames_ctx->initial_pool_size == 0)
frames_ctx->initial_pool_size = HW_DEFAULT_POOL_SIZE;
#ifdef HAVE_D3D11
if (frames_ctx->device_ctx && AV_HWDEVICE_TYPE_D3D11VA == frames_ctx->device_ctx->type) {
// BindFlags
AVD3D11VAFramesContext* frames_hwctx = (AVD3D11VAFramesContext*)frames_ctx->hwctx;
frames_hwctx->BindFlags |= D3D11_BIND_DECODER | D3D11_BIND_VIDEO_ENCODER;
// See function hw_get_d3d11_single_texture(), it allocates additional ID3D11Texture2D texture and
// attaches it as 'user_opaque' field. We have to set free() callback before av_hwframe_ctx_init() call.
struct D3D11SingleTexture {
static void free(struct AVHWFramesContext* ctx) {
ID3D11Texture2D* singleTexture = (ID3D11Texture2D*)ctx->user_opaque;
if (ctx->user_opaque)
singleTexture->Release();
}
};
frames_ctx->free = D3D11SingleTexture::free;
}
#endif
int res = av_hwframe_ctx_init(hw_frames_ref);
if (res < 0)
{
CV_LOG_INFO(NULL, "FFMPEG: Failed to initialize HW frame context (av_hwframe_ctx_init): " << res);
av_buffer_unref(&hw_frames_ref);
return NULL;
}
if (child_ctx != hw_device_ctx) {
AVBufferRef* derived_frame_ctx = NULL;
int flags = AV_HWFRAME_MAP_READ | AV_HWFRAME_MAP_WRITE;
res = av_hwframe_ctx_create_derived(&derived_frame_ctx, hw_format, hw_device_ctx, hw_frames_ref, flags);
av_buffer_unref(&hw_frames_ref);
if (res < 0)
{
CV_LOG_INFO(NULL, "FFMPEG: Failed to create derived HW frame context (av_hwframe_ctx_create_derived): " << res);
return NULL;
}
else {
((AVHWFramesContext*)derived_frame_ctx->data)->user_opaque = frames_ctx;
return derived_frame_ctx;
}
}
else {
return hw_frames_ref;
}
}
static
bool hw_check_codec(AVCodec* codec, AVHWDeviceType hw_type, const char *disabled_codecs)
{
CV_Assert(disabled_codecs);
std::string hw_name = std::string(".") + av_hwdevice_get_type_name(hw_type);
std::stringstream s_stream(disabled_codecs);
while (s_stream.good()) {
std::string name;
getline(s_stream, name, ',');
if (name == codec->name || name == hw_name || name == codec->name + hw_name || name == "hw") {
CV_LOG_INFO(NULL, "FFMPEG: skipping codec " << codec->name << hw_name);
return false;
}
}
return true;
}
static
AVCodec *hw_find_codec(AVCodecID id, AVHWDeviceType hw_type, int (*check_category)(const AVCodec *), const char *disabled_codecs, AVPixelFormat *hw_pix_fmt) {
AVCodec *c = 0;
void *opaque = 0;
while (NULL != (c = (AVCodec*)av_codec_iterate(&opaque)))
{
if (!check_category(c))
continue;
if (c->id != id)
continue;
if (c->capabilities & AV_CODEC_CAP_EXPERIMENTAL)
continue;
if (hw_type != AV_HWDEVICE_TYPE_NONE) {
AVPixelFormat hw_native_fmt = AV_PIX_FMT_NONE;
#if LIBAVUTIL_BUILD < AV_VERSION_INT(56, 51, 100) // VAAPI encoders support avcodec_get_hw_config() starting ffmpeg 4.3
if (hw_type == AV_HWDEVICE_TYPE_VAAPI)
hw_native_fmt = AV_PIX_FMT_VAAPI_VLD;
#endif
if (hw_type == AV_HWDEVICE_TYPE_CUDA) // CUDA encoders don't support avcodec_get_hw_config()
hw_native_fmt = AV_PIX_FMT_CUDA;
if (av_codec_is_encoder(c) && hw_native_fmt != AV_PIX_FMT_NONE && c->pix_fmts) {
for (int i = 0; c->pix_fmts[i] != AV_PIX_FMT_NONE; i++) {
if (c->pix_fmts[i] == hw_native_fmt) {
*hw_pix_fmt = hw_native_fmt;
if (hw_check_codec(c, hw_type, disabled_codecs))
return c;
}
}
}
for (int i = 0;; i++) {
const AVCodecHWConfig *hw_config = avcodec_get_hw_config(c, i);
if (!hw_config)
break;
if (hw_config->device_type == hw_type) {
*hw_pix_fmt = hw_config->pix_fmt;
if (hw_check_codec(c, hw_type, disabled_codecs))
return c;
}
}
} else {
return c;
}
}
return NULL;
}
// Callback to select hardware pixel format (not software format) and allocate frame pool (hw_frames_ctx)
static
AVPixelFormat hw_get_format_callback(struct AVCodecContext *ctx, const enum AVPixelFormat * fmt) {
if (!ctx->hw_device_ctx)
return fmt[0];
AVHWDeviceType hw_type = ((AVHWDeviceContext*)ctx->hw_device_ctx->data)->type;
for (int j = 0;; j++) {
const AVCodecHWConfig *hw_config = avcodec_get_hw_config(ctx->codec, j);
if (!hw_config)
break;
if (hw_config->device_type == hw_type) {
for (int i = 0; fmt[i] != AV_PIX_FMT_NONE; i++) {
if (fmt[i] == hw_config->pix_fmt) {
if (hw_config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX) {
ctx->sw_pix_fmt = HW_DEFAULT_SW_FORMAT;
ctx->hw_frames_ctx = hw_create_frames(ctx, ctx->hw_device_ctx, ctx->width, ctx->height, fmt[i]);
if (ctx->hw_frames_ctx) {
//ctx->sw_pix_fmt = ((AVHWFramesContext *)(ctx->hw_frames_ctx->data))->sw_format;
return fmt[i];
}
}
}
}
}
}
CV_LOG_DEBUG(NULL, "FFMPEG: Can't select HW format in 'get_format()' callback, use default");
return fmt[0];
}
// GPU color conversion NV12->BGRA via OpenCL extensions
static bool
hw_copy_frame_to_umat(AVBufferRef* ctx, AVFrame* hw_frame, cv::OutputArray output) {
CV_UNUSED(hw_frame);
CV_UNUSED(output);
if (!ctx)
return false;
#ifdef HAVE_OPENCL
try {
// check that the current OpenCL context was initialized with binding to the same VAAPI/D3D11 context
AVHWDeviceContext *hw_device_ctx = (AVHWDeviceContext *) ctx->data;
AVHWDeviceType child_type = hw_check_opencl_context(hw_device_ctx);
if (child_type == AV_HWDEVICE_TYPE_NONE)
return false;
#ifdef HAVE_VA_INTEL
if (child_type == AV_HWDEVICE_TYPE_VAAPI) {
VADisplay va_display = hw_get_va_display(hw_device_ctx);
VASurfaceID va_surface = hw_get_va_surface(hw_frame);
if (va_display && va_surface != VA_INVALID_SURFACE) {
va_intel::convertFromVASurface(va_display, va_surface, {hw_frame->width, hw_frame->height}, output);
return true;
}
}
#endif
#ifdef HAVE_D3D11
if (child_type == AV_HWDEVICE_TYPE_D3D11VA) {
AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(hw_device_ctx);
int subresource = 0;
ID3D11Texture2D* texture = hw_get_d3d11_texture(hw_frame, &subresource);
ID3D11Texture2D* singleTexture = hw_get_d3d11_single_texture(hw_frame, d3d11_device_ctx, texture);
if (texture && singleTexture) {
// Copy D3D11 sub-texture to D3D11 single texture
d3d11_device_ctx->device_context->CopySubresourceRegion(singleTexture, 0, 0, 0, 0, texture, subresource, NULL);
// Copy D3D11 single texture to cv::UMat
directx::convertFromD3D11Texture2D(singleTexture, output);
return true;
}
}
#endif
}
catch (...)
{
return false;
}
#endif // HAVE_OPENCL
return false;
}
// GPU color conversion BGRA->NV12 via OpenCL extensions
static bool
hw_copy_umat_to_frame(AVBufferRef* ctx, cv::InputArray input, AVFrame* hw_frame) {
CV_UNUSED(input);
CV_UNUSED(hw_frame);
if (!ctx)
return false;
#ifdef HAVE_OPENCL
try {
// check that the current OpenCL context was initialized with binding to the same VAAPI/D3D11 context
AVHWDeviceContext *hw_device_ctx = (AVHWDeviceContext *) ctx->data;
AVHWDeviceType child_type = hw_check_opencl_context(hw_device_ctx);
if (child_type == AV_HWDEVICE_TYPE_NONE)
return false;
#ifdef HAVE_VA_INTEL
if (child_type == AV_HWDEVICE_TYPE_VAAPI) {
VADisplay va_display = hw_get_va_display(hw_device_ctx);
VASurfaceID va_surface = hw_get_va_surface(hw_frame);
if (va_display != NULL && va_surface != VA_INVALID_SURFACE) {
va_intel::convertToVASurface(va_display, input, va_surface, {hw_frame->width, hw_frame->height});
return true;
}
}
#endif
#ifdef HAVE_D3D11
if (child_type == AV_HWDEVICE_TYPE_D3D11VA) {
AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(hw_device_ctx);
int subresource = 0;
ID3D11Texture2D* texture = hw_get_d3d11_texture(hw_frame, &subresource);
ID3D11Texture2D* singleTexture = hw_get_d3d11_single_texture(hw_frame, d3d11_device_ctx, texture);
if (texture && singleTexture) {
// Copy cv::UMat to D3D11 single texture
directx::convertToD3D11Texture2D(input, singleTexture);
// Copy D3D11 single texture to D3D11 sub-texture
d3d11_device_ctx->device_context->CopySubresourceRegion(texture, subresource, 0, 0, 0, singleTexture, 0, NULL);
return true;
}
}
#endif
}
catch (...)
{
return false;
}
#endif // HAVE_OPENCL
return false;
}
static
VideoAccelerationType hw_type_to_va_type(AVHWDeviceType hw_type) {
struct HWTypeFFMPEG {
AVHWDeviceType hw_type;
VideoAccelerationType va_type;
} known_hw_types[] = {
{ AV_HWDEVICE_TYPE_D3D11VA, VIDEO_ACCELERATION_D3D11 },
{ AV_HWDEVICE_TYPE_VAAPI, VIDEO_ACCELERATION_VAAPI },
{ AV_HWDEVICE_TYPE_QSV, VIDEO_ACCELERATION_MFX },
{ AV_HWDEVICE_TYPE_CUDA, (VideoAccelerationType)(1 << 11) },
};
for (const HWTypeFFMPEG& hw : known_hw_types) {
if (hw_type == hw.hw_type)
return hw.va_type;
}
return VIDEO_ACCELERATION_NONE;
}
class HWAccelIterator {
public:
HWAccelIterator(VideoAccelerationType va_type, bool isEncoder, AVDictionary *dict)
: hw_type_(AV_HWDEVICE_TYPE_NONE)
{
std::string accel_list;
if (va_type != VIDEO_ACCELERATION_NONE)
{
updateAccelList_(accel_list, va_type, isEncoder, dict);
}
if (va_type == VIDEO_ACCELERATION_ANY)
{
if (!accel_list.empty())
accel_list += ","; // add no-acceleration case to the end of the list
}
CV_LOG_DEBUG(NULL, "FFMPEG: allowed acceleration types (" << getVideoAccelerationName(va_type) << "): '" << accel_list << "'");
if (accel_list.empty() && va_type != VIDEO_ACCELERATION_NONE && va_type != VIDEO_ACCELERATION_ANY)
{
// deliberately break the stream so good() returns false and no acceleration is attempted
std::string tmp;
s_stream_ >> tmp;
}
else
{
s_stream_ = std::istringstream(accel_list);
}
if (va_type != VIDEO_ACCELERATION_NONE)
{
disabled_codecs_ = isEncoder
? getEncoderDisabledCodecs(dict)
: getDecoderDisabledCodecs(dict);
CV_LOG_DEBUG(NULL, "FFMPEG: disabled codecs: '" << disabled_codecs_ << "'");
}
}
bool good() const
{
return s_stream_.good();
}
void parse_next()
{
getline(s_stream_, hw_type_device_string_, ',');
size_t index = hw_type_device_string_.find('.');
if (index != std::string::npos) {
device_subname_ = hw_type_device_string_.substr(index + 1);
hw_type_string_ = hw_type_device_string_.substr(0, index);
} else {
device_subname_.clear();
hw_type_string_ = hw_type_device_string_;
}
hw_type_ = av_hwdevice_find_type_by_name(hw_type_string_.c_str());
}
const std::string& hw_type_device_string() const { return hw_type_device_string_; }
const std::string& hw_type_string() const { return hw_type_string_; }
AVHWDeviceType hw_type() const { return hw_type_; }
const std::string& device_subname() const { return device_subname_; }
const std::string& disabled_codecs() const { return disabled_codecs_; }
private:
bool updateAccelList_(std::string& accel_list, VideoAccelerationType va_type, bool isEncoder, AVDictionary *dict)
{
std::string new_accels = isEncoder
? getEncoderConfiguration(va_type, dict)
: getDecoderConfiguration(va_type, dict);
if (new_accels.empty())
return false;
if (accel_list.empty())
accel_list = new_accels;
else
accel_list = accel_list + "," + new_accels;
return true;
}
std::istringstream s_stream_;
std::string hw_type_device_string_;
std::string hw_type_string_;
AVHWDeviceType hw_type_;
std::string device_subname_;
std::string disabled_codecs_;
};
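// Assumed caller pattern (sketch) in the decoder/encoder setup code:
//   HWAccelIterator accel_iter(va_type, /*isEncoder=*/false, dict);
//   while (accel_iter.good())
//   {
//       accel_iter.parse_next();
//       AVHWDeviceType hw_type = accel_iter.hw_type();
//       // try hw_find_codec() / hw_create_device() for this hw_type ...
//   }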

File diff suppressed because it is too large


@ -0,0 +1,46 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef __OPENCV_FFMPEG_LEGACY_API_H__
#define __OPENCV_FFMPEG_LEGACY_API_H__
#ifdef __cplusplus
extern "C"
{
#endif
#ifndef OPENCV_FFMPEG_API
#if defined(__OPENCV_BUILD)
# define OPENCV_FFMPEG_API
#elif defined _WIN32
# define OPENCV_FFMPEG_API __declspec(dllexport)
#elif defined __GNUC__ && __GNUC__ >= 4
# define OPENCV_FFMPEG_API __attribute__ ((visibility ("default")))
#else
# define OPENCV_FFMPEG_API
#endif
#endif
typedef struct CvCapture_FFMPEG CvCapture_FFMPEG;
typedef struct CvVideoWriter_FFMPEG CvVideoWriter_FFMPEG;
//OPENCV_FFMPEG_API struct CvCapture_FFMPEG* cvCreateFileCapture_FFMPEG(const char* filename);
OPENCV_FFMPEG_API int cvSetCaptureProperty_FFMPEG(struct CvCapture_FFMPEG* cap,
int prop, double value);
OPENCV_FFMPEG_API double cvGetCaptureProperty_FFMPEG(struct CvCapture_FFMPEG* cap, int prop);
OPENCV_FFMPEG_API int cvGrabFrame_FFMPEG(struct CvCapture_FFMPEG* cap);
OPENCV_FFMPEG_API int cvRetrieveFrame_FFMPEG(struct CvCapture_FFMPEG* capture, unsigned char** data,
int* step, int* width, int* height, int* cn);
OPENCV_FFMPEG_API void cvReleaseCapture_FFMPEG(struct CvCapture_FFMPEG** cap);
OPENCV_FFMPEG_API struct CvVideoWriter_FFMPEG* cvCreateVideoWriter_FFMPEG(const char* filename,
int fourcc, double fps, int width, int height, int isColor );
OPENCV_FFMPEG_API int cvWriteFrame_FFMPEG(struct CvVideoWriter_FFMPEG* writer, const unsigned char* data,
int step, int width, int height, int cn, int origin);
OPENCV_FFMPEG_API void cvReleaseVideoWriter_FFMPEG(struct CvVideoWriter_FFMPEG** writer);
#ifdef __cplusplus
}
#endif
#endif // __OPENCV_FFMPEG_LEGACY_API_H__
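A hedged usage sketch of this legacy C surface (assumes a build exporting these symbols; the file name, codec, and geometry are illustrative):
#include <vector>
CvVideoWriter_FFMPEG* writer = cvCreateVideoWriter_FFMPEG(
"out.avi", /*fourcc 'X264'*/ 0x34363258, /*fps*/ 25.0,
/*width*/ 640, /*height*/ 480, /*isColor*/ 1);
if (writer)
{
std::vector<unsigned char> frame(640 * 480 * 3, 0); // one black BGR frame
cvWriteFrame_FFMPEG(writer, frame.data(), /*step*/ 640 * 3, 640, 480, /*cn*/ 3, /*origin*/ 0);
cvReleaseVideoWriter_FFMPEG(&writer);
}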

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,452 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2008, Nils Hasler, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// Author: Nils Hasler <hasler@mpi-inf.mpg.de>
//
// Max-Planck-Institut Informatik
//
// capture video from a sequence of images
// the filename given when opening can either be a printf-style pattern such as
// video%04d.png, or the first frame of the sequence, e.g. video0001.png
//
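// for example (hedged illustration; both of the following are assumed to open
// the same sequence, the second by inferring the pattern from the first frame):
// cv::VideoCapture cap1("video%04d.png", cv::CAP_IMAGES);
// cv::VideoCapture cap2("video0001.png", cv::CAP_IMAGES);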
#include "precomp.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/core/utils/filesystem.hpp"
#if 0
#define CV_WARN(message)
#else
#define CV_WARN(message) CV_LOG_INFO(NULL, "CAP_IMAGES warning: " << message << " (" << __FILE__ << ":" << __LINE__ << ")")
#endif
namespace cv {
class CvCapture_Images: public IVideoCapture
{
public:
void init()
{
filename_pattern.clear();
frame.release();
currentframe = firstframe = 0;
length = 0;
grabbedInOpen = false;
}
CvCapture_Images()
{
init();
}
CvCapture_Images(const String& _filename)
{
init();
open(_filename);
}
virtual ~CvCapture_Images() CV_OVERRIDE
{
close();
}
virtual double getProperty(int) const CV_OVERRIDE;
virtual bool setProperty(int, double) CV_OVERRIDE;
virtual bool grabFrame() CV_OVERRIDE;
virtual bool retrieveFrame(int, OutputArray) CV_OVERRIDE;
virtual bool isOpened() const CV_OVERRIDE;
virtual int getCaptureDomain() /*const*/ CV_OVERRIDE { return cv::CAP_IMAGES; }
bool open(const String&);
void close();
protected:
std::string filename_pattern; // actually a printf-pattern
unsigned currentframe;
unsigned firstframe; // number of first frame
unsigned length; // length of sequence
Mat frame;
bool grabbedInOpen;
};
void CvCapture_Images::close()
{
init();
}
bool CvCapture_Images::grabFrame()
{
cv::String filename = cv::format(filename_pattern.c_str(), (int)(firstframe + currentframe));
CV_Assert(!filename.empty());
if (grabbedInOpen)
{
grabbedInOpen = false;
++currentframe;
return !frame.empty();
}
frame = imread(filename, IMREAD_UNCHANGED);
if( !frame.empty() )
currentframe++;
return !frame.empty();
}
bool CvCapture_Images::retrieveFrame(int, OutputArray out)
{
frame.copyTo(out);
return grabbedInOpen ? false : !frame.empty();
}
double CvCapture_Images::getProperty(int id) const
{
switch(id)
{
case CV_CAP_PROP_POS_MSEC:
CV_WARN("collections of images don't have framerates");
return 0;
case CV_CAP_PROP_POS_FRAMES:
return currentframe;
case CV_CAP_PROP_FRAME_COUNT:
return length;
case CV_CAP_PROP_POS_AVI_RATIO:
return (double)currentframe / (double)(length - 1);
case CV_CAP_PROP_FRAME_WIDTH:
return frame.cols;
case CV_CAP_PROP_FRAME_HEIGHT:
return frame.rows;
case CV_CAP_PROP_FPS:
CV_WARN("collections of images don't have framerates");
return 1;
case CV_CAP_PROP_FOURCC:
CV_WARN("collections of images don't have 4-character codes");
return 0;
}
return 0;
}
bool CvCapture_Images::setProperty(int id, double value)
{
switch(id)
{
case CV_CAP_PROP_POS_MSEC:
case CV_CAP_PROP_POS_FRAMES:
if(value < 0) {
CV_WARN("seeking to negative positions does not work - clamping");
value = 0;
}
if(value >= length) {
CV_WARN("seeking beyond end of sequence - clamping");
value = length - 1;
}
currentframe = cvRound(value);
if (currentframe != 0)
grabbedInOpen = false; // grabbed frame is not valid anymore
return true;
case CV_CAP_PROP_POS_AVI_RATIO:
if(value > 1) {
CV_WARN("seeking beyond end of sequence - clamping");
value = 1;
} else if(value < 0) {
CV_WARN("seeking to negative positions does not work - clamping");
value = 0;
}
currentframe = cvRound((length - 1) * value);
if (currentframe != 0)
grabbedInOpen = false; // grabbed frame is not valid anymore
return true;
}
CV_WARN("unknown/unhandled property");
return false;
}
static
std::string icvExtractPattern(const std::string& filename, unsigned *offset)
{
size_t len = filename.size();
CV_Assert(!filename.empty());
CV_Assert(offset);
*offset = 0;
// check whether this is a valid image sequence filename
std::string::size_type pos = filename.find('%');
if (pos != std::string::npos)
{
pos++; CV_Assert(pos < len);
if (filename[pos] == '0') // optional zero prefix
{
pos++; CV_Assert(pos < len);
}
if (filename[pos] >= '1' && filename[pos] <= '9') // optional numeric size (1..9) (one symbol only)
{
pos++; CV_Assert(pos < len);
}
if (filename[pos] == 'd' || filename[pos] == 'u')
{
pos++;
if (pos == len)
return filename; // end of string '...%5d'
CV_Assert(pos < len);
if (filename.find('%', pos) == std::string::npos)
return filename; // no more patterns
CV_Error_(Error::StsBadArg, ("CAP_IMAGES: invalid multiple patterns: %s", filename.c_str()));
}
CV_Error_(Error::StsBadArg, ("CAP_IMAGES: error, expected '0?[1-9][du]' pattern, got: %s", filename.c_str()));
}
else // no pattern filename was given - extract the pattern
{
pos = filename.rfind('/');
#ifdef _WIN32
if (pos == std::string::npos)
pos = filename.rfind('\\');
#endif
if (pos != std::string::npos)
pos++;
else
pos = 0;
while (pos < len && !isdigit(filename[pos])) pos++;
if (pos == len)
{
CV_Error_(Error::StsBadArg, ("CAP_IMAGES: can't find starting number (in the name of file): %s", filename.c_str()));
}
std::string::size_type pos0 = pos;
const int64_t max_number = 1000000000;
CV_Assert(max_number < INT_MAX); // offset is 'int'
int number_str_size = 0;
uint64_t number = 0;
while (pos < len && isdigit(filename[pos]))
{
char ch = filename[pos];
number = (number * 10) + (uint64_t)((int)ch - (int)'0');
CV_Assert(number < max_number);
number_str_size++;
CV_Assert(number_str_size <= 64); // don't allow huge zero prefixes
pos++;
}
CV_Assert(number_str_size > 0);
*offset = (int)number;
std::string result;
if (pos0 > 0)
result += filename.substr(0, pos0);
result += cv::format("%%0%dd", number_str_size);
if (pos < len)
result += filename.substr(pos);
CV_LOG_INFO(NULL, "Pattern: " << result << " @ " << number);
return result;
}
}
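// Worked example (derived from the code above): icvExtractPattern("img_0042.png", &offset)
// skips the non-digit prefix, consumes the digit run "0042" and returns "img_%04d.png"
// with offset == 42; an explicit pattern such as "img_%04d.png" is returned unchanged
// with offset left at 0.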
bool CvCapture_Images::open(const std::string& _filename)
{
unsigned offset = 0;
close();
CV_Assert(!_filename.empty());
filename_pattern = icvExtractPattern(_filename, &offset);
CV_Assert(!filename_pattern.empty());
// determine the length of the sequence
for (length = 0; ;)
{
cv::String filename = cv::format(filename_pattern.c_str(), (int)(offset + length));
if (!utils::fs::exists(filename))
{
if (length == 0 && offset == 0) // allow starting with 0 or 1
{
offset++;
continue;
}
break;
}
if(!haveImageReader(filename))
{
CV_LOG_INFO(NULL, "CAP_IMAGES: Stop scanning. Can't read image file: " << filename);
break;
}
length++;
}
if (length == 0)
{
close();
return false;
}
firstframe = offset;
// grab frame to enable properties retrieval
bool grabRes = grabFrame();
grabbedInOpen = true;
currentframe = 0;
return grabRes;
}
bool CvCapture_Images::isOpened() const
{
return !filename_pattern.empty();
}
Ptr<IVideoCapture> create_Images_capture(const std::string &filename)
{
return makePtr<CvCapture_Images>(filename);
}
//
//
// image sequence writer
//
//
class CvVideoWriter_Images CV_FINAL : public CvVideoWriter
{
public:
CvVideoWriter_Images()
{
filename_pattern.clear();
currentframe = 0;
}
virtual ~CvVideoWriter_Images() { close(); }
virtual bool open( const char* _filename );
virtual void close();
virtual bool setProperty( int, double ); // FIXIT doesn't work: IVideoWriter interface only!
virtual bool writeFrame( const IplImage* ) CV_OVERRIDE;
int getCaptureDomain() const CV_OVERRIDE { return cv::CAP_IMAGES; }
protected:
std::string filename_pattern;
unsigned currentframe;
std::vector<int> params;
};
bool CvVideoWriter_Images::writeFrame( const IplImage* image )
{
CV_Assert(!filename_pattern.empty());
cv::String filename = cv::format(filename_pattern.c_str(), (int)currentframe);
CV_Assert(!filename.empty());
std::vector<int> image_params = params;
image_params.push_back(0); // append parameters 'stop' mark
image_params.push_back(0);
cv::Mat img = cv::cvarrToMat(image);
bool ret = cv::imwrite(filename, img, image_params);
currentframe++;
return ret;
}
void CvVideoWriter_Images::close()
{
filename_pattern.clear();
currentframe = 0;
params.clear();
}
bool CvVideoWriter_Images::open( const char* _filename )
{
unsigned offset = 0;
close();
CV_Assert(_filename);
filename_pattern = icvExtractPattern(_filename, &offset);
CV_Assert(!filename_pattern.empty());
cv::String filename = cv::format(filename_pattern.c_str(), (int)currentframe);
if (!cv::haveImageWriter(filename))
{
close();
return false;
}
currentframe = offset;
params.clear();
return true;
}
bool CvVideoWriter_Images::setProperty( int id, double value )
{
if (id >= cv::CAP_PROP_IMAGES_BASE && id < cv::CAP_PROP_IMAGES_LAST)
{
params.push_back( id - cv::CAP_PROP_IMAGES_BASE );
params.push_back( static_cast<int>( value ) );
return true;
}
return false; // not supported
}
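// Hedged example: a caller tunnelling an imgcodecs flag through this writer, e.g.
// writer->setProperty(cv::CAP_PROP_IMAGES_BASE + cv::IMWRITE_JPEG_QUALITY, 95);
// appends the pair (IMWRITE_JPEG_QUALITY, 95) to 'params', which writeFrame() then
// forwards to cv::imwrite().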
Ptr<IVideoWriter> create_Images_writer(const std::string &filename, int, double, const Size &,
const cv::VideoWriterParameters&)
{
CvVideoWriter_Images *writer = new CvVideoWriter_Images;
try
{
if( writer->open( filename.c_str() ))
return makePtr<LegacyWriter>(writer);
delete writer;
}
catch (...)
{
delete writer;
throw;
}
return 0;
}
} // cv::

View File

@ -0,0 +1,436 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef CAP_INTERFACE_HPP
#define CAP_INTERFACE_HPP
#include "opencv2/core.hpp"
#include "opencv2/core/core_c.h"
#include "opencv2/videoio.hpp"
#include "opencv2/videoio/videoio_c.h"
//===================================================
// Legacy structs
struct CvCapture
{
virtual ~CvCapture() {}
virtual double getProperty(int) const { return 0; }
virtual bool setProperty(int, double) { return false; }
virtual bool grabFrame() { return true; }
virtual IplImage* retrieveFrame(int) { return 0; }
virtual int getCaptureDomain() { return cv::CAP_ANY; } // Return the type of the capture object: CAP_DSHOW, etc...
};
struct CvVideoWriter
{
virtual ~CvVideoWriter() {}
virtual bool writeFrame(const IplImage*) { return false; }
virtual int getCaptureDomain() const { return cv::CAP_ANY; } // Return the type of the capture object: CAP_FFMPEG, etc...
virtual double getProperty(int) const { return 0; }
};
//===================================================
// Modern classes
namespace cv
{
namespace
{
template <class T>
inline T castParameterTo(int paramValue)
{
return static_cast<T>(paramValue);
}
template <>
inline bool castParameterTo(int paramValue)
{
return paramValue != 0;
}
}
class VideoParameters
{
public:
struct VideoParameter {
VideoParameter() = default;
VideoParameter(int key_, int value_) : key(key_), value(value_) {}
int key{-1};
int value{-1};
mutable bool isConsumed{false};
};
VideoParameters() = default;
explicit VideoParameters(const std::vector<int>& params)
{
const auto count = params.size();
if (count % 2 != 0)
{
CV_Error_(Error::StsVecLengthErr,
("Vector of VideoWriter parameters should have even length"));
}
params_.reserve(count / 2);
for (std::size_t i = 0; i < count; i += 2)
{
add(params[i], params[i + 1]);
}
}
VideoParameters(int* params, unsigned n_params)
{
params_.reserve(n_params);
for (unsigned i = 0; i < n_params; ++i)
{
add(params[2*i], params[2*i + 1]);
}
}
void add(int key, int value)
{
params_.emplace_back(key, value);
}
bool has(int key) const
{
auto it = std::find_if(params_.begin(), params_.end(),
[key](const VideoParameter &param)
{
return param.key == key;
}
);
return it != params_.end();
}
template <class ValueType>
ValueType get(int key) const
{
auto it = std::find_if(params_.begin(), params_.end(),
[key](const VideoParameter &param)
{
return param.key == key;
}
);
if (it != params_.end())
{
it->isConsumed = true;
return castParameterTo<ValueType>(it->value);
}
else
{
CV_Error_(Error::StsBadArg, ("Missing value for parameter: [%d]", key));
}
}
template <class ValueType>
ValueType get(int key, ValueType defaultValue) const
{
auto it = std::find_if(params_.begin(), params_.end(),
[key](const VideoParameter &param)
{
return param.key == key;
}
);
if (it != params_.end())
{
it->isConsumed = true;
return castParameterTo<ValueType>(it->value);
}
else
{
return defaultValue;
}
}
std::vector<int> getUnused() const
{
std::vector<int> unusedParams;
for (const auto &param : params_)
{
if (!param.isConsumed)
{
unusedParams.push_back(param.key);
}
}
return unusedParams;
}
std::vector<int> getIntVector() const
{
std::vector<int> vint_params;
for (const auto& param : params_)
{
vint_params.push_back(param.key);
vint_params.push_back(param.value);
}
return vint_params;
}
bool empty() const
{
return params_.empty();
}
bool warnUnusedParameters() const
{
bool found = false;
for (const auto &param : params_)
{
if (!param.isConsumed)
{
found = true;
CV_LOG_INFO(NULL, "VIDEOIO: unused parameter: [" << param.key << "]=" <<
cv::format("%lld / 0x%016llx", (long long)param.value, (long long)param.value));
}
}
return found;
}
private:
std::vector<VideoParameter> params_;
};
class VideoWriterParameters : public VideoParameters
{
public:
using VideoParameters::VideoParameters; // reuse constructors
};
class VideoCaptureParameters : public VideoParameters
{
public:
using VideoParameters::VideoParameters; // reuse constructors
};
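// A minimal usage sketch (hedged; the property keys and values are illustrative):
// std::vector<int> raw = { CAP_PROP_FRAME_WIDTH, 1280, CAP_PROP_FRAME_HEIGHT, 720 };
// VideoCaptureParameters params(raw); // throws if raw.size() is odd
// int w = params.get<int>(CAP_PROP_FRAME_WIDTH); // marks that entry as consumed
// int h = params.get<int>(CAP_PROP_FRAME_HEIGHT, 480); // present, so returns 720
// params.warnUnusedParameters(); // logs any entry that was never read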
class IVideoCapture
{
public:
virtual ~IVideoCapture() {}
virtual double getProperty(int) const { return 0; }
virtual bool setProperty(int, double) { return false; }
virtual bool grabFrame() = 0;
virtual bool retrieveFrame(int, OutputArray) = 0;
virtual bool isOpened() const = 0;
virtual int getCaptureDomain() { return CAP_ANY; } // Return the type of the capture object: CAP_DSHOW, etc...
};
class IVideoWriter
{
public:
virtual ~IVideoWriter() {}
virtual double getProperty(int) const { return 0; }
virtual bool setProperty(int, double) { return false; }
virtual bool isOpened() const = 0;
virtual void write(InputArray) = 0;
virtual int getCaptureDomain() const { return cv::CAP_ANY; } // Return the type of the capture object: CAP_FFMPEG, etc...
};
namespace internal {
class VideoCapturePrivateAccessor
{
public:
static
IVideoCapture* getIVideoCapture(const VideoCapture& cap) { return cap.icap.get(); }
};
} // namespace internal
//===================================================
// Wrapper
class LegacyCapture : public IVideoCapture
{
private:
CvCapture * cap;
LegacyCapture(const LegacyCapture &);
LegacyCapture& operator=(const LegacyCapture &);
public:
LegacyCapture(CvCapture * cap_) : cap(cap_) {}
~LegacyCapture()
{
cvReleaseCapture(&cap);
}
double getProperty(int propId) const CV_OVERRIDE
{
return cap ? cap->getProperty(propId) : 0;
}
bool setProperty(int propId, double value) CV_OVERRIDE
{
return cvSetCaptureProperty(cap, propId, value) != 0;
}
bool grabFrame() CV_OVERRIDE
{
return cap ? cvGrabFrame(cap) != 0 : false;
}
bool retrieveFrame(int channel, OutputArray image) CV_OVERRIDE
{
IplImage* _img = cvRetrieveFrame(cap, channel);
if( !_img )
{
image.release();
return false;
}
if(_img->origin == IPL_ORIGIN_TL)
{
cv::cvarrToMat(_img).copyTo(image);
}
else
{
Mat temp = cv::cvarrToMat(_img);
flip(temp, image, 0);
}
return true;
}
bool isOpened() const CV_OVERRIDE
{
return cap != 0; // legacy interface doesn't support closed files
}
int getCaptureDomain() CV_OVERRIDE
{
return cap ? cap->getCaptureDomain() : 0;
}
CvCapture* getCvCapture() const { return cap; }
};
class LegacyWriter : public IVideoWriter
{
private:
CvVideoWriter * writer;
LegacyWriter(const LegacyWriter &);
LegacyWriter& operator=(const LegacyWriter &);
public:
LegacyWriter(CvVideoWriter * wri_) : writer(wri_)
{}
~LegacyWriter()
{
cvReleaseVideoWriter(&writer);
}
double getProperty(int propId) const CV_OVERRIDE
{
if (writer)
{
return writer->getProperty(propId);
}
return 0.;
}
bool setProperty(int, double) CV_OVERRIDE
{
return false;
}
bool isOpened() const CV_OVERRIDE
{
return writer != NULL;
}
void write(InputArray image) CV_OVERRIDE
{
IplImage _img = cvIplImage(image.getMat());
cvWriteFrame(writer, &_img);
}
int getCaptureDomain() const CV_OVERRIDE
{
return writer ? writer->getCaptureDomain() : 0;
}
};
//==================================================================================================
Ptr<IVideoCapture> cvCreateFileCapture_FFMPEG_proxy(const std::string &filename, const VideoCaptureParameters& params);
Ptr<IVideoWriter> cvCreateVideoWriter_FFMPEG_proxy(const std::string& filename, int fourcc,
double fps, const Size& frameSize,
const VideoWriterParameters& params);
Ptr<IVideoCapture> createGStreamerCapture_file(const std::string& filename, const cv::VideoCaptureParameters& params);
Ptr<IVideoCapture> createGStreamerCapture_cam(int index, const cv::VideoCaptureParameters& params);
Ptr<IVideoWriter> create_GStreamer_writer(const std::string& filename, int fourcc,
double fps, const Size& frameSize,
const VideoWriterParameters& params);
Ptr<IVideoCapture> create_MFX_capture(const std::string &filename);
Ptr<IVideoWriter> create_MFX_writer(const std::string& filename, int _fourcc,
double fps, const Size& frameSize,
const VideoWriterParameters& params);
Ptr<IVideoCapture> create_AVFoundation_capture_file(const std::string &filename);
Ptr<IVideoCapture> create_AVFoundation_capture_cam(int index);
Ptr<IVideoWriter> create_AVFoundation_writer(const std::string& filename, int fourcc,
double fps, const Size& frameSize,
const VideoWriterParameters& params);
Ptr<IVideoCapture> create_WRT_capture(int device);
Ptr<IVideoCapture> cvCreateCapture_MSMF(int index, const VideoCaptureParameters& params);
Ptr<IVideoCapture> cvCreateCapture_MSMF(const std::string& filename, const VideoCaptureParameters& params);
Ptr<IVideoWriter> cvCreateVideoWriter_MSMF(const std::string& filename, int fourcc,
double fps, const Size& frameSize,
const VideoWriterParameters& params);
Ptr<IVideoCapture> create_DShow_capture(int index);
Ptr<IVideoCapture> create_V4L_capture_cam(int index);
Ptr<IVideoCapture> create_V4L_capture_file(const std::string &filename);
Ptr<IVideoCapture> create_OpenNI2_capture_cam( int index );
Ptr<IVideoCapture> create_OpenNI2_capture_file( const std::string &filename );
Ptr<IVideoCapture> create_Images_capture(const std::string &filename);
Ptr<IVideoWriter> create_Images_writer(const std::string& filename, int fourcc,
double fps, const Size& frameSize,
const VideoWriterParameters& params);
Ptr<IVideoCapture> create_DC1394_capture(int index);
Ptr<IVideoCapture> create_RealSense_capture(int index);
Ptr<IVideoCapture> create_PvAPI_capture( int index );
Ptr<IVideoCapture> create_XIMEA_capture_cam( int index );
Ptr<IVideoCapture> create_XIMEA_capture_file( const std::string &serialNumber );
Ptr<IVideoCapture> create_ueye_camera(int camera);
Ptr<IVideoCapture> create_Aravis_capture( int index );
Ptr<IVideoCapture> createMotionJpegCapture(const std::string& filename);
Ptr<IVideoWriter> createMotionJpegWriter(const std::string& filename, int fourcc,
double fps, const Size& frameSize,
const VideoWriterParameters& params);
Ptr<IVideoCapture> createGPhoto2Capture(int index);
Ptr<IVideoCapture> createGPhoto2Capture(const std::string& deviceName);
Ptr<IVideoCapture> createXINECapture(const std::string &filename);
Ptr<IVideoCapture> createAndroidCapture_cam( int index );
Ptr<IVideoCapture> createAndroidCapture_file(const std::string &filename);
bool VideoCapture_V4L_waitAny(
const std::vector<VideoCapture>& streams,
CV_OUT std::vector<int>& ready,
int64 timeoutNs);
static inline
std::ostream& operator<<(std::ostream& out, const VideoAccelerationType& va_type)
{
switch (va_type)
{
case VIDEO_ACCELERATION_NONE: out << "NONE"; return out;
case VIDEO_ACCELERATION_ANY: out << "ANY"; return out;
case VIDEO_ACCELERATION_D3D11: out << "D3D11"; return out;
case VIDEO_ACCELERATION_VAAPI: out << "VAAPI"; return out;
case VIDEO_ACCELERATION_MFX: out << "MFX"; return out;
}
out << cv::format("UNKNOWN(0x%ux)", static_cast<unsigned int>(va_type));
return out;
}
} // cv::
#endif // CAP_INTERFACE_HPP

View File

@ -0,0 +1,516 @@
/*
* cap_ios_abstract_camera.mm
* For iOS video I/O
* by Eduard Feicho on 29/07/12
* by Alexander Shishkov on 17/07/13
* Copyright 2012. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#import "opencv2/videoio/cap_ios.h"
#include "precomp.hpp"
#pragma mark - Private Interface
@interface CvAbstractCamera ()
@property (nonatomic, strong) AVCaptureVideoPreviewLayer* captureVideoPreviewLayer;
- (void)deviceOrientationDidChange:(NSNotification*)notification;
- (void)startCaptureSession;
- (void)setDesiredCameraPosition:(AVCaptureDevicePosition)desiredPosition;
- (void)updateSize;
@end
#pragma mark - Implementation
@implementation CvAbstractCamera
#pragma mark Public
@synthesize imageWidth;
@synthesize imageHeight;
@synthesize defaultFPS;
@synthesize defaultAVCaptureDevicePosition;
@synthesize defaultAVCaptureVideoOrientation;
@synthesize defaultAVCaptureSessionPreset;
@synthesize captureSession;
@synthesize captureVideoPreviewLayer;
@synthesize videoCaptureConnection;
@synthesize running;
@synthesize captureSessionLoaded;
@synthesize useAVCaptureVideoPreviewLayer;
@synthesize parentView;
#pragma mark - Constructors
- (id)init;
{
self = [super init];
if (self) {
// react to device orientation notifications
[[NSNotificationCenter defaultCenter] addObserver:self
selector:@selector(deviceOrientationDidChange:)
name:UIDeviceOrientationDidChangeNotification
object:nil];
[[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
currentDeviceOrientation = [[UIDevice currentDevice] orientation];
// check if camera available
cameraAvailable = [UIImagePickerController isSourceTypeAvailable:UIImagePickerControllerSourceTypeCamera];
NSLog(@"camera available: %@", (cameraAvailable == YES ? @"YES" : @"NO") );
running = NO;
// set camera default configuration
self.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront;
self.defaultAVCaptureVideoOrientation = AVCaptureVideoOrientationLandscapeLeft;
self.defaultFPS = 15;
self.defaultAVCaptureSessionPreset = AVCaptureSessionPreset352x288;
self.parentView = nil;
self.useAVCaptureVideoPreviewLayer = NO;
}
return self;
}
- (id)initWithParentView:(UIView*)parent;
{
self = [super init];
if (self) {
// react to device orientation notifications
[[NSNotificationCenter defaultCenter] addObserver:self
selector:@selector(deviceOrientationDidChange:)
name:UIDeviceOrientationDidChangeNotification
object:nil];
[[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
currentDeviceOrientation = [[UIDevice currentDevice] orientation];
// check if camera available
cameraAvailable = [UIImagePickerController isSourceTypeAvailable:UIImagePickerControllerSourceTypeCamera];
NSLog(@"camera available: %@", (cameraAvailable == YES ? @"YES" : @"NO") );
running = NO;
// set camera default configuration
self.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront;
self.defaultAVCaptureVideoOrientation = AVCaptureVideoOrientationLandscapeLeft;
self.defaultFPS = 15;
self.defaultAVCaptureSessionPreset = AVCaptureSessionPreset640x480;
self.parentView = parent;
self.useAVCaptureVideoPreviewLayer = YES;
}
return self;
}
- (void)dealloc;
{
[[NSNotificationCenter defaultCenter] removeObserver:self];
[[UIDevice currentDevice] endGeneratingDeviceOrientationNotifications];
[super dealloc];
}
#pragma mark - Public interface
- (void)start;
{
if (![NSThread isMainThread]) {
NSLog(@"[Camera] Warning: Call start only from main thread");
[self performSelectorOnMainThread:@selector(start) withObject:nil waitUntilDone:NO];
return;
}
if (running == YES) {
return;
}
running = YES;
// TODO: update image size data before actually starting (needed for recording)
[self updateSize];
if (cameraAvailable) {
[self startCaptureSession];
}
}
- (void)pause;
{
running = NO;
[self.captureSession stopRunning];
}
- (void)stop;
{
running = NO;
// Release any retained subviews of the main view.
// e.g. self.myOutlet = nil;
if (self.captureSession) {
for (AVCaptureInput *input in self.captureSession.inputs) {
[self.captureSession removeInput:input];
}
for (AVCaptureOutput *output in self.captureSession.outputs) {
[self.captureSession removeOutput:output];
}
[self.captureSession stopRunning];
[captureSession release];
}
[captureVideoPreviewLayer release];
[videoCaptureConnection release];
captureSessionLoaded = NO;
}
// use front/back camera
- (void)switchCameras;
{
BOOL was_running = self.running;
if (was_running) {
[self stop];
}
if (self.defaultAVCaptureDevicePosition == AVCaptureDevicePositionFront) {
self.defaultAVCaptureDevicePosition = AVCaptureDevicePositionBack;
} else {
self.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront;
}
if (was_running) {
[self start];
}
}
#pragma mark - Device Orientation Changes
- (void)deviceOrientationDidChange:(NSNotification*)notification
{
(void)notification;
UIDeviceOrientation orientation = [UIDevice currentDevice].orientation;
switch (orientation)
{
case UIDeviceOrientationPortrait:
case UIDeviceOrientationPortraitUpsideDown:
case UIDeviceOrientationLandscapeLeft:
case UIDeviceOrientationLandscapeRight:
currentDeviceOrientation = orientation;
break;
case UIDeviceOrientationFaceUp:
case UIDeviceOrientationFaceDown:
default:
break;
}
NSLog(@"deviceOrientationDidChange: %d", (int)orientation);
[self updateOrientation];
}
#pragma mark - Private Interface
- (void)createCaptureSession;
{
// set a av capture session preset
self.captureSession = [[AVCaptureSession alloc] init];
if ([self.captureSession canSetSessionPreset:self.defaultAVCaptureSessionPreset]) {
[self.captureSession setSessionPreset:self.defaultAVCaptureSessionPreset];
} else if ([self.captureSession canSetSessionPreset:AVCaptureSessionPresetLow]) {
[self.captureSession setSessionPreset:AVCaptureSessionPresetLow];
} else {
NSLog(@"[Camera] Error: could not set session preset");
}
}
- (void)createCaptureDevice;
{
// setup the device
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
[self setDesiredCameraPosition:self.defaultAVCaptureDevicePosition];
NSLog(@"[Camera] device connected? %@", device.connected ? @"YES" : @"NO");
NSLog(@"[Camera] device position %@", (device.position == AVCaptureDevicePositionBack) ? @"back" : @"front");
}
- (void)createVideoPreviewLayer;
{
self.captureVideoPreviewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:self.captureSession];
if ([self.captureVideoPreviewLayer respondsToSelector:@selector(connection)])
{
if ([self.captureVideoPreviewLayer.connection isVideoOrientationSupported])
{
[self.captureVideoPreviewLayer.connection setVideoOrientation:self.defaultAVCaptureVideoOrientation];
}
}
else
{
#if (!defined(TARGET_OS_MACCATALYST) || !TARGET_OS_MACCATALYST)
// Deprecated in 6.0; here for backward compatibility
if ([self.captureVideoPreviewLayer isOrientationSupported])
{
[self.captureVideoPreviewLayer setOrientation:self.defaultAVCaptureVideoOrientation];
}
#endif
}
if (parentView != nil) {
self.captureVideoPreviewLayer.frame = self.parentView.bounds;
self.captureVideoPreviewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
[self.parentView.layer addSublayer:self.captureVideoPreviewLayer];
}
NSLog(@"[Camera] created AVCaptureVideoPreviewLayer");
}
- (void)setDesiredCameraPosition:(AVCaptureDevicePosition)desiredPosition;
{
for (AVCaptureDevice *device in [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
if ([device position] == desiredPosition) {
[self.captureSession beginConfiguration];
NSError* error = nil;
AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:device error:&error];
if (!input) {
NSLog(@"error creating input %@", [error description]);
}
// support for autofocus
if ([device isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]) {
error = nil;
if ([device lockForConfiguration:&error]) {
device.focusMode = AVCaptureFocusModeContinuousAutoFocus;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for autofocus configuration %@", [error description]);
}
}
[self.captureSession addInput:input];
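// note: the loop below removes every current input (including the one just added),
// after which 'input' is re-added so it ends up as the session's only input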
for (AVCaptureInput *oldInput in self.captureSession.inputs) {
[self.captureSession removeInput:oldInput];
}
[self.captureSession addInput:input];
[self.captureSession commitConfiguration];
break;
}
}
}
- (void)startCaptureSession
{
if (!cameraAvailable) {
return;
}
if (self.captureSessionLoaded == NO) {
[self createCaptureSession];
[self createCaptureDevice];
[self createCaptureOutput];
// setup preview layer
if (self.useAVCaptureVideoPreviewLayer) {
[self createVideoPreviewLayer];
} else {
[self createCustomVideoPreview];
}
captureSessionLoaded = YES;
}
[self.captureSession startRunning];
}
- (void)createCaptureOutput;
{
[NSException raise:NSInternalInconsistencyException
format:@"You must override %s in a subclass", __FUNCTION__];
}
- (void)createCustomVideoPreview;
{
[NSException raise:NSInternalInconsistencyException
format:@"You must override %s in a subclass", __FUNCTION__];
}
- (void)updateOrientation;
{
// nothing to do here
}
- (void)updateSize;
{
if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPresetPhoto]) {
//TODO: find the correct resolution
self.imageWidth = 640;
self.imageHeight = 480;
} else if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPresetHigh]) {
//TODO: find the correct resolution
self.imageWidth = 640;
self.imageHeight = 480;
} else if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPresetMedium]) {
//TODO: find the correct resolution
self.imageWidth = 640;
self.imageHeight = 480;
} else if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPresetLow]) {
//TODO: find the correct resolution
self.imageWidth = 640;
self.imageHeight = 480;
} else if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPreset352x288]) {
self.imageWidth = 352;
self.imageHeight = 288;
} else if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPreset640x480]) {
self.imageWidth = 640;
self.imageHeight = 480;
} else if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPreset1280x720]) {
self.imageWidth = 1280;
self.imageHeight = 720;
} else {
self.imageWidth = 640;
self.imageHeight = 480;
}
}
- (void)lockFocus;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isFocusModeSupported:AVCaptureFocusModeLocked]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.focusMode = AVCaptureFocusModeLocked;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for locked focus configuration %@", [error description]);
}
}
}
- (void) unlockFocus;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.focusMode = AVCaptureFocusModeContinuousAutoFocus;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for autofocus configuration %@", [error description]);
}
}
}
- (void)lockExposure;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isExposureModeSupported:AVCaptureExposureModeLocked]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.exposureMode = AVCaptureExposureModeLocked;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for locked exposure configuration %@", [error description]);
}
}
}
- (void) unlockExposure;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isExposureModeSupported:AVCaptureExposureModeContinuousAutoExposure]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.exposureMode = AVCaptureExposureModeContinuousAutoExposure;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for autoexposure configuration %@", [error description]);
}
}
}
- (void)lockBalance;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeLocked]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.whiteBalanceMode = AVCaptureWhiteBalanceModeLocked;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for locked white balance configuration %@", [error description]);
}
}
}
- (void) unlockBalance;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.whiteBalanceMode = AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for auto white balance configuration %@", [error description]);
}
}
}
@end

View File

@ -0,0 +1,172 @@
/*
* cap_ios_photo_camera.mm
* For iOS video I/O
* by Eduard Feicho on 29/07/12
* Copyright 2012. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#import "opencv2/videoio/cap_ios.h"
#include "precomp.hpp"
#pragma mark - Private Interface
@interface CvPhotoCamera ()
{
id<CvPhotoCameraDelegate> _delegate;
}
@property (nonatomic, strong) AVCaptureStillImageOutput* stillImageOutput;
@end
#pragma mark - Implementation
@implementation CvPhotoCamera
#pragma mark Public
@synthesize stillImageOutput;
- (void)setDelegate:(id<CvPhotoCameraDelegate>)newDelegate {
_delegate = newDelegate;
}
- (id<CvPhotoCameraDelegate>)delegate {
return _delegate;
}
#pragma mark - Public interface
- (void)takePicture
{
if (cameraAvailable == NO) {
return;
}
cameraAvailable = NO;
[self.stillImageOutput captureStillImageAsynchronouslyFromConnection:self.videoCaptureConnection
completionHandler:
^(CMSampleBufferRef imageSampleBuffer, NSError *error)
{
if (error == nil && imageSampleBuffer != NULL)
{
// TODO check
// NSNumber* imageOrientation = [UIImage cgImageOrientationForUIDeviceOrientation:currentDeviceOrientation];
// CMSetAttachment(imageSampleBuffer, kCGImagePropertyOrientation, imageOrientation, 1);
NSData *jpegData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageSampleBuffer];
dispatch_async(dispatch_get_main_queue(), ^{
[self.captureSession stopRunning];
// Make sure we create objects on the main thread in the main context
UIImage* newImage = [UIImage imageWithData:jpegData];
//UIImageOrientation orientation = [newImage imageOrientation];
// TODO: only apply rotation, don't scale, since we can set this directly in the camera
/*
switch (orientation) {
case UIImageOrientationUp:
case UIImageOrientationDown:
newImage = [newImage imageWithAppliedRotationAndMaxSize:CGSizeMake(640.0, 480.0)];
break;
case UIImageOrientationLeft:
case UIImageOrientationRight:
newImage = [newImage imageWithMaxSize:CGSizeMake(640.0, 480.0)];
default:
break;
}
*/
// We have captured the image, we can allow the user to take another picture
cameraAvailable = YES;
NSLog(@"CvPhotoCamera captured image");
[self.delegate photoCamera:self capturedImage:newImage];
[self.captureSession startRunning];
});
}
}];
}
- (void)stop;
{
[super stop];
self.stillImageOutput = nil;
}
#pragma mark - Private Interface
- (void)createStillImageOutput;
{
// setup still image output with jpeg codec
self.stillImageOutput = [[AVCaptureStillImageOutput alloc] init];
NSDictionary *outputSettings = [NSDictionary dictionaryWithObjectsAndKeys:AVVideoCodecJPEG, AVVideoCodecKey, nil];
[self.stillImageOutput setOutputSettings:outputSettings];
[self.captureSession addOutput:self.stillImageOutput];
for (AVCaptureConnection *connection in self.stillImageOutput.connections) {
for (AVCaptureInputPort *port in [connection inputPorts]) {
if ([port.mediaType isEqual:AVMediaTypeVideo]) {
self.videoCaptureConnection = connection;
break;
}
}
if (self.videoCaptureConnection) {
break;
}
}
NSLog(@"[Camera] still image output created");
}
- (void)createCaptureOutput;
{
[self createStillImageOutput];
}
- (void)createCustomVideoPreview;
{
//do nothing, always use AVCaptureVideoPreviewLayer
}
@end

View File

@ -0,0 +1,653 @@
/*
* cap_ios_video_camera.mm
* For iOS video I/O
* by Eduard Feicho on 29/07/12
* by Alexander Shishkov on 17/07/13
* Copyright 2012. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#import "opencv2/videoio/cap_ios.h"
#include "precomp.hpp"
#import <UIKit/UIKit.h>
static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;}
#pragma mark - Private Interface
@interface CvVideoCamera () {
int recordingCountDown;
}
- (void)createVideoDataOutput;
- (void)createVideoFileOutput;
@property (nonatomic, strong) CALayer *customPreviewLayer;
@property (nonatomic, strong) AVCaptureVideoDataOutput *videoDataOutput;
@end
#pragma mark - Implementation
@implementation CvVideoCamera
{
id<CvVideoCameraDelegate> _delegate;
}
@synthesize grayscaleMode;
@synthesize customPreviewLayer;
@synthesize videoDataOutput;
@synthesize recordVideo;
@synthesize rotateVideo;
//@synthesize videoFileOutput;
@synthesize recordAssetWriterInput;
@synthesize recordPixelBufferAdaptor;
@synthesize recordAssetWriter;
- (void)setDelegate:(id<CvVideoCameraDelegate>)newDelegate {
_delegate = newDelegate;
}
- (id<CvVideoCameraDelegate>)delegate {
return _delegate;
}
#pragma mark - Constructors
- (id)initWithParentView:(UIView*)parent;
{
self = [super initWithParentView:parent];
if (self) {
self.useAVCaptureVideoPreviewLayer = NO;
self.recordVideo = NO;
self.rotateVideo = NO;
}
return self;
}
#pragma mark - Public interface
- (void)start;
{
if (self.running == YES) {
return;
}
recordingCountDown = 10;
[super start];
if (self.recordVideo == YES) {
NSError* error = nil;
if ([[NSFileManager defaultManager] fileExistsAtPath:[self videoFileString]]) {
[[NSFileManager defaultManager] removeItemAtPath:[self videoFileString] error:&error];
}
if (error == nil) {
NSLog(@"[Camera] Delete file %@", [self videoFileString]);
}
}
}
- (void)stop;
{
if (self.running == YES) {
[super stop];
[videoDataOutput release];
if (videoDataOutputQueue) {
dispatch_release(videoDataOutputQueue);
}
if (self.recordVideo == YES) {
if (self.recordAssetWriter) {
if (self.recordAssetWriter.status == AVAssetWriterStatusWriting) {
[self.recordAssetWriter finishWriting];
NSLog(@"[Camera] recording stopped");
} else {
NSLog(@"[Camera] Recording Error: asset writer status is not writing");
}
[recordAssetWriter release];
}
[recordAssetWriterInput release];
[recordPixelBufferAdaptor release];
}
if (self.customPreviewLayer) {
[self.customPreviewLayer removeFromSuperlayer];
self.customPreviewLayer = nil;
}
}
}
// TODO fix
- (void)adjustLayoutToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation;
{
NSLog(@"layout preview layer");
if (self.parentView != nil) {
CALayer* layer = self.customPreviewLayer;
CGRect bounds = self.customPreviewLayer.bounds;
int rotation_angle = 0;
bool flip_bounds = false;
switch (interfaceOrientation) {
case UIInterfaceOrientationPortrait:
NSLog(@"to Portrait");
rotation_angle = 270;
break;
case UIInterfaceOrientationPortraitUpsideDown:
rotation_angle = 90;
NSLog(@"to UpsideDown");
break;
case UIInterfaceOrientationLandscapeLeft:
rotation_angle = 0;
NSLog(@"to LandscapeLeft");
break;
case UIInterfaceOrientationLandscapeRight:
rotation_angle = 180;
NSLog(@"to LandscapeRight");
break;
default:
break; // leave the layer in its last known orientation
}
switch (self.defaultAVCaptureVideoOrientation) {
case AVCaptureVideoOrientationLandscapeRight:
rotation_angle += 180;
break;
case AVCaptureVideoOrientationPortraitUpsideDown:
rotation_angle += 270;
break;
case AVCaptureVideoOrientationPortrait:
rotation_angle += 90;
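// fall through: Portrait adds 90 and then takes LandscapeLeft's break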
case AVCaptureVideoOrientationLandscapeLeft:
break;
default:
break;
}
rotation_angle = rotation_angle % 360;
if (rotation_angle == 90 || rotation_angle == 270) {
flip_bounds = true;
}
if (flip_bounds) {
NSLog(@"flip bounds");
bounds = CGRectMake(0, 0, bounds.size.height, bounds.size.width);
}
layer.position = CGPointMake(self.parentView.frame.size.width/2., self.parentView.frame.size.height/2.);
self.customPreviewLayer.bounds = CGRectMake(0, 0, self.parentView.frame.size.width, self.parentView.frame.size.height);
layer.affineTransform = CGAffineTransformMakeRotation( DegreesToRadians(rotation_angle) );
layer.bounds = bounds;
}
}
// TODO fix
- (void)layoutPreviewLayer;
{
NSLog(@"layout preview layer");
if (self.parentView != nil) {
CALayer* layer = self.customPreviewLayer;
CGRect bounds = self.customPreviewLayer.bounds;
int rotation_angle = 0;
bool flip_bounds = false;
switch (currentDeviceOrientation) {
case UIDeviceOrientationPortrait:
rotation_angle = 270;
break;
case UIDeviceOrientationPortraitUpsideDown:
rotation_angle = 90;
break;
case UIDeviceOrientationLandscapeLeft:
NSLog(@"left");
rotation_angle = 180;
break;
case UIDeviceOrientationLandscapeRight:
NSLog(@"right");
rotation_angle = 0;
break;
case UIDeviceOrientationFaceUp:
case UIDeviceOrientationFaceDown:
default:
break; // leave the layer in its last known orientation
}
switch (self.defaultAVCaptureVideoOrientation) {
case AVCaptureVideoOrientationLandscapeRight:
rotation_angle += 180;
break;
case AVCaptureVideoOrientationPortraitUpsideDown:
rotation_angle += 270;
break;
case AVCaptureVideoOrientationPortrait:
rotation_angle += 90;
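// fall through: Portrait adds 90 and then takes LandscapeLeft's break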
case AVCaptureVideoOrientationLandscapeLeft:
break;
default:
break;
}
rotation_angle = rotation_angle % 360;
if (rotation_angle == 90 || rotation_angle == 270) {
flip_bounds = true;
}
if (flip_bounds) {
NSLog(@"flip bounds");
bounds = CGRectMake(0, 0, bounds.size.height, bounds.size.width);
}
layer.position = CGPointMake(self.parentView.frame.size.width/2., self.parentView.frame.size.height/2.);
layer.affineTransform = CGAffineTransformMakeRotation( DegreesToRadians(rotation_angle) );
layer.bounds = bounds;
}
}
#pragma mark - Private Interface
- (void)createVideoDataOutput;
{
// Make a video data output
self.videoDataOutput = [AVCaptureVideoDataOutput new];
// In grayscale mode we want YUV (YpCbCr 4:2:0) so we can directly access the graylevel intensity values (Y component)
// In color mode, the BGRA format is used
OSType format = self.grayscaleMode ? kCVPixelFormatType_420YpCbCr8BiPlanarFullRange : kCVPixelFormatType_32BGRA;
self.videoDataOutput.videoSettings = [NSDictionary dictionaryWithObject:[NSNumber numberWithUnsignedInt:format]
forKey:(id)kCVPixelBufferPixelFormatTypeKey];
// discard if the data output queue is blocked (as we process the still image)
[self.videoDataOutput setAlwaysDiscardsLateVideoFrames:YES];
if ( [self.captureSession canAddOutput:self.videoDataOutput] ) {
[self.captureSession addOutput:self.videoDataOutput];
}
[[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo] setEnabled:YES];
// set default FPS
AVCaptureDeviceInput *currentInput = [self.captureSession.inputs objectAtIndex:0];
AVCaptureDevice *device = currentInput.device;
NSError *error = nil;
[device lockForConfiguration:&error];
float maxRate = ((AVFrameRateRange*) [device.activeFormat.videoSupportedFrameRateRanges objectAtIndex:0]).maxFrameRate;
if (maxRate > self.defaultFPS - 1 && error == nil) {
[device setActiveVideoMinFrameDuration:CMTimeMake(1, self.defaultFPS)];
[device setActiveVideoMaxFrameDuration:CMTimeMake(1, self.defaultFPS)];
NSLog(@"[Camera] FPS set to %d", self.defaultFPS);
} else {
NSLog(@"[Camera] unable to set defaultFPS at %d FPS, max is %f FPS", self.defaultFPS, maxRate);
}
if (error != nil) {
NSLog(@"[Camera] unable to set defaultFPS: %@", error);
}
[device unlockForConfiguration];
// set video mirroring for front camera (more intuitive)
if ([self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].supportsVideoMirroring) {
if (self.defaultAVCaptureDevicePosition == AVCaptureDevicePositionFront) {
[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoMirrored = YES;
} else {
[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoMirrored = NO;
}
}
// set default video orientation
if ([self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].supportsVideoOrientation) {
[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoOrientation = self.defaultAVCaptureVideoOrientation;
}
// create a custom preview layer
self.customPreviewLayer = [CALayer layer];
self.customPreviewLayer.bounds = CGRectMake(0, 0, self.parentView.frame.size.width, self.parentView.frame.size.height);
self.customPreviewLayer.position = CGPointMake(self.parentView.frame.size.width/2., self.parentView.frame.size.height/2.);
[self updateOrientation];
// create a serial dispatch queue used for the sample buffer delegate as well as when a still image is captured
// a serial dispatch queue must be used to guarantee that video frames will be delivered in order
// see the header doc for setSampleBufferDelegate:queue: for more information
videoDataOutputQueue = dispatch_queue_create("VideoDataOutputQueue", DISPATCH_QUEUE_SERIAL);
[self.videoDataOutput setSampleBufferDelegate:self queue:videoDataOutputQueue];
NSLog(@"[Camera] created AVCaptureVideoDataOutput");
}
- (void)createVideoFileOutput;
{
/* Video File Output in H.264, via AVAssetWriter */
NSLog(@"Create Video with dimensions %dx%d", self.imageWidth, self.imageHeight);
NSDictionary *outputSettings
= [NSDictionary dictionaryWithObjectsAndKeys:[NSNumber numberWithInt:self.imageWidth], AVVideoWidthKey,
[NSNumber numberWithInt:self.imageHeight], AVVideoHeightKey,
AVVideoCodecH264, AVVideoCodecKey,
nil
];
self.recordAssetWriterInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:outputSettings];
int pixelBufferFormat = (self.grayscaleMode == YES) ? kCVPixelFormatType_420YpCbCr8BiPlanarFullRange : kCVPixelFormatType_32BGRA;
self.recordPixelBufferAdaptor =
[[AVAssetWriterInputPixelBufferAdaptor alloc]
initWithAssetWriterInput:self.recordAssetWriterInput
sourcePixelBufferAttributes:[NSDictionary dictionaryWithObjectsAndKeys:[NSNumber numberWithInt:pixelBufferFormat], kCVPixelBufferPixelFormatTypeKey, nil]];
NSError* error = nil;
NSLog(@"Create AVAssetWriter with url: %@", [self videoFileURL]);
self.recordAssetWriter = [AVAssetWriter assetWriterWithURL:[self videoFileURL]
fileType:AVFileTypeMPEG4
error:&error];
if (error != nil) {
NSLog(@"[Camera] Unable to create AVAssetWriter: %@", error);
}
[self.recordAssetWriter addInput:self.recordAssetWriterInput];
self.recordAssetWriterInput.expectsMediaDataInRealTime = YES;
NSLog(@"[Camera] created AVAssetWriter");
}
- (void)createCaptureOutput;
{
[self createVideoDataOutput];
if (self.recordVideo == YES) {
[self createVideoFileOutput];
}
}
- (void)createCustomVideoPreview;
{
[self.parentView.layer addSublayer:self.customPreviewLayer];
}
- (CVPixelBufferRef) pixelBufferFromCGImage: (CGImageRef) image
{
CGSize frameSize = CGSizeMake(CGImageGetWidth(image), CGImageGetHeight(image));
NSDictionary *options = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithBool:NO], kCVPixelBufferCGImageCompatibilityKey,
[NSNumber numberWithBool:NO], kCVPixelBufferCGBitmapContextCompatibilityKey,
nil];
CVPixelBufferRef pxbuffer = NULL;
CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, frameSize.width,
frameSize.height, kCVPixelFormatType_32ARGB, (CFDictionaryRef) CFBridgingRetain(options),
&pxbuffer);
NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);
CVPixelBufferLockBaseAddress(pxbuffer, 0);
void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(pxdata, frameSize.width,
frameSize.height, 8, 4*frameSize.width, rgbColorSpace,
kCGImageAlphaPremultipliedFirst);
CGContextDrawImage(context, CGRectMake(0, 0, CGImageGetWidth(image),
CGImageGetHeight(image)), image);
CGColorSpaceRelease(rgbColorSpace);
CGContextRelease(context);
CVPixelBufferUnlockBaseAddress(pxbuffer, 0);
return pxbuffer;
}
#pragma mark - Protocol AVCaptureVideoDataOutputSampleBufferDelegate
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
(void)captureOutput;
(void)connection;
auto strongDelegate = self.delegate;
if (strongDelegate) {
// convert from Core Media to Core Video
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(imageBuffer, 0);
void* bufferAddress;
size_t width;
size_t height;
size_t bytesPerRow;
CGColorSpaceRef colorSpace;
CGContextRef context;
int format_opencv;
OSType format = CVPixelBufferGetPixelFormatType(imageBuffer);
if (format == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) {
format_opencv = CV_8UC1;
bufferAddress = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0);
width = CVPixelBufferGetWidthOfPlane(imageBuffer, 0);
height = CVPixelBufferGetHeightOfPlane(imageBuffer, 0);
bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, 0);
} else { // expect kCVPixelFormatType_32BGRA
format_opencv = CV_8UC4;
bufferAddress = CVPixelBufferGetBaseAddress(imageBuffer);
width = CVPixelBufferGetWidth(imageBuffer);
height = CVPixelBufferGetHeight(imageBuffer);
bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
}
// delegate image processing to the delegate
cv::Mat image((int)height, (int)width, format_opencv, bufferAddress, bytesPerRow);
CGImage* dstImage;
if ([strongDelegate respondsToSelector:@selector(processImage:)]) {
[strongDelegate processImage:image];
}
// check if matrix data pointer or dimensions were changed by the delegate
bool iOSimage = false;
if (height == (size_t)image.rows && width == (size_t)image.cols && format_opencv == image.type() && bufferAddress == image.data && bytesPerRow == image.step) {
iOSimage = true;
}
// (create color space, create graphics context, render buffer)
CGBitmapInfo bitmapInfo;
// decide whether the image is grayscale, RGB, or RGBA
if (image.channels() == 1) {
colorSpace = CGColorSpaceCreateDeviceGray();
bitmapInfo = kCGImageAlphaNone;
} else if (image.channels() == 3) {
colorSpace = CGColorSpaceCreateDeviceRGB();
bitmapInfo = kCGImageAlphaNone;
if (iOSimage) {
bitmapInfo |= kCGBitmapByteOrder32Little;
} else {
bitmapInfo |= kCGBitmapByteOrder32Big;
}
} else {
colorSpace = CGColorSpaceCreateDeviceRGB();
bitmapInfo = kCGImageAlphaPremultipliedFirst;
if (iOSimage) {
bitmapInfo |= kCGBitmapByteOrder32Little;
} else {
bitmapInfo |= kCGBitmapByteOrder32Big;
}
}
if (iOSimage) {
context = CGBitmapContextCreate(bufferAddress, width, height, 8, bytesPerRow, colorSpace, bitmapInfo);
dstImage = CGBitmapContextCreateImage(context);
CGContextRelease(context);
} else {
NSData *data = [NSData dataWithBytes:image.data length:image.elemSize()*image.total()];
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
// Creating CGImage from cv::Mat
dstImage = CGImageCreate(image.cols, // width
image.rows, // height
8, // bits per component
8 * image.elemSize(), // bits per pixel
image.step, // bytesPerRow
colorSpace, // colorspace
bitmapInfo, // bitmap info
provider, // CGDataProviderRef
NULL, // decode
false, // should interpolate
kCGRenderingIntentDefault // intent
);
CGDataProviderRelease(provider);
}
// render buffer
dispatch_sync(dispatch_get_main_queue(), ^{
self.customPreviewLayer.contents = (__bridge id)dstImage;
});
recordingCountDown--;
if (self.recordVideo == YES && recordingCountDown < 0) {
lastSampleTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
// CMTimeShow(lastSampleTime);
if (self.recordAssetWriter.status != AVAssetWriterStatusWriting) {
[self.recordAssetWriter startWriting];
[self.recordAssetWriter startSessionAtSourceTime:lastSampleTime];
if (self.recordAssetWriter.status != AVAssetWriterStatusWriting) {
NSLog(@"[Camera] Recording Error: asset writer status is not writing: %@", self.recordAssetWriter.error);
return;
} else {
NSLog(@"[Camera] Video recording started");
}
}
if (self.recordAssetWriterInput.readyForMoreMediaData) {
CVImageBufferRef pixelBuffer = [self pixelBufferFromCGImage:dstImage];
if (! [self.recordPixelBufferAdaptor appendPixelBuffer:pixelBuffer
withPresentationTime:lastSampleTime] ) {
NSLog(@"Video Writing Error");
}
if (pixelBuffer != nullptr)
CVPixelBufferRelease(pixelBuffer);
}
}
// cleanup
CGImageRelease(dstImage);
CGColorSpaceRelease(colorSpace);
CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
}
}
- (void)updateOrientation;
{
if (self.rotateVideo == YES)
{
NSLog(@"rotate..");
self.customPreviewLayer.bounds = CGRectMake(0, 0, self.parentView.frame.size.width, self.parentView.frame.size.height);
[self layoutPreviewLayer];
}
}
- (void)saveVideo;
{
if (self.recordVideo == NO) {
return;
}
UISaveVideoAtPathToSavedPhotosAlbum([self videoFileString], nil, nil, NULL);
}
- (NSURL *)videoFileURL;
{
NSString *outputPath = [[NSString alloc] initWithFormat:@"%@%@", NSTemporaryDirectory(), @"output.mov"];
NSURL *outputURL = [NSURL fileURLWithPath:outputPath];
NSFileManager *fileManager = [NSFileManager defaultManager];
if ([fileManager fileExistsAtPath:outputPath]) {
NSLog(@"file exists");
}
return outputURL;
}
- (NSString *)videoFileString;
{
NSString *outputPath = [[NSString alloc] initWithFormat:@"%@%@", NSTemporaryDirectory(), @"output.mov"];
return outputPath;
}
@end

View File

@ -0,0 +1,249 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "precomp.hpp"
#ifdef HAVE_LIBREALSENSE
#include "cap_librealsense.hpp"
namespace cv
{
VideoCapture_LibRealsense::VideoCapture_LibRealsense(int) : mAlign(RS2_STREAM_COLOR)
{
try
{
rs2::config config;
// Configure all streams to run at VGA resolution at default fps
config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16);
config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_BGR8);
config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8);
mPipe.start(config);
}
catch (const rs2::error&)
{
}
}
VideoCapture_LibRealsense::~VideoCapture_LibRealsense(){}
double VideoCapture_LibRealsense::getProperty(int propIdx) const
{
double propValue = 0.0;
const int purePropIdx = propIdx & ~CAP_INTELPERC_GENERATORS_MASK;
if((propIdx & CAP_INTELPERC_GENERATORS_MASK) == CAP_INTELPERC_IMAGE_GENERATOR)
{
propValue = getImageGeneratorProperty(purePropIdx);
}
else if((propIdx & CAP_INTELPERC_GENERATORS_MASK) == CAP_INTELPERC_DEPTH_GENERATOR)
{
propValue = getDepthGeneratorProperty(purePropIdx);
}
else if((propIdx & CAP_INTELPERC_GENERATORS_MASK) == CAP_INTELPERC_IR_GENERATOR)
{
propValue = getIrGeneratorProperty(purePropIdx);
}
else
{
propValue = getCommonProperty(purePropIdx);
}
return propValue;
}
double VideoCapture_LibRealsense::getImageGeneratorProperty(int propIdx) const
{
double propValue = 0.0;
const rs2::video_stream_profile profile = mPipe.get_active_profile().get_stream(RS2_STREAM_COLOR).as<rs2::video_stream_profile>();
if(!profile)
{
return propValue;
}
switch(propIdx)
{
case CAP_PROP_FRAME_WIDTH:
propValue = static_cast<double>(profile.width());
break;
case CAP_PROP_FRAME_HEIGHT:
propValue = static_cast<double>(profile.height());
break;
case CAP_PROP_FPS:
propValue = static_cast<double>(profile.fps());
break;
}
return propValue;
}
double VideoCapture_LibRealsense::getDepthGeneratorProperty(int propIdx) const
{
double propValue = 0.0;
const rs2::video_stream_profile profile = mPipe.get_active_profile().get_stream(RS2_STREAM_DEPTH).as<rs2::video_stream_profile>();
const rs2::depth_sensor sensor = mPipe.get_active_profile().get_device().first<rs2::depth_sensor>();
if(!profile || !sensor)
{
return propValue;
}
switch(propIdx)
{
case CAP_PROP_FRAME_WIDTH:
propValue = static_cast<double>(profile.width());
break;
case CAP_PROP_FRAME_HEIGHT:
propValue = static_cast<double>(profile.height());
break;
case CAP_PROP_FPS:
propValue = static_cast<double>(profile.fps());
break;
case CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE:
propValue = static_cast<double>(sensor.get_depth_scale());
break;
case CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ:
propValue = static_cast<double>(profile.get_intrinsics().fx);
break;
case CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT:
propValue = static_cast<double>(profile.get_intrinsics().fy);
break;
}
return propValue;
}
double VideoCapture_LibRealsense::getIrGeneratorProperty(int propIdx) const
{
double propValue = 0.0;
const rs2::video_stream_profile profile = mPipe.get_active_profile().get_stream(RS2_STREAM_INFRARED).as<rs2::video_stream_profile>();
if(!profile)
{
return propValue;
}
switch(propIdx)
{
case CAP_PROP_FRAME_WIDTH:
propValue = static_cast<double>(profile.width());
break;
case CAP_PROP_FRAME_HEIGHT:
propValue = static_cast<double>(profile.height());
break;
case CAP_PROP_FPS:
propValue = static_cast<double>(profile.fps());
break;
}
return propValue;
}
double VideoCapture_LibRealsense::getCommonProperty(int propIdx) const
{
double propValue = 0.0;
const rs2::video_stream_profile profile = mPipe.get_active_profile().get_stream(RS2_STREAM_DEPTH).as<rs2::video_stream_profile>();
const rs2::depth_sensor sensor = mPipe.get_active_profile().get_device().first<rs2::depth_sensor>();
if(!profile || !sensor)
{
return propValue;
}
switch(propIdx)
{
case CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FPS:
propValue = getDepthGeneratorProperty(propIdx);
break;
case CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE:
propValue = static_cast<double>(sensor.get_depth_scale());
break;
case CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ:
propValue = static_cast<double>(profile.get_intrinsics().fx);
break;
case CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT:
propValue = static_cast<double>(profile.get_intrinsics().fy);
break;
}
return propValue;
}
bool VideoCapture_LibRealsense::setProperty(int, double)
{
bool isSet = false;
return isSet;
}
bool VideoCapture_LibRealsense::grabFrame()
{
if (!isOpened())
return false;
try
{
mData = mAlign.process(mPipe.wait_for_frames());
}
catch (const rs2::error&)
{
return false;
}
return true;
}
bool VideoCapture_LibRealsense::retrieveFrame(int outputType, cv::OutputArray frame)
{
rs2::video_frame _frame(nullptr);
int type;
switch (outputType)
{
case CAP_INTELPERC_DEPTH_MAP:
_frame = mData.get_depth_frame().as<rs2::video_frame>();
type = CV_16UC1;
break;
case CAP_INTELPERC_IR_MAP:
_frame = mData.get_infrared_frame();
type = CV_8UC1;
break;
case CAP_INTELPERC_IMAGE:
_frame = mData.get_color_frame();
type = CV_8UC3;
break;
default:
return false;
}
try
{
// we copy the data straight away, so const_cast should be fine
void* data = const_cast<void*>(_frame.get_data());
Mat(_frame.get_height(), _frame.get_width(), type, data, _frame.get_stride_in_bytes()).copyTo(frame);
if(_frame.get_profile().format() == RS2_FORMAT_RGB8)
cvtColor(frame, frame, COLOR_RGB2BGR);
}
catch (const rs2::error&)
{
return false;
}
return true;
}
int VideoCapture_LibRealsense::getCaptureDomain()
{
return CAP_INTELPERC;
}
bool VideoCapture_LibRealsense::isOpened() const
{
return bool(std::shared_ptr<rs2_pipeline>(mPipe));
}
Ptr<IVideoCapture> create_RealSense_capture(int index)
{
return makePtr<VideoCapture_LibRealsense>(index);
}
}
#endif
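// A minimal user-level sketch (an assumption for illustration: librealsense
// support compiled in and a RealSense device attached). The backend is reached
// through the generic VideoCapture interface:
//
//   cv::VideoCapture cap(0, cv::CAP_INTELPERC);
//   cv::Mat depth, color;
//   if (cap.grab()) {
//       cap.retrieve(depth, cv::CAP_INTELPERC_DEPTH_MAP); // CV_16UC1, raw Z16 units
//       cap.retrieve(color, cv::CAP_INTELPERC_IMAGE);     // CV_8UC3, BGR
//   }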

View File

@ -0,0 +1,43 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef _CAP_LIBREALSENSE_HPP_
#define _CAP_LIBREALSENSE_HPP_
#ifdef HAVE_LIBREALSENSE
#include <librealsense2/rs.hpp>
namespace cv
{
class VideoCapture_LibRealsense : public IVideoCapture
{
public:
VideoCapture_LibRealsense(int index);
virtual ~VideoCapture_LibRealsense();
virtual double getProperty(int propIdx) const CV_OVERRIDE;
virtual bool setProperty(int propIdx, double propVal) CV_OVERRIDE;
virtual bool grabFrame() CV_OVERRIDE;
virtual bool retrieveFrame(int outputType, OutputArray frame) CV_OVERRIDE;
virtual int getCaptureDomain() CV_OVERRIDE;
virtual bool isOpened() const CV_OVERRIDE;
protected:
rs2::pipeline mPipe;
rs2::frameset mData;
rs2::align mAlign;
double getDepthGeneratorProperty(int propIdx) const;
double getImageGeneratorProperty(int propIdx) const;
double getIrGeneratorProperty(int propIdx) const;
double getCommonProperty(int propIdx) const;
};
}
#endif
#endif

View File

@ -0,0 +1,233 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "cap_mfx_common.hpp"
// Linux specific
#ifdef __linux__
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#endif
using namespace std;
using namespace cv;
static mfxIMPL getImpl()
{
static const size_t res = utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_IMPL", MFX_IMPL_AUTO_ANY);
return (mfxIMPL)res;
}
static size_t getExtraSurfaceNum()
{
static const size_t res = cv::utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_EXTRA_SURFACE_NUM", 1);
return res;
}
static size_t getPoolTimeoutSec()
{
static const size_t res = utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_POOL_TIMEOUT", 1);
return res;
}
//==================================================================================================
bool DeviceHandler::init(MFXVideoSession &session)
{
mfxStatus res = MFX_ERR_NONE;
mfxIMPL impl = getImpl();
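// mfxVersion stores {Minor, Major}, so {19, 1} requests MFX API version 1.19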
mfxVersion ver = { {19, 1} };
res = session.Init(impl, &ver);
DBG(cout << "MFX SessionInit: " << res << endl);
res = session.QueryIMPL(&impl);
DBG(cout << "MFX QueryIMPL: " << res << " => " << asHex(impl) << endl);
res = session.QueryVersion(&ver);
DBG(cout << "MFX QueryVersion: " << res << " => " << ver.Major << "." << ver.Minor << endl);
if (res != MFX_ERR_NONE)
return false;
return initDeviceSession(session);
}
//==================================================================================================
#ifdef __linux__
VAHandle::VAHandle() {
// TODO: provide a way of modifying this path
const string filename = "/dev/dri/renderD128";
file = open(filename.c_str(), O_RDWR);
if (file < 0)
CV_Error(Error::StsError, "Can't open file: " + filename);
display = vaGetDisplayDRM(file);
}
VAHandle::~VAHandle() {
if (display) {
vaTerminate(display);
}
if (file >= 0) {
close(file);
}
}
bool VAHandle::initDeviceSession(MFXVideoSession &session) {
int majorVer = 0, minorVer = 0;
VAStatus va_res = vaInitialize(display, &majorVer, &minorVer);
DBG(cout << "vaInitialize: " << va_res << endl << majorVer << '.' << minorVer << endl);
if (va_res == VA_STATUS_SUCCESS) {
mfxStatus mfx_res = session.SetHandle(static_cast<mfxHandleType>(MFX_HANDLE_VA_DISPLAY), display);
DBG(cout << "MFX SetHandle: " << mfx_res << endl);
if (mfx_res == MFX_ERR_NONE) {
return true;
}
}
return false;
}
#endif // __linux__
DeviceHandler * createDeviceHandler()
{
#if defined __linux__
return new VAHandle();
#elif defined _WIN32
return new DXHandle();
#else
return 0;
#endif
}
//==================================================================================================
SurfacePool::SurfacePool(ushort width_, ushort height_, ushort count, const mfxFrameInfo &frameInfo, uchar bpp)
: width(alignSize(width_, 32)),
height(alignSize(height_, 32)),
oneSize(width * height * bpp / 8),
buffers(count * oneSize),
surfaces(count)
{
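// NV12 layout: a full-resolution Y plane (width*height bytes) followed by an
// interleaved half-resolution UV plane (width*height/2 bytes), i.e. 12 bits
// per pixel -- hence the default bpp argument of 12 and oneSize = w*h*bpp/8.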
for(int i = 0; i < count; ++i)
{
mfxFrameSurface1 &surface = surfaces[i];
uint8_t * dataPtr = buffers.data() + oneSize * i;
memset(&surface, 0, sizeof(mfxFrameSurface1));
surface.Info = frameInfo;
surface.Data.Y = dataPtr;
surface.Data.UV = dataPtr + width * height;
surface.Data.PitchLow = width & 0xFFFF;
surface.Data.PitchHigh = (width >> 16) & 0xFFFF;
DBG(cout << "allocate surface " << (void*)&surface << ", Y = " << (void*)dataPtr << " (" << width << "x" << height << ")" << endl);
}
DBG(cout << "Allocated: " << endl
<< "- surface data: " << buffers.size() << " bytes" << endl
<< "- surface headers: " << surfaces.size() * sizeof(mfxFrameSurface1) << " bytes" << endl);
}
SurfacePool::~SurfacePool()
{
}
SurfacePool * SurfacePool::_create(const mfxFrameAllocRequest &request, const mfxVideoParam &params)
{
return new SurfacePool(request.Info.Width,
request.Info.Height,
saturate_cast<ushort>((size_t)request.NumFrameSuggested + getExtraSurfaceNum()),
params.mfx.FrameInfo);
}
mfxFrameSurface1 *SurfacePool::getFreeSurface()
{
const int64 start = cv::getTickCount();
do
{
for(std::vector<mfxFrameSurface1>::iterator i = surfaces.begin(); i != surfaces.end(); ++i)
if (!i->Data.Locked)
return &(*i);
sleep_ms(10);
}
while((cv::getTickCount() - start) / cv::getTickFrequency() < getPoolTimeoutSec()); // seconds
DBG(cout << "No free surface!" << std::endl);
return 0;
}
//==================================================================================================
ReadBitstream::ReadBitstream(const char *filename, size_t maxSize) : drain(false)
{
input.open(filename, std::ios::in | std::ios::binary);
DBG(cout << "Open " << filename << " -> " << input.is_open() << std::endl);
memset(&stream, 0, sizeof(stream));
stream.MaxLength = (mfxU32)maxSize;
stream.Data = new mfxU8[stream.MaxLength];
CV_Assert(stream.Data);
}
ReadBitstream::~ReadBitstream()
{
delete[] stream.Data;
}
bool ReadBitstream::isOpened() const
{
return input.is_open();
}
bool ReadBitstream::isDone() const
{
return input.eof();
}
bool ReadBitstream::read()
{
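// Compact the buffer: move the unread tail to the front, then top it up from
// the file so the decoder always sees one contiguous bitstream window.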
memmove(stream.Data, stream.Data + stream.DataOffset, stream.DataLength);
stream.DataOffset = 0;
input.read((char*)(stream.Data + stream.DataLength), stream.MaxLength - stream.DataLength);
if (input.eof() || input.good())
{
mfxU32 bytesRead = (mfxU32)input.gcount();
if (bytesRead > 0)
{
stream.DataLength += bytesRead;
DBG(cout << "read " << bytesRead << " bytes" << endl);
return true;
}
}
return false;
}
//==================================================================================================
WriteBitstream::WriteBitstream(const char * filename, size_t maxSize)
{
output.open(filename, std::ios::out | std::ios::binary);
DBG(cout << "BS Open " << filename << " -> " << output.is_open() << std::endl);
memset(&stream, 0, sizeof(stream));
stream.MaxLength = (mfxU32)maxSize;
stream.Data = new mfxU8[stream.MaxLength];
DBG(cout << "BS Allocate " << maxSize << " bytes (" << ((float)maxSize / (1 << 20)) << " Mb)" << endl);
CV_Assert(stream.Data);
}
WriteBitstream::~WriteBitstream()
{
delete[] stream.Data;
}
bool WriteBitstream::write()
{
output.write((char*)(stream.Data + stream.DataOffset), stream.DataLength);
stream.DataLength = 0;
return output.good();
}
bool WriteBitstream::isOpened() const
{
return output.is_open();
}

View File

@ -0,0 +1,370 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef MFXHELPER_H
#define MFXHELPER_H
#include "opencv2/core.hpp"
#include "opencv2/core/utils/configuration.private.hpp"
#include <iostream>
#include <fstream>
#include <sstream>
#ifdef HAVE_ONEVPL
# include <vpl/mfxcommon.h>
# include <vpl/mfxstructures.h>
# include <vpl/mfxvideo++.h>
# include <vpl/mfxvp8.h>
# include <vpl/mfxjpeg.h>
#else
# include <mfxcommon.h>
# include <mfxstructures.h>
# include <mfxvideo++.h>
# include <mfxvp8.h>
# include <mfxjpeg.h>
# ifdef HAVE_MFX_PLUGIN
# include <mfxplugin++.h>
# endif
#endif
// //
// Debug helpers //
// //
#if 0
# define DBG(i) i
#else
# define DBG(i)
#endif
#if 1
# define MSG(i) i
#else
# define MSG(i)
#endif
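// DBG(...) statements compile away unless the '#if 0' above is flipped to 1;
// MSG(...) statements are enabled by default, e.g.:
//   MSG(std::cerr << "MFX: something went wrong" << std::endl);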
template <typename T>
struct HexWrap {
HexWrap(T val_) : val(val_) {}
T val;
};
template <typename T>
inline std::ostream & operator<<(std::ostream &out, const HexWrap<T> &wrap) {
std::ios_base::fmtflags flags = out.flags(std::ios::hex | std::ios::showbase);
out << wrap.val;
out.flags(flags);
return out;
}
template <typename T>
inline ::HexWrap<T> asHex(const T & val) {
return ::HexWrap<T>(val);
}
struct FourCC
{
FourCC(uint val) : val32(val) {}
FourCC(char a, char b, char c, char d) { val8[0] = a; val8[1] = b; val8[2] = c; val8[3] = d; }
union {
uint val32;
int vali32;
uchar val8[4];
};
};
inline std::ostream & operator<<(std::ostream &out, FourCC cc) {
for (size_t i = 0; i < 4; ++i) out << cc.val8[i];
out << " (" << asHex(cc.val32) << ")";
return out;
}
inline std::string mfxStatusToString(mfxStatus s) {
switch (s)
{
case MFX_ERR_NONE: return "MFX_ERR_NONE";
case MFX_ERR_UNKNOWN: return "MFX_ERR_UNKNOWN";
case MFX_ERR_NULL_PTR: return "MFX_ERR_NULL_PTR";
case MFX_ERR_UNSUPPORTED: return "MFX_ERR_UNSUPPORTED";
case MFX_ERR_MEMORY_ALLOC: return "MFX_ERR_MEMORY_ALLOC";
case MFX_ERR_NOT_ENOUGH_BUFFER: return "MFX_ERR_NOT_ENOUGH_BUFFER";
case MFX_ERR_INVALID_HANDLE: return "MFX_ERR_INVALID_HANDLE";
case MFX_ERR_LOCK_MEMORY: return "MFX_ERR_LOCK_MEMORY";
case MFX_ERR_NOT_INITIALIZED: return "MFX_ERR_NOT_INITIALIZED";
case MFX_ERR_NOT_FOUND: return "MFX_ERR_NOT_FOUND";
case MFX_ERR_MORE_DATA: return "MFX_ERR_MORE_DATA";
case MFX_ERR_MORE_SURFACE: return "MFX_ERR_MORE_SURFACE";
case MFX_ERR_ABORTED: return "MFX_ERR_ABORTED";
case MFX_ERR_DEVICE_LOST: return "MFX_ERR_DEVICE_LOST";
case MFX_ERR_INCOMPATIBLE_VIDEO_PARAM: return "MFX_ERR_INCOMPATIBLE_VIDEO_PARAM";
case MFX_ERR_INVALID_VIDEO_PARAM: return "MFX_ERR_INVALID_VIDEO_PARAM";
case MFX_ERR_UNDEFINED_BEHAVIOR: return "MFX_ERR_UNDEFINED_BEHAVIOR";
case MFX_ERR_DEVICE_FAILED: return "MFX_ERR_DEVICE_FAILED";
case MFX_ERR_MORE_BITSTREAM: return "MFX_ERR_MORE_BITSTREAM";
case MFX_ERR_GPU_HANG: return "MFX_ERR_GPU_HANG";
case MFX_ERR_REALLOC_SURFACE: return "MFX_ERR_REALLOC_SURFACE";
case MFX_WRN_IN_EXECUTION: return "MFX_WRN_IN_EXECUTION";
case MFX_WRN_DEVICE_BUSY: return "MFX_WRN_DEVICE_BUSY";
case MFX_WRN_VIDEO_PARAM_CHANGED: return "MFX_WRN_VIDEO_PARAM_CHANGED";
case MFX_WRN_PARTIAL_ACCELERATION: return "MFX_WRN_PARTIAL_ACCELERATION";
case MFX_WRN_INCOMPATIBLE_VIDEO_PARAM: return "MFX_WRN_INCOMPATIBLE_VIDEO_PARAM";
case MFX_WRN_VALUE_NOT_CHANGED: return "MFX_WRN_VALUE_NOT_CHANGED";
case MFX_WRN_OUT_OF_RANGE: return "MFX_WRN_OUT_OF_RANGE";
case MFX_WRN_FILTER_SKIPPED: return "MFX_WRN_FILTER_SKIPPED";
default: return "<Invalid or unknown mfxStatus>";
}
}
inline std::ostream & operator<<(std::ostream &out, mfxStatus s) {
out << mfxStatusToString(s) << " (" << (int)s << ")"; return out;
}
inline std::ostream & operator<<(std::ostream &out, const mfxInfoMFX &info) {
out << "InfoMFX:" << std::endl
<< "| Codec: " << FourCC(info.CodecId) << " / " << info.CodecProfile << " / " << info.CodecLevel << std::endl
<< "| DecodedOrder: " << info.DecodedOrder << std::endl
<< "| TimeStampCalc: " << info.TimeStampCalc << std::endl
;
return out;
}
inline std::ostream & operator<<(std::ostream & out, const mfxFrameInfo & info) {
out << "FrameInfo: " << std::endl
<< "| FourCC: " << FourCC(info.FourCC) << std::endl
<< "| Size: " << info.Width << "x" << info.Height << std::endl
<< "| ROI: " << "(" << info.CropX << ";" << info.CropY << ") " << info.CropW << "x" << info.CropH << std::endl
<< "| BitDepth(L/C): " << info.BitDepthLuma << " / " << info.BitDepthChroma << std::endl
<< "| Shift: " << info.Shift << std::endl
<< "| TemporalID: " << info.FrameId.TemporalId << std::endl
<< "| FrameRate: " << info.FrameRateExtN << "/" << info.FrameRateExtD << std::endl
<< "| AspectRatio: " << info.AspectRatioW << "x" << info.AspectRatioH << std::endl
<< "| PicStruct: " << info.PicStruct << std::endl
<< "| ChromaFormat: " << info.ChromaFormat << std::endl
;
return out;
}
inline std::ostream & operator<<(std::ostream &out, const mfxFrameData &data) {
out << "FrameData:" << std::endl
<< "| NumExtParam: " << data.NumExtParam << std::endl
<< "| MemType: " << data.MemType << std::endl
<< "| PitchHigh: " << data.PitchHigh << std::endl
<< "| TimeStamp: " << data.TimeStamp << std::endl
<< "| FrameOrder: " << data.FrameOrder << std::endl
<< "| Locked: " << data.Locked << std::endl
<< "| Pitch: " << data.PitchHigh << ", " << data.PitchLow << std::endl
<< "| Y: " << (void*)data.Y << std::endl
<< "| U: " << (void*)data.U << std::endl
<< "| V: " << (void*)data.V << std::endl
;
return out;
}
//==================================================================================================
template <typename T>
inline void cleanup(T * &ptr)
{
if (ptr)
{
delete ptr;
ptr = 0;
}
}
//==================================================================================================
class Plugin
{
public:
static Plugin * loadEncoderPlugin(MFXVideoSession &session, mfxU32 codecId)
{
#ifdef HAVE_MFX_PLUGIN
static const mfxPluginUID hevc_enc_uid = { 0x6f, 0xad, 0xc7, 0x91, 0xa0, 0xc2, 0xeb, 0x47, 0x9a, 0xb6, 0xdc, 0xd5, 0xea, 0x9d, 0xa3, 0x47 };
if (codecId == MFX_CODEC_HEVC)
return new Plugin(session, hevc_enc_uid);
#else
CV_UNUSED(session); CV_UNUSED(codecId);
#endif
return 0;
}
static Plugin * loadDecoderPlugin(MFXVideoSession &session, mfxU32 codecId)
{
#ifdef HAVE_MFX_PLUGIN
static const mfxPluginUID hevc_dec_uid = { 0x33, 0xa6, 0x1c, 0x0b, 0x4c, 0x27, 0x45, 0x4c, 0xa8, 0xd8, 0x5d, 0xde, 0x75, 0x7c, 0x6f, 0x8e };
if (codecId == MFX_CODEC_HEVC)
return new Plugin(session, hevc_dec_uid);
#else
CV_UNUSED(session); CV_UNUSED(codecId);
#endif
return 0;
}
~Plugin()
{
#ifdef HAVE_MFX_PLUGIN
if (isGood())
MFXVideoUSER_UnLoad(session, &uid);
#endif
}
bool isGood() const { return res >= MFX_ERR_NONE; }
private:
mfxStatus res;
private:
#ifdef HAVE_MFX_PLUGIN
MFXVideoSession &session;
mfxPluginUID uid;
Plugin(MFXVideoSession &_session, mfxPluginUID _uid) : session(_session), uid(_uid)
{
res = MFXVideoUSER_Load(session, &uid, 1);
}
#endif
Plugin(const Plugin &);
Plugin &operator=(const Plugin &);
};
//==================================================================================================
class ReadBitstream
{
public:
ReadBitstream(const char * filename, size_t maxSize = 10 * 1024 * 1024);
~ReadBitstream();
bool isOpened() const;
bool isDone() const;
bool read();
private:
ReadBitstream(const ReadBitstream &);
ReadBitstream &operator=(const ReadBitstream &);
public:
std::fstream input;
mfxBitstream stream;
bool drain;
};
//==================================================================================================
class WriteBitstream
{
public:
WriteBitstream(const char * filename, size_t maxSize);
~WriteBitstream();
bool write();
bool isOpened() const;
private:
WriteBitstream(const WriteBitstream &);
WriteBitstream &operator=(const WriteBitstream &);
public:
std::fstream output;
mfxBitstream stream;
};
//==================================================================================================
class SurfacePool
{
public:
SurfacePool(ushort width_, ushort height_, ushort count, const mfxFrameInfo & frameInfo, uchar bpp = 12);
~SurfacePool();
mfxFrameSurface1 *getFreeSurface();
template <typename T>
static SurfacePool * create(T * instance, mfxVideoParam &params)
{
CV_Assert(instance);
mfxFrameAllocRequest request;
memset(&request, 0, sizeof(request));
mfxStatus res = instance->QueryIOSurf(&params, &request);
DBG(std::cout << "MFX QueryIOSurf: " << res << std::endl);
if (res < MFX_ERR_NONE)
return 0;
return _create(request, params);
}
private:
static SurfacePool* _create(const mfxFrameAllocRequest& request, const mfxVideoParam& params);
private:
SurfacePool(const SurfacePool &);
SurfacePool &operator=(const SurfacePool &);
public:
size_t width, height;
size_t oneSize;
cv::AutoBuffer<uchar, 0> buffers;
std::vector<mfxFrameSurface1> surfaces;
};
//==================================================================================================
class DeviceHandler {
public:
virtual ~DeviceHandler() {}
bool init(MFXVideoSession &session);
protected:
virtual bool initDeviceSession(MFXVideoSession &session) = 0;
};
// TODO: move to core::util?
#ifdef CV_CXX11
#include <thread>
static void sleep_ms(int64 ms)
{
std::this_thread::sleep_for(std::chrono::milliseconds(ms));
}
#elif defined(__linux__)
#include <time.h>
static void sleep_ms(int64 ms)
{
// nanosleep() takes a timespec, not a raw nanosecond count
struct timespec ts;
ts.tv_sec = ms / 1000;
ts.tv_nsec = (ms % 1000) * 1000000;
nanosleep(&ts, NULL);
}
#elif defined _WIN32
static void sleep_ms(int64 ms)
{
Sleep(ms);
}
#else
#error "Can not detect sleep_ms() implementation"
#endif
// Linux specific
#ifdef __linux__
#include <unistd.h>
#include <va/va_drm.h>
class VAHandle : public DeviceHandler {
public:
VAHandle();
~VAHandle();
private:
VAHandle(const VAHandle &);
VAHandle &operator=(const VAHandle &);
bool initDeviceSession(MFXVideoSession &session) CV_OVERRIDE;
private:
VADisplay display;
int file;
};
#endif // __linux__
// Windows specific
#ifdef _WIN32
#include <Windows.h>
class DXHandle : public DeviceHandler {
public:
DXHandle() {}
~DXHandle() {}
private:
DXHandle(const DXHandle &);
DXHandle &operator=(const DXHandle &);
bool initDeviceSession(MFXVideoSession &) CV_OVERRIDE { return true; }
};
#endif // _WIN32
DeviceHandler * createDeviceHandler();
#endif // MFXHELPER_H

View File

@ -0,0 +1,263 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#if defined(BUILD_PLUGIN)
#include <string>
#include "cap_mfx_reader.hpp"
#include "cap_mfx_writer.hpp"
#define ABI_VERSION 0
#define API_VERSION 0
#include "plugin_api.hpp"
using namespace std;
namespace cv {
static
CvResult CV_API_CALL cv_capture_open(const char* filename, int, CV_OUT CvPluginCapture* handle)
{
if (!handle)
return CV_ERROR_FAIL;
*handle = NULL;
if (!filename)
return CV_ERROR_FAIL;
VideoCapture_IntelMFX *cap = 0;
try
{
if (filename)
{
cap = new VideoCapture_IntelMFX(string(filename));
if (cap->isOpened())
{
*handle = (CvPluginCapture)cap;
return CV_ERROR_OK;
}
}
}
catch (const std::exception& e)
{
CV_LOG_WARNING(NULL, "MFX: Exception is raised: " << e.what());
}
catch (...)
{
CV_LOG_WARNING(NULL, "MFX: Unknown C++ exception is raised");
}
if (cap)
delete cap;
return CV_ERROR_FAIL;
}
static
CvResult CV_API_CALL cv_capture_release(CvPluginCapture handle)
{
if (!handle)
return CV_ERROR_FAIL;
VideoCapture_IntelMFX* instance = (VideoCapture_IntelMFX*)handle;
delete instance;
return CV_ERROR_OK;
}
static
CvResult CV_API_CALL cv_capture_get_prop(CvPluginCapture handle, int prop, CV_OUT double* val)
{
if (!handle)
return CV_ERROR_FAIL;
if (!val)
return CV_ERROR_FAIL;
try
{
VideoCapture_IntelMFX* instance = (VideoCapture_IntelMFX*)handle;
*val = instance->getProperty(prop);
return CV_ERROR_OK;
}
catch (const std::exception& e)
{
CV_LOG_WARNING(NULL, "MFX: Exception is raised: " << e.what());
return CV_ERROR_FAIL;
}
catch (...)
{
CV_LOG_WARNING(NULL, "MFX: Unknown C++ exception is raised");
return CV_ERROR_FAIL;
}
}
static
CvResult CV_API_CALL cv_capture_set_prop(CvPluginCapture handle, int prop, double val)
{
if (!handle)
return CV_ERROR_FAIL;
try
{
VideoCapture_IntelMFX* instance = (VideoCapture_IntelMFX*)handle;
return instance->setProperty(prop, val) ? CV_ERROR_OK : CV_ERROR_FAIL;
}
catch (const std::exception& e)
{
CV_LOG_WARNING(NULL, "MFX: Exception is raised: " << e.what());
return CV_ERROR_FAIL;
}
catch (...)
{
CV_LOG_WARNING(NULL, "MFX: Unknown C++ exception is raised");
return CV_ERROR_FAIL;
}
}
static
CvResult CV_API_CALL cv_capture_grab(CvPluginCapture handle)
{
if (!handle)
return CV_ERROR_FAIL;
try
{
VideoCapture_IntelMFX* instance = (VideoCapture_IntelMFX*)handle;
return instance->grabFrame() ? CV_ERROR_OK : CV_ERROR_FAIL;
}
catch (const std::exception& e)
{
CV_LOG_WARNING(NULL, "MFX: Exception is raised: " << e.what());
return CV_ERROR_FAIL;
}
catch (...)
{
CV_LOG_WARNING(NULL, "MFX: Unknown C++ exception is raised");
return CV_ERROR_FAIL;
}
}
static
CvResult CV_API_CALL cv_capture_retrieve(CvPluginCapture handle, int stream_idx, cv_videoio_retrieve_cb_t callback, void* userdata)
{
if (!handle)
return CV_ERROR_FAIL;
try
{
VideoCapture_IntelMFX* instance = (VideoCapture_IntelMFX*)handle;
Mat img;
if (instance->retrieveFrame(stream_idx, img))
return callback(stream_idx, img.data, (int)img.step, img.cols, img.rows, img.channels(), userdata);
return CV_ERROR_FAIL;
}
catch (const std::exception& e)
{
CV_LOG_WARNING(NULL, "MFX: Exception is raised: " << e.what());
return CV_ERROR_FAIL;
}
catch (...)
{
CV_LOG_WARNING(NULL, "MFX: Unknown C++ exception is raised");
return CV_ERROR_FAIL;
}
}
static
CvResult CV_API_CALL cv_writer_open(const char* filename, int fourcc, double fps, int width, int height, int isColor,
CV_OUT CvPluginWriter* handle)
{
if (!handle || !filename)
return CV_ERROR_FAIL;
*handle = NULL;
VideoWriter_IntelMFX* wrt = 0;
try
{
wrt = new VideoWriter_IntelMFX(filename, fourcc, fps, Size(width, height), isColor);
if(wrt->isOpened())
{
*handle = (CvPluginWriter)wrt;
return CV_ERROR_OK;
}
}
catch (const std::exception& e)
{
CV_LOG_WARNING(NULL, "MFX: Exception is raised: " << e.what());
}
catch (...)
{
CV_LOG_WARNING(NULL, "MFX: Unknown C++ exception is raised");
}
if (wrt)
delete wrt;
return CV_ERROR_FAIL;
}
static
CvResult CV_API_CALL cv_writer_release(CvPluginWriter handle)
{
if (!handle)
return CV_ERROR_FAIL;
VideoWriter_IntelMFX* instance = (VideoWriter_IntelMFX*)handle;
delete instance;
return CV_ERROR_OK;
}
static
CvResult CV_API_CALL cv_writer_get_prop(CvPluginWriter /*handle*/, int /*prop*/, CV_OUT double* /*val*/)
{
return CV_ERROR_FAIL;
}
static
CvResult CV_API_CALL cv_writer_set_prop(CvPluginWriter /*handle*/, int /*prop*/, double /*val*/)
{
return CV_ERROR_FAIL;
}
static
CvResult CV_API_CALL cv_writer_write(CvPluginWriter handle, const unsigned char *data, int step, int width, int height, int cn)
{
if (!handle)
return CV_ERROR_FAIL;
try
{
VideoWriter_IntelMFX* instance = (VideoWriter_IntelMFX*)handle;
Mat img(Size(width, height), CV_MAKETYPE(CV_8U, cn), const_cast<uchar*>(data), step);
instance->write(img);
return CV_ERROR_OK;
}
catch (const std::exception& e)
{
CV_LOG_WARNING(NULL, "MFX: Exception is raised: " << e.what());
return CV_ERROR_FAIL;
}
catch (...)
{
CV_LOG_WARNING(NULL, "MFX: Unknown C++ exception is raised");
return CV_ERROR_FAIL;
}
}
static const OpenCV_VideoIO_Plugin_API_preview plugin_api =
{
{
sizeof(OpenCV_VideoIO_Plugin_API_preview), ABI_VERSION, API_VERSION,
CV_VERSION_MAJOR, CV_VERSION_MINOR, CV_VERSION_REVISION, CV_VERSION_STATUS,
"MediaSDK OpenCV Video I/O plugin"
},
{
/* 1*/CAP_INTEL_MFX,
/* 2*/cv_capture_open,
/* 3*/cv_capture_release,
/* 4*/cv_capture_get_prop,
/* 5*/cv_capture_set_prop,
/* 6*/cv_capture_grab,
/* 7*/cv_capture_retrieve,
/* 8*/cv_writer_open,
/* 9*/cv_writer_release,
/* 10*/cv_writer_get_prop,
/* 11*/cv_writer_set_prop,
/* 12*/cv_writer_write
}
};
} // namespace
const OpenCV_VideoIO_Plugin_API_preview* opencv_videoio_plugin_init_v0(int requested_abi_version, int requested_api_version, void* /*reserved=NULL*/) CV_NOEXCEPT
{
if (requested_abi_version == ABI_VERSION && requested_api_version <= API_VERSION)
return &cv::plugin_api;
return NULL;
}
#endif // BUILD_PLUGIN
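// A rough sketch of the loader side (an assumption for illustration, not the
// exact backend_plugin.cpp code; the plugin library name is hypothetical):
//
//   #include <dlfcn.h>
//   typedef const OpenCV_VideoIO_Plugin_API_preview* (*InitFn)(int, int, void*);
//   void* lib = dlopen("libopencv_videoio_intel_mfx.so", RTLD_LAZY);
//   InitFn init = (InitFn)dlsym(lib, "opencv_videoio_plugin_init_v0");
//   const OpenCV_VideoIO_Plugin_API_preview* api = init(ABI_VERSION, API_VERSION, NULL);
//   // api->... now exposes the capture/writer function table declared above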

View File

@ -0,0 +1,286 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "cap_mfx_reader.hpp"
#include "opencv2/core/base.hpp"
#include "cap_mfx_common.hpp"
#include "opencv2/imgproc/hal/hal.hpp"
#include "cap_interface.hpp"
using namespace cv;
using namespace std;
inline bool hasExtension(const String &filename, const String &ext)
{
if (filename.size() <= ext.size())
return false;
const size_t diff = filename.size() - ext.size();
const size_t found_at = filename.rfind(ext);
return found_at == diff;
}
inline mfxU32 determineCodecId(const String &filename)
{
if (hasExtension(filename, ".h264") || hasExtension(filename, ".264"))
return MFX_CODEC_AVC;
else if (hasExtension(filename, ".mp2") || hasExtension(filename, ".mpeg2"))
return MFX_CODEC_MPEG2;
else if (hasExtension(filename, ".265") || hasExtension(filename, ".hevc"))
return MFX_CODEC_HEVC;
else
return (mfxU32)-1;
}
//==========================================================================
VideoCapture_IntelMFX::VideoCapture_IntelMFX(const cv::String &filename)
: session(0), plugin(0), deviceHandler(0), bs(0), decoder(0), pool(0), outSurface(0), good(false)
{
mfxStatus res = MFX_ERR_NONE;
// Init device and session
deviceHandler = createDeviceHandler();
session = new MFXVideoSession();
if (!deviceHandler->init(*session))
{
MSG(cerr << "MFX: Can't initialize session" << endl);
return;
}
// Load appropriate plugin
mfxU32 codecId = determineCodecId(filename);
if (codecId == (mfxU32)-1)
{
MSG(cerr << "MFX: Unsupported extension: " << filename << endl);
return;
}
plugin = Plugin::loadDecoderPlugin(*session, codecId);
if (plugin && !plugin->isGood())
{
MSG(cerr << "MFX: LoadPlugin failed for codec: " << codecId << " (" << filename << ")" << endl);
return;
}
// Read some content from file
bs = new ReadBitstream(filename.c_str());
if (!bs->read())
{
MSG(cerr << "MFX: Failed to read bitstream" << endl);
return;
}
// Create decoder and decode stream header
decoder = new MFXVideoDECODE(*session);
mfxVideoParam params;
memset(&params, 0, sizeof(params));
params.mfx.CodecId = codecId;
params.IOPattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
res = decoder->DecodeHeader(&bs->stream, &params);
DBG(cout << "DecodeHeader: " << res << endl << params.mfx << params.mfx.FrameInfo << endl);
if (res < MFX_ERR_NONE)
{
MSG(cerr << "MFX: Failed to decode stream header: " << res << endl);
return;
}
// Adjust parameters
res = decoder->Query(&params, &params);
DBG(cout << "MFX Query: " << res << endl << params.mfx << params.mfx.FrameInfo);
CV_Assert(res >= MFX_ERR_NONE);
// Init surface pool
pool = SurfacePool::create(decoder, params);
if (!pool)
{
MSG(cerr << "MFX: Failed to create surface pool" << endl);
return;
}
// Init decoder
res = decoder->Init(&params);
DBG(cout << "MFX Init: " << res << endl << params.mfx.FrameInfo);
if (res < MFX_ERR_NONE)
{
MSG(cerr << "MFX: Failed to init decoder: " << res << endl);
return;
}
frameSize = Size(params.mfx.FrameInfo.CropW, params.mfx.FrameInfo.CropH);
good = true;
}
VideoCapture_IntelMFX::~VideoCapture_IntelMFX()
{
cleanup(plugin);
cleanup(bs);
cleanup(decoder);
cleanup(pool);
session->Close();
cleanup(session);
cleanup(deviceHandler);
}
double VideoCapture_IntelMFX::getProperty(int prop) const
{
if (!good)
{
MSG(cerr << "MFX: can not call getProperty(), backend has not been initialized" << endl);
return 0;
}
switch (prop)
{
case CAP_PROP_FRAME_WIDTH:
return frameSize.width;
case CAP_PROP_FRAME_HEIGHT:
return frameSize.height;
default:
MSG(cerr << "MFX: unsupported property" << endl);
return 0;
}
}
bool VideoCapture_IntelMFX::setProperty(int, double)
{
MSG(cerr << "MFX: setProperty() is not implemented" << endl);
return false;
}
bool VideoCapture_IntelMFX::grabFrame()
{
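// Decode loop: feed the bitstream into DecodeFrameAsync and react to its status:
// MFX_ERR_MORE_DATA -> read more input (or switch to drain mode at end of file),
// MFX_ERR_MORE_SURFACE -> fetch another working surface,
// MFX_WRN_DEVICE_BUSY -> wait and retry, MFX_ERR_NONE -> sync and return the frame.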
mfxStatus res;
mfxFrameSurface1 *workSurface = 0;
mfxSyncPoint sync;
workSurface = pool->getFreeSurface();
while (true)
{
if (!workSurface)
{
// not enough surfaces
MSG(cerr << "MFX: Failed to get free surface" << endl);
return false;
}
outSurface = 0;
res = decoder->DecodeFrameAsync(bs->drain ? 0 : &bs->stream, workSurface, (mfxFrameSurface1**)&outSurface, &sync);
if (res == MFX_ERR_NONE)
{
res = session->SyncOperation(sync, 1000); // 1 sec, TODO: provide interface to modify timeout
if (res == MFX_ERR_NONE)
{
// ready to retrieve
DBG(cout << "Frame ready to retrieve" << endl);
return true;
}
else
{
MSG(cerr << "MFX: Sync error: " << res << endl);
return false;
}
}
else if (res == MFX_ERR_MORE_DATA)
{
if (bs->isDone())
{
if (bs->drain)
{
// finish
DBG(cout << "Drain finished" << endl);
return false;
}
else
{
DBG(cout << "Bitstream finished - Drain started" << endl);
bs->drain = true;
continue;
}
}
else
{
bool read_res = bs->read();
if (!read_res)
{
// failed to read
MSG(cerr << "MFX: Bitstream read failure" << endl);
return false;
}
else
{
DBG(cout << "Bitstream read success" << endl);
continue;
}
}
}
else if (res == MFX_ERR_MORE_SURFACE)
{
DBG(cout << "Getting another surface" << endl);
workSurface = pool->getFreeSurface();
continue;
}
else if (res == MFX_WRN_DEVICE_BUSY)
{
DBG(cout << "Waiting for device" << endl);
sleep_ms(1000);
continue;
}
else if (res == MFX_WRN_VIDEO_PARAM_CHANGED)
{
DBG(cout << "Video param changed" << endl);
continue;
}
else
{
MSG(cerr << "MFX: Bad status: " << res << endl);
return false;
}
}
}
bool VideoCapture_IntelMFX::retrieveFrame(int, OutputArray out)
{
if (!outSurface)
{
MSG(cerr << "MFX: No frame ready to retrieve" << endl);
return false;
}
mfxFrameSurface1 * s = (mfxFrameSurface1*)outSurface;
mfxFrameInfo &info = s->Info;
mfxFrameData &data = s->Data;
const int cols = info.CropW;
const int rows = info.CropH;
out.create(rows, cols, CV_8UC3);
Mat res = out.getMat();
hal::cvtTwoPlaneYUVtoBGR(data.Y, data.UV, data.Pitch, res.data, res.step, cols, rows, 3, false, 0);
return true;
}
bool VideoCapture_IntelMFX::isOpened() const
{
return good;
}
int VideoCapture_IntelMFX::getCaptureDomain()
{
return CAP_INTEL_MFX;
}
//==================================================================================================
cv::Ptr<IVideoCapture> cv::create_MFX_capture(const std::string &filename)
{
return cv::makePtr<VideoCapture_IntelMFX>(filename);
}

View File

@ -0,0 +1,42 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef CAP_MFX_HPP
#define CAP_MFX_HPP
#include "precomp.hpp"
class MFXVideoSession;
class Plugin;
class DeviceHandler;
class ReadBitstream;
class SurfacePool;
class MFXVideoDECODE;
class VideoCapture_IntelMFX : public cv::IVideoCapture
{
public:
VideoCapture_IntelMFX(const cv::String &filename);
~VideoCapture_IntelMFX();
double getProperty(int) const CV_OVERRIDE;
bool setProperty(int, double) CV_OVERRIDE;
bool grabFrame() CV_OVERRIDE;
bool retrieveFrame(int, cv::OutputArray out) CV_OVERRIDE;
bool isOpened() const CV_OVERRIDE;
int getCaptureDomain() CV_OVERRIDE;
private:
MFXVideoSession *session;
Plugin *plugin;
DeviceHandler *deviceHandler;
ReadBitstream *bs;
MFXVideoDECODE *decoder;
SurfacePool *pool;
void *outSurface;
cv::Size frameSize;
bool good;
};
#endif

View File

@ -0,0 +1,277 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "cap_mfx_writer.hpp"
#include "opencv2/core/base.hpp"
#include "cap_mfx_common.hpp"
#include "opencv2/imgproc/hal/hal.hpp"
#include "cap_interface.hpp"
using namespace std;
using namespace cv;
static size_t getBitrateDivisor()
{
static const size_t res = utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_BITRATE_DIVISOR", 300);
return res;
}
static mfxU32 getWriterTimeoutMS()
{
static const size_t res = utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_WRITER_TIMEOUT", 1);
return saturate_cast<mfxU32>(res * 1000); // convert from seconds
}
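// Both knobs are plain environment variables, e.g.:
//   OPENCV_VIDEOIO_MFX_BITRATE_DIVISOR=150   (halving the divisor doubles TargetKbps)
//   OPENCV_VIDEOIO_MFX_WRITER_TIMEOUT=5      (seconds)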
inline mfxU32 codecIdByFourCC(int fourcc)
{
const int CC_MPG2 = FourCC('M', 'P', 'G', '2').vali32;
const int CC_H264 = FourCC('H', '2', '6', '4').vali32;
const int CC_X264 = FourCC('X', '2', '6', '4').vali32;
const int CC_AVC = FourCC('A', 'V', 'C', ' ').vali32;
const int CC_H265 = FourCC('H', '2', '6', '5').vali32;
const int CC_HEVC = FourCC('H', 'E', 'V', 'C').vali32;
if (fourcc == CC_X264 || fourcc == CC_H264 || fourcc == CC_AVC)
return MFX_CODEC_AVC;
else if (fourcc == CC_H265 || fourcc == CC_HEVC)
return MFX_CODEC_HEVC;
else if (fourcc == CC_MPG2)
return MFX_CODEC_MPEG2;
else
return (mfxU32)-1;
}
VideoWriter_IntelMFX::VideoWriter_IntelMFX(const String &filename, int _fourcc, double fps, Size frameSize_, bool)
: session(0), plugin(0), deviceHandler(0), bs(0), encoder(0), pool(0), outSurface(NULL), frameSize(frameSize_), good(false)
{
mfxStatus res = MFX_ERR_NONE;
if (frameSize.width % 2 || frameSize.height % 2)
{
MSG(cerr << "MFX: Invalid frame size passed to encoder" << endl);
return;
}
if (fps <= 0)
{
MSG(cerr << "MFX: Invalid FPS passed to encoder" << endl);
return;
}
// Init device and session
deviceHandler = createDeviceHandler();
session = new MFXVideoSession();
if (!deviceHandler->init(*session))
{
MSG(cerr << "MFX: Can't initialize session" << endl);
return;
}
// Load appropriate plugin
mfxU32 codecId = codecIdByFourCC(_fourcc);
if (codecId == (mfxU32)-1)
{
MSG(cerr << "MFX: Unsupported FourCC: " << FourCC(_fourcc) << endl);
return;
}
plugin = Plugin::loadEncoderPlugin(*session, codecId);
if (plugin && !plugin->isGood())
{
MSG(cerr << "MFX: LoadPlugin failed for codec: " << codecId << " (" << FourCC(_fourcc) << ")" << endl);
return;
}
// Init encoder
encoder = new MFXVideoENCODE(*session);
mfxVideoParam params;
memset(&params, 0, sizeof(params));
params.mfx.CodecId = codecId;
params.mfx.TargetUsage = MFX_TARGETUSAGE_BALANCED;
params.mfx.TargetKbps = saturate_cast<mfxU16>((frameSize.area() * fps) / (42.6666 * getBitrateDivisor())); // TODO: set in options
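// e.g. 1920x1080 @ 30 fps with the default divisor of 300:
// 1920*1080*30 / (42.6666*300) = 62208000 / ~12800 ~= 4860 Kbps (~4.9 Mbit/s)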
params.mfx.RateControlMethod = MFX_RATECONTROL_VBR;
params.mfx.FrameInfo.FrameRateExtN = cvRound(fps * 1000);
params.mfx.FrameInfo.FrameRateExtD = 1000;
params.mfx.FrameInfo.FourCC = MFX_FOURCC_NV12;
params.mfx.FrameInfo.ChromaFormat = MFX_CHROMAFORMAT_YUV420;
params.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;
params.mfx.FrameInfo.CropX = 0;
params.mfx.FrameInfo.CropY = 0;
params.mfx.FrameInfo.CropW = (mfxU16)frameSize.width;
params.mfx.FrameInfo.CropH = (mfxU16)frameSize.height;
params.mfx.FrameInfo.Width = (mfxU16)alignSize(frameSize.width, 32);
params.mfx.FrameInfo.Height = (mfxU16)alignSize(frameSize.height, 32);
params.IOPattern = MFX_IOPATTERN_IN_SYSTEM_MEMORY;
res = encoder->Query(&params, &params);
DBG(cout << "MFX Query: " << res << endl << params.mfx << params.mfx.FrameInfo);
if (res < MFX_ERR_NONE)
{
MSG(cerr << "MFX: Query failed: " << res << endl);
return;
}
// Init surface pool
pool = SurfacePool::create(encoder, params);
if (!pool)
{
MSG(cerr << "MFX: Failed to create surface pool" << endl);
return;
}
// Init encoder
res = encoder->Init(&params);
DBG(cout << "MFX Init: " << res << endl << params.mfx.FrameInfo);
if (res < MFX_ERR_NONE)
{
MSG(cerr << "MFX: Failed to init encoder: " << res << endl);
return;
}
// Open output bitstream
{
mfxVideoParam par;
memset(&par, 0, sizeof(par));
res = encoder->GetVideoParam(&par);
DBG(cout << "MFX GetVideoParam: " << res << endl << "requested " << par.mfx.BufferSizeInKB << " kB" << endl);
CV_Assert(res >= MFX_ERR_NONE);
bs = new WriteBitstream(filename.c_str(), par.mfx.BufferSizeInKB * 1024 * 2);
if (!bs->isOpened())
{
MSG(cerr << "MFX: Failed to open output file: " << filename << endl);
return;
}
}
good = true;
}
VideoWriter_IntelMFX::~VideoWriter_IntelMFX()
{
if (isOpened())
{
DBG(cout << "====== Drain bitstream..." << endl);
Mat dummy;
while (write_one(dummy)) {}
DBG(cout << "====== Drain Finished" << endl);
}
cleanup(bs);
cleanup(pool);
cleanup(encoder);
cleanup(plugin);
cleanup(session);
cleanup(deviceHandler);
}
double VideoWriter_IntelMFX::getProperty(int) const
{
MSG(cerr << "MFX: getProperty() is not implemented" << endl);
return 0;
}
bool VideoWriter_IntelMFX::setProperty(int, double)
{
MSG(cerr << "MFX: setProperty() is not implemented" << endl);
return false;
}
bool VideoWriter_IntelMFX::isOpened() const
{
return good;
}
void VideoWriter_IntelMFX::write(cv::InputArray input)
{
write_one(input);
}
bool VideoWriter_IntelMFX::write_one(cv::InputArray bgr)
{
mfxStatus res;
mfxFrameSurface1 *workSurface = 0;
mfxSyncPoint sync;
if (!bgr.empty() && (bgr.dims() != 2 || bgr.type() != CV_8UC3 || bgr.size() != frameSize))
{
MSG(cerr << "MFX: invalid frame passed to encoder: "
<< "dims/depth/cn=" << bgr.dims() << "/" << bgr.depth() << "/" << bgr.channels()
<< ", size=" << bgr.size() << endl);
return false;
}
if (!bgr.empty())
{
workSurface = pool->getFreeSurface();
if (!workSurface)
{
// not enough surfaces
MSG(cerr << "MFX: Failed to get free surface" << endl);
return false;
}
Mat src = bgr.getMat();
hal::cvtBGRtoTwoPlaneYUV(src.data, src.step,
workSurface->Data.Y, workSurface->Data.UV, workSurface->Data.Pitch,
workSurface->Info.CropW, workSurface->Info.CropH,
3, false, 1);
}
while (true)
{
outSurface = 0;
DBG(cout << "Calling with surface: " << workSurface << endl);
res = encoder->EncodeFrameAsync(NULL, workSurface, &bs->stream, &sync);
if (res == MFX_ERR_NONE)
{
res = session->SyncOperation(sync, getWriterTimeoutMS()); // TODO: provide interface to modify timeout
if (res == MFX_ERR_NONE)
{
// ready to write
if (!bs->write())
{
MSG(cerr << "MFX: Failed to write bitstream" << endl);
return false;
}
else
{
DBG(cout << "Write bitstream" << endl);
return true;
}
}
else
{
MSG(cerr << "MFX: Sync error: " << res << endl);
return false;
}
}
else if (res == MFX_ERR_MORE_DATA)
{
DBG(cout << "ERR_MORE_DATA" << endl);
return false;
}
else if (res == MFX_WRN_DEVICE_BUSY)
{
DBG(cout << "Waiting for device" << endl);
sleep_ms(1000);
continue;
}
else
{
MSG(cerr << "MFX: Bad status: " << res << endl);
return false;
}
}
}
Ptr<IVideoWriter> cv::create_MFX_writer(const std::string& filename, int _fourcc, double fps,
const Size& frameSize, const VideoWriterParameters& params)
{
if (codecIdByFourCC(_fourcc) != (mfxU32)-1) // -1 marks an unsupported FourCC; as unsigned it would pass a '> 0' test
{
const bool isColor = params.get(VIDEOWRITER_PROP_IS_COLOR, true);
Ptr<VideoWriter_IntelMFX> a = makePtr<VideoWriter_IntelMFX>(filename, _fourcc, fps, frameSize, isColor);
if (a->isOpened())
return a;
}
return Ptr<VideoWriter_IntelMFX>();
}
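// A minimal user-level sketch (an assumption for illustration: MFX backend
// compiled in and supported by the hardware):
//
//   cv::VideoWriter w("out.h264", cv::CAP_INTEL_MFX,
//                     cv::VideoWriter::fourcc('H','2','6','4'),
//                     30.0, cv::Size(1920, 1080));
//   if (w.isOpened())
//       w.write(frame); // frame must be CV_8UC3 and exactly 1920x1080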

View File

@ -0,0 +1,47 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef CAP_MFX_WRITER_HPP
#define CAP_MFX_WRITER_HPP
#include "precomp.hpp"
class MFXVideoSession;
class Plugin;
class DeviceHandler;
class WriteBitstream;
class SurfacePool;
class MFXVideoDECODE;
class MFXVideoENCODE;
class VideoWriter_IntelMFX : public cv::IVideoWriter
{
public:
VideoWriter_IntelMFX(const cv::String &filename, int _fourcc, double fps, cv::Size frameSize, bool isColor);
~VideoWriter_IntelMFX() CV_OVERRIDE;
double getProperty(int) const CV_OVERRIDE;
bool setProperty(int, double) CV_OVERRIDE;
bool isOpened() const CV_OVERRIDE;
void write(cv::InputArray input) CV_OVERRIDE;
int getCaptureDomain() const CV_OVERRIDE { return cv::CAP_INTEL_MFX; }
protected:
bool write_one(cv::InputArray bgr);
private:
VideoWriter_IntelMFX(const VideoWriter_IntelMFX &);
VideoWriter_IntelMFX & operator=(const VideoWriter_IntelMFX &);
private:
MFXVideoSession *session;
Plugin *plugin;
DeviceHandler *deviceHandler;
WriteBitstream *bs;
MFXVideoENCODE *encoder;
SurfacePool *pool;
void *outSurface;
cv::Size frameSize;
bool good;
};
#endif // CAP_MFX_WRITER_HPP

View File

@ -0,0 +1,234 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2015, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/videoio/container_avi.private.hpp"
namespace cv
{
class MotionJpegCapture: public IVideoCapture
{
public:
virtual ~MotionJpegCapture() CV_OVERRIDE;
virtual double getProperty(int) const CV_OVERRIDE;
virtual bool setProperty(int, double) CV_OVERRIDE;
virtual bool grabFrame() CV_OVERRIDE;
virtual bool retrieveFrame(int, OutputArray) CV_OVERRIDE;
virtual bool isOpened() const CV_OVERRIDE;
virtual int getCaptureDomain() CV_OVERRIDE { return CAP_OPENCV_MJPEG; }
MotionJpegCapture(const String&);
bool open(const String&);
void close();
protected:
inline uint64_t getFramePos() const;
Ptr<AVIReadContainer> m_avi_container;
bool m_is_first_frame;
frame_list m_mjpeg_frames;
frame_iterator m_frame_iterator;
Mat m_current_frame;
//frame width/height and fps could be different for
//each frame/stream. At the moment we suppose that they
//stay the same within a single avi file.
uint32_t m_frame_width;
uint32_t m_frame_height;
double m_fps;
};
uint64_t MotionJpegCapture::getFramePos() const
{
if(m_is_first_frame)
return 0;
if(m_frame_iterator == m_mjpeg_frames.end())
return m_mjpeg_frames.size();
return m_frame_iterator - m_mjpeg_frames.begin() + 1;
}
bool MotionJpegCapture::setProperty(int property, double value)
{
if(property == CAP_PROP_POS_FRAMES)
{
if(int(value) == 0)
{
m_is_first_frame = true;
m_frame_iterator = m_mjpeg_frames.end();
return true;
}
else if(m_mjpeg_frames.size() > value)
{
m_frame_iterator = m_mjpeg_frames.begin() + int(value - 1);
m_is_first_frame = false;
return true;
}
}
return false;
}
double MotionJpegCapture::getProperty(int property) const
{
switch(property)
{
case CAP_PROP_POS_FRAMES:
return (double)getFramePos();
case CAP_PROP_POS_MSEC:
return (double)getFramePos() * (1000. / m_fps);
case CAP_PROP_POS_AVI_RATIO:
return double(getFramePos())/m_mjpeg_frames.size();
case CAP_PROP_FRAME_WIDTH:
return (double)m_frame_width;
case CAP_PROP_FRAME_HEIGHT:
return (double)m_frame_height;
case CAP_PROP_FPS:
return m_fps;
case CAP_PROP_FOURCC:
return (double)CV_FOURCC('M','J','P','G');
case CAP_PROP_FRAME_COUNT:
return (double)m_mjpeg_frames.size();
case CAP_PROP_FORMAT:
return 0;
default:
return 0;
}
}
bool MotionJpegCapture::grabFrame()
{
if(isOpened())
{
if(m_is_first_frame)
{
m_is_first_frame = false;
m_frame_iterator = m_mjpeg_frames.begin();
}
else
{
if (m_frame_iterator == m_mjpeg_frames.end())
return false;
++m_frame_iterator;
}
}
return m_frame_iterator != m_mjpeg_frames.end();
}
bool MotionJpegCapture::retrieveFrame(int, OutputArray output_frame)
{
if(m_frame_iterator != m_mjpeg_frames.end())
{
std::vector<char> data = m_avi_container->readFrame(m_frame_iterator);
if(data.size())
{
m_current_frame = imdecode(data, IMREAD_ANYDEPTH | IMREAD_COLOR | IMREAD_IGNORE_ORIENTATION);
}
m_current_frame.copyTo(output_frame);
return true;
}
return false;
}
MotionJpegCapture::~MotionJpegCapture()
{
close();
}
MotionJpegCapture::MotionJpegCapture(const String& filename)
{
m_avi_container = makePtr<AVIReadContainer>();
m_avi_container->initStream(filename);
open(filename);
}
bool MotionJpegCapture::isOpened() const
{
return m_mjpeg_frames.size() > 0;
}
void MotionJpegCapture::close()
{
m_avi_container->close();
m_frame_iterator = m_mjpeg_frames.end();
}
bool MotionJpegCapture::open(const String& filename)
{
close();
m_avi_container = makePtr<AVIReadContainer>();
m_avi_container->initStream(filename);
m_frame_iterator = m_mjpeg_frames.end();
m_is_first_frame = true;
if(!m_avi_container->parseRiff(m_mjpeg_frames))
{
close();
} else
{
m_frame_width = m_avi_container->getWidth();
m_frame_height = m_avi_container->getHeight();
m_fps = m_avi_container->getFps();
}
return isOpened();
}
Ptr<IVideoCapture> createMotionJpegCapture(const String& filename)
{
Ptr<MotionJpegCapture> mjdecoder(new MotionJpegCapture(filename));
if( mjdecoder->isOpened() )
return mjdecoder;
return Ptr<MotionJpegCapture>();
}
}
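// A minimal usage sketch (assuming an MJPEG-encoded AVI file on disk):
//
//   cv::VideoCapture cap("video.avi", cv::CAP_OPENCV_MJPEG);
//   cv::Mat frame;
//   while (cap.read(frame)) { /* process frame */ }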

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

File diff suppressed because it is too large

View File

@ -0,0 +1,613 @@
////////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//
//
// The code was contributed by Justin G. Eskesen in January 2010
//
#include "precomp.hpp"
#include "cap_interface.hpp"
#ifdef HAVE_PVAPI
#if !defined _WIN32 && !defined _LINUX
#define _LINUX
#endif
#if defined(_x64) || defined (__x86_64) || defined (_M_X64)
#define _x64 1
#elif defined(_x86) || defined(__i386) || defined (_M_IX86)
#define _x86 1
#endif
#include <PvApi.h>
#ifdef _WIN32
# include <io.h>
#else
# include <time.h>
# include <unistd.h>
#endif
//#include <arpa/inet.h>
#define MAX_CAMERAS 10
/********************* Capturing video from camera via PvAPI *********************/
class CvCaptureCAM_PvAPI : public CvCapture
{
public:
CvCaptureCAM_PvAPI();
virtual ~CvCaptureCAM_PvAPI()
{
close();
}
virtual bool open( int index );
virtual void close();
virtual double getProperty(int) const CV_OVERRIDE;
virtual bool setProperty(int, double) CV_OVERRIDE;
virtual bool grabFrame() CV_OVERRIDE;
virtual IplImage* retrieveFrame(int) CV_OVERRIDE;
virtual int getCaptureDomain() CV_OVERRIDE
{
return CV_CAP_PVAPI;
}
protected:
#ifndef _WIN32
virtual void Sleep(unsigned int time);
#endif
void stopCapture();
bool startCapture();
bool resizeCaptureFrame (int frameWidth, int frameHeight);
typedef struct
{
unsigned long UID;
tPvHandle Handle;
tPvFrame Frame;
} tCamera;
IplImage *frame;
tCamera Camera;
tPvErr Errcode;
};
CvCaptureCAM_PvAPI::CvCaptureCAM_PvAPI()
{
frame = NULL;
memset(&this->Camera, 0, sizeof(this->Camera));
}
#ifndef _WIN32
void CvCaptureCAM_PvAPI::Sleep(unsigned int time)
{
struct timespec t,r;
t.tv_sec = time / 1000;
t.tv_nsec = (time % 1000) * 1000000;
while(nanosleep(&t,&r)==-1)
t = r;
}
#endif
void CvCaptureCAM_PvAPI::close()
{
// Stop the acquisition & free the camera
stopCapture();
PvCameraClose(Camera.Handle);
PvUnInitialize();
}
// Initialize camera input
bool CvCaptureCAM_PvAPI::open( int index )
{
tPvCameraInfo cameraList[MAX_CAMERAS];
tPvCameraInfo camInfo;
tPvIpSettings ipSettings;
// The PvInitialize() result is intentionally ignored (note the commented-out
// early return), e.g. so that open() still works when the API has already been
// initialized by a previous capture.
if (PvInitialize()) {
//return false;
}
Sleep(1000);
//close();
int numCameras=PvCameraList(cameraList, MAX_CAMERAS, NULL);
if (numCameras <= 0 || index >= numCameras)
return false;
Camera.UID = cameraList[index].UniqueId;
if (!PvCameraInfo(Camera.UID,&camInfo) && !PvCameraIpSettingsGet(Camera.UID,&ipSettings))
{
/*
struct in_addr addr;
addr.s_addr = ipSettings.CurrentIpAddress;
printf("Current address:\t%s\n",inet_ntoa(addr));
addr.s_addr = ipSettings.CurrentIpSubnet;
printf("Current subnet:\t\t%s\n",inet_ntoa(addr));
addr.s_addr = ipSettings.CurrentIpGateway;
printf("Current gateway:\t%s\n",inet_ntoa(addr));
*/
}
else
{
fprintf(stderr,"ERROR: could not retrieve camera IP settings.\n");
return false;
}
if (PvCameraOpen(Camera.UID, ePvAccessMaster, &(Camera.Handle))==ePvErrSuccess)
{
tPvUint32 frameWidth, frameHeight;
unsigned long maxSize;
PvAttrUint32Get(Camera.Handle, "Width", &frameWidth);
PvAttrUint32Get(Camera.Handle, "Height", &frameHeight);
// Determine the maximum packet size supported by the system (ethernet adapter)
// and then configure the camera to use this value. If the system's NIC only supports
// an MTU of 1500 or lower, this will automatically configure an MTU of 1500.
// 8228 is the optimal size described by the API in order to enable jumbo frames
maxSize = 8228;
//PvAttrUint32Get(Camera.Handle,"PacketSize",&maxSize);
if (PvCaptureAdjustPacketSize(Camera.Handle,maxSize)!=ePvErrSuccess)
return false;
resizeCaptureFrame(frameWidth, frameHeight);
return startCapture();
}
fprintf(stderr,"Error cannot open camera\n");
return false;
}
bool CvCaptureCAM_PvAPI::grabFrame()
{
//if(Camera.Frame.Status != ePvErrUnplugged && Camera.Frame.Status != ePvErrCancelled)
return PvCaptureQueueFrame(Camera.Handle, &(Camera.Frame), NULL) == ePvErrSuccess;
}
IplImage* CvCaptureCAM_PvAPI::retrieveFrame(int)
{
if (PvCaptureWaitForFrameDone(Camera.Handle, &(Camera.Frame), 1000) == ePvErrSuccess)
{
return frame;
}
else return NULL;
}
double CvCaptureCAM_PvAPI::getProperty( int property_id ) const
{
tPvUint32 nTemp;
switch ( property_id )
{
case CV_CAP_PROP_FRAME_WIDTH:
PvAttrUint32Get(Camera.Handle, "Width", &nTemp);
return (double)nTemp;
case CV_CAP_PROP_FRAME_HEIGHT:
PvAttrUint32Get(Camera.Handle, "Height", &nTemp);
return (double)nTemp;
case CV_CAP_PROP_EXPOSURE:
PvAttrUint32Get(Camera.Handle,"ExposureValue",&nTemp);
return (double)nTemp;
case CV_CAP_PROP_FPS:
tPvFloat32 nfTemp;
PvAttrFloat32Get(Camera.Handle, "StatFrameRate", &nfTemp);
return (double)nfTemp;
case CV_CAP_PROP_PVAPI_MULTICASTIP:
char mEnable[4]; // large enough for "On"/"Off"
char mIp[16]; // large enough for "255.255.255.255"
PvAttrEnumGet(Camera.Handle,"MulticastEnable",mEnable,sizeof(mEnable),NULL);
if (strcmp(mEnable, "Off") == 0)
{
return -1;
}
else
{
long int ip;
int a,b,c,d;
PvAttrStringGet(Camera.Handle, "MulticastIPAddress",mIp,sizeof(mIp),NULL);
sscanf(mIp, "%d.%d.%d.%d", &a, &b, &c, &d);
ip = ((a*256 + b)*256 + c)*256 + d;
return (double)ip;
}
case CV_CAP_PROP_GAIN:
PvAttrUint32Get(Camera.Handle, "GainValue", &nTemp);
return (double)nTemp;
case CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE:
char triggerMode[256];
PvAttrEnumGet(Camera.Handle, "FrameStartTriggerMode", triggerMode, 256, NULL);
if (strcmp(triggerMode, "Freerun")==0)
return 0.0;
else if (strcmp(triggerMode, "SyncIn1")==0)
return 1.0;
else if (strcmp(triggerMode, "SyncIn2")==0)
return 2.0;
else if (strcmp(triggerMode, "FixedRate")==0)
return 3.0;
else if (strcmp(triggerMode, "Software")==0)
return 4.0;
else
return -1.0;
case CV_CAP_PROP_PVAPI_DECIMATIONHORIZONTAL:
PvAttrUint32Get(Camera.Handle, "DecimationHorizontal", &nTemp);
return (double)nTemp;
case CV_CAP_PROP_PVAPI_DECIMATIONVERTICAL:
PvAttrUint32Get(Camera.Handle, "DecimationVertical", &nTemp);
return (double)nTemp;
case CV_CAP_PROP_PVAPI_BINNINGX:
PvAttrUint32Get(Camera.Handle,"BinningX",&nTemp);
return (double)nTemp;
case CV_CAP_PROP_PVAPI_BINNINGY:
PvAttrUint32Get(Camera.Handle,"BinningY",&nTemp);
return (double)nTemp;
case CV_CAP_PROP_PVAPI_PIXELFORMAT:
char pixelFormat[256];
PvAttrEnumGet(Camera.Handle, "PixelFormat", pixelFormat,256,NULL);
if (strcmp(pixelFormat, "Mono8")==0)
return 1.0;
else if (strcmp(pixelFormat, "Mono16")==0)
return 2.0;
else if (strcmp(pixelFormat, "Bayer8")==0)
return 3.0;
else if (strcmp(pixelFormat, "Bayer16")==0)
return 4.0;
else if (strcmp(pixelFormat, "Rgb24")==0)
return 5.0;
else if (strcmp(pixelFormat, "Bgr24")==0)
return 6.0;
else if (strcmp(pixelFormat, "Rgba32")==0)
return 7.0;
else if (strcmp(pixelFormat, "Bgra32")==0)
return 8.0;
}
return -1.0;
}
bool CvCaptureCAM_PvAPI::setProperty( int property_id, double value )
{
tPvErr error;
switch ( property_id )
{
case CV_CAP_PROP_FRAME_WIDTH:
{
tPvUint32 currHeight;
PvAttrUint32Get(Camera.Handle, "Height", &currHeight);
stopCapture();
// Reallocate Frames
if (!resizeCaptureFrame(value, currHeight))
{
startCapture();
return false;
}
startCapture();
break;
}
case CV_CAP_PROP_FRAME_HEIGHT:
{
tPvUint32 currWidth;
PvAttrUint32Get(Camera.Handle, "Width", &currWidth);
stopCapture();
// Reallocate Frames
if (!resizeCaptureFrame(currWidth, value))
{
startCapture();
return false;
}
startCapture();
break;
}
case CV_CAP_PROP_EXPOSURE:
if ((PvAttrUint32Set(Camera.Handle,"ExposureValue",(tPvUint32)value)==ePvErrSuccess))
break;
else
return false;
case CV_CAP_PROP_PVAPI_MULTICASTIP:
if (value==-1)
{
if ((PvAttrEnumSet(Camera.Handle,"MulticastEnable", "Off")==ePvErrSuccess))
break;
else
return false;
}
else
{
cv::String ip=cv::format("%d.%d.%d.%d", ((unsigned int)value>>24)&255, ((unsigned int)value>>16)&255, ((unsigned int)value>>8)&255, (unsigned int)value&255);
if ((PvAttrEnumSet(Camera.Handle,"MulticastEnable", "On")==ePvErrSuccess) &&
(PvAttrStringSet(Camera.Handle, "MulticastIPAddress", ip.c_str())==ePvErrSuccess))
break;
else
return false;
}
case CV_CAP_PROP_GAIN:
if (PvAttrUint32Set(Camera.Handle,"GainValue",(tPvUint32)value)!=ePvErrSuccess)
{
return false;
}
break;
case CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE:
if (value==0)
error = PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "Freerun");
else if (value==1)
error = PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "SyncIn1");
else if (value==2)
error = PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "SyncIn2");
else if (value==3)
error = PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "FixedRate");
else if (value==4)
error = PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "Software");
else
error = ePvErrOutOfRange;
if(error==ePvErrSuccess)
break;
else
return false;
case CV_CAP_PROP_PVAPI_DECIMATIONHORIZONTAL:
if (value >= 1 && value <= 8)
error = PvAttrUint32Set(Camera.Handle, "DecimationHorizontal", value);
else
error = ePvErrOutOfRange;
if(error==ePvErrSuccess)
break;
else
return false;
case CV_CAP_PROP_PVAPI_DECIMATIONVERTICAL:
if (value >= 1 && value <= 8)
error = PvAttrUint32Set(Camera.Handle, "DecimationVertical", value);
else
error = ePvErrOutOfRange;
if(error==ePvErrSuccess)
break;
else
return false;
case CV_CAP_PROP_PVAPI_BINNINGX:
error = PvAttrUint32Set(Camera.Handle, "BinningX", value);
if(error==ePvErrSuccess)
break;
else
return false;
case CV_CAP_PROP_PVAPI_BINNINGY:
error = PvAttrUint32Set(Camera.Handle, "BinningY", value);
if(error==ePvErrSuccess)
break;
else
return false;
case CV_CAP_PROP_PVAPI_PIXELFORMAT:
{
cv::String pixelFormat;
if (value==1)
pixelFormat = "Mono8";
else if (value==2)
pixelFormat = "Mono16";
else if (value==3)
pixelFormat = "Bayer8";
else if (value==4)
pixelFormat = "Bayer16";
else if (value==5)
pixelFormat = "Rgb24";
else if (value==6)
pixelFormat = "Bgr24";
else if (value==7)
pixelFormat = "Rgba32";
else if (value==8)
pixelFormat = "Bgra32";
else
return false;
if ((PvAttrEnumSet(Camera.Handle,"PixelFormat", pixelFormat.c_str())==ePvErrSuccess))
{
tPvUint32 currWidth;
tPvUint32 currHeight;
PvAttrUint32Get(Camera.Handle, "Width", &currWidth);
PvAttrUint32Get(Camera.Handle, "Height", &currHeight);
stopCapture();
// Reallocate Frames
if (!resizeCaptureFrame(currWidth, currHeight))
{
startCapture();
return false;
}
startCapture();
return true;
}
else
return false;
}
default:
return false;
}
return true;
}
void CvCaptureCAM_PvAPI::stopCapture()
{
PvCommandRun(Camera.Handle, "AcquisitionStop");
PvCaptureEnd(Camera.Handle);
}
bool CvCaptureCAM_PvAPI::startCapture()
{
// Start the camera
PvCaptureStart(Camera.Handle);
// Set the camera to capture continuously
if(PvAttrEnumSet(Camera.Handle, "AcquisitionMode", "Continuous")!= ePvErrSuccess)
{
fprintf(stderr,"Could not set PvAPI Acquisition Mode\n");
return false;
}
if(PvCommandRun(Camera.Handle, "AcquisitionStart")!= ePvErrSuccess)
{
fprintf(stderr,"Could not start PvAPI acquisition\n");
return false;
}
if(PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "Freerun")!= ePvErrSuccess)
{
fprintf(stderr,"Error setting PvAPI trigger to \"Freerun\"");
return false;
}
return true;
}
bool CvCaptureCAM_PvAPI::resizeCaptureFrame (int frameWidth, int frameHeight)
{
char pixelFormat[256];
tPvUint32 frameSize;
tPvUint32 sensorHeight;
tPvUint32 sensorWidth;
if (frame)
{
cvReleaseImage(&frame);
frame = NULL;
}
if (PvAttrUint32Get(Camera.Handle, "SensorWidth", &sensorWidth) != ePvErrSuccess)
{
return false;
}
if (PvAttrUint32Get(Camera.Handle, "SensorHeight", &sensorHeight) != ePvErrSuccess)
{
return false;
}
// Clamp out-of-range width/height requests to the maximum supported by the sensor
if ((frameWidth < 0) || ((tPvUint32)frameWidth > sensorWidth))
{
frameWidth = sensorWidth;
}
if ((frameHeight < 0) || ((tPvUint32)frameHeight > sensorHeight))
{
frameHeight = sensorHeight;
}
if (PvAttrUint32Set(Camera.Handle, "Height", frameHeight) != ePvErrSuccess)
{
return false;
}
if (PvAttrUint32Set(Camera.Handle, "Width", frameWidth) != ePvErrSuccess)
{
return false;
}
PvAttrEnumGet(Camera.Handle, "PixelFormat", pixelFormat,256,NULL);
PvAttrUint32Get(Camera.Handle, "TotalBytesPerFrame", &frameSize);
if ( (strcmp(pixelFormat, "Mono8")==0) || (strcmp(pixelFormat, "Bayer8")==0) )
{
frame = cvCreateImage(cvSize((int)frameWidth, (int)frameHeight), IPL_DEPTH_8U, 1);
frame->widthStep = (int)frameWidth;
Camera.Frame.ImageBufferSize = frameSize;
Camera.Frame.ImageBuffer = frame->imageData;
}
else if ( (strcmp(pixelFormat, "Mono16")==0) || (strcmp(pixelFormat, "Bayer16")==0) )
{
frame = cvCreateImage(cvSize((int)frameWidth, (int)frameHeight), IPL_DEPTH_16U, 1);
frame->widthStep = (int)frameWidth*2;
Camera.Frame.ImageBufferSize = frameSize;
Camera.Frame.ImageBuffer = frame->imageData;
}
else if ( (strcmp(pixelFormat, "Rgb24")==0) || (strcmp(pixelFormat, "Bgr24")==0) )
{
frame = cvCreateImage(cvSize((int)frameWidth, (int)frameHeight), IPL_DEPTH_8U, 3);
frame->widthStep = (int)frameWidth*3;
Camera.Frame.ImageBufferSize = frameSize;
Camera.Frame.ImageBuffer = frame->imageData;
}
else if ( (strcmp(pixelFormat, "Rgba32")==0) || (strcmp(pixelFormat, "Bgra32")==0) )
{
frame = cvCreateImage(cvSize((int)frameWidth, (int)frameHeight), IPL_DEPTH_8U, 4);
frame->widthStep = (int)frameWidth*4;
Camera.Frame.ImageBufferSize = frameSize;
Camera.Frame.ImageBuffer = frame->imageData;
}
else
return false;
return true;
}
cv::Ptr<cv::IVideoCapture> cv::create_PvAPI_capture( int index )
{
CvCaptureCAM_PvAPI* capture = new CvCaptureCAM_PvAPI;
if ( capture->open( index ))
return cv::makePtr<cv::LegacyCapture>(capture);
delete capture;
return NULL;
}
#endif
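For orientation, a hedged sketch of how a client drives this backend through the public API (the camera index and property values are assumptions):

#include <opencv2/videoio.hpp>

cv::VideoCapture cap(0, cv::CAP_PVAPI);
cap.set(cv::CAP_PROP_PVAPI_PIXELFORMAT, 1); // 1 maps to "Mono8" in setProperty() above
// Multicast addresses are packed as ((a*256 + b)*256 + c)*256 + d,
// so 192.168.1.10 becomes 3232235786.
cap.set(cv::CAP_PROP_PVAPI_MULTICASTIP, 3232235786.0);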


@ -0,0 +1,514 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
/*
This file adds support for uEye cameras in OpenCV.

Cameras can be opened by ID. If 0 is passed as the ID, the first available
camera is used; for any other number, the camera associated with that ID is
opened (cf. the IDS documentation for is_InitCamera).

Images are double buffered in a ring buffer of size 2 (called an 'image memory
sequence' in the uEye SDK, cf. is_AddToSequence). A buffer is locked on a
'grab' call, then copied and unlocked during 'retrieve'. The image queue
provided by the uEye SDK is not used, since it automatically locks buffers as
new images arrive, which means all buffers can end up locked when frames are
retrieved too slowly.
*/
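// Illustrative usage (an assumption, not part of the backend itself): with this
// file compiled in, a client opens the first available camera via the public API:
//
//   cv::VideoCapture cap(0, cv::CAP_UEYE);
//   cv::Mat frame;
//   while (cap.read(frame)) { /* grabFrame() locks a buffer, retrieveFrame() copies and unlocks it */ }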
#include "precomp.hpp"
#include <ueye.h>
#include <array>
#include <chrono>
#include <cstdlib>
#include <memory>
#include <thread>
namespace cv
{
namespace
{
struct image_buffer
{
char* data;
INT id;
};
}
#define ASSERT_UEYE(expr) { UINT expr_result = expr; if(IS_SUCCESS != expr_result) CV_Error_(Error::StsAssert, ("%s %s %d: failed with code %u", #expr, __FILE__, __LINE__, expr_result)); }
#define PRINT_ON_UEYE_ERROR( expr ) { UINT expr_result = expr; if(IS_SUCCESS != expr_result) CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " << #expr << " " << __FILE__ << " " << __LINE__ << ": failed with code " << expr_result); }
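// Usage note (illustrative): ASSERT_UEYE converts SDK error codes into a thrown
// cv::Exception, e.g. ASSERT_UEYE(is_SetFrameRate(cam_id, 30.0, &fps));
// PRINT_ON_UEYE_ERROR only logs, which suits close(), where errors can no
// longer be propagated to the caller.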
struct VideoCapture_uEye CV_FINAL: public IVideoCapture
{
int getCaptureDomain() CV_OVERRIDE
{
return cv::CAP_UEYE;
}
VideoCapture_uEye(int camera);
bool isOpened() const CV_OVERRIDE
{
return 255 != cam_id;
}
~VideoCapture_uEye() CV_OVERRIDE
{
close();
}
double getProperty(int property_id) const CV_OVERRIDE;
bool setProperty(int property_id, double value) CV_OVERRIDE;
bool grabFrame() CV_OVERRIDE;
bool retrieveFrame(int outputType, OutputArray frame) CV_OVERRIDE;
void close();
void start_camera();
void stop_camera();
void unlock_image_buffer();
HIDS cam_id = 255;
SENSORINFO sensor_info;
double fps;
int width;
int height;
int pitch;
std::array<image_buffer, 2> ring_buffer = {{{nullptr, 0}, {nullptr, 0}}};
char* locked_image = nullptr;
};
Ptr<IVideoCapture> create_ueye_camera(int camera)
{
return cv::makePtr<VideoCapture_uEye>(camera);
}
namespace
{
std::vector<IMAGE_FORMAT_INFO> get_freerun_formats(HIDS cam_id)
{
UINT count;
ASSERT_UEYE(is_ImageFormat(cam_id, IMGFRMT_CMD_GET_NUM_ENTRIES, &count, sizeof(count)));
UINT sizeof_list = sizeof(IMAGE_FORMAT_LIST) + (count - 1) * sizeof(IMAGE_FORMAT_INFO);
// The format list is a variable-length struct: allocate raw storage with
// std::malloc and pair it with std::free, so the deleter matches the allocator.
std::unique_ptr<IMAGE_FORMAT_LIST, void (*)(void*)> list(static_cast<IMAGE_FORMAT_LIST*>(std::malloc(sizeof_list)), std::free);
list->nSizeOfListEntry = sizeof(IMAGE_FORMAT_INFO);
list->nNumListElements = count;
ASSERT_UEYE(is_ImageFormat(cam_id, IMGFRMT_CMD_GET_LIST, list.get(), sizeof_list));
// copy to vector and filter out non-live modes
std::vector<IMAGE_FORMAT_INFO> formats;
formats.reserve(count + 1);
std::copy_if(list->FormatInfo, list->FormatInfo+count, std::back_inserter(formats), [](const IMAGE_FORMAT_INFO& format)
{
return (format.nSupportedCaptureModes & CAPTMODE_FREERUN);
});
return formats;
}
void set_matching_format(HIDS cam_id, const SENSORINFO& sensor_info, int width, int height)
{
// uEye camera formats sometimes do not include the native resolution (without binning, subsampling or AOI)
if(width == int(sensor_info.nMaxWidth) && height == int(sensor_info.nMaxHeight))
{
ASSERT_UEYE(is_SetBinning(cam_id, IS_BINNING_DISABLE));
ASSERT_UEYE(is_SetSubSampling(cam_id, IS_SUBSAMPLING_DISABLE));
IS_RECT rectAOI = {0, 0, width, height};
ASSERT_UEYE(is_AOI(cam_id, IS_AOI_IMAGE_SET_AOI, &rectAOI, sizeof(rectAOI)));
return;
}
auto formats = get_freerun_formats(cam_id);
CV_Assert(formats.size() > 0);
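// Heuristic: rank the formats by the summed signed deviation of their size and
// AOI origin from a sensor-centered crop of the requested resolution, and pick
// the smallest.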
auto calc_err = [=](const IMAGE_FORMAT_INFO& format)
{
return format.nWidth - width + format.nHeight - height + (sensor_info.nMaxWidth - width)/2 - format.nX0 + (sensor_info.nMaxHeight - height)/2 - format.nY0;
};
std::sort(formats.begin(), formats.end(), [=](const IMAGE_FORMAT_INFO& f0, const IMAGE_FORMAT_INFO& f1)
{
return calc_err(f0) < calc_err(f1);
});
ASSERT_UEYE(is_ImageFormat(cam_id, IMGFRMT_CMD_SET_FORMAT, &formats.front().nFormatID, sizeof(UINT)));
}
}
VideoCapture_uEye::VideoCapture_uEye(int camera)
{
CV_Assert(camera >= 0);
CV_Assert(camera < 255); // max camera id is 254
cam_id = static_cast<HIDS>(camera);
CV_LOG_DEBUG(NULL, "VIDEOIO(UEYE:" << cam_id << "): opening...");
ASSERT_UEYE(is_InitCamera(&cam_id, nullptr));
IS_INIT_EVENT init_event = {IS_SET_EVENT_FRAME, FALSE, FALSE};
ASSERT_UEYE(is_Event(cam_id, IS_EVENT_CMD_INIT, &init_event, sizeof(init_event)));
UINT frame_event = IS_SET_EVENT_FRAME;
ASSERT_UEYE(is_Event(cam_id, IS_EVENT_CMD_ENABLE, &frame_event, sizeof(frame_event)));
ASSERT_UEYE(is_ResetToDefault(cam_id));
ASSERT_UEYE(is_SetFrameRate(cam_id, IS_GET_FRAMERATE, &fps));
start_camera();
}
double VideoCapture_uEye::getProperty(int property_id) const
{
auto value = 0.;
switch (property_id)
{
case CAP_PROP_FRAME_WIDTH:
value = width;
break;
case CAP_PROP_FRAME_HEIGHT:
value = height;
break;
case CAP_PROP_FPS:
value = fps;
break;
case CAP_PROP_EXPOSURE:
ASSERT_UEYE(is_Exposure(cam_id, IS_EXPOSURE_CMD_GET_EXPOSURE, (void*)&value, sizeof(value)));
break;
case CAP_PROP_GAIN:
{
// is_SetHWGainFactor() reports the master gain factor scaled by 100.
auto gain = is_SetHWGainFactor(cam_id, IS_GET_MASTER_GAIN_FACTOR, 100);
value = static_cast<double>(gain)/100.0;
break;
}
}
return value;
}
bool VideoCapture_uEye::setProperty(int property_id, double value)
{
if(!isOpened())
return false;
try
{
bool set_format = false;
switch (property_id)
{
case CAP_PROP_FRAME_WIDTH:
if(width == value)
break;
width = static_cast<int>(value);
set_format = true;
break;
case CAP_PROP_FRAME_HEIGHT:
if(height == value)
break;
height = static_cast<int>(value);
set_format = true;
break;
case CAP_PROP_FPS:
if(fps == value)
break;
ASSERT_UEYE(is_SetFrameRate(cam_id, value, &fps));
break;
case CAP_PROP_EXPOSURE:
ASSERT_UEYE(is_Exposure(cam_id, IS_EXPOSURE_CMD_SET_EXPOSURE, (void*)&value, sizeof(value)));
break;
case CAP_PROP_GAIN:
is_SetHWGainFactor(cam_id, IS_SET_MASTER_GAIN_FACTOR, static_cast<int>(value));
break;
}
if(set_format)
{
set_matching_format(cam_id, sensor_info, width, height);
start_camera();
}
}
catch(const cv::Exception& e)
{
CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " << e.what());
return false;
}
return true;
}
bool VideoCapture_uEye::grabFrame()
{
if (!isOpened())
return false;
try
{
IS_WAIT_EVENT wait_event{IS_SET_EVENT_FRAME, static_cast<UINT>(3*1000/fps), 0, 0}; // wait for the time it should take to get 3 frames
ASSERT_UEYE(is_Event(cam_id, IS_EVENT_CMD_WAIT, &wait_event, sizeof(wait_event)));
INT current_buffer_id;
char* current_buffer;
char* last;
ASSERT_UEYE(is_GetActSeqBuf(cam_id, &current_buffer_id, &current_buffer, &last));
const int lock_tries = 4;
std::chrono::milliseconds lock_time_out(static_cast<int>(1000/(fps*4))); // wait for a quarter of a frame if not lockable, should not occur in event mode
UINT ret;
for(int i = 0; i < lock_tries; i++) // try locking the buffer
{
ret = is_LockSeqBuf(cam_id, IS_IGNORE_PARAMETER, last);
if(IS_SEQ_BUFFER_IS_LOCKED == ret)
std::this_thread::sleep_for(lock_time_out);
else
break;
}
ASSERT_UEYE(ret);
locked_image = last;
}
catch(const cv::Exception& e)
{
CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " << e.what());
close();
return false;
}
return true;
}
bool VideoCapture_uEye::retrieveFrame(int /*outputType*/, OutputArray frame)
{
if(!locked_image)
return false;
Mat(height, width, CV_8UC3, locked_image, pitch).copyTo(frame);
try
{
unlock_image_buffer();
}
catch(const cv::Exception& e)
{
CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " << e.what());
return false;
}
return true;
}
void VideoCapture_uEye::start_camera()
{
stop_camera();
IS_RECT aoi;
ASSERT_UEYE(is_AOI(cam_id, IS_AOI_IMAGE_GET_AOI, &aoi, sizeof(aoi)));
UINT x_is_abs_pos;
UINT y_is_abs_pos;
ASSERT_UEYE(is_AOI(cam_id, IS_AOI_IMAGE_GET_POS_X_ABS, &x_is_abs_pos , sizeof(x_is_abs_pos)));
ASSERT_UEYE(is_AOI(cam_id, IS_AOI_IMAGE_GET_POS_Y_ABS, &y_is_abs_pos , sizeof(y_is_abs_pos)));
ASSERT_UEYE(is_GetSensorInfo(cam_id, &sensor_info));
width = x_is_abs_pos? sensor_info.nMaxWidth: aoi.s32Width;
height = y_is_abs_pos? sensor_info.nMaxHeight: aoi.s32Height;
// allocate ring_buffer
int bpp = 24;
for(auto& image_memory: ring_buffer)
{
ASSERT_UEYE(is_AllocImageMem(cam_id, width, height, bpp, &image_memory.data, &image_memory.id));
ASSERT_UEYE(is_AddToSequence(cam_id, image_memory.data, image_memory.id));
}
// TODO: this could be set according to sensor_info.nColorMode and CAP_PROP_FOURCC
ASSERT_UEYE(is_SetColorMode(cam_id, IS_CM_BGR8_PACKED));
ASSERT_UEYE(is_GetImageMemPitch (cam_id, &pitch));
ASSERT_UEYE(is_CaptureVideo(cam_id, IS_DONT_WAIT));
}
void VideoCapture_uEye::stop_camera()
{
if(is_CaptureVideo(cam_id, IS_GET_LIVE))
ASSERT_UEYE(is_StopLiveVideo(cam_id, IS_FORCE_VIDEO_STOP));
if(locked_image)
unlock_image_buffer();
ASSERT_UEYE(is_ClearSequence(cam_id));
for(auto& buffer: ring_buffer) // by reference, so the nullptr assignment below sticks
{
if(buffer.data)
{
ASSERT_UEYE(is_FreeImageMem(cam_id, buffer.data, buffer.id));
buffer.data = nullptr;
}
}
}
void VideoCapture_uEye::close()
{
if(!isOpened())
return;
CV_LOG_DEBUG(NULL, "VIDEOIO(UEYE:" << cam_id << "): closing...");
// During closing we do not care about correct error handling as much.
// Either something has gone wrong already or it has been called from the
// destructor. Just make sure that all calls are done.
try
{
stop_camera();
}
catch(const cv::Exception& e)
{
CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " << e.what());
}
UINT frame_event = IS_SET_EVENT_FRAME;
PRINT_ON_UEYE_ERROR(is_Event(cam_id, IS_EVENT_CMD_DISABLE, &frame_event, sizeof(frame_event)));
PRINT_ON_UEYE_ERROR(is_Event(cam_id, IS_EVENT_CMD_EXIT, &frame_event, sizeof(frame_event)));
PRINT_ON_UEYE_ERROR(is_ExitCamera(cam_id));
cam_id = 255;
}
void VideoCapture_uEye::unlock_image_buffer()
{
char* tmp_buffer = nullptr;
std::swap(locked_image, tmp_buffer);
ASSERT_UEYE(is_UnlockSeqBuf(cam_id, IS_IGNORE_PARAMETER, tmp_buffer));
}
} // namespace cv
// plugin glue
#if defined(BUILD_PLUGIN)
#define ABI_VERSION 0
#define API_VERSION 0
#include "plugin_api.hpp"
namespace cv
{
namespace
{
#define CV_PLUGIN_NULL_FAIL(ptr) if(!ptr) return CV_ERROR_FAIL;
#define CV_PLUGIN_CALL_BEGIN CV_PLUGIN_NULL_FAIL(handle) try {
#define CV_PLUGIN_CALL_END } catch (...) { return CV_ERROR_FAIL; }
CvResult CV_API_CALL cv_capture_open(const char*, int cam_id, CV_OUT CvPluginCapture* handle)
{
CV_PLUGIN_CALL_BEGIN
*handle = NULL;
std::unique_ptr<VideoCapture_uEye> cap(new VideoCapture_uEye(cam_id));
if (cap->isOpened())
{
*handle = (CvPluginCapture)cap.release();
return CV_ERROR_OK;
}
return CV_ERROR_FAIL;
CV_PLUGIN_CALL_END
}
CvResult CV_API_CALL cv_capture_release(CvPluginCapture handle)
{
CV_PLUGIN_NULL_FAIL(handle)
VideoCapture_uEye* instance = (VideoCapture_uEye*)handle;
delete instance;
return CV_ERROR_OK;
}
CvResult CV_API_CALL cv_capture_get_prop(CvPluginCapture handle, int prop, CV_OUT double* val)
{
CV_PLUGIN_NULL_FAIL(val)
CV_PLUGIN_CALL_BEGIN
VideoCapture_uEye* instance = (VideoCapture_uEye*)handle;
*val = instance->getProperty(prop);
return CV_ERROR_OK;
CV_PLUGIN_CALL_END
}
CvResult CV_API_CALL cv_capture_set_prop(CvPluginCapture handle, int prop, double val)
{
CV_PLUGIN_CALL_BEGIN
VideoCapture_uEye* instance = (VideoCapture_uEye*)handle;
return instance->setProperty(prop, val) ? CV_ERROR_OK : CV_ERROR_FAIL;
CV_PLUGIN_CALL_END
}
CvResult CV_API_CALL cv_capture_grab(CvPluginCapture handle)
{
CV_PLUGIN_CALL_BEGIN
VideoCapture_uEye* instance = (VideoCapture_uEye*)handle;
return instance->grabFrame() ? CV_ERROR_OK : CV_ERROR_FAIL;
CV_PLUGIN_CALL_END
}
CvResult CV_API_CALL cv_capture_retrieve(CvPluginCapture handle, int stream_idx, cv_videoio_retrieve_cb_t callback, void* userdata)
{
CV_PLUGIN_CALL_BEGIN
VideoCapture_uEye* instance = (VideoCapture_uEye*)handle;
Mat img;
if (instance->retrieveFrame(stream_idx, img))
return callback(stream_idx, img.data, (int)img.step, img.cols, img.rows, img.channels(), userdata);
return CV_ERROR_FAIL;
CV_PLUGIN_CALL_END
}
CvResult CV_API_CALL cv_writer_open(const char* /*filename*/, int /*fourcc*/, double /*fps*/, int /*width*/, int /*height*/, int /*isColor*/,
CV_OUT CvPluginWriter* /*handle*/)
{
return CV_ERROR_FAIL;
}
CvResult CV_API_CALL cv_writer_release(CvPluginWriter /*handle*/)
{
return CV_ERROR_FAIL;
}
CvResult CV_API_CALL cv_writer_get_prop(CvPluginWriter /*handle*/, int /*prop*/, CV_OUT double* /*val*/)
{
return CV_ERROR_FAIL;
}
CvResult CV_API_CALL cv_writer_set_prop(CvPluginWriter /*handle*/, int /*prop*/, double /*val*/)
{
return CV_ERROR_FAIL;
}
CvResult CV_API_CALL cv_writer_write(CvPluginWriter /*handle*/, const unsigned char* /*data*/, int /*step*/, int /*width*/, int /*height*/, int /*cn*/)
{
return CV_ERROR_FAIL;
}
const OpenCV_VideoIO_Plugin_API_preview plugin_api =
{
{
sizeof(OpenCV_VideoIO_Plugin_API_preview), ABI_VERSION, API_VERSION,
CV_VERSION_MAJOR, CV_VERSION_MINOR, CV_VERSION_REVISION, CV_VERSION_STATUS,
"uEye OpenCV Video I/O plugin"
},
{
/* 1*/CAP_UEYE,
/* 2*/cv_capture_open,
/* 3*/cv_capture_release,
/* 4*/cv_capture_get_prop,
/* 5*/cv_capture_set_prop,
/* 6*/cv_capture_grab,
/* 7*/cv_capture_retrieve,
/* 8*/cv_writer_open,
/* 9*/cv_writer_release,
/* 10*/cv_writer_get_prop,
/* 11*/cv_writer_set_prop,
/* 12*/cv_writer_write
}
};
} // namespace
} // namespace cv
const OpenCV_VideoIO_Plugin_API_preview* opencv_videoio_plugin_init_v0(int requested_abi_version, int requested_api_version, void* /*reserved=NULL*/) CV_NOEXCEPT
{
if (requested_abi_version == ABI_VERSION && requested_api_version <= API_VERSION)
return &cv::plugin_api;
return NULL;
}
#endif // BUILD_PLUGIN
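To illustrate the contract above, a hedged sketch of how a loader could resolve the exported entry point (the POSIX calls and library name are assumptions; OpenCV's own backend_plugin.cpp does this portably):

#include <dlfcn.h>

typedef const OpenCV_VideoIO_Plugin_API_preview* (*plugin_init_t)(int abi, int api, void* reserved);

void* lib = dlopen("libopencv_videoio_ueye.so", RTLD_LAZY);
plugin_init_t init = lib ? (plugin_init_t)dlsym(lib, "opencv_videoio_plugin_init_v0") : nullptr;
const OpenCV_VideoIO_Plugin_API_preview* api = init ? init(0 /*ABI*/, 0 /*API*/, nullptr) : nullptr;
// On success, the host calls the returned table's function pointers
// (cv_capture_open and friends) instead of linking the SDK directly.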

File diff suppressed because it is too large

@ -0,0 +1,173 @@
// Copyright (c) Microsoft. All rights reserved.
//
// The MIT License (MIT)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files(the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions :
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "MediaStreamSink.hpp"
#include "MediaSink.hpp"
#include "CaptureFrameGrabber.hpp"
using namespace Media;
using namespace Platform;
using namespace Windows::Foundation;
using namespace Windows::Media;
using namespace Windows::Media::Capture;
using namespace Windows::Media::MediaProperties;
using namespace concurrency;
using namespace Microsoft::WRL::Details;
using namespace Microsoft::WRL;
task<Media::CaptureFrameGrabber^> Media::CaptureFrameGrabber::CreateAsync(_In_ MediaCapture^ capture, _In_ VideoEncodingProperties^ props, CaptureStreamType streamType)
{
auto reader = ref new Media::CaptureFrameGrabber(capture, props, streamType);
auto profile = ref new MediaEncodingProfile();
profile->Video = props;
task<void> task;
if (reader->_streamType == CaptureStreamType::Preview)
{
task = create_task(capture->StartPreviewToCustomSinkAsync(profile, reader->_mediaExtension));
}
else
{
task = create_task(capture->StartRecordToCustomSinkAsync(profile, reader->_mediaExtension));
}
return task.then([reader]()
{
reader->_state = State::Started;
return reader;
});
}
Media::CaptureFrameGrabber::CaptureFrameGrabber(_In_ MediaCapture^ capture, _In_ VideoEncodingProperties^ props, CaptureStreamType streamType)
: _state(State::Created)
, _streamType(streamType)
, _capture(capture)
{
auto videoSampleHandler = ref new MediaSampleHandler(this, &Media::CaptureFrameGrabber::ProcessSample);
_mediaSink = Make<MediaSink>(nullptr, props, nullptr, videoSampleHandler);
_mediaExtension = reinterpret_cast<IMediaExtension^>(static_cast<AWM::IMediaExtension*>(_mediaSink.Get()));
}
Media::CaptureFrameGrabber::~CaptureFrameGrabber()
{
if (_state == State::Started)
{
if (_streamType == CaptureStreamType::Preview)
{
(void)_capture->StopPreviewAsync();
}
else
{
(void)_capture->StopRecordAsync();
}
}
if (_mediaSink != nullptr)
{
(void)_mediaSink->Shutdown();
_mediaSink = nullptr;
}
_mediaExtension = nullptr;
_capture = nullptr;
}
void Media::CaptureFrameGrabber::ShowCameraSettings()
{
#if (WINAPI_FAMILY != WINAPI_FAMILY_PHONE_APP) && (WINAPI_FAMILY != WINAPI_FAMILY_PC_APP)
if (_state == State::Started)
{
CameraOptionsUI::Show(_capture.Get());
}
#endif
}
task<void> Media::CaptureFrameGrabber::FinishAsync()
{
auto lock = _lock.LockExclusive();
if (_state != State::Started)
{
throw ref new COMException(E_UNEXPECTED, L"State");
}
_state = State::Closing;
if (_mediaSink != nullptr)
{
(void)_mediaSink->Shutdown();
_mediaSink = nullptr;
}
_mediaExtension = nullptr;
task<void> task;
if (_streamType == CaptureStreamType::Preview)
{
task = create_task(_capture->StopPreviewAsync());
}
else
{
task = create_task(_capture->StopRecordAsync());
}
return task.then([this]()
{
auto lock = _lock.LockExclusive();
_state = State::Closed;
_capture = nullptr;
});
}
task<ComPtr<IMF2DBuffer2>> Media::CaptureFrameGrabber::GetFrameAsync()
{
auto lock = _lock.LockExclusive();
if (_state != State::Started)
{
throw ref new COMException(E_UNEXPECTED, L"State");
}
_mediaSink->RequestVideoSample();
task_completion_event<ComPtr<IMF2DBuffer2>> taskEvent;
_videoSampleRequestQueue.push(taskEvent);
return create_task(taskEvent);
}
void Media::CaptureFrameGrabber::ProcessSample(_In_ MediaSample^ sample)
{
task_completion_event<ComPtr<IMF2DBuffer2>> t;
{
auto lock = _lock.LockExclusive();
t = _videoSampleRequestQueue.front();
_videoSampleRequestQueue.pop();
}
ComPtr<IMFMediaBuffer> buffer;
CHK(sample->Sample->ConvertToContiguousBuffer(&buffer));
// Dispatch without the lock taken to avoid deadlocks
t.set(As<IMF2DBuffer2>(buffer));
}
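// Illustrative caller (an assumption, mirroring GetFrameAsync() above):
//
//   grabber->GetFrameAsync().then([](MW::ComPtr<IMF2DBuffer2> buffer)
//   {
//       BYTE* scanline0 = nullptr;
//       LONG pitch = 0;
//       CHK(buffer->Lock2D(&scanline0, &pitch));
//       // ... copy the frame out ...
//       CHK(buffer->Unlock2D());
//   });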


@ -0,0 +1,85 @@
// Copyright (c) Microsoft. All rights reserved.
//
// The MIT License (MIT)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files(the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions :
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#pragma once
#include "MFIncludes.hpp"
namespace Media {
class MediaSink;
enum class CaptureStreamType
{
Preview = 0,
Record
};
ref class CaptureFrameGrabber sealed
{
public:
// IClosable
virtual ~CaptureFrameGrabber();
virtual void ShowCameraSettings();
internal:
static concurrency::task<CaptureFrameGrabber^> CreateAsync(_In_ WMC::MediaCapture^ capture, _In_ WMMp::VideoEncodingProperties^ props)
{
return CreateAsync(capture, props, CaptureStreamType::Preview);
}
static concurrency::task<CaptureFrameGrabber^> CreateAsync(_In_ WMC::MediaCapture^ capture, _In_ WMMp::VideoEncodingProperties^ props, CaptureStreamType streamType);
concurrency::task<MW::ComPtr<IMF2DBuffer2>> GetFrameAsync();
concurrency::task<void> FinishAsync();
private:
CaptureFrameGrabber(_In_ WMC::MediaCapture^ capture, _In_ WMMp::VideoEncodingProperties^ props, CaptureStreamType streamType);
void ProcessSample(_In_ MediaSample^ sample);
Platform::Agile<WMC::MediaCapture> _capture;
::Windows::Media::IMediaExtension^ _mediaExtension;
MW::ComPtr<MediaSink> _mediaSink;
CaptureStreamType _streamType;
enum class State
{
Created,
Started,
Closing,
Closed
} _state;
std::queue<concurrency::task_completion_event<MW::ComPtr<IMF2DBuffer2>>> _videoSampleRequestQueue;
AutoMF _mf;
MWW::SRWLock _lock;
};
}


@ -0,0 +1,172 @@
// Header for standard system include files.
// Copyright (c) Microsoft. All rights reserved.
//
// The MIT License (MIT)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files(the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions :
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#pragma once
#include <collection.h>
#include <ppltasks.h>
#include <wrl\implements.h>
#include <wrl\wrappers\corewrappers.h>
#include <Roerrorapi.h>
#include <queue>
#include <sstream>
#include <robuffer.h>
#include <mfapi.h>
#include <mfidl.h>
#include <Mferror.h>
#include <windows.media.h>
#include <windows.media.mediaproperties.h>
namespace AWM = ::ABI::Windows::Media;
namespace AWMMp = ::ABI::Windows::Media::MediaProperties;
namespace AWFC = ::ABI::Windows::Foundation::Collections;
namespace MW = ::Microsoft::WRL;
namespace MWD = ::Microsoft::WRL::Details;
namespace MWW = ::Microsoft::WRL::Wrappers;
namespace WMC = ::Windows::Media::Capture;
namespace WF = ::Windows::Foundation;
namespace WMMp = ::Windows::Media::MediaProperties;
namespace WSS = ::Windows::Storage::Streams;
// Exception-based error handling
#define CHK(statement) {HRESULT _hr = (statement); if (FAILED(_hr)) { throw ref new Platform::COMException(_hr); };}
#define CHKNULL(p) {if ((p) == nullptr) { throw ref new Platform::NullReferenceException(L#p); };}
// Exception-free error handling
#define CHK_RETURN(statement) {hr = (statement); if (FAILED(hr)) { return hr; };}
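// For illustration: CHK throws (C++/CX code paths), while CHK_RETURN propagates
// the HRESULT (plain-COM code paths) and assumes a local 'HRESULT hr', e.g.
//   CHK(MFCreateSample(&sample));        // throws Platform::COMException on failure
//   CHK_RETURN(MFCreateSample(&sample)); // returns the failing HRESULT instead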
// Cast a C++/CX smart pointer to an ABI smart pointer
template<typename T, typename U>
MW::ComPtr<T> As(U^ in)
{
MW::ComPtr<T> out;
CHK(reinterpret_cast<IInspectable*>(in)->QueryInterface(IID_PPV_ARGS(&out)));
return out;
}
// Cast an ABI smartpointer
template<typename T, typename U>
Microsoft::WRL::ComPtr<T> As(const Microsoft::WRL::ComPtr<U>& in)
{
Microsoft::WRL::ComPtr<T> out;
CHK(in.As(&out));
return out;
}
// Cast an ABI smartpointer
template<typename T, typename U>
Microsoft::WRL::ComPtr<T> As(U* in)
{
Microsoft::WRL::ComPtr<T> out;
CHK(in->QueryInterface(IID_PPV_ARGS(&out)));
return out;
}
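// e.g. (illustrative): auto buffer2d = As<IMF2DBuffer2>(mediaBuffer);
// CHK inside As<> throws if the QueryInterface fails.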
// Get access to bytes in IBuffer
inline unsigned char* GetData(_In_ WSS::IBuffer^ buffer)
{
unsigned char* bytes = nullptr;
CHK(As<WSS::IBufferByteAccess>(buffer)->Buffer(&bytes));
return bytes;
}
// Class to start and shutdown Media Foundation
class AutoMF
{
public:
AutoMF()
: _bInitialized(false)
{
CHK(MFStartup(MF_VERSION));
_bInitialized = true; // only a successful MFStartup() is later balanced by MFShutdown()
}
~AutoMF()
{
if (_bInitialized)
{
(void)MFShutdown();
}
}
private:
bool _bInitialized;
};
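// Illustrative: one AutoMF at the top of a scope keeps Media Foundation
// initialized for that scope, and MFShutdown() runs automatically on exit:
//   { AutoMF mf; /* MF calls are safe here */ }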
// Record the origin of a failed HRESULT (with message)
template <size_t N>
HRESULT OriginateError(__in HRESULT hr, __in wchar_t const (&str)[N])
{
if (FAILED(hr))
{
::RoOriginateErrorW(hr, N - 1, str);
}
return hr;
}
// Record the origin of a failed HRESULT (without a message)
inline HRESULT OriginateError(__in HRESULT hr)
{
if (FAILED(hr))
{
::RoOriginateErrorW(hr, 0, nullptr);
}
return hr;
}
// Converts exceptions into HRESULTs
template <typename Lambda>
HRESULT ExceptionBoundary(Lambda&& lambda)
{
try
{
lambda();
return S_OK;
}
catch (Platform::Exception^ e)
{
return e->HResult;
}
catch (const std::bad_alloc&)
{
return E_OUTOFMEMORY;
}
catch (const std::exception&)
{
return E_FAIL;
}
}
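// Illustrative usage at an ABI boundary:
//   HRESULT hr = ExceptionBoundary([&]()
//   {
//       CHKNULL(sample); // any throw here is converted to an HRESULT
//   });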
// Wraps an IMFSample in a C++/CX class to be able to define a callback delegate
ref class MediaSample sealed
{
internal:
MW::ComPtr<IMFSample> Sample;
};
delegate void MediaSampleHandler(MediaSample^ sample);

Some files were not shown because too many files have changed in this diff