feat: 切换后端至PaddleOCR-NCNN,切换工程为CMake

1.项目后端整体迁移至PaddleOCR-NCNN算法,已通过基本的兼容性测试
2.工程改为使用CMake组织,后续为了更好地兼容第三方库,不再提供QMake工程
3.重整权利声明文件,重整代码工程,确保最小化侵权风险

Log: 切换后端至PaddleOCR-NCNN,切换工程为CMake
Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
This commit is contained in:
wangzhengyang
2022-05-10 09:54:44 +08:00
parent ecdd171c6f
commit 718c41634f
10018 changed files with 3593797 additions and 186748 deletions

View File

@ -0,0 +1,22 @@
# Build configuration for the OpenCV DirectX interoperability samples.
# Windows-only: every sample additionally links against gdi32.
ocv_install_example_src(directx *.cpp *.hpp CMakeLists.txt)
# OpenCV modules each sample in this directory depends on.
set(OPENCV_DIRECTX_SAMPLES_REQUIRED_DEPS
opencv_core
opencv_imgproc
opencv_imgcodecs
opencv_videoio
opencv_highgui)
ocv_check_dependencies(${OPENCV_DIRECTX_SAMPLES_REQUIRED_DEPS})
# Skip this directory unless samples are enabled and all deps were found.
if(NOT BUILD_EXAMPLES OR NOT OCV_DEPENDENCIES_FOUND)
return()
endif()
project("directx_samples")
ocv_include_modules_recurse(${tgt} ${OPENCV_DIRECTX_SAMPLES_REQUIRED_DEPS})
# Create one executable target per *.cpp sample found in this directory.
file(GLOB all_samples RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cpp)
foreach(sample_filename ${all_samples})
ocv_define_sample(tgt ${sample_filename} directx)
ocv_target_link_libraries(${tgt} PRIVATE ${OPENCV_LINKER_LIBS} ${OPENCV_DIRECTX_SAMPLES_REQUIRED_DEPS})
# gdi32 provides the GDI text-overlay functions used by the D3D9 samples.
ocv_target_link_libraries(${tgt} PRIVATE "gdi32")
endforeach()

View File

@ -0,0 +1,301 @@
/*
// A sample program demonstrating interoperability of OpenCV cv::UMat with Direct X surface
// At first, the data obtained from video file or camera and placed onto Direct X surface,
// following mapping of this Direct X surface to OpenCV cv::UMat and call cv::Blur function.
// The result is mapped back to Direct X surface and rendered through Direct X API.
*/
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <d3d10.h>
#include "opencv2/core.hpp"
#include "opencv2/core/directx.hpp"
#include "opencv2/core/ocl.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include "d3dsample.hpp"
#pragma comment (lib, "d3d10.lib")
class D3D10WinApp : public D3DSample
{
public:
D3D10WinApp(int width, int height, std::string& window_name, cv::VideoCapture& cap) :
D3DSample(width, height, window_name, cap) {}
~D3D10WinApp() {}
int create(void)
{
// base initialization
D3DSample::create();
// initialize DirectX
HRESULT r;
DXGI_SWAP_CHAIN_DESC scd;
ZeroMemory(&scd, sizeof(DXGI_SWAP_CHAIN_DESC));
scd.BufferCount = 1; // one back buffer
scd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM; // use 32-bit color
scd.BufferDesc.Width = m_width; // set the back buffer width
scd.BufferDesc.Height = m_height; // set the back buffer height
scd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT; // how swap chain is to be used
scd.OutputWindow = m_hWnd; // the window to be used
scd.SampleDesc.Count = 1; // how many multisamples
scd.Windowed = TRUE; // windowed/full-screen mode
scd.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
scd.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH; // allow full-screen switching
r = ::D3D10CreateDeviceAndSwapChain(
NULL,
D3D10_DRIVER_TYPE_HARDWARE,
NULL,
0,
D3D10_SDK_VERSION,
&scd,
&m_pD3D10SwapChain,
&m_pD3D10Dev);
if (FAILED(r))
{
return EXIT_FAILURE;
}
r = m_pD3D10SwapChain->GetBuffer(0, __uuidof(ID3D10Texture2D), (LPVOID*)&m_pBackBuffer);
if (FAILED(r))
{
return EXIT_FAILURE;
}
r = m_pD3D10Dev->CreateRenderTargetView(m_pBackBuffer, NULL, &m_pRenderTarget);
if (FAILED(r))
{
return EXIT_FAILURE;
}
m_pD3D10Dev->OMSetRenderTargets(1, &m_pRenderTarget, NULL);
D3D10_VIEWPORT viewport;
ZeroMemory(&viewport, sizeof(D3D10_VIEWPORT));
viewport.Width = m_width;
viewport.Height = m_height;
viewport.MinDepth = 0.0f;
viewport.MaxDepth = 0.0f;
m_pD3D10Dev->RSSetViewports(1, &viewport);
D3D10_TEXTURE2D_DESC desc = { 0 };
desc.Width = m_width;
desc.Height = m_height;
desc.MipLevels = 1;
desc.ArraySize = 1;
desc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
desc.SampleDesc.Count = 1;
desc.BindFlags = D3D10_BIND_SHADER_RESOURCE;
desc.Usage = D3D10_USAGE_DYNAMIC;
desc.CPUAccessFlags = D3D10_CPU_ACCESS_WRITE;
r = m_pD3D10Dev->CreateTexture2D(&desc, NULL, &m_pSurface);
if (FAILED(r))
{
std::cerr << "Can't create texture with input image" << std::endl;
return EXIT_FAILURE;
}
// initialize OpenCL context of OpenCV lib from DirectX
if (cv::ocl::haveOpenCL())
{
m_oclCtx = cv::directx::ocl::initializeContextFromD3D10Device(m_pD3D10Dev);
}
m_oclDevName = cv::ocl::useOpenCL() ?
cv::ocl::Context::getDefault().device(0).name() :
"No OpenCL device";
return EXIT_SUCCESS;
} // create()
// get media data on DX surface for further processing
int get_surface(ID3D10Texture2D** ppSurface)
{
HRESULT r;
if (!m_cap.read(m_frame_bgr))
return EXIT_FAILURE;
cv::cvtColor(m_frame_bgr, m_frame_rgba, cv::COLOR_BGR2RGBA);
UINT subResource = ::D3D10CalcSubresource(0, 0, 1);
D3D10_MAPPED_TEXTURE2D mappedTex;
r = m_pSurface->Map(subResource, D3D10_MAP_WRITE_DISCARD, 0, &mappedTex);
if (FAILED(r))
{
return r;
}
cv::Mat m(m_height, m_width, CV_8UC4, mappedTex.pData, (int)mappedTex.RowPitch);
// copy video frame data to surface
m_frame_rgba.copyTo(m);
m_pSurface->Unmap(subResource);
*ppSurface = m_pSurface;
return EXIT_SUCCESS;
} // get_surface()
// process and render media data
int render()
{
try
{
if (m_shutdown)
return EXIT_SUCCESS;
// capture user input once
MODE mode = (m_mode == MODE_GPU_NV12) ? MODE_GPU_RGBA : m_mode;
HRESULT r;
ID3D10Texture2D* pSurface;
r = get_surface(&pSurface);
if (FAILED(r))
{
return EXIT_FAILURE;
}
m_timer.reset();
m_timer.start();
switch (mode)
{
case MODE_CPU:
{
// process video frame on CPU
UINT subResource = ::D3D10CalcSubresource(0, 0, 1);
D3D10_MAPPED_TEXTURE2D mappedTex;
r = pSurface->Map(subResource, D3D10_MAP_WRITE_DISCARD, 0, &mappedTex);
if (FAILED(r))
{
return r;
}
cv::Mat m(m_height, m_width, CV_8UC4, mappedTex.pData, (int)mappedTex.RowPitch);
if (m_demo_processing)
{
// blur D3D10 surface with OpenCV on CPU
cv::blur(m, m, cv::Size(15, 15));
}
m_timer.stop();
cv::String strMode = cv::format("mode: %s", m_modeStr[MODE_CPU].c_str());
cv::String strProcessing = m_demo_processing ? "blur frame" : "copy frame";
cv::String strTime = cv::format("time: %4.3f msec", m_timer.getTimeMilli());
cv::String strDevName = cv::format("OpenCL device: %s", m_oclDevName.c_str());
cv::putText(m, strMode, cv::Point(0, 20), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2);
cv::putText(m, strProcessing, cv::Point(0, 40), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2);
cv::putText(m, strTime, cv::Point(0, 60), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2);
cv::putText(m, strDevName, cv::Point(0, 80), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2);
pSurface->Unmap(subResource);
break;
}
case MODE_GPU_RGBA:
{
// process video frame on GPU
cv::UMat u;
cv::directx::convertFromD3D10Texture2D(pSurface, u);
if (m_demo_processing)
{
// blur D3D10 surface with OpenCV on GPU with OpenCL
cv::blur(u, u, cv::Size(15, 15));
}
m_timer.stop();
cv::String strMode = cv::format("mode: %s", m_modeStr[MODE_GPU_RGBA].c_str());
cv::String strProcessing = m_demo_processing ? "blur frame" : "copy frame";
cv::String strTime = cv::format("time: %4.3f msec", m_timer.getTimeMilli());
cv::String strDevName = cv::format("OpenCL device: %s", m_oclDevName.c_str());
cv::putText(u, strMode, cv::Point(0, 20), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2);
cv::putText(u, strProcessing, cv::Point(0, 40), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2);
cv::putText(u, strTime, cv::Point(0, 60), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2);
cv::putText(u, strDevName, cv::Point(0, 80), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2);
cv::directx::convertToD3D10Texture2D(u, pSurface);
break;
}
} // switch
// traditional DX render pipeline:
// BitBlt surface to backBuffer and flip backBuffer to frontBuffer
m_pD3D10Dev->CopyResource(m_pBackBuffer, pSurface);
// present the back buffer contents to the display
// switch the back buffer and the front buffer
r = m_pD3D10SwapChain->Present(0, 0);
if (FAILED(r))
{
return EXIT_FAILURE;
}
} // try
catch (const cv::Exception& e)
{
std::cerr << "Exception: " << e.what() << std::endl;
return 10;
}
return EXIT_SUCCESS;
} // render()
int cleanup(void)
{
SAFE_RELEASE(m_pSurface);
SAFE_RELEASE(m_pBackBuffer);
SAFE_RELEASE(m_pD3D10SwapChain);
SAFE_RELEASE(m_pRenderTarget);
SAFE_RELEASE(m_pD3D10Dev);
D3DSample::cleanup();
return EXIT_SUCCESS;
} // cleanup()
private:
ID3D10Device* m_pD3D10Dev;
IDXGISwapChain* m_pD3D10SwapChain;
ID3D10Texture2D* m_pBackBuffer;
ID3D10Texture2D* m_pSurface;
ID3D10RenderTargetView* m_pRenderTarget;
cv::ocl::Context m_oclCtx;
cv::String m_oclPlatformName;
cv::String m_oclDevName;
};
// main func
// Entry point: delegate window creation and the message loop to the shared
// d3d_app<> runner from d3dsample.hpp.
int main(int argc, char** argv)
{
    std::string title("D3D10 interop sample");
    return d3d_app<D3D10WinApp>(argc, argv, title);
}

View File

@ -0,0 +1,489 @@
/*
// A sample program demonstrating interoperability of OpenCV cv::UMat with Direct X surface
// At first, the data obtained from video file or camera and placed onto Direct X surface,
// following mapping of this Direct X surface to OpenCV cv::UMat and call cv::Blur function.
// The result is mapped back to Direct X surface and rendered through Direct X API.
*/
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <d3d11.h>
#include "opencv2/core.hpp"
#include "opencv2/core/directx.hpp"
#include "opencv2/core/ocl.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include "d3dsample.hpp"
#pragma comment (lib, "d3d11.lib")
class D3D11WinApp : public D3DSample
{
public:
D3D11WinApp(int width, int height, std::string& window_name, cv::VideoCapture& cap)
: D3DSample(width, height, window_name, cap),
m_nv12_available(false)
{}
~D3D11WinApp() {}
int create(void)
{
// base initialization
D3DSample::create();
// initialize DirectX
HRESULT r;
DXGI_SWAP_CHAIN_DESC scd;
ZeroMemory(&scd, sizeof(DXGI_SWAP_CHAIN_DESC));
scd.BufferCount = 1; // one back buffer
scd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM; // use 32-bit color
scd.BufferDesc.Width = m_width; // set the back buffer width
scd.BufferDesc.Height = m_height; // set the back buffer height
scd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT; // how swap chain is to be used
scd.OutputWindow = m_hWnd; // the window to be used
scd.SampleDesc.Count = 1; // how many multisamples
scd.Windowed = TRUE; // windowed/full-screen mode
scd.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
scd.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH; // allow full-screen switching
r = ::D3D11CreateDeviceAndSwapChain(
NULL,
D3D_DRIVER_TYPE_HARDWARE,
NULL,
0,
NULL,
0,
D3D11_SDK_VERSION,
&scd,
&m_pD3D11SwapChain,
&m_pD3D11Dev,
NULL,
&m_pD3D11Ctx);
if (FAILED(r))
{
throw std::runtime_error("D3D11CreateDeviceAndSwapChain() failed!");
}
#if defined(_WIN32_WINNT_WIN8) && _WIN32_WINNT >= _WIN32_WINNT_WIN8
UINT fmt = 0;
r = m_pD3D11Dev->CheckFormatSupport(DXGI_FORMAT_NV12, &fmt);
if (SUCCEEDED(r))
{
m_nv12_available = true;
}
#endif
r = m_pD3D11SwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (LPVOID*)&m_pBackBuffer);
if (FAILED(r))
{
throw std::runtime_error("GetBuffer() failed!");
}
r = m_pD3D11Dev->CreateRenderTargetView(m_pBackBuffer, NULL, &m_pRenderTarget);
if (FAILED(r))
{
throw std::runtime_error("CreateRenderTargetView() failed!");
}
m_pD3D11Ctx->OMSetRenderTargets(1, &m_pRenderTarget, NULL);
D3D11_VIEWPORT viewport;
ZeroMemory(&viewport, sizeof(D3D11_VIEWPORT));
viewport.Width = (float)m_width;
viewport.Height = (float)m_height;
viewport.MinDepth = 0.0f;
viewport.MaxDepth = 0.0f;
m_pD3D11Ctx->RSSetViewports(1, &viewport);
m_pSurfaceRGBA = 0;
m_pSurfaceNV12 = 0;
m_pSurfaceNV12_cpu_copy = 0;
D3D11_TEXTURE2D_DESC desc_rgba;
desc_rgba.Width = m_width;
desc_rgba.Height = m_height;
desc_rgba.MipLevels = 1;
desc_rgba.ArraySize = 1;
desc_rgba.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
desc_rgba.SampleDesc.Count = 1;
desc_rgba.SampleDesc.Quality = 0;
desc_rgba.BindFlags = D3D11_BIND_SHADER_RESOURCE;
desc_rgba.Usage = D3D11_USAGE_DYNAMIC;
desc_rgba.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
desc_rgba.MiscFlags = 0;
r = m_pD3D11Dev->CreateTexture2D(&desc_rgba, 0, &m_pSurfaceRGBA);
if (FAILED(r))
{
throw std::runtime_error("Can't create DX texture");
}
#if defined(_WIN32_WINNT_WIN8) && _WIN32_WINNT >= _WIN32_WINNT_WIN8
if(m_nv12_available)
{
D3D11_TEXTURE2D_DESC desc_nv12;
desc_nv12.Width = m_width;
desc_nv12.Height = m_height;
desc_nv12.MipLevels = 1;
desc_nv12.ArraySize = 1;
desc_nv12.Format = DXGI_FORMAT_NV12;
desc_nv12.SampleDesc.Count = 1;
desc_nv12.SampleDesc.Quality = 0;
desc_nv12.BindFlags = D3D11_BIND_SHADER_RESOURCE;
desc_nv12.Usage = D3D11_USAGE_DEFAULT;
desc_nv12.CPUAccessFlags = 0;
desc_nv12.MiscFlags = D3D11_RESOURCE_MISC_SHARED;
r = m_pD3D11Dev->CreateTexture2D(&desc_nv12, 0, &m_pSurfaceNV12);
if (FAILED(r))
{
throw std::runtime_error("Can't create DX NV12 texture");
}
D3D11_TEXTURE2D_DESC desc_nv12_cpu_copy;
desc_nv12_cpu_copy.Width = m_width;
desc_nv12_cpu_copy.Height = m_height;
desc_nv12_cpu_copy.MipLevels = 1;
desc_nv12_cpu_copy.ArraySize = 1;
desc_nv12_cpu_copy.Format = DXGI_FORMAT_NV12;
desc_nv12_cpu_copy.SampleDesc.Count = 1;
desc_nv12_cpu_copy.SampleDesc.Quality = 0;
desc_nv12_cpu_copy.BindFlags = 0;
desc_nv12_cpu_copy.Usage = D3D11_USAGE_STAGING;
desc_nv12_cpu_copy.CPUAccessFlags = /*D3D11_CPU_ACCESS_WRITE | */D3D11_CPU_ACCESS_READ;
desc_nv12_cpu_copy.MiscFlags = 0;
r = m_pD3D11Dev->CreateTexture2D(&desc_nv12_cpu_copy, 0, &m_pSurfaceNV12_cpu_copy);
if (FAILED(r))
{
throw std::runtime_error("Can't create DX NV12 texture");
}
}
#endif
// initialize OpenCL context of OpenCV lib from DirectX
if (cv::ocl::haveOpenCL())
{
m_oclCtx = cv::directx::ocl::initializeContextFromD3D11Device(m_pD3D11Dev);
}
m_oclDevName = cv::ocl::useOpenCL() ?
cv::ocl::Context::getDefault().device(0).name() :
"No OpenCL device";
return EXIT_SUCCESS;
} // create()
// get media data on DX surface for further processing
int get_surface(ID3D11Texture2D** ppSurface, bool use_nv12)
{
HRESULT r;
if (!m_cap.read(m_frame_bgr))
return EXIT_FAILURE;
if (use_nv12)
{
cv::cvtColor(m_frame_bgr, m_frame_i420, cv::COLOR_BGR2YUV_I420);
convert_I420_to_NV12(m_frame_i420, m_frame_nv12, m_width, m_height);
m_pD3D11Ctx->UpdateSubresource(m_pSurfaceNV12, 0, 0, m_frame_nv12.data, (UINT)m_frame_nv12.step[0], (UINT)m_frame_nv12.total());
}
else
{
cv::cvtColor(m_frame_bgr, m_frame_rgba, cv::COLOR_BGR2RGBA);
// process video frame on CPU
UINT subResource = ::D3D11CalcSubresource(0, 0, 1);
D3D11_MAPPED_SUBRESOURCE mappedTex;
r = m_pD3D11Ctx->Map(m_pSurfaceRGBA, subResource, D3D11_MAP_WRITE_DISCARD, 0, &mappedTex);
if (FAILED(r))
{
throw std::runtime_error("surface mapping failed!");
}
cv::Mat m(m_height, m_width, CV_8UC4, mappedTex.pData, mappedTex.RowPitch);
m_frame_rgba.copyTo(m);
m_pD3D11Ctx->Unmap(m_pSurfaceRGBA, subResource);
}
*ppSurface = use_nv12 ? m_pSurfaceNV12 : m_pSurfaceRGBA;
return EXIT_SUCCESS;
} // get_surface()
// process and render media data
int render()
{
try
{
if (m_shutdown)
return EXIT_SUCCESS;
// capture user input once
MODE mode = (m_mode == MODE_GPU_NV12 && !m_nv12_available) ? MODE_GPU_RGBA : m_mode;
HRESULT r;
ID3D11Texture2D* pSurface = 0;
r = get_surface(&pSurface, mode == MODE_GPU_NV12);
if (FAILED(r))
{
throw std::runtime_error("get_surface() failed!");
}
m_timer.reset();
m_timer.start();
switch (mode)
{
case MODE_CPU:
{
// process video frame on CPU
UINT subResource = ::D3D11CalcSubresource(0, 0, 1);
D3D11_MAPPED_SUBRESOURCE mappedTex;
r = m_pD3D11Ctx->Map(pSurface, subResource, D3D11_MAP_WRITE_DISCARD, 0, &mappedTex);
if (FAILED(r))
{
throw std::runtime_error("surface mapping failed!");
}
cv::Mat m(m_height, m_width, CV_8UC4, mappedTex.pData, (int)mappedTex.RowPitch);
if (m_demo_processing)
{
// blur data from D3D11 surface with OpenCV on CPU
cv::blur(m, m, cv::Size(15, 15));
}
m_timer.stop();
cv::String strMode = cv::format("mode: %s", m_modeStr[MODE_CPU].c_str());
cv::String strProcessing = m_demo_processing ? "blur frame" : "copy frame";
cv::String strTime = cv::format("time: %4.3f msec", m_timer.getTimeMilli());
cv::String strDevName = cv::format("OpenCL device: %s", m_oclDevName.c_str());
cv::putText(m, strMode, cv::Point(0, 20), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2);
cv::putText(m, strProcessing, cv::Point(0, 40), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2);
cv::putText(m, strTime, cv::Point(0, 60), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2);
cv::putText(m, strDevName, cv::Point(0, 80), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2);
m_pD3D11Ctx->Unmap(pSurface, subResource);
break;
}
case MODE_GPU_RGBA:
case MODE_GPU_NV12:
{
// process video frame on GPU
cv::UMat u;
cv::directx::convertFromD3D11Texture2D(pSurface, u);
if (m_demo_processing)
{
// blur data from D3D11 surface with OpenCV on GPU with OpenCL
cv::blur(u, u, cv::Size(15, 15));
}
m_timer.stop();
cv::String strMode = cv::format("mode: %s", m_modeStr[mode].c_str());
cv::String strProcessing = m_demo_processing ? "blur frame" : "copy frame";
cv::String strTime = cv::format("time: %4.3f msec", m_timer.getTimeMilli());
cv::String strDevName = cv::format("OpenCL device: %s", m_oclDevName.c_str());
cv::putText(u, strMode, cv::Point(0, 20), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2);
cv::putText(u, strProcessing, cv::Point(0, 40), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2);
cv::putText(u, strTime, cv::Point(0, 60), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2);
cv::putText(u, strDevName, cv::Point(0, 80), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2);
cv::directx::convertToD3D11Texture2D(u, pSurface);
if (mode == MODE_GPU_NV12)
{
// just for rendering, we need to convert NV12 to RGBA.
m_pD3D11Ctx->CopyResource(m_pSurfaceNV12_cpu_copy, m_pSurfaceNV12);
// process video frame on CPU
{
UINT subResource = ::D3D11CalcSubresource(0, 0, 1);
D3D11_MAPPED_SUBRESOURCE mappedTex;
r = m_pD3D11Ctx->Map(m_pSurfaceNV12_cpu_copy, subResource, D3D11_MAP_READ, 0, &mappedTex);
if (FAILED(r))
{
throw std::runtime_error("surface mapping failed!");
}
cv::Mat frame_nv12(m_height + (m_height / 2), m_width, CV_8UC1, mappedTex.pData, mappedTex.RowPitch);
cv::cvtColor(frame_nv12, m_frame_rgba, cv::COLOR_YUV2RGBA_NV12);
m_pD3D11Ctx->Unmap(m_pSurfaceNV12_cpu_copy, subResource);
}
{
UINT subResource = ::D3D11CalcSubresource(0, 0, 1);
D3D11_MAPPED_SUBRESOURCE mappedTex;
r = m_pD3D11Ctx->Map(m_pSurfaceRGBA, subResource, D3D11_MAP_WRITE_DISCARD, 0, &mappedTex);
if (FAILED(r))
{
throw std::runtime_error("surface mapping failed!");
}
cv::Mat m(m_height, m_width, CV_8UC4, mappedTex.pData, mappedTex.RowPitch);
m_frame_rgba.copyTo(m);
m_pD3D11Ctx->Unmap(m_pSurfaceRGBA, subResource);
}
pSurface = m_pSurfaceRGBA;
}
break;
}
} // switch
// traditional DX render pipeline:
// BitBlt surface to backBuffer and flip backBuffer to frontBuffer
m_pD3D11Ctx->CopyResource(m_pBackBuffer, pSurface);
// present the back buffer contents to the display
// switch the back buffer and the front buffer
r = m_pD3D11SwapChain->Present(0, 0);
if (FAILED(r))
{
throw std::runtime_error("switch betweem fronat and back buffers failed!");
}
} // try
catch (const cv::Exception& e)
{
std::cerr << "Exception: " << e.what() << std::endl;
cleanup();
return 10;
}
catch (const std::exception& e)
{
std::cerr << "Exception: " << e.what() << std::endl;
cleanup();
return 11;
}
return EXIT_SUCCESS;
} // render()
int cleanup(void)
{
SAFE_RELEASE(m_pSurfaceRGBA);
SAFE_RELEASE(m_pSurfaceNV12);
SAFE_RELEASE(m_pSurfaceNV12_cpu_copy);
SAFE_RELEASE(m_pBackBuffer);
SAFE_RELEASE(m_pD3D11SwapChain);
SAFE_RELEASE(m_pRenderTarget);
SAFE_RELEASE(m_pD3D11Dev);
SAFE_RELEASE(m_pD3D11Ctx);
D3DSample::cleanup();
return EXIT_SUCCESS;
} // cleanup()
protected:
void convert_I420_to_NV12(cv::Mat& i420, cv::Mat& nv12, int width, int height)
{
nv12.create(i420.rows, i420.cols, CV_8UC1);
unsigned char* pSrcY = i420.data;
unsigned char* pDstY = nv12.data;
size_t srcStep = i420.step[0];
size_t dstStep = nv12.step[0];
{
unsigned char* src;
unsigned char* dst;
// copy Y plane
for (int i = 0; i < height; i++)
{
src = pSrcY + i*srcStep;
dst = pDstY + i*dstStep;
for (int j = 0; j < width; j++)
{
dst[j] = src[j];
}
}
}
{
// copy U/V planes to UV plane
unsigned char* pSrcU;
unsigned char* pSrcV;
unsigned char* pDstUV;
size_t uv_offset = height * dstStep;
for (int i = 0; i < height / 2; i++)
{
pSrcU = pSrcY + height*width + i*(width / 2);
pSrcV = pSrcY + height*width + (height / 2) * (width / 2) + i*(width / 2);
pDstUV = pDstY + uv_offset + i*dstStep;
for (int j = 0; j < width / 2; j++)
{
pDstUV[j*2 + 0] = pSrcU[j];
pDstUV[j*2 + 1] = pSrcV[j];
}
}
}
return;
}
private:
ID3D11Device* m_pD3D11Dev;
IDXGISwapChain* m_pD3D11SwapChain;
ID3D11DeviceContext* m_pD3D11Ctx;
ID3D11Texture2D* m_pBackBuffer;
ID3D11Texture2D* m_pSurfaceRGBA;
ID3D11Texture2D* m_pSurfaceNV12;
ID3D11Texture2D* m_pSurfaceNV12_cpu_copy;
ID3D11RenderTargetView* m_pRenderTarget;
cv::ocl::Context m_oclCtx;
cv::String m_oclPlatformName;
cv::String m_oclDevName;
bool m_nv12_available;
cv::Mat m_frame_i420;
cv::Mat m_frame_nv12;
};
// main func
// Entry point: delegate window creation and the message loop to the shared
// d3d_app<> runner from d3dsample.hpp.
int main(int argc, char** argv)
{
    std::string title("D3D11 interop sample");
    return d3d_app<D3D11WinApp>(argc, argv, title);
}

View File

@ -0,0 +1,314 @@
/*
// A sample program demonstrating interoperability of OpenCV cv::UMat with Direct X surface
// At first, the data obtained from video file or camera and placed onto Direct X surface,
// following mapping of this Direct X surface to OpenCV cv::UMat and call cv::Blur function.
// The result is mapped back to Direct X surface and rendered through Direct X API.
*/
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <d3d9.h>
#include "opencv2/core.hpp"
#include "opencv2/core/directx.hpp"
#include "opencv2/core/ocl.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include "d3dsample.hpp"
#pragma comment (lib, "d3d9.lib")
class D3D9WinApp : public D3DSample
{
public:
D3D9WinApp(int width, int height, std::string& window_name, cv::VideoCapture& cap) :
D3DSample(width, height, window_name, cap) {}
~D3D9WinApp() {}
int create(void)
{
// base initialization
D3DSample::create();
// initialize DirectX
HRESULT r;
m_pD3D9 = ::Direct3DCreate9(D3D_SDK_VERSION);
if (NULL == m_pD3D9)
{
return EXIT_FAILURE;
}
DWORD flags = D3DCREATE_HARDWARE_VERTEXPROCESSING |
D3DCREATE_PUREDEVICE |
D3DCREATE_NOWINDOWCHANGES |
D3DCREATE_MULTITHREADED |
D3DCREATE_FPU_PRESERVE;
D3DPRESENT_PARAMETERS d3dpp;
::ZeroMemory(&d3dpp, sizeof(D3DPRESENT_PARAMETERS));
d3dpp.Windowed = true;
d3dpp.Flags = 0;
d3dpp.BackBufferCount = 0;
d3dpp.BackBufferFormat = D3DFMT_A8R8G8B8;
d3dpp.BackBufferHeight = m_height;
d3dpp.BackBufferWidth = m_width;
d3dpp.MultiSampleType = D3DMULTISAMPLE_NONE;
d3dpp.SwapEffect = D3DSWAPEFFECT_DISCARD;
d3dpp.hDeviceWindow = m_hWnd;
d3dpp.PresentationInterval = D3DPRESENT_INTERVAL_IMMEDIATE;
d3dpp.FullScreen_RefreshRateInHz = D3DPRESENT_RATE_DEFAULT;
r = m_pD3D9->CreateDevice(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, m_hWnd, flags, &d3dpp, &m_pD3D9Dev);
if (FAILED(r))
{
return EXIT_FAILURE;
}
r = m_pD3D9Dev->GetBackBuffer(0, 0, D3DBACKBUFFER_TYPE_MONO, &m_pBackBuffer);
if (FAILED(r))
{
return EXIT_FAILURE;
}
r = m_pD3D9Dev->CreateOffscreenPlainSurface(m_width, m_height, D3DFMT_A8R8G8B8, D3DPOOL_DEFAULT, &m_pSurface, NULL);
if (FAILED(r))
{
std::cerr << "Can't create surface for result" << std::endl;
return EXIT_FAILURE;
}
// initialize OpenCL context of OpenCV lib from DirectX
if (cv::ocl::haveOpenCL())
{
m_oclCtx = cv::directx::ocl::initializeContextFromDirect3DDevice9(m_pD3D9Dev);
}
m_oclDevName = cv::ocl::useOpenCL() ?
cv::ocl::Context::getDefault().device(0).name() :
"No OpenCL device";
return EXIT_SUCCESS;
} // create()
// get media data on DX surface for further processing
int get_surface(LPDIRECT3DSURFACE9* ppSurface)
{
HRESULT r;
if (!m_cap.read(m_frame_bgr))
return EXIT_FAILURE;
cv::cvtColor(m_frame_bgr, m_frame_rgba, cv::COLOR_BGR2BGRA);
D3DLOCKED_RECT memDesc = { 0, NULL };
RECT rc = { 0, 0, m_width, m_height };
r = m_pSurface->LockRect(&memDesc, &rc, 0);
if (FAILED(r))
{
return r;
}
cv::Mat m(m_height, m_width, CV_8UC4, memDesc.pBits, memDesc.Pitch);
// copy video frame data to surface
m_frame_rgba.copyTo(m);
r = m_pSurface->UnlockRect();
if (FAILED(r))
{
return r;
}
*ppSurface = m_pSurface;
return EXIT_SUCCESS;
} // get_surface()
// process and render media data
int render()
{
try
{
if (m_shutdown)
return EXIT_SUCCESS;
// capture user input once
MODE mode = (m_mode == MODE_GPU_NV12) ? MODE_GPU_RGBA : m_mode;
HRESULT r;
LPDIRECT3DSURFACE9 pSurface;
r = get_surface(&pSurface);
if (FAILED(r))
{
return EXIT_FAILURE;
}
m_timer.reset();
m_timer.start();
switch (mode)
{
case MODE_CPU:
{
// process video frame on CPU
D3DLOCKED_RECT memDesc = { 0, NULL };
RECT rc = { 0, 0, m_width, m_height };
r = pSurface->LockRect(&memDesc, &rc, 0);
if (FAILED(r))
{
return EXIT_FAILURE;
}
cv::Mat m(m_height, m_width, CV_8UC4, memDesc.pBits, memDesc.Pitch);
if (m_demo_processing)
{
// blur D3D9 surface with OpenCV on CPU
cv::blur(m, m, cv::Size(15, 15));
}
r = pSurface->UnlockRect();
if (FAILED(r))
{
return EXIT_FAILURE;
}
break;
}
case MODE_GPU_RGBA:
{
// process video frame on GPU
cv::UMat u;
cv::directx::convertFromDirect3DSurface9(pSurface, u);
if (m_demo_processing)
{
// blur D3D9 surface with OpenCV on GPU with OpenCL
cv::blur(u, u, cv::Size(15, 15));
}
cv::directx::convertToDirect3DSurface9(u, pSurface);
break;
}
} // switch
m_timer.stop();
print_info(pSurface, mode, m_timer.getTimeMilli(), m_oclDevName);
// traditional DX render pipeline:
// BitBlt surface to backBuffer and flip backBuffer to frontBuffer
r = m_pD3D9Dev->StretchRect(pSurface, NULL, m_pBackBuffer, NULL, D3DTEXF_NONE);
if (FAILED(r))
{
return EXIT_FAILURE;
}
// present the back buffer contents to the display
r = m_pD3D9Dev->Present(NULL, NULL, NULL, NULL);
if (FAILED(r))
{
return EXIT_FAILURE;
}
} // try
catch (const cv::Exception& e)
{
std::cerr << "Exception: " << e.what() << std::endl;
return 10;
}
return EXIT_SUCCESS;
} // render()
void print_info(LPDIRECT3DSURFACE9 pSurface, int mode, double time, cv::String oclDevName)
{
HDC hDC;
HRESULT r = pSurface->GetDC(&hDC);
if (FAILED(r))
{
return;
}
HFONT hFont = (HFONT)::GetStockObject(SYSTEM_FONT);
HFONT hOldFont = (HFONT)::SelectObject(hDC, hFont);
if (hOldFont)
{
TEXTMETRIC tm;
::GetTextMetrics(hDC, &tm);
char buf[256];
int y = 0;
buf[0] = 0;
sprintf(buf, "mode: %s", m_modeStr[mode].c_str());
::TextOut(hDC, 0, y, buf, (int)strlen(buf));
y += tm.tmHeight;
buf[0] = 0;
sprintf(buf, m_demo_processing ? "blur frame" : "copy frame");
::TextOut(hDC, 0, y, buf, (int)strlen(buf));
y += tm.tmHeight;
buf[0] = 0;
sprintf(buf, "time: %4.1f msec", time);
::TextOut(hDC, 0, y, buf, (int)strlen(buf));
y += tm.tmHeight;
buf[0] = 0;
sprintf(buf, "OpenCL device: %s", oclDevName.c_str());
::TextOut(hDC, 0, y, buf, (int)strlen(buf));
::SelectObject(hDC, hOldFont);
}
r = pSurface->ReleaseDC(hDC);
return;
} // print_info()
int cleanup(void)
{
SAFE_RELEASE(m_pSurface);
SAFE_RELEASE(m_pBackBuffer);
SAFE_RELEASE(m_pD3D9Dev);
SAFE_RELEASE(m_pD3D9);
D3DSample::cleanup();
return EXIT_SUCCESS;
} // cleanup()
private:
LPDIRECT3D9 m_pD3D9;
LPDIRECT3DDEVICE9 m_pD3D9Dev;
LPDIRECT3DSURFACE9 m_pBackBuffer;
LPDIRECT3DSURFACE9 m_pSurface;
cv::ocl::Context m_oclCtx;
cv::String m_oclPlatformName;
cv::String m_oclDevName;
};
// main func
// Entry point: delegate window creation and the message loop to the shared
// d3d_app<> runner from d3dsample.hpp.
int main(int argc, char** argv)
{
    std::string title("D3D9 interop sample");
    return d3d_app<D3D9WinApp>(argc, argv, title);
}

View File

@ -0,0 +1,315 @@
/*
// A sample program demonstrating interoperability of OpenCV cv::UMat with Direct X surface
// At first, the data obtained from video file or camera and placed onto Direct X surface,
// following mapping of this Direct X surface to OpenCV cv::UMat and call cv::Blur function.
// The result is mapped back to Direct X surface and rendered through Direct X API.
*/
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <d3d9.h>
#include "opencv2/core.hpp"
#include "opencv2/core/directx.hpp"
#include "opencv2/core/ocl.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include "d3dsample.hpp"
#pragma comment (lib, "d3d9.lib")
class D3D9ExWinApp : public D3DSample
{
public:
D3D9ExWinApp(int width, int height, std::string& window_name, cv::VideoCapture& cap) :
D3DSample(width, height, window_name, cap) {}
~D3D9ExWinApp() {}
int create(void)
{
// base initialization
D3DSample::create();
// initialize DirectX
HRESULT r;
r = ::Direct3DCreate9Ex(D3D_SDK_VERSION, &m_pD3D9Ex);
if (FAILED(r))
{
return EXIT_FAILURE;
}
DWORD flags = D3DCREATE_HARDWARE_VERTEXPROCESSING |
D3DCREATE_PUREDEVICE |
D3DCREATE_NOWINDOWCHANGES |
D3DCREATE_MULTITHREADED |
D3DCREATE_FPU_PRESERVE;
D3DPRESENT_PARAMETERS d3dpp;
::ZeroMemory(&d3dpp, sizeof(D3DPRESENT_PARAMETERS));
d3dpp.Windowed = true;
d3dpp.Flags = 0;
d3dpp.BackBufferCount = 0;
d3dpp.BackBufferFormat = D3DFMT_A8R8G8B8;
d3dpp.BackBufferHeight = m_height;
d3dpp.BackBufferWidth = m_width;
d3dpp.MultiSampleType = D3DMULTISAMPLE_NONE;
d3dpp.SwapEffect = D3DSWAPEFFECT_DISCARD;
d3dpp.hDeviceWindow = m_hWnd;
d3dpp.PresentationInterval = D3DPRESENT_INTERVAL_IMMEDIATE;
d3dpp.FullScreen_RefreshRateInHz = D3DPRESENT_RATE_DEFAULT;
r = m_pD3D9Ex->CreateDeviceEx(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, m_hWnd, flags, &d3dpp, NULL, &m_pD3D9DevEx);
if (FAILED(r))
{
return EXIT_FAILURE;
}
r = m_pD3D9DevEx->GetBackBuffer(0, 0, D3DBACKBUFFER_TYPE_MONO, &m_pBackBuffer);
if (FAILED(r))
{
return EXIT_FAILURE;
}
r = m_pD3D9DevEx->CreateOffscreenPlainSurface(m_width, m_height, D3DFMT_A8R8G8B8, D3DPOOL_DEFAULT, &m_pSurface, NULL);
if (FAILED(r))
{
std::cerr << "Can't create surface for result" << std::endl;
return EXIT_FAILURE;
}
// initialize OpenCL context of OpenCV lib from DirectX
if (cv::ocl::haveOpenCL())
{
m_oclCtx = cv::directx::ocl::initializeContextFromDirect3DDevice9(m_pD3D9DevEx);
}
m_oclDevName = cv::ocl::useOpenCL() ?
cv::ocl::Context::getDefault().device(0).name() :
"No OpenCL device";
return EXIT_SUCCESS;
} // create()
// get media data on DX surface for further processing
int get_surface(LPDIRECT3DSURFACE9* ppSurface)
{
HRESULT r;
if (!m_cap.read(m_frame_bgr))
return EXIT_FAILURE;
cv::cvtColor(m_frame_bgr, m_frame_rgba, cv::COLOR_BGR2BGRA);
D3DLOCKED_RECT memDesc = { 0, NULL };
RECT rc = { 0, 0, m_width, m_height };
r = m_pSurface->LockRect(&memDesc, &rc, 0);
if (FAILED(r))
{
return r;
}
cv::Mat m(m_height, m_width, CV_8UC4, memDesc.pBits, memDesc.Pitch);
// copy video frame data to surface
m_frame_rgba.copyTo(m);
r = m_pSurface->UnlockRect();
if (FAILED(r))
{
return r;
}
*ppSurface = m_pSurface;
return EXIT_SUCCESS;
} // get_surface()
// process and render media data
int render()
{
try
{
if (m_shutdown)
return EXIT_SUCCESS;
// capture user input once
MODE mode = m_mode == MODE_GPU_NV12 ? MODE_GPU_RGBA : m_mode;
HRESULT r;
LPDIRECT3DSURFACE9 pSurface;
r = get_surface(&pSurface);
if (FAILED(r))
{
return EXIT_FAILURE;
}
m_timer.reset();
m_timer.start();
switch (mode)
{
case MODE_CPU:
{
// process video frame on CPU
D3DLOCKED_RECT memDesc = { 0, NULL };
RECT rc = { 0, 0, m_width, m_height };
r = pSurface->LockRect(&memDesc, &rc, 0);
if (FAILED(r))
{
return EXIT_FAILURE;
}
cv::Mat m(m_height, m_width, CV_8UC4, memDesc.pBits, memDesc.Pitch);
if (m_demo_processing)
{
// blur D3D9 surface with OpenCV on CPU
cv::blur(m, m, cv::Size(15, 15));
}
r = pSurface->UnlockRect();
if (FAILED(r))
{
return EXIT_FAILURE;
}
break;
}
case MODE_GPU_RGBA:
{
// process video frame on GPU
cv::UMat u;
cv::directx::convertFromDirect3DSurface9(pSurface, u);
if (m_demo_processing)
{
// blur D3D9 surface with OpenCV on GPU with OpenCL
cv::blur(u, u, cv::Size(15, 15));
}
cv::directx::convertToDirect3DSurface9(u, pSurface);
break;
}
} // switch
m_timer.stop();
print_info(pSurface, m_mode, m_timer.getTimeMilli(), m_oclDevName);
// traditional DX render pipeline:
// BitBlt surface to backBuffer and flip backBuffer to frontBuffer
r = m_pD3D9DevEx->StretchRect(pSurface, NULL, m_pBackBuffer, NULL, D3DTEXF_NONE);
if (FAILED(r))
{
return EXIT_FAILURE;
}
// present the back buffer contents to the display
r = m_pD3D9DevEx->Present(NULL, NULL, NULL, NULL);
if (FAILED(r))
{
return EXIT_FAILURE;
}
} // try
catch (const cv::Exception& e)
{
std::cerr << "Exception: " << e.what() << std::endl;
return 10;
}
return EXIT_SUCCESS;
} // render()
// Draws the mode name, processing state, frame time and OpenCL device name
// onto the surface via GDI. The overlay is cosmetic, so failures are ignored.
// @param pSurface   surface to draw on (GetDC/ReleaseDC pair scopes the access)
// @param mode       index into m_modeStr (the user-selected mode)
// @param time       processing time of the current frame, milliseconds
// @param oclDevName OpenCL device name to display
void print_info(LPDIRECT3DSURFACE9 pSurface, int mode, double time, cv::String oclDevName)
{
    HDC hDC;
    HRESULT r = pSurface->GetDC(&hDC);
    if (FAILED(r))
    {
        return;
    }

    HFONT hFont = (HFONT)::GetStockObject(SYSTEM_FONT);
    HFONT hOldFont = (HFONT)::SelectObject(hDC, hFont);
    if (hOldFont)
    {
        TEXTMETRIC tm;
        ::GetTextMetrics(hDC, &tm);

        char buf[256];
        int y = 0;

        // snprintf (instead of sprintf) cannot overflow buf, e.g. when the
        // OpenCL device name is long; the printed text is unchanged
        snprintf(buf, sizeof(buf), "mode: %s", m_modeStr[mode].c_str());
        ::TextOut(hDC, 0, y, buf, (int)strlen(buf));

        y += tm.tmHeight;
        // never pass a runtime-selected string as the format argument
        snprintf(buf, sizeof(buf), "%s", m_demo_processing ? "blur frame" : "copy frame");
        ::TextOut(hDC, 0, y, buf, (int)strlen(buf));

        y += tm.tmHeight;
        snprintf(buf, sizeof(buf), "time: %4.1f msec", time);
        ::TextOut(hDC, 0, y, buf, (int)strlen(buf));

        y += tm.tmHeight;
        snprintf(buf, sizeof(buf), "OpenCL device: %s", oclDevName.c_str());
        ::TextOut(hDC, 0, y, buf, (int)strlen(buf));

        // restore the previously selected font before releasing the DC
        ::SelectObject(hDC, hOldFont);
    }

    r = pSurface->ReleaseDC(hDC);
    return;
} // print_info()
// Release all COM objects in reverse order of dependency (surfaces before
// the device, the device before the factory), then let the base class
// tear down the window. SAFE_RELEASE also NULLs each pointer, so a second
// call is harmless.
int cleanup(void)
{
    SAFE_RELEASE(m_pSurface);
    SAFE_RELEASE(m_pBackBuffer);
    SAFE_RELEASE(m_pD3D9DevEx);
    SAFE_RELEASE(m_pD3D9Ex);
    D3DSample::cleanup();
    return EXIT_SUCCESS;
} // cleanup()
private:
    LPDIRECT3D9EX m_pD3D9Ex;          // D3D9Ex factory (created outside this chunk)
    LPDIRECT3DDEVICE9EX m_pD3D9DevEx; // rendering device; render() uses it for StretchRect/Present
    LPDIRECT3DSURFACE9 m_pBackBuffer; // swap-chain back buffer, StretchRect target
    LPDIRECT3DSURFACE9 m_pSurface;    // off-screen surface the video frame is copied into
    cv::ocl::Context m_oclCtx;        // OpenCL context — presumably bound to the D3D9 device; initialized elsewhere
    cv::String m_oclPlatformName;     // OpenCL platform name (informational)
    cv::String m_oclDevName;          // OpenCL device name shown by print_info()
};
// Application entry point: runs the D3D9Ex interop demo through the
// shared d3d_app() driver.
int main(int argc, char** argv)
{
    std::string appTitle("D3D9Ex interop sample");
    return d3d_app<D3D9ExWinApp>(argc, argv, appTitle);
}

View File

@ -0,0 +1,172 @@
/*
// Sample demonstrating interoperability of OpenCV UMat with Direct X surface
// Base class for Direct X application
*/
#include <string>
#include <iostream>
#include <queue>
#include "opencv2/core.hpp"
#include "opencv2/core/directx.hpp"
#include "opencv2/core/ocl.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include "winapp.hpp"
#define SAFE_RELEASE(p) if (p) { p->Release(); p = NULL; }
// Common base for the DirectX interop demos: holds the capture source, the
// processing-mode state and the keyboard handling shared by every sample.
class D3DSample : public WinApp
{
public:
    enum MODE
    {
        MODE_CPU,      // process frames with OpenCV on the CPU
        MODE_GPU_RGBA, // process an RGBA DX surface on the GPU via OpenCL
        MODE_GPU_NV12  // process an NV12 DX surface on the GPU via OpenCL
    };

    D3DSample(int width, int height, std::string& window_name, cv::VideoCapture& cap) :
        WinApp(width, height, window_name),
        m_shutdown(false),
        m_demo_processing(false),
        m_mode(MODE_CPU)
    {
        m_modeStr[0] = cv::String("Processing on CPU");
        m_modeStr[1] = cv::String("Processing on GPU RGBA");
        m_modeStr[2] = cv::String("Processing on GPU NV12");
        m_cap = cap;
    }

    ~D3DSample() {}

    virtual int create() { return WinApp::create(); }

    // each sample implements its own per-frame processing/rendering
    virtual int render() = 0;

    virtual int cleanup()
    {
        m_shutdown = true;
        return WinApp::cleanup();
    }

protected:
    // Hot-key handling shared by all samples; everything else goes to the
    // default window procedure.
    virtual LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam)
    {
        switch (message)
        {
        case WM_CHAR:
            switch (wParam)
            {
            case '1':
                m_mode = MODE_CPU;
                return EXIT_SUCCESS;
            case '2':
                m_mode = MODE_GPU_RGBA;
                return EXIT_SUCCESS;
            case '3':
                m_mode = MODE_GPU_NV12;
                return EXIT_SUCCESS;
            case VK_SPACE:
                m_demo_processing = !m_demo_processing;
                return EXIT_SUCCESS;
            case VK_ESCAPE:
                return cleanup();
            }
            break;

        case WM_CLOSE:
            return cleanup();

        case WM_DESTROY:
            ::PostQuitMessage(0);
            return EXIT_SUCCESS;
        }
        return ::DefWindowProc(hWnd, message, wParam, lParam);
    }

    // do render at idle
    virtual int idle() { return render(); }

protected:
    bool m_shutdown;         // set by cleanup(); tells render() to become a no-op
    bool m_demo_processing;  // SPACE toggle: blur the frame vs. plain copy
    MODE m_mode;             // currently selected processing path
    cv::String m_modeStr[3]; // human-readable mode names, indexed by MODE
    cv::VideoCapture m_cap;  // video source (camera or file)
    cv::Mat m_frame_bgr;     // frame buffers used by derived samples
    cv::Mat m_frame_rgba;    // frame copied to the DX surface by derived samples
    cv::TickMeter m_timer;   // measures per-frame processing time
};
// Command-line options understood by d3d_app() (cv::CommandLineParser syntax).
static const char* keys =
{
    "{c camera | 0 | camera id }"
    "{f file | | movie file name }"
};
template <typename TApp>
int d3d_app(int argc, char** argv, std::string& title)
{
cv::CommandLineParser parser(argc, argv, keys);
std::string file = parser.get<std::string>("file");
int camera_id = parser.get<int>("camera");
parser.about(
"\nA sample program demonstrating interoperability of DirectX and OpenCL with OpenCV.\n\n"
"Hot keys: \n"
" SPACE - turn processing on/off\n"
" 1 - process DX surface through OpenCV on CPU\n"
" 2 - process DX RGBA surface through OpenCV on GPU (via OpenCL)\n"
" 3 - process DX NV12 surface through OpenCV on GPU (via OpenCL)\n"
" ESC - exit\n\n");
parser.printMessage();
cv::VideoCapture cap;
if (file.empty())
cap.open(camera_id);
else
cap.open(file.c_str());
if (!cap.isOpened())
{
printf("can not open camera or video file\n");
return EXIT_FAILURE;
}
int width = (int)cap.get(cv::CAP_PROP_FRAME_WIDTH);
int height = (int)cap.get(cv::CAP_PROP_FRAME_HEIGHT);
std::string wndname = title;
TApp app(width, height, wndname, cap);
try
{
app.create();
return app.run();
}
catch (const cv::Exception& e)
{
std::cerr << "Exception: " << e.what() << std::endl;
return 10;
}
catch (...)
{
std::cerr << "FATAL ERROR: Unknown exception" << std::endl;
return 11;
}
}

View File

@ -0,0 +1,132 @@
/*
// Sample demonstrating interoperability of OpenCV UMat with Direct X surface
// Base class for Windows application
*/
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <string>
#define WINCLASS "WinAppWnd"
class WinApp
{
public:
WinApp(int width, int height, std::string& window_name)
{
m_width = width;
m_height = height;
m_window_name = window_name;
m_hInstance = ::GetModuleHandle(NULL);
m_hWnd = 0;
}
virtual ~WinApp() {}
virtual int create()
{
WNDCLASSEX wcex;
wcex.cbSize = sizeof(WNDCLASSEX);
wcex.style = CS_HREDRAW | CS_VREDRAW;
wcex.lpfnWndProc = &WinApp::StaticWndProc;
wcex.cbClsExtra = 0;
wcex.cbWndExtra = 0;
wcex.hInstance = m_hInstance;
wcex.hIcon = LoadIcon(0, IDI_APPLICATION);
wcex.hCursor = LoadCursor(0, IDC_ARROW);
wcex.hbrBackground = 0;
wcex.lpszMenuName = 0L;
wcex.lpszClassName = WINCLASS;
wcex.hIconSm = 0;
ATOM wc = ::RegisterClassEx(&wcex);
if (!wc)
return -1;
RECT rc = { 0, 0, m_width, m_height };
if(!::AdjustWindowRect(&rc, WS_OVERLAPPEDWINDOW, false))
return -1;
m_hWnd = ::CreateWindow(
(LPCTSTR)wc, m_window_name.c_str(),
WS_OVERLAPPEDWINDOW, CW_USEDEFAULT, CW_USEDEFAULT,
rc.right - rc.left, rc.bottom - rc.top,
NULL, NULL, m_hInstance, (void*)this);
if (!m_hWnd)
return -1;
::ShowWindow(m_hWnd, SW_SHOW);
::UpdateWindow(m_hWnd);
::SetFocus(m_hWnd);
return 0;
} // create()
int run()
{
MSG msg;
::ZeroMemory(&msg, sizeof(msg));
while (msg.message != WM_QUIT)
{
if (::PeekMessage(&msg, NULL, 0U, 0U, PM_REMOVE))
{
::TranslateMessage(&msg);
::DispatchMessage(&msg);
}
else
{
idle();
}
}
return static_cast<int>(msg.wParam);
} // run()
virtual int cleanup()
{
::DestroyWindow(m_hWnd);
::UnregisterClass(WINCLASS, m_hInstance);
return 0;
} // cleanup()
protected:
// dispatch message handling to method of class
static LRESULT CALLBACK StaticWndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam)
{
WinApp* pWnd;
if (message == WM_NCCREATE)
{
LPCREATESTRUCT pCreateStruct = reinterpret_cast<LPCREATESTRUCT>(lParam);
pWnd = static_cast<WinApp*>(pCreateStruct->lpCreateParams);
::SetWindowLongPtr(hWnd, GWLP_USERDATA, reinterpret_cast<LONG_PTR>(pWnd));
}
pWnd = GetObjectFromWindow(hWnd);
if (pWnd)
return pWnd->WndProc(hWnd, message, wParam, lParam);
else
return ::DefWindowProc(hWnd, message, wParam, lParam);
} // StaticWndProc()
inline static WinApp* GetObjectFromWindow(HWND hWnd) { return (WinApp*)::GetWindowLongPtr(hWnd, GWLP_USERDATA); }
// actual wnd message handling
virtual LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam) = 0;
// idle processing
virtual int idle() = 0;
HINSTANCE m_hInstance;
HWND m_hWnd;
int m_width;
int m_height;
std::string m_window_name;
};