feat: switch backend to PaddleOCR-NCNN and the build system to CMake

1. The project backend has been migrated to the PaddleOCR-NCNN algorithm and has passed basic compatibility tests.
2. The project is now organized with CMake; to better accommodate third-party libraries, the QMake project will no longer be provided.
3. Reworked the rights/licensing declaration files and reorganized the code project to minimize the risk of infringement.

Log: switch backend to PaddleOCR-NCNN and the build system to CMake
Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
Author: wangzhengyang
Date: 2022-05-10 09:54:44 +08:00
Parent: ecdd171c6f
Commit: 718c41634f
10018 changed files with 3593797 additions and 186748 deletions


@@ -0,0 +1,270 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Note: all tests here are DISABLED by default due to specific requirements.
// Don't use #if 0 - these tests should at least be checked for compilation.
//
// Usage: opencv_test_videoio --gtest_also_run_disabled_tests --gtest_filter=*videoio_camera*<tested case>*
#include "test_precomp.hpp"
#include <opencv2/core/utils/configuration.private.hpp>
namespace opencv_test { namespace {
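// Reads N frames from an already opened capture, checking that every frame is non-empty and not
// completely black and, optionally, that camera timestamps roughly track the system clock.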
static void test_readFrames(/*const*/ VideoCapture& capture, const int N = 100, Mat* lastFrame = NULL, bool testTimestamps = true)
{
Mat frame;
int64 time0 = cv::getTickCount();
int64 sysTimePrev = time0;
const double cvTickFreq = cv::getTickFrequency();
double camTimePrev = 0.0;
const double fps = capture.get(cv::CAP_PROP_FPS);
const double framePeriod = fps == 0.0 ? 1. : 1.0 / fps;
const bool validTickAndFps = cvTickFreq != 0 && fps != 0.;
testTimestamps &= validTickAndFps;
for (int i = 0; i < N; i++)
{
SCOPED_TRACE(cv::format("frame=%d", i));
capture >> frame;
const int64 sysTimeCurr = cv::getTickCount();
const double camTimeCurr = capture.get(cv::CAP_PROP_POS_MSEC);
ASSERT_FALSE(frame.empty());
// Do we have a previous frame?
if (i > 0 && testTimestamps)
{
const double sysTimeElapsedSecs = (sysTimeCurr - sysTimePrev) / cvTickFreq;
const double camTimeElapsedSecs = (camTimeCurr - camTimePrev) / 1000.;
// Check that the time between two camera frames and two system time calls
// are within 1.5 frame periods of one another.
//
// 1.5x is chosen to accommodate a dropped frame, and an additional 50%
// to account for drift in the scale of the camera and system time domains.
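// For example, at 30 FPS the frame period is ~33.3 ms, so the two elapsed times may differ by up to ~50 ms here.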
EXPECT_NEAR(sysTimeElapsedSecs, camTimeElapsedSecs, framePeriod * 1.5);
}
EXPECT_GT(cvtest::norm(frame, NORM_INF), 0) << "Complete black image has been received";
sysTimePrev = sysTimeCurr;
camTimePrev = camTimeCurr;
}
int64 time1 = cv::getTickCount();
printf("Processed %d frames on %.2f FPS\n", N, (N * cvTickFreq) / (time1 - time0 + 1));
if (lastFrame) *lastFrame = frame.clone();
}
TEST(DISABLED_videoio_camera, basic)
{
VideoCapture capture(0);
ASSERT_TRUE(capture.isOpened());
std::cout << "Camera 0 via " << capture.getBackendName() << " backend" << std::endl;
std::cout << "Frame width: " << capture.get(CAP_PROP_FRAME_WIDTH) << std::endl;
std::cout << " height: " << capture.get(CAP_PROP_FRAME_HEIGHT) << std::endl;
std::cout << "Capturing FPS: " << capture.get(CAP_PROP_FPS) << std::endl;
test_readFrames(capture);
capture.release();
}
// Test that CAP_PROP_CONVERT_RGB remains false (default is true) after other supported properties are set.
// The test uses odd values to make it very likely that the code responsible for recreating the device is triggered.
TEST(DISABLED_videoio_camera, dshow_convert_rgb_persistency)
{
VideoCapture capture(CAP_DSHOW);
ASSERT_TRUE(capture.isOpened());
ASSERT_TRUE(capture.set(CAP_PROP_CONVERT_RGB, 0));
ASSERT_DOUBLE_EQ(capture.get(CAP_PROP_CONVERT_RGB), 0);
capture.set(CAP_PROP_FRAME_WIDTH, 641);
capture.set(CAP_PROP_FRAME_HEIGHT, 481);
capture.set(CAP_PROP_FPS, 31);
capture.set(CAP_PROP_CHANNEL, 1);
capture.set(cv::CAP_PROP_FOURCC, cv::VideoWriter::fourcc('Y', '1', '6', ' '));
std::cout << "Camera 0 via " << capture.getBackendName() << " backend" << std::endl;
std::cout << "Frame width: " << capture.get(CAP_PROP_FRAME_WIDTH) << std::endl;
std::cout << " height: " << capture.get(CAP_PROP_FRAME_HEIGHT) << std::endl;
std::cout << "Capturing FPS: " << capture.get(CAP_PROP_FPS) << std::endl;
ASSERT_DOUBLE_EQ(capture.get(CAP_PROP_CONVERT_RGB), 0);
capture.release();
}
TEST(DISABLED_videoio_camera, v4l_read_mjpg)
{
VideoCapture capture(CAP_V4L2);
ASSERT_TRUE(capture.isOpened());
ASSERT_TRUE(capture.set(CAP_PROP_FOURCC, VideoWriter::fourcc('M', 'J', 'P', 'G')));
std::cout << "Camera 0 via " << capture.getBackendName() << " backend" << std::endl;
std::cout << "Frame width: " << capture.get(CAP_PROP_FRAME_WIDTH) << std::endl;
std::cout << " height: " << capture.get(CAP_PROP_FRAME_HEIGHT) << std::endl;
std::cout << "Capturing FPS: " << capture.get(CAP_PROP_FPS) << std::endl;
int fourcc = (int)capture.get(CAP_PROP_FOURCC);
std::cout << "FOURCC code: " << cv::format("0x%8x", fourcc) << std::endl;
test_readFrames(capture);
capture.release();
}
TEST(DISABLED_videoio_camera, v4l_open_mjpg)
{
VideoCapture capture;
capture.open(0, CAP_V4L2, {
CAP_PROP_FOURCC, VideoWriter::fourcc('M', 'J', 'P', 'G')
});
ASSERT_TRUE(capture.isOpened());
std::cout << "Camera 0 via " << capture.getBackendName() << " backend" << std::endl;
std::cout << "Frame width: " << capture.get(CAP_PROP_FRAME_WIDTH) << std::endl;
std::cout << " height: " << capture.get(CAP_PROP_FRAME_HEIGHT) << std::endl;
std::cout << "Capturing FPS: " << capture.get(CAP_PROP_FPS) << std::endl;
int fourcc = (int)capture.get(CAP_PROP_FOURCC);
std::cout << "FOURCC code: " << cv::format("0x%8x", fourcc) << std::endl;
test_readFrames(capture);
capture.release();
}
TEST(DISABLED_videoio_camera, v4l_open_mjpg_1280x720)
{
VideoCapture capture(0, CAP_V4L2, {
CAP_PROP_FOURCC, VideoWriter::fourcc('M', 'J', 'P', 'G'),
CAP_PROP_FRAME_WIDTH, 1280,
CAP_PROP_FRAME_HEIGHT, 720,
});
ASSERT_TRUE(capture.isOpened());
std::cout << "Camera 0 via " << capture.getBackendName() << " backend" << std::endl;
std::cout << "Frame width: " << capture.get(CAP_PROP_FRAME_WIDTH) << std::endl;
std::cout << " height: " << capture.get(CAP_PROP_FRAME_HEIGHT) << std::endl;
std::cout << "Capturing FPS: " << capture.get(CAP_PROP_FPS) << std::endl;
int fourcc = (int)capture.get(CAP_PROP_FOURCC);
std::cout << "FOURCC code: " << cv::format("0x%8x", fourcc) << std::endl;
test_readFrames(capture);
capture.release();
}
// The following test is for capture devices that use PhysConn_Video_SerialDigital as the crossbar input pin
TEST(DISABLED_videoio_camera, channel6)
{
VideoCapture capture(0);
ASSERT_TRUE(capture.isOpened());
capture.set(CAP_PROP_CHANNEL, 6);
std::cout << "Camera 0 via " << capture.getBackendName() << " backend" << std::endl;
std::cout << "Frame width: " << capture.get(CAP_PROP_FRAME_WIDTH) << std::endl;
std::cout << " height: " << capture.get(CAP_PROP_FRAME_HEIGHT) << std::endl;
std::cout << "Capturing FPS: " << capture.get(CAP_PROP_FPS) << std::endl;
test_readFrames(capture);
capture.release();
}
TEST(DISABLED_videoio_camera, v4l_read_framesize)
{
VideoCapture capture(CAP_V4L2);
ASSERT_TRUE(capture.isOpened());
std::cout << "Camera 0 via " << capture.getBackendName() << " backend" << std::endl;
std::cout << "Frame width: " << capture.get(CAP_PROP_FRAME_WIDTH) << std::endl;
std::cout << " height: " << capture.get(CAP_PROP_FRAME_HEIGHT) << std::endl;
std::cout << "Capturing FPS: " << capture.get(CAP_PROP_FPS) << std::endl;
int fourcc = (int)capture.get(CAP_PROP_FOURCC);
std::cout << "FOURCC code: " << cv::format("0x%8x", fourcc) << std::endl;
test_readFrames(capture, 30);
EXPECT_TRUE(capture.set(CAP_PROP_FRAME_WIDTH, 640));
EXPECT_TRUE(capture.set(CAP_PROP_FRAME_HEIGHT, 480));
std::cout << "Frame width: " << capture.get(CAP_PROP_FRAME_WIDTH) << std::endl;
std::cout << " height: " << capture.get(CAP_PROP_FRAME_HEIGHT) << std::endl;
std::cout << "Capturing FPS: " << capture.get(CAP_PROP_FPS) << std::endl;
Mat frame640x480;
test_readFrames(capture, 30, &frame640x480);
EXPECT_EQ(640, frame640x480.cols);
EXPECT_EQ(480, frame640x480.rows);
EXPECT_TRUE(capture.set(CAP_PROP_FRAME_WIDTH, 1280));
EXPECT_TRUE(capture.set(CAP_PROP_FRAME_HEIGHT, 720));
std::cout << "Frame width: " << capture.get(CAP_PROP_FRAME_WIDTH) << std::endl;
std::cout << " height: " << capture.get(CAP_PROP_FRAME_HEIGHT) << std::endl;
std::cout << "Capturing FPS: " << capture.get(CAP_PROP_FPS) << std::endl;
Mat frame1280x720;
test_readFrames(capture, 30, &frame1280x720);
EXPECT_EQ(1280, frame1280x720.cols);
EXPECT_EQ(720, frame1280x720.rows);
capture.release();
}
static
utils::Paths getTestCameras()
{
static utils::Paths cameras = utils::getConfigurationParameterPaths("OPENCV_TEST_CAMERA_LIST");
return cameras;
}
TEST(DISABLED_videoio_camera, waitAny_V4L)
{
auto cameraNames = getTestCameras();
if (cameraNames.empty())
throw SkipTestException("No list of tested cameras. Use OPENCV_TEST_CAMERA_LIST parameter");
const int totalFrames = 50; // number of expected frames (summary for all cameras)
const int64 timeoutNS = 100 * 1000000;
const Size frameSize(640, 480);
const int fpsDefaultEven = 30;
const int fpsDefaultOdd = 15;
std::vector<VideoCapture> cameras;
for (size_t i = 0; i < cameraNames.size(); ++i)
{
const auto& name = cameraNames[i];
int fps = (int)utils::getConfigurationParameterSizeT(cv::format("OPENCV_TEST_CAMERA%d_FPS", (int)i).c_str(), (i & 1) ? fpsDefaultOdd : fpsDefaultEven);
std::cout << "Camera[" << i << "] = '" << name << "', fps=" << fps << std::endl;
VideoCapture cap(name, CAP_V4L);
ASSERT_TRUE(cap.isOpened()) << name;
EXPECT_TRUE(cap.set(CAP_PROP_FRAME_WIDTH, frameSize.width)) << name;
EXPECT_TRUE(cap.set(CAP_PROP_FRAME_HEIGHT, frameSize.height)) << name;
EXPECT_TRUE(cap.set(CAP_PROP_FPS, fps)) << name;
//launch cameras
Mat firstFrame;
EXPECT_TRUE(cap.read(firstFrame));
EXPECT_EQ(frameSize.width, firstFrame.cols);
EXPECT_EQ(frameSize.height, firstFrame.rows);
cameras.push_back(cap);
}
std::vector<size_t> frameFromCamera(cameraNames.size(), 0);
{
int counter = 0;
std::vector<int> cameraReady;
do
{
EXPECT_TRUE(VideoCapture::waitAny(cameras, cameraReady, timeoutNS));
EXPECT_FALSE(cameraReady.empty());
for (int idx : cameraReady)
{
//std::cout << "Reading frame from camera: " << idx << std::endl;
ASSERT_TRUE(idx >= 0 && (size_t)idx < cameras.size()) << idx;
VideoCapture& c = cameras[idx];
Mat frame;
#if 1
ASSERT_TRUE(c.retrieve(frame)) << idx;
#else
ASSERT_TRUE(c.read(frame)) << idx;
#endif
EXPECT_EQ(frameSize.width, frame.cols) << idx;
EXPECT_EQ(frameSize.height, frame.rows) << idx;
++frameFromCamera[idx];
++counter;
}
}
while(counter < totalFrames);
}
for (size_t i = 0; i < cameraNames.size(); ++i)
{
EXPECT_GT(frameFromCamera[i], (size_t)0) << i;
}
}
}} // namespace


@@ -0,0 +1,88 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
#include "opencv2/videoio/container_avi.private.hpp"
#include <cstdio>
using namespace cv;
namespace opencv_test { namespace {
TEST(videoio_builtin, basic_avi)
{
String filename = BunnyParameters::getFilename(".mjpg.avi");
AVIReadContainer in;
in.initStream(filename);
frame_list frames;
ASSERT_TRUE(in.parseRiff(frames));
EXPECT_EQ(frames.size(), static_cast<unsigned>(BunnyParameters::getCount()));
EXPECT_EQ(in.getWidth(), static_cast<unsigned>(BunnyParameters::getWidth()));
EXPECT_EQ(in.getHeight(), static_cast<unsigned>(BunnyParameters::getHeight()));
EXPECT_EQ(in.getFps(), static_cast<unsigned>(BunnyParameters::getFps()));
}
TEST(videoio_builtin, invalid_avi)
{
String filename = BunnyParameters::getFilename(".avi");
AVIReadContainer in;
in.initStream(filename);
frame_list frames;
EXPECT_FALSE(in.parseRiff(frames));
EXPECT_EQ(frames.size(), static_cast<unsigned>(0));
}
TEST(videoio_builtin, read_write_avi)
{
const String filename = cv::tempfile("test.avi");
const double fps = 100;
const Size sz(800, 600);
const size_t count = 10;
const uchar data[count] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 0xA};
const Codecs codec = MJPEG;
{
AVIWriteContainer out;
ASSERT_TRUE(out.initContainer(filename, fps, sz, true));
ASSERT_TRUE(out.isOpenedStream());
EXPECT_EQ(out.getWidth(), sz.width);
EXPECT_EQ(out.getHeight(), sz.height);
EXPECT_EQ(out.getChannels(), 3);
out.startWriteAVI(1);
{
out.writeStreamHeader(codec); // starts LIST chunk
size_t chunkPointer = out.getStreamPos();
int avi_index = out.getAVIIndex(0, dc);
{
out.startWriteChunk(avi_index);
out.putStreamBytes(data, count);
size_t tempChunkPointer = out.getStreamPos();
size_t moviPointer = out.getMoviPointer();
out.pushFrameOffset(chunkPointer - moviPointer);
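// The stored frame size excludes the 8-byte chunk header (4-byte FourCC id + 4-byte size field).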
out.pushFrameSize(tempChunkPointer - chunkPointer - 8);
out.endWriteChunk();
}
out.endWriteChunk(); // ends LIST chunk
}
out.writeIndex(0, dc);
out.finishWriteAVI();
}
{
AVIReadContainer in;
in.initStream(filename);
frame_list frames;
ASSERT_TRUE(in.parseRiff(frames));
EXPECT_EQ(in.getFps(), fps);
EXPECT_EQ(in.getWidth(), static_cast<unsigned>(sz.width));
EXPECT_EQ(in.getHeight(), static_cast<unsigned>(sz.height));
ASSERT_EQ(frames.size(), static_cast<unsigned>(1));
std::vector<char> actual = in.readFrame(frames.begin());
ASSERT_EQ(actual.size(), count);
for (size_t i = 0; i < count; ++i)
EXPECT_EQ(actual.at(i), data[i]) << "at index " << i;
}
remove(filename.c_str());
}
}} // opencv_test::<anonymous>::


@@ -0,0 +1,129 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
using namespace std;
namespace opencv_test { namespace {
const int FRAME_COUNT = 120;
inline void generateFrame(int i, Mat & frame)
{
::generateFrame(i, FRAME_COUNT, frame);
}
TEST(videoio_dynamic, basic_write)
{
const Size FRAME_SIZE(640, 480);
const double FPS = 100;
const String filename = cv::tempfile(".avi");
const int fourcc = VideoWriter::fourcc('M', 'J', 'P', 'G');
bool fileExists = false;
{
vector<VideoCaptureAPIs> backends = videoio_registry::getWriterBackends();
for (VideoCaptureAPIs be : backends)
{
VideoWriter writer;
writer.open(filename, be, fourcc, FPS, FRAME_SIZE, true);
if (writer.isOpened())
{
Mat frame(FRAME_SIZE, CV_8UC3);
for (int j = 0; j < FRAME_COUNT; ++j)
{
generateFrame(j, frame);
writer << frame;
}
writer.release();
fileExists = true;
}
EXPECT_FALSE(writer.isOpened());
}
}
if (!fileExists)
{
cout << "None of backends has been able to write video file - SKIP reading part" << endl;
return;
}
{
vector<VideoCaptureAPIs> backends = videoio_registry::getStreamBackends();
for (VideoCaptureAPIs be : backends)
{
VideoCapture cap;
cap.open(filename, be);
if(cap.isOpened())
{
int count = 0;
while (true)
{
Mat frame;
if (cap.grab())
{
if (cap.retrieve(frame))
{
++count;
continue;
}
}
break;
}
EXPECT_EQ(count, FRAME_COUNT);
cap.release();
}
EXPECT_FALSE(cap.isOpened());
}
}
remove(filename.c_str());
}
TEST(videoio_dynamic, write_invalid)
{
vector<VideoCaptureAPIs> backends = videoio_registry::getWriterBackends();
for (VideoCaptureAPIs be : backends)
{
SCOPED_TRACE(be);
const string filename = cv::tempfile(".mkv");
VideoWriter writer;
bool res = true;
// Bad FourCC
EXPECT_NO_THROW(res = writer.open(filename, be, VideoWriter::fourcc('A', 'B', 'C', 'D'), 1, Size(640, 480), true));
EXPECT_FALSE(res);
EXPECT_FALSE(writer.isOpened());
// Empty filename
EXPECT_NO_THROW(res = writer.open(String(), be, VideoWriter::fourcc('H', '2', '6', '4'), 1, Size(640, 480), true));
EXPECT_FALSE(res);
EXPECT_FALSE(writer.isOpened());
EXPECT_NO_THROW(res = writer.open(String(), be, VideoWriter::fourcc('M', 'J', 'P', 'G'), 1, Size(640, 480), true));
EXPECT_FALSE(res);
EXPECT_FALSE(writer.isOpened());
// zero FPS
EXPECT_NO_THROW(res = writer.open(filename, be, VideoWriter::fourcc('H', '2', '6', '4'), 0, Size(640, 480), true));
EXPECT_FALSE(res);
EXPECT_FALSE(writer.isOpened());
// cleanup
EXPECT_NO_THROW(writer.release());
remove(filename.c_str());
}
// Generic
{
VideoWriter writer;
bool res = true;
EXPECT_NO_THROW(res = writer.open(std::string(), VideoWriter::fourcc('H', '2', '6', '4'), 1, Size(640, 480)));
EXPECT_FALSE(res);
EXPECT_FALSE(writer.isOpened());
EXPECT_NO_THROW(res = writer.open(std::string(), VideoWriter::fourcc('M', 'J', 'P', 'G'), 1, Size(640, 480)));
EXPECT_FALSE(res);
EXPECT_FALSE(writer.isOpened());
}
}
}} // opencv_test::<anonymous>::


@@ -0,0 +1,519 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
using namespace std;
namespace opencv_test { namespace {
static inline long long getFileSize(const string &filename)
{
ifstream f(filename, ios_base::in | ios_base::binary);
f.seekg(0, ios_base::end);
return f.tellg();
}
typedef tuple<string, string, Size> FourCC_Ext_Size;
typedef testing::TestWithParam< FourCC_Ext_Size > videoio_ffmpeg;
TEST_P(videoio_ffmpeg, write_big)
{
if (!videoio_registry::hasBackend(CAP_FFMPEG))
throw SkipTestException("FFmpeg backend was not found");
const string fourcc = get<0>(GetParam());
const string ext = get<1>(GetParam());
const Size sz = get<2>(GetParam());
const double time_sec = 1;
const double fps = 25;
ostringstream buf;
buf << "write_big_" << fourcc << "." << ext;
const string filename = tempfile(buf.str().c_str());
VideoWriter writer(filename, CAP_FFMPEG, fourccFromString(fourcc), fps, sz);
if (ext == "mp4" && fourcc == "H264" && !writer.isOpened())
{
throw cvtest::SkipTestException("H264/mp4 codec is not supported - SKIP");
}
ASSERT_TRUE(writer.isOpened());
Mat img(sz, CV_8UC3, Scalar::all(0));
const int coeff = cvRound(min(sz.width, sz.height)/(fps * time_sec));
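// Draw a small filled square that steps along the diagonal and fades with each frame, so every frame differs.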
for (int i = 0 ; i < static_cast<int>(fps * time_sec); i++ )
{
rectangle(img,
Point2i(coeff * i, coeff * i),
Point2i(coeff * (i + 1), coeff * (i + 1)),
Scalar::all(255 * (1.0 - static_cast<double>(i) / (fps * time_sec * 2))),
-1);
writer << img;
}
writer.release();
EXPECT_GT(getFileSize(filename), 8192);
remove(filename.c_str());
}
static const Size bigSize(4096, 4096);
const FourCC_Ext_Size entries[] =
{
make_tuple("", "avi", bigSize),
make_tuple("DX50", "avi", bigSize),
make_tuple("FLV1", "avi", bigSize),
make_tuple("H261", "avi", Size(352, 288)),
make_tuple("H263", "avi", Size(704, 576)),
make_tuple("I420", "avi", bigSize),
make_tuple("MJPG", "avi", bigSize),
make_tuple("mp4v", "avi", bigSize),
make_tuple("MPEG", "avi", Size(720, 576)),
make_tuple("XVID", "avi", bigSize),
make_tuple("H264", "mp4", Size(4096, 2160))
};
INSTANTIATE_TEST_CASE_P(videoio, videoio_ffmpeg, testing::ValuesIn(entries));
//==========================================================================
TEST(videoio_ffmpeg, image)
{
if (!videoio_registry::hasBackend(CAP_FFMPEG))
throw SkipTestException("FFmpeg backend was not found");
const string filename = findDataFile("readwrite/ordinary.bmp");
Mat image = imread(filename, IMREAD_COLOR);
ASSERT_FALSE(image.empty());
VideoCapture cap(filename, CAP_FFMPEG);
ASSERT_TRUE(cap.isOpened());
Mat frame1, frame2;
cap >> frame1 >> frame2;
ASSERT_FALSE(frame1.empty());
ASSERT_TRUE(frame2.empty());
ASSERT_EQ(0, cvtest::norm(image, frame1, NORM_INF));
}
//==========================================================================
typedef tuple<VideoCaptureAPIs, string, string, string, string, string> videoio_container_params_t;
typedef testing::TestWithParam< videoio_container_params_t > videoio_container;
TEST_P(videoio_container, read)
{
const VideoCaptureAPIs api = get<0>(GetParam());
if (!videoio_registry::hasBackend(api))
throw SkipTestException("Backend was not found");
const string path = get<1>(GetParam());
const string ext = get<2>(GetParam());
const string ext_raw = get<3>(GetParam());
const string codec = get<4>(GetParam());
const string pixelFormat = get<5>(GetParam());
const string fileName = path + "." + ext;
const string fileNameOut = tempfile(cv::format("test_container_stream.%s", ext_raw.c_str()).c_str());
// Write the encoded video stream (read in raw/demux mode) to a tmp file
size_t totalBytes = 0;
{
VideoCapture container(findDataFile(fileName), api);
if (!container.isOpened())
throw SkipTestException("Video stream is not supported");
if (!container.set(CAP_PROP_FORMAT, -1)) // turn off video decoder (extract stream)
throw SkipTestException("Fetching of RAW video streams is not supported");
ASSERT_EQ(-1.f, container.get(CAP_PROP_FORMAT)); // check
EXPECT_EQ(codec, fourccToString((int)container.get(CAP_PROP_FOURCC)));
EXPECT_EQ(pixelFormat, fourccToString((int)container.get(CAP_PROP_CODEC_PIXEL_FORMAT)));
std::ofstream file(fileNameOut.c_str(), ios::out | ios::trunc | std::ios::binary);
Mat raw_data;
while (true)
{
container >> raw_data;
size_t size = raw_data.total();
if (raw_data.empty())
break;
ASSERT_EQ(CV_8UC1, raw_data.type());
ASSERT_LE(raw_data.dims, 2);
ASSERT_EQ(raw_data.rows, 1);
ASSERT_EQ((size_t)raw_data.cols, raw_data.total());
ASSERT_TRUE(raw_data.isContinuous());
totalBytes += size;
file.write(reinterpret_cast<char*>(raw_data.data), size);
ASSERT_FALSE(file.fail());
}
ASSERT_GE(totalBytes, (size_t)65536) << "Encoded stream is too small";
}
std::cout << "Checking extracted video stream: " << fileNameOut << " (size: " << totalBytes << " bytes)" << std::endl;
// Check decoded frames read from original media are equal to frames decoded from tmp file
{
VideoCapture capReference(findDataFile(fileName), api);
ASSERT_TRUE(capReference.isOpened());
VideoCapture capActual(fileNameOut.c_str(), api);
ASSERT_TRUE(capActual.isOpened());
Mat reference, actual;
int nframes = 0, n_err = 0;
while (capReference.read(reference) && n_err < 3)
{
nframes++;
ASSERT_TRUE(capActual.read(actual)) << nframes;
EXPECT_EQ(0, cvtest::norm(actual, reference, NORM_INF)) << "frame=" << nframes << " err=" << ++n_err;
}
ASSERT_GT(nframes, 0);
}
ASSERT_EQ(0, remove(fileNameOut.c_str()));
}
const videoio_container_params_t videoio_container_params[] =
{
videoio_container_params_t(CAP_FFMPEG, "video/big_buck_bunny", "h264", "h264", "h264", "I420"),
videoio_container_params_t(CAP_FFMPEG, "video/big_buck_bunny", "h265", "h265", "hevc", "I420"),
videoio_container_params_t(CAP_FFMPEG, "video/big_buck_bunny", "mjpg.avi", "mjpg", "MJPG", "I420"),
//videoio_container_params_t(CAP_FFMPEG, "video/big_buck_bunny", "h264.mkv", "mkv.h264", "h264", "I420"),
//videoio_container_params_t(CAP_FFMPEG, "video/big_buck_bunny", "h265.mkv", "mkv.h265", "hevc", "I420"),
//videoio_container_params_t(CAP_FFMPEG, "video/big_buck_bunny", "h264.mp4", "mp4.avc1", "avc1", "I420"),
//videoio_container_params_t(CAP_FFMPEG, "video/big_buck_bunny", "h265.mp4", "mp4.hev1", "hev1", "I420"),
};
INSTANTIATE_TEST_CASE_P(/**/, videoio_container, testing::ValuesIn(videoio_container_params));
typedef tuple<string, string, int> videoio_skip_params_t;
typedef testing::TestWithParam< videoio_skip_params_t > videoio_skip;
TEST_P(videoio_skip, DISABLED_read) // optional test, may fail in some configurations
{
#if CV_VERSION_MAJOR >= 4
if (!videoio_registry::hasBackend(CAP_FFMPEG))
throw SkipTestException("Backend was not found");
#endif
const string path = get<0>(GetParam());
const string env = get<1>(GetParam());
const int expectedFrameNumber = get<2>(GetParam());
#ifdef _WIN32
_putenv_s("OPENCV_FFMPEG_CAPTURE_OPTIONS", env.c_str());
#else
setenv("OPENCV_FFMPEG_CAPTURE_OPTIONS", env.c_str(), 1);
#endif
VideoCapture container(findDataFile(path), CAP_FFMPEG);
#ifdef _WIN32
_putenv_s("OPENCV_FFMPEG_CAPTURE_OPTIONS", "");
#else
setenv("OPENCV_FFMPEG_CAPTURE_OPTIONS", "", 1);
#endif
ASSERT_TRUE(container.isOpened());
Mat reference;
int nframes = 0, n_err = 0;
while (container.isOpened())
{
if (container.read(reference))
nframes++;
else if (++n_err > 3)
break;
}
EXPECT_EQ(expectedFrameNumber, nframes);
}
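// OPENCV_FFMPEG_CAPTURE_OPTIONS takes key;value pairs; "avdiscard;nonkey" makes FFmpeg drop all
// non-key frames, so only the keyframes (11 instead of 125) are expected to be read.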
const videoio_skip_params_t videoio_skip_params[] =
{
videoio_skip_params_t("video/big_buck_bunny.mp4", "", 125),
videoio_skip_params_t("video/big_buck_bunny.mp4", "avdiscard;nonkey", 11)
};
INSTANTIATE_TEST_CASE_P(/**/, videoio_skip, testing::ValuesIn(videoio_skip_params));
//==========================================================================
static void generateFrame(Mat &frame, unsigned int i, const Point &center, const Scalar &color)
{
frame = Scalar::all(i % 255);
stringstream buf(ios::out);
buf << "frame #" << i;
putText(frame, buf.str(), Point(50, center.y), FONT_HERSHEY_SIMPLEX, 5.0, color, 5, CV_AA);
circle(frame, center, i + 2, color, 2, CV_AA);
}
TEST(videoio_ffmpeg, parallel)
{
if (!videoio_registry::hasBackend(CAP_FFMPEG))
throw SkipTestException("FFmpeg backend was not found");
const int NUM = 4;
const int GRAN = 4;
const Range R(0, NUM);
const Size sz(1020, 900);
const int frameNum = 300;
const Scalar color(Scalar::all(0));
const Point center(sz.height / 2, sz.width / 2);
// Generate filenames
vector<string> files;
for (int i = 0; i < NUM; ++i)
{
ostringstream stream;
stream << i << ".avi";
files.push_back(tempfile(stream.str().c_str()));
}
// Write videos
{
vector< Ptr<VideoWriter> > writers(NUM);
auto makeWriters = [&](const Range &r)
{
for (int i = r.start; i != r.end; ++i)
writers[i] = makePtr<VideoWriter>(files[i],
CAP_FFMPEG,
VideoWriter::fourcc('X','V','I','D'),
25.0f,
sz);
};
parallel_for_(R, makeWriters, GRAN);
for(int i = 0; i < NUM; ++i)
{
ASSERT_TRUE(writers[i]);
ASSERT_TRUE(writers[i]->isOpened());
}
auto writeFrames = [&](const Range &r)
{
for (int j = r.start; j < r.end; ++j)
{
Mat frame(sz, CV_8UC3);
for (int i = 0; i < frameNum; ++i)
{
generateFrame(frame, i, center, color);
writers[j]->write(frame);
}
}
};
parallel_for_(R, writeFrames, GRAN);
}
// Read videos
{
vector< Ptr<VideoCapture> > readers(NUM);
auto makeCaptures = [&](const Range &r)
{
for (int i = r.start; i != r.end; ++i)
readers[i] = makePtr<VideoCapture>(files[i], CAP_FFMPEG);
};
parallel_for_(R, makeCaptures, GRAN);
for(int i = 0; i < NUM; ++i)
{
ASSERT_TRUE(readers[i]);
ASSERT_TRUE(readers[i]->isOpened());
}
auto readFrames = [&](const Range &r)
{
for (int j = r.start; j < r.end; ++j)
{
Mat reference(sz, CV_8UC3);
for (int i = 0; i < frameNum; ++i)
{
Mat actual;
EXPECT_TRUE(readers[j]->read(actual));
EXPECT_FALSE(actual.empty());
generateFrame(reference, i, center, color);
EXPECT_EQ(reference.size(), actual.size());
EXPECT_EQ(reference.depth(), actual.depth());
EXPECT_EQ(reference.channels(), actual.channels());
EXPECT_GE(cvtest::PSNR(actual, reference), 35.0) << "cap" << j << ", frame " << i;
}
}
};
parallel_for_(R, readFrames, GRAN);
}
// Remove files
for(int i = 0; i < NUM; ++i)
{
remove(files[i].c_str());
}
}
typedef std::pair<VideoCaptureProperties, double> cap_property_t;
typedef std::vector<cap_property_t> cap_properties_t;
typedef std::pair<std::string, cap_properties_t> ffmpeg_cap_properties_param_t;
typedef testing::TestWithParam<ffmpeg_cap_properties_param_t> ffmpeg_cap_properties;
#ifdef _WIN32
namespace {
::testing::AssertionResult IsOneOf(double value, double expected1, double expected2)
{
// internal floating point class is used to perform accurate floating point types comparison
typedef ::testing::internal::FloatingPoint<double> FloatingPoint;
FloatingPoint val(value);
if (val.AlmostEquals(FloatingPoint(expected1)) || val.AlmostEquals(FloatingPoint(expected2)))
{
return ::testing::AssertionSuccess();
}
else
{
return ::testing::AssertionFailure()
<< value << " is neither equal to " << expected1 << " nor " << expected2;
}
}
}
#endif
TEST_P(ffmpeg_cap_properties, can_read_property)
{
if (!videoio_registry::hasBackend(CAP_FFMPEG))
throw SkipTestException("FFmpeg backend was not found");
ffmpeg_cap_properties_param_t parameters = GetParam();
const std::string path = parameters.first;
const cap_properties_t properties = parameters.second;
VideoCapture cap(findDataFile(path), CAP_FFMPEG);
ASSERT_TRUE(cap.isOpened()) << "Can not open " << findDataFile(path);
for (std::size_t i = 0; i < properties.size(); ++i)
{
const cap_property_t& prop = properties[i];
const double actualValue = cap.get(static_cast<int>(prop.first));
#ifndef _WIN32
EXPECT_DOUBLE_EQ(actualValue, prop.second)
<< "Property " << static_cast<int>(prop.first) << " has wrong value";
#else
EXPECT_TRUE(IsOneOf(actualValue, prop.second, 0.0))
<< "Property " << static_cast<int>(prop.first) << " has wrong value";
#endif
}
}
cap_properties_t loadBigBuckBunnyFFProbeResults() {
cap_property_t properties[] = { cap_property_t(CAP_PROP_BITRATE, 5851.),
cap_property_t(CAP_PROP_FPS, 24.),
cap_property_t(CAP_PROP_FRAME_HEIGHT, 384.),
cap_property_t(CAP_PROP_FRAME_WIDTH, 672.) };
return cap_properties_t(properties, properties + sizeof(properties) / sizeof(cap_property_t));
}
const ffmpeg_cap_properties_param_t videoio_ffmpeg_properties[] = {
ffmpeg_cap_properties_param_t("video/big_buck_bunny.avi", loadBigBuckBunnyFFProbeResults())
};
INSTANTIATE_TEST_CASE_P(videoio, ffmpeg_cap_properties, testing::ValuesIn(videoio_ffmpeg_properties));
// related issue: https://github.com/opencv/opencv/issues/15499
TEST(videoio, mp4_orientation_meta_auto)
{
if (!videoio_registry::hasBackend(CAP_FFMPEG))
throw SkipTestException("FFmpeg backend was not found");
string video_file = string(cvtest::TS::ptr()->get_data_path()) + "video/big_buck_bunny_rotated.mp4";
VideoCapture cap;
EXPECT_NO_THROW(cap.open(video_file, CAP_FFMPEG));
ASSERT_TRUE(cap.isOpened()) << "Can't open the video: " << video_file << " with backend " << CAP_FFMPEG << std::endl;
cap.set(CAP_PROP_ORIENTATION_AUTO, true);
if (cap.get(CAP_PROP_ORIENTATION_AUTO) == 0)
throw SkipTestException("FFmpeg frame rotation metadata is not supported");
Size actual;
EXPECT_NO_THROW(actual = Size((int)cap.get(CAP_PROP_FRAME_WIDTH),
(int)cap.get(CAP_PROP_FRAME_HEIGHT)));
EXPECT_EQ(384, actual.width);
EXPECT_EQ(672, actual.height);
Mat frame;
cap >> frame;
ASSERT_EQ(384, frame.cols);
ASSERT_EQ(672, frame.rows);
}
// related issue: https://github.com/opencv/opencv/issues/15499
TEST(videoio, mp4_orientation_no_rotation)
{
if (!videoio_registry::hasBackend(CAP_FFMPEG))
throw SkipTestException("FFmpeg backend was not found");
string video_file = string(cvtest::TS::ptr()->get_data_path()) + "video/big_buck_bunny_rotated.mp4";
VideoCapture cap;
EXPECT_NO_THROW(cap.open(video_file, CAP_FFMPEG));
cap.set(CAP_PROP_ORIENTATION_AUTO, 0);
ASSERT_TRUE(cap.isOpened()) << "Can't open the video: " << video_file << " with backend " << CAP_FFMPEG << std::endl;
ASSERT_FALSE(cap.get(CAP_PROP_ORIENTATION_AUTO));
Size actual;
EXPECT_NO_THROW(actual = Size((int)cap.get(CAP_PROP_FRAME_WIDTH),
(int)cap.get(CAP_PROP_FRAME_HEIGHT)));
EXPECT_EQ(672, actual.width);
EXPECT_EQ(384, actual.height);
Mat frame;
cap >> frame;
ASSERT_EQ(672, frame.cols);
ASSERT_EQ(384, frame.rows);
}
static void ffmpeg_check_read_raw(VideoCapture& cap)
{
ASSERT_TRUE(cap.isOpened()) << "Can't open the video";
Mat data;
cap >> data;
EXPECT_EQ(CV_8UC1, data.type()) << "CV_8UC1 != " << typeToString(data.type());
EXPECT_TRUE(data.rows == 1 || data.cols == 1) << data.size;
EXPECT_EQ((size_t)29729, data.total());
cap >> data;
EXPECT_EQ(CV_8UC1, data.type()) << "CV_8UC1 != " << typeToString(data.type());
EXPECT_TRUE(data.rows == 1 || data.cols == 1) << data.size;
EXPECT_EQ((size_t)37118, data.total());
}
TEST(videoio_ffmpeg, open_with_property)
{
if (!videoio_registry::hasBackend(CAP_FFMPEG))
throw SkipTestException("FFmpeg backend was not found");
string video_file = findDataFile("video/big_buck_bunny.mp4");
VideoCapture cap;
EXPECT_NO_THROW(cap.open(video_file, CAP_FFMPEG, {
CAP_PROP_FORMAT, -1 // demux only
}));
ffmpeg_check_read_raw(cap);
}
TEST(videoio_ffmpeg, create_with_property)
{
if (!videoio_registry::hasBackend(CAP_FFMPEG))
throw SkipTestException("FFmpeg backend was not found");
string video_file = findDataFile("video/big_buck_bunny.mp4");
VideoCapture cap(video_file, CAP_FFMPEG, {
CAP_PROP_FORMAT, -1 // demux only
});
ffmpeg_check_read_raw(cap);
}
TEST(videoio_ffmpeg, create_with_property_badarg)
{
if (!videoio_registry::hasBackend(CAP_FFMPEG))
throw SkipTestException("FFmpeg backend was not found");
string video_file = findDataFile("video/big_buck_bunny.mp4");
VideoCapture cap(video_file, CAP_FFMPEG, {
CAP_PROP_FORMAT, -2 // invalid
});
EXPECT_FALSE(cap.isOpened());
}
}} // namespace


@@ -0,0 +1,154 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test { namespace {
typedef tuple< string, Size, Size, int > Param;
typedef testing::TestWithParam< Param > videoio_gstreamer;
TEST_P(videoio_gstreamer, read_check)
{
if (!videoio_registry::hasBackend(CAP_GSTREAMER))
throw SkipTestException("GStreamer backend was not found");
string format = get<0>(GetParam());
Size frame_size = get<1>(GetParam());
Size mat_size = get<2>(GetParam());
int convertToRGB = get<3>(GetParam());
int count_frames = 10;
std::ostringstream pipeline;
pipeline << "videotestsrc pattern=ball num-buffers=" << count_frames << " ! " << format;
pipeline << ", width=" << frame_size.width << ", height=" << frame_size.height << " ! appsink";
VideoCapture cap;
ASSERT_NO_THROW(cap.open(pipeline.str(), CAP_GSTREAMER));
ASSERT_TRUE(cap.isOpened());
Mat buffer, decode_frame, gray_frame, rgb_frame;
for (int i = 0; i < count_frames; ++i)
{
cap >> buffer;
decode_frame = (format == "jpegenc ! image/jpeg") ? imdecode(buffer, IMREAD_UNCHANGED) : buffer;
EXPECT_EQ(mat_size, decode_frame.size());
cvtColor(decode_frame, rgb_frame, convertToRGB);
cvtColor(rgb_frame, gray_frame, COLOR_RGB2GRAY);
if (gray_frame.depth() == CV_16U)
{
gray_frame.convertTo(gray_frame, CV_8U, 255.0/65535);
}
vector<Vec3f> circles;
HoughCircles(gray_frame, circles, HOUGH_GRADIENT, 1, gray_frame.rows/16, 100, 30, 1, 30 );
if (circles.size() == 1)
{
EXPECT_NEAR(18.5, circles[0][2], 1.0);
}
else
{
ADD_FAILURE() << "Found " << circles.size() << " on frame " << i ;
}
}
{
Mat frame;
cap >> frame;
EXPECT_TRUE(frame.empty());
}
cap.release();
ASSERT_FALSE(cap.isOpened());
}
static const Param test_data[] = {
make_tuple("video/x-raw, format=BGR" , Size(640, 480), Size(640, 480), COLOR_BGR2RGB),
make_tuple("video/x-raw, format=BGRA" , Size(640, 480), Size(640, 480), COLOR_BGRA2RGB),
make_tuple("video/x-raw, format=RGBA" , Size(640, 480), Size(640, 480), COLOR_RGBA2RGB),
make_tuple("video/x-raw, format=BGRx" , Size(640, 480), Size(640, 480), COLOR_BGRA2RGB),
make_tuple("video/x-raw, format=RGBx" , Size(640, 480), Size(640, 480), COLOR_RGBA2RGB),
make_tuple("video/x-raw, format=GRAY8", Size(640, 480), Size(640, 480), COLOR_GRAY2RGB),
make_tuple("video/x-raw, format=UYVY" , Size(640, 480), Size(640, 480), COLOR_YUV2RGB_UYVY),
make_tuple("video/x-raw, format=YUY2" , Size(640, 480), Size(640, 480), COLOR_YUV2RGB_YUY2),
make_tuple("video/x-raw, format=YVYU" , Size(640, 480), Size(640, 480), COLOR_YUV2RGB_YVYU),
make_tuple("video/x-raw, format=NV12" , Size(640, 480), Size(640, 720), COLOR_YUV2RGB_NV12),
make_tuple("video/x-raw, format=NV21" , Size(640, 480), Size(640, 720), COLOR_YUV2RGB_NV21),
make_tuple("video/x-raw, format=YV12" , Size(640, 480), Size(640, 720), COLOR_YUV2RGB_YV12),
make_tuple("video/x-raw, format=I420" , Size(640, 480), Size(640, 720), COLOR_YUV2RGB_I420),
make_tuple("video/x-bayer" , Size(640, 480), Size(640, 480), COLOR_BayerBG2RGB),
make_tuple("jpegenc ! image/jpeg" , Size(640, 480), Size(640, 480), COLOR_BGR2RGB),
// unaligned cases, stride information must be used
make_tuple("video/x-raw, format=BGR" , Size(322, 242), Size(322, 242), COLOR_BGR2RGB),
make_tuple("video/x-raw, format=GRAY8", Size(322, 242), Size(322, 242), COLOR_GRAY2RGB),
make_tuple("video/x-raw, format=NV12" , Size(322, 242), Size(322, 363), COLOR_YUV2RGB_NV12),
make_tuple("video/x-raw, format=NV21" , Size(322, 242), Size(322, 363), COLOR_YUV2RGB_NV21),
make_tuple("video/x-raw, format=YV12" , Size(322, 242), Size(322, 363), COLOR_YUV2RGB_YV12),
make_tuple("video/x-raw, format=I420" , Size(322, 242), Size(322, 363), COLOR_YUV2RGB_I420),
// 16 bit
make_tuple("video/x-raw, format=GRAY16_LE", Size(640, 480), Size(640, 480), COLOR_GRAY2RGB),
make_tuple("video/x-raw, format=GRAY16_BE", Size(640, 480), Size(640, 480), COLOR_GRAY2RGB),
};
INSTANTIATE_TEST_CASE_P(videoio, videoio_gstreamer, testing::ValuesIn(test_data));
TEST(videoio_gstreamer, unsupported_pipeline)
{
if (!videoio_registry::hasBackend(CAP_GSTREAMER))
throw SkipTestException("GStreamer backend was not found");
// could not link videoconvert0 to matroskamux0, matroskamux0 can't handle caps video/x-raw, format=(string)RGBA
std::string pipeline = "appsrc ! videoconvert ! video/x-raw, format=(string)RGBA ! matroskamux ! filesink location=test.mkv";
Size frame_size(640, 480);
VideoWriter writer;
EXPECT_NO_THROW(writer.open(pipeline, CAP_GSTREAMER, 0/*fourcc*/, 30/*fps*/, frame_size, true));
EXPECT_FALSE(writer.isOpened());
// no frames
EXPECT_NO_THROW(writer.release());
}
TEST(videoio_gstreamer, gray16_writing)
{
if (!videoio_registry::hasBackend(CAP_GSTREAMER))
throw SkipTestException("GStreamer backend was not found");
Size frame_size(320, 240);
// generate a noise frame
Mat frame = Mat(frame_size, CV_16U);
randu(frame, 0, 65535);
// generate a temp filename, and fix path separators to how GStreamer expects them
cv::String temp_file = cv::tempfile(".raw");
std::replace(temp_file.begin(), temp_file.end(), '\\', '/');
// write noise frame to file using GStreamer
std::ostringstream writer_pipeline;
writer_pipeline << "appsrc ! filesink location=" << temp_file;
std::vector<int> params {
VIDEOWRITER_PROP_IS_COLOR, 0/*false*/,
VIDEOWRITER_PROP_DEPTH, CV_16U
};
VideoWriter writer;
ASSERT_NO_THROW(writer.open(writer_pipeline.str(), CAP_GSTREAMER, 0/*fourcc*/, 30/*fps*/, frame_size, params));
ASSERT_TRUE(writer.isOpened());
ASSERT_NO_THROW(writer.write(frame));
ASSERT_NO_THROW(writer.release());
// read noise frame back in
Mat written_frame(frame_size, CV_16U);
std::ifstream fs(temp_file, std::ios::in | std::ios::binary);
fs.read((char*)written_frame.ptr(0), frame_size.width * frame_size.height * 2);
ASSERT_TRUE(fs);
fs.close();
// compare to make sure it's identical
EXPECT_EQ(0, cv::norm(frame, written_frame, NORM_INF));
// remove temp file
EXPECT_EQ(0, remove(temp_file.c_str()));
}
}} // namespace


@@ -0,0 +1,25 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
#include <opencv2/core/utils/logger.hpp>
#if defined(HAVE_HPX)
#include <hpx/hpx_main.hpp>
#endif
static
void initTests()
{
#ifndef WINRT // missing getenv
const std::vector<cv::VideoCaptureAPIs> backends = cv::videoio_registry::getStreamBackends();
const char* requireFFmpeg = getenv("OPENCV_TEST_VIDEOIO_BACKEND_REQUIRE_FFMPEG");
if (requireFFmpeg && !isBackendAvailable(cv::CAP_FFMPEG, backends))
{
CV_LOG_FATAL(NULL, "OpenCV-Test: required FFmpeg backend is not available (broken plugin?). STOP.");
exit(1);
}
#endif
}
CV_TEST_MAIN("highgui", initTests())


@@ -0,0 +1,165 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "test_precomp.hpp"
namespace opencv_test { namespace {
TEST(videoio_mfx, read_invalid)
{
if (!videoio_registry::hasBackend(CAP_INTEL_MFX))
throw SkipTestException("MediaSDK backend was not found");
VideoCapture cap;
ASSERT_NO_THROW(cap.open("nonexistent-file", CAP_INTEL_MFX));
ASSERT_FALSE(cap.isOpened());
Mat img;
ASSERT_NO_THROW(cap >> img);
ASSERT_TRUE(img.empty());
}
TEST(videoio_mfx, write_invalid)
{
if (!videoio_registry::hasBackend(CAP_INTEL_MFX))
throw SkipTestException("MediaSDK backend was not found");
const string filename = cv::tempfile(".264");
VideoWriter writer;
bool res = true;
ASSERT_NO_THROW(res = writer.open(filename, CAP_INTEL_MFX, VideoWriter::fourcc('H', '2', '6', '4'), 1, Size(641, 480), true));
EXPECT_FALSE(res);
EXPECT_FALSE(writer.isOpened());
ASSERT_NO_THROW(res = writer.open(filename, CAP_INTEL_MFX, VideoWriter::fourcc('H', '2', '6', '4'), 1, Size(640, 481), true));
EXPECT_FALSE(res);
EXPECT_FALSE(writer.isOpened());
ASSERT_NO_THROW(res = writer.open(filename, CAP_INTEL_MFX, VideoWriter::fourcc('A', 'B', 'C', 'D'), 1, Size(640, 480), true));
EXPECT_FALSE(res);
EXPECT_FALSE(writer.isOpened());
ASSERT_NO_THROW(res = writer.open(String(), CAP_INTEL_MFX, VideoWriter::fourcc('H', '2', '6', '4'), 1, Size(640, 480), true));
EXPECT_FALSE(res);
EXPECT_FALSE(writer.isOpened());
ASSERT_NO_THROW(res = writer.open(filename, CAP_INTEL_MFX, VideoWriter::fourcc('H', '2', '6', '4'), 0, Size(640, 480), true));
EXPECT_FALSE(res);
EXPECT_FALSE(writer.isOpened());
ASSERT_NO_THROW(res = writer.open(filename, CAP_INTEL_MFX, VideoWriter::fourcc('H', '2', '6', '4'), 30, Size(640, 480), true));
ASSERT_TRUE(res);
ASSERT_TRUE(writer.isOpened());
Mat t;
// write some bad frames
t = Mat(Size(1024, 768), CV_8UC3);
EXPECT_NO_THROW(writer << t);
t = Mat(Size(320, 240), CV_8UC3);
EXPECT_NO_THROW(writer << t);
t = Mat(Size(640, 480), CV_8UC2);
EXPECT_NO_THROW(writer << t);
// cleanup
ASSERT_NO_THROW(writer.release());
remove(filename.c_str());
}
//==================================================================================================
const int FRAME_COUNT = 20;
inline void generateFrame(int i, Mat & frame)
{
::generateFrame(i, FRAME_COUNT, frame);
}
inline int fourccByExt(const String &ext)
{
if (ext == ".mpeg2")
return VideoWriter::fourcc('M', 'P', 'G', '2');
else if (ext == ".264")
return VideoWriter::fourcc('H', '2', '6', '4');
else if (ext == ".265")
return VideoWriter::fourcc('H', '2', '6', '5');
return -1;
}
//==================================================================================================
typedef tuple<Size, double, const char *> Size_FPS_Ext;
typedef testing::TestWithParam< Size_FPS_Ext > videoio_mfx;
TEST_P(videoio_mfx, read_write_raw)
{
if (!videoio_registry::hasBackend(CAP_INTEL_MFX))
throw SkipTestException("MediaSDK backend was not found");
const Size FRAME_SIZE = get<0>(GetParam());
const double FPS = get<1>(GetParam());
const char *ext = get<2>(GetParam());
const String filename = cv::tempfile(ext);
const int fourcc = fourccByExt(ext);
bool isColor = true;
std::queue<Mat> goodFrames;
// Write video
VideoWriter writer;
writer.open(filename, CAP_INTEL_MFX, fourcc, FPS, FRAME_SIZE, isColor);
ASSERT_TRUE(writer.isOpened());
Mat frame(FRAME_SIZE, CV_8UC3);
for (int i = 0; i < FRAME_COUNT; ++i)
{
generateFrame(i, frame);
goodFrames.push(frame.clone());
writer << frame;
}
writer.release();
EXPECT_FALSE(writer.isOpened());
// Read video
VideoCapture cap;
cap.open(filename, CAP_INTEL_MFX);
ASSERT_TRUE(cap.isOpened());
EXPECT_EQ(FRAME_SIZE.width, cap.get(CAP_PROP_FRAME_WIDTH));
EXPECT_EQ(FRAME_SIZE.height, cap.get(CAP_PROP_FRAME_HEIGHT));
for (int i = 0; i < FRAME_COUNT; ++i)
{
ASSERT_TRUE(cap.read(frame));
ASSERT_FALSE(frame.empty());
ASSERT_EQ(FRAME_SIZE.width, frame.cols);
ASSERT_EQ(FRAME_SIZE.height, frame.rows);
// verify
ASSERT_NE(goodFrames.size(), 0u);
const Mat &goodFrame = goodFrames.front();
EXPECT_EQ(goodFrame.depth(), frame.depth());
EXPECT_EQ(goodFrame.channels(), frame.channels());
EXPECT_EQ(goodFrame.type(), frame.type());
double psnr = cvtest::PSNR(goodFrame, frame);
if (fourcc == VideoWriter::fourcc('M', 'P', 'G', '2'))
EXPECT_GT(psnr, 31); // experimentally chosen value
else
EXPECT_GT(psnr, 33); // experimentally chosen value
goodFrames.pop();
}
EXPECT_FALSE(cap.read(frame));
EXPECT_TRUE(frame.empty());
cap.release();
EXPECT_FALSE(cap.isOpened());
remove(filename.c_str());
}
inline static std::string videoio_mfx_name_printer(const testing::TestParamInfo<videoio_mfx::ParamType>& info)
{
std::ostringstream out;
const Size sz = get<0>(info.param);
const std::string ext = get<2>(info.param);
out << sz.width << "x" << sz.height << "x" << get<1>(info.param) << "x" << ext.substr(1, ext.size() - 1);
return out.str();
}
INSTANTIATE_TEST_CASE_P(videoio, videoio_mfx,
testing::Combine(
testing::Values(Size(640, 480), Size(638, 478), Size(636, 476), Size(1920, 1080)),
testing::Values(1, 30, 100),
testing::Values(".mpeg2", ".264", ".265")),
videoio_mfx_name_printer);
}} // namespace


@@ -0,0 +1,105 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test { namespace {
enum VideoBackendMode
{
MODE_CAMERA,
MODE_STREAM,
MODE_WRITER,
};
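// Prints whether the given backend is built in or provided as a plugin (including the plugin
// description and its ABI/API versions); query failures are reported as test failures.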
static
void dumpBackendInfo(VideoCaptureAPIs backend, enum VideoBackendMode mode)
{
std::string name;
try
{
name = videoio_registry::getBackendName(backend);
}
catch (const std::exception& e)
{
ADD_FAILURE() << "Can't query name of backend=" << backend << ": " << e.what();
}
catch (...)
{
ADD_FAILURE() << "Can't query name of backend=" << backend << ": unknown C++ exception";
}
bool isBuiltIn = true;
try
{
isBuiltIn = videoio_registry::isBackendBuiltIn(backend);
}
catch (const std::exception& e)
{
ADD_FAILURE() << "Failed isBackendBuiltIn(backend=" << backend << "): " << e.what();
cout << name << " - UNKNOWN TYPE" << endl;
return;
}
if (isBuiltIn)
{
cout << name << " - BUILTIN" << endl;
return;
}
std::string description = "NO_DESCRIPTION";
int version_ABI = 0;
int version_API = 0;
try
{
if (mode == MODE_CAMERA)
description = videoio_registry::getCameraBackendPluginVersion(backend, version_ABI, version_API);
else if (mode == MODE_STREAM)
description = videoio_registry::getStreamBackendPluginVersion(backend, version_ABI, version_API);
else if (mode == MODE_WRITER)
description = videoio_registry::getWriterBackendPluginVersion(backend, version_ABI, version_API);
else
CV_Error(Error::StsInternal, "");
cout << name << " - PLUGIN (" << description << ") ABI=" << version_ABI << " API=" << version_API << endl;
return;
}
catch (const cv::Exception& e)
{
if (e.code == Error::StsNotImplemented)
{
cout << name << " - PLUGIN - NOT LOADED" << endl;
return;
}
ADD_FAILURE() << "Failed getBackendPluginDescription(backend=" << backend << "): " << e.what();
}
catch (const std::exception& e)
{
ADD_FAILURE() << "Failed getBackendPluginDescription(backend=" << backend << "): " << e.what();
}
cout << name << " - PLUGIN (ERROR on quering information)" << endl;
}
TEST(VideoIO_Plugins, query)
{
const std::vector<cv::VideoCaptureAPIs> camera_backends = cv::videoio_registry::getCameraBackends();
cout << "== Camera APIs (" << camera_backends.size() << "):" << endl;
for (auto backend : camera_backends)
{
dumpBackendInfo(backend, MODE_CAMERA);
}
const std::vector<cv::VideoCaptureAPIs> stream_backends = cv::videoio_registry::getStreamBackends();
cout << "== Stream capture APIs (" << stream_backends.size() << "):" << endl;
for (auto backend : stream_backends)
{
dumpBackendInfo(backend, MODE_STREAM);
}
const std::vector<cv::VideoCaptureAPIs> writer_backends = cv::videoio_registry::getWriterBackends();
cout << "== Writer APIs (" << writer_backends.size() << "):" << endl;
for (auto backend : writer_backends)
{
dumpBackendInfo(backend, MODE_WRITER);
}
}
}}


@@ -0,0 +1,111 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
#include <sstream>
#include "opencv2/ts.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/videoio/registry.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/core/private.hpp"
namespace cv {
static inline
std::ostream& operator<<(std::ostream& out, const VideoCaptureAPIs& api)
{
out << cv::videoio_registry::getBackendName(api); return out;
}
static inline
std::ostream& operator<<(std::ostream& out, const VideoAccelerationType& va_type)
{
struct {
VideoAccelerationType va_type;
const char* str;
} va_types[] = {
{VIDEO_ACCELERATION_ANY, "ANY"},
{VIDEO_ACCELERATION_NONE, "NONE"},
{VIDEO_ACCELERATION_D3D11, "D3D11"},
{VIDEO_ACCELERATION_VAAPI, "VAAPI"},
{VIDEO_ACCELERATION_MFX, "MFX"},
};
for (const auto& va : va_types) {
if (va_type == va.va_type) {
out << va.str;
return out;
}
}
out << cv::format("UNKNOWN(0x%ux)", static_cast<unsigned int>(va_type));
return out;
}
static inline void PrintTo(const cv::VideoCaptureAPIs& api, std::ostream* os)
{
*os << cv::videoio_registry::getBackendName(api);
}
} // namespace
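// FourCC helpers: fourccToString(cv::VideoWriter::fourcc('M','J','P','G')) yields "MJPG";
// fourccFromString("MJPG") converts back to the packed integer code.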
inline std::string fourccToString(int fourcc)
{
return cv::format("%c%c%c%c", fourcc & 255, (fourcc >> 8) & 255, (fourcc >> 16) & 255, (fourcc >> 24) & 255);
}
inline int fourccFromString(const std::string &fourcc)
{
if (fourcc.size() != 4) return 0;
return cv::VideoWriter::fourcc(fourcc[0], fourcc[1], fourcc[2], fourcc[3]);
}
inline void generateFrame(int i, int FRAME_COUNT, cv::Mat & frame)
{
using namespace cv;
using namespace std;
int offset = (((i * 5) % FRAME_COUNT) - FRAME_COUNT / 2) * (frame.cols / 2) / FRAME_COUNT;
frame(cv::Rect(0, 0, frame.cols / 2 + offset, frame.rows)) = Scalar(255, 255, 255);
frame(cv::Rect(frame.cols / 2 + offset, 0, frame.cols - frame.cols / 2 - offset, frame.rows)) = Scalar(0, 0, 0);
ostringstream buf; buf << "Frame " << setw(2) << setfill('0') << i + 1;
int baseLine = 0;
Size box = getTextSize(buf.str(), FONT_HERSHEY_COMPLEX, 2, 5, &baseLine);
putText(frame, buf.str(), Point((frame.cols - box.width) / 2, (frame.rows - box.height) / 2 + baseLine),
FONT_HERSHEY_COMPLEX, 2, Scalar(0, 0, 255), 5, LINE_AA);
Point p(i * frame.cols / (FRAME_COUNT - 1), i * frame.rows / (FRAME_COUNT - 1));
circle(frame, p, 50, Scalar(200, 25, 55), 8, LINE_AA);
#if 0
imshow("frame", frame);
waitKey();
#endif
}
class BunnyParameters
{
public:
inline static int getWidth() { return 672; };
inline static int getHeight() { return 384; };
inline static int getFps() { return 24; };
inline static double getTime() { return 5.21; };
inline static int getCount() { return cvRound(getFps() * getTime()); };
inline static std::string getFilename(const std::string &ext)
{
return cvtest::TS::ptr()->get_data_path() + "video/big_buck_bunny" + ext;
}
};
static inline bool isBackendAvailable(cv::VideoCaptureAPIs api, const std::vector<cv::VideoCaptureAPIs>& api_list)
{
for (size_t i = 0; i < api_list.size(); i++)
{
if (api_list[i] == api)
return true;
}
return false;
}
#endif

File diff suppressed because it is too large