feat: switch backend to PaddleOCR-NCNN, switch project to CMake

1. Migrated the project backend as a whole to the PaddleOCR-NCNN algorithm; basic compatibility tests have passed (a minimal sketch of such a backend follows the commit metadata below).
2. Reorganized the project with CMake; to better accommodate third-party libraries, a QMake project will no longer be provided.
3. Reworked the license/notice files and the code tree to keep infringement risk to a minimum.

Log: switch backend to PaddleOCR-NCNN, switch project to CMake
Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
Author: wangzhengyang
Date:   2022-05-10 09:54:44 +08:00
Parent: ecdd171c6f
Commit: 718c41634f
10018 changed files with 3593797 additions and 186748 deletions
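The diff below contains test and binding plumbing (OpenCV video-module tests and generator configs); the PaddleOCR-NCNN backend itself, referenced in item 1 of the commit message, is not shown here. As a rough illustration only, the following C++ sketch shows how a backend typically drives an ncnn model. The model file names ("det.param"/"det.bin"), the blob names ("input"/"output"), the 640x640 input size and the normalization constants are assumptions for illustration, not values taken from this commit.

#include <vector>
#include "net.h"   // ncnn; adjust the include path to wherever ncnn is installed

// Minimal sketch: load a (hypothetical) PaddleOCR detection model converted to
// ncnn and run one forward pass on a dummy BGR image.
int main()
{
    ncnn::Net det;
    det.opt.use_vulkan_compute = false;             // plain CPU inference
    if (det.load_param("det.param") != 0 ||         // hypothetical file names
        det.load_model("det.bin") != 0)
        return 1;

    const int src_w = 640, src_h = 480;
    std::vector<unsigned char> bgr(src_w * src_h * 3, 128);   // dummy image data

    // Resize to the assumed network input size and normalize in place.
    ncnn::Mat in = ncnn::Mat::from_pixels_resize(
            bgr.data(), ncnn::Mat::PIXEL_BGR, src_w, src_h, 640, 640);
    const float mean[3] = {123.675f, 116.28f, 103.53f};       // assumed stats
    const float norm[3] = {1 / 58.395f, 1 / 57.12f, 1 / 57.375f};
    in.substract_mean_normalize(mean, norm);

    ncnn::Extractor ex = det.create_extractor();
    ex.input("input", in);                          // blob names are assumptions
    ncnn::Mat out;
    ex.extract("output", out);                      // e.g. a text-region score map
    return out.empty() ? 1 : 0;
}

On the CMake side (item 2), ncnn ships a CMake package, so a consumer can typically pull it in with find_package(ncnn CONFIG REQUIRED) and target_link_libraries(<target> ncnn); treat the exact target name as an assumption to be checked against the ncnn version in use.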


@@ -0,0 +1,16 @@
{
"missing_consts": {
"Video" : {
"private" : [
["CV_LKFLOW_INITIAL_GUESSES", 4 ],
["CV_LKFLOW_GET_MIN_EIGENVALS", 8 ]
]
}
},
"func_arg_fix" : {
"calcOpticalFlowPyrLK" : { "prevPts" : {"ctype" : "vector_Point2f"},
"nextPts" : {"ctype" : "vector_Point2f"},
"status" : {"ctype" : "vector_uchar"},
"err" : {"ctype" : "vector_float"} }
}
}


@@ -0,0 +1,43 @@
package org.opencv.test.video;
import org.opencv.test.OpenCVTestCase;
public class BackgroundSubtractorMOGTest extends OpenCVTestCase {
public void testApplyMatMat() {
fail("Not yet implemented");
/*
BackgroundSubtractorMOG backGroundSubtract = new BackgroundSubtractorMOG();
Point bottomRight = new Point(rgbLena.cols() / 2, rgbLena.rows() / 2);
Point topLeft = new Point(0, 0);
Scalar color = new Scalar(128);
Mat mask = new Mat(rgbLena.size(), CvType.CV_16UC3, new Scalar(1));
Imgproc.rectangle(rgbLena, bottomRight, topLeft, color, Imgproc.FILLED);
backGroundSubtract.apply(rgbLena, mask);
Mat truth = new Mat(rgbLena.size(), rgbLena.type(), new Scalar(0));
Imgproc.rectangle(truth, bottomRight, topLeft, color, Imgproc.FILLED);
assertMatEqual(truth, rgbLena);
*/
}
public void testApplyMatMatDouble() {
fail("Not yet implemented");
}
public void testBackgroundSubtractorMOG() {
fail("Not yet implemented");
}
public void testBackgroundSubtractorMOGIntIntDouble() {
fail("Not yet implemented");
}
public void testBackgroundSubtractorMOGIntIntDoubleDouble() {
fail("Not yet implemented");
}
}


@@ -0,0 +1,38 @@
package org.opencv.test.video;
import org.opencv.test.OpenCVTestCase;
import org.opencv.video.KalmanFilter;
public class KalmanFilterTest extends OpenCVTestCase {
public void testCorrect() {
fail("Not yet implemented");
}
public void testKalmanFilter() {
KalmanFilter kf = new KalmanFilter();
assertNotNull(kf);
}
public void testKalmanFilterIntInt() {
fail("Not yet implemented");
}
public void testKalmanFilterIntIntInt() {
fail("Not yet implemented");
}
public void testKalmanFilterIntIntIntInt() {
fail("Not yet implemented");
}
public void testPredict() {
fail("Not yet implemented");
}
public void testPredictMat() {
fail("Not yet implemented");
}
}


@@ -0,0 +1,39 @@
package org.opencv.test.video;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.CvException;
import org.opencv.core.Mat;
import org.opencv.core.Rect;
import org.opencv.test.OpenCVTestCase;
import org.opencv.video.Tracker;
import org.opencv.video.TrackerGOTURN;
import org.opencv.video.TrackerMIL;
public class TrackerCreateTest extends OpenCVTestCase {
@Override
protected void setUp() throws Exception {
super.setUp();
}
public void testCreateTrackerGOTURN() {
try {
Tracker tracker = TrackerGOTURN.create();
assert(tracker != null);
} catch (CvException e) {
// expected, model files may be missing
}
}
public void testCreateTrackerMIL() {
Tracker tracker = TrackerMIL.create();
assert(tracker != null);
Mat mat = new Mat(100, 100, CvType.CV_8UC1);
Rect rect = new Rect(10, 10, 30, 30);
tracker.init(mat, rect); // should not crash (https://github.com/opencv/opencv/issues/19915)
}
}


@@ -0,0 +1,99 @@
package org.opencv.test.video;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfByte;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.Size;
import org.opencv.test.OpenCVTestCase;
import org.opencv.video.Video;
public class VideoTest extends OpenCVTestCase {
private MatOfFloat err = null;
private int h;
private MatOfPoint2f nextPts = null;
private MatOfPoint2f prevPts = null;
private int shift1;
private int shift2;
private MatOfByte status = null;
private Mat subLena1 = null;
private Mat subLena2 = null;
private int w;
@Override
protected void setUp() throws Exception {
super.setUp();
shift1 = 10;
shift2 = 17;
w = (int)(rgbLena.cols() / 2);
h = (int)(rgbLena.rows() / 2);
subLena1 = rgbLena.submat(shift1, h + shift1, shift1, w + shift1);
subLena2 = rgbLena.submat(shift2, h + shift2, shift2, w + shift2);
prevPts = new MatOfPoint2f(new Point(11d, 8d), new Point(5d, 5d), new Point(10d, 10d));
nextPts = new MatOfPoint2f();
status = new MatOfByte();
err = new MatOfFloat();
}
public void testCalcGlobalOrientation() {
fail("Not yet implemented");
}
public void testCalcMotionGradientMatMatMatDoubleDouble() {
fail("Not yet implemented");
}
public void testCalcMotionGradientMatMatMatDoubleDoubleInt() {
fail("Not yet implemented");
}
public void testCalcOpticalFlowFarneback() {
fail("Not yet implemented");
}
public void testCalcOpticalFlowPyrLKMatMatListOfPointListOfPointListOfByteListOfFloat() {
Video.calcOpticalFlowPyrLK(subLena1, subLena2, prevPts, nextPts, status, err);
assertEquals(3, Core.countNonZero(status));
}
public void testCalcOpticalFlowPyrLKMatMatListOfPointListOfPointListOfByteListOfFloatSize() {
Size sz = new Size(3, 3);
Video.calcOpticalFlowPyrLK(subLena1, subLena2, prevPts, nextPts, status, err, sz, 3);
assertEquals(0, Core.countNonZero(status));
}
public void testCalcOpticalFlowPyrLKMatMatListOfPointListOfPointListOfByteListOfFloatSizeIntTermCriteriaDoubleIntDouble() {
fail("Not yet implemented");
}
public void testCamShift() {
fail("Not yet implemented");
}
public void testEstimateRigidTransform() {
fail("Not yet implemented");
}
public void testMeanShift() {
fail("Not yet implemented");
}
public void testSegmentMotion() {
fail("Not yet implemented");
}
public void testUpdateMotionHistory() {
fail("Not yet implemented");
}
}


@@ -0,0 +1,5 @@
#ifdef HAVE_OPENCV_VIDEO
typedef TrackerMIL::Params TrackerMIL_Params;
typedef TrackerGOTURN::Params TrackerGOTURN_Params;
typedef TrackerDaSiamRPN::Params TrackerDaSiamRPN_Params;
#endif


@@ -0,0 +1,100 @@
#!/usr/bin/env python
'''
Lucas-Kanade homography tracker test
===============================
Uses goodFeaturesToTrack for track initialization and back-tracking for match verification
between frames. Finds homography between reference and current views.
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
#local modules
from tst_scene_render import TestSceneRender
from tests_common import NewOpenCVTests, isPointInRect
lk_params = dict( winSize = (19, 19),
maxLevel = 2,
criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
feature_params = dict( maxCorners = 1000,
qualityLevel = 0.01,
minDistance = 8,
blockSize = 19 )
def checkedTrace(img0, img1, p0, back_threshold = 1.0):
    # Forward-backward check: track p0 from img0 to img1, then track the result
    # back to img0; a point is kept only if its round-trip error stays below
    # back_threshold.
    p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
    p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
    d = abs(p0-p0r).reshape(-1, 2).max(-1)
    status = d < back_threshold
    return p1, status
class lk_homography_test(NewOpenCVTests):
render = None
framesCounter = 0
frame = frame0 = None
p0 = None
p1 = None
gray0 = gray1 = None
numFeaturesInRectOnStart = 0
def test_lk_homography(self):
self.render = TestSceneRender(self.get_sample('samples/data/graf1.png'),
self.get_sample('samples/data/box.png'), noise = 0.1, speed = 1.0)
frame = self.render.getNextFrame()
frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
self.frame0 = frame.copy()
self.p0 = cv.goodFeaturesToTrack(frame_gray, **feature_params)
isForegroundHomographyFound = False
if self.p0 is not None:
self.p1 = self.p0
self.gray0 = frame_gray
self.gray1 = frame_gray
currRect = self.render.getCurrentRect()
for (x,y) in self.p0[:,0]:
if isPointInRect((x,y), currRect):
self.numFeaturesInRectOnStart += 1
while self.framesCounter < 200:
self.framesCounter += 1
frame = self.render.getNextFrame()
frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
if self.p0 is not None:
p2, trace_status = checkedTrace(self.gray1, frame_gray, self.p1)
self.p1 = p2[trace_status].copy()
self.p0 = self.p0[trace_status].copy()
self.gray1 = frame_gray
if len(self.p0) < 4:
self.p0 = None
continue
_H, status = cv.findHomography(self.p0, self.p1, cv.RANSAC, 5.0)
goodPointsInRect = 0
goodPointsOutsideRect = 0
for (_x0, _y0), (x1, y1), good in zip(self.p0[:,0], self.p1[:,0], status[:,0]):
if good:
if isPointInRect((x1,y1), self.render.getCurrentRect()):
goodPointsInRect += 1
else: goodPointsOutsideRect += 1
if goodPointsOutsideRect < goodPointsInRect:
isForegroundHomographyFound = True
self.assertGreater(float(goodPointsInRect) / (self.numFeaturesInRectOnStart + 1), 0.6)
else:
self.p0 = cv.goodFeaturesToTrack(frame_gray, **feature_params)
self.assertEqual(isForegroundHomographyFound, True)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()


@@ -0,0 +1,115 @@
#!/usr/bin/env python
'''
Lucas-Kanade tracker
====================
Lucas-Kanade sparse optical flow demo. Uses goodFeaturesToTrack
for track initialization and back-tracking for match verification
between frames.
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
#local modules
from tst_scene_render import TestSceneRender
from tests_common import NewOpenCVTests, intersectionRate, isPointInRect
lk_params = dict( winSize = (15, 15),
maxLevel = 2,
criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
feature_params = dict( maxCorners = 500,
qualityLevel = 0.3,
minDistance = 7,
blockSize = 7 )
def getRectFromPoints(points):
distances = []
for point in points:
distances.append(cv.norm(point, cv.NORM_L2))
x0, y0 = points[np.argmin(distances)]
x1, y1 = points[np.argmax(distances)]
return np.array([x0, y0, x1, y1])
class lk_track_test(NewOpenCVTests):
track_len = 10
detect_interval = 5
tracks = []
frame_idx = 0
render = None
def test_lk_track(self):
self.render = TestSceneRender(self.get_sample('samples/data/graf1.png'), self.get_sample('samples/data/box.png'))
self.runTracker()
def runTracker(self):
foregroundPointsNum = 0
while True:
frame = self.render.getNextFrame()
frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
if len(self.tracks) > 0:
img0, img1 = self.prev_gray, frame_gray
p0 = np.float32([tr[-1][0] for tr in self.tracks]).reshape(-1, 1, 2)
p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
d = abs(p0-p0r).reshape(-1, 2).max(-1)
good = d < 1
new_tracks = []
for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
if not good_flag:
continue
tr.append([(x, y), self.frame_idx])
if len(tr) > self.track_len:
del tr[0]
new_tracks.append(tr)
self.tracks = new_tracks
if self.frame_idx % self.detect_interval == 0:
goodTracksCount = 0
for tr in self.tracks:
oldRect = self.render.getRectInTime(self.render.timeStep * tr[0][1])
newRect = self.render.getRectInTime(self.render.timeStep * tr[-1][1])
if isPointInRect(tr[0][0], oldRect) and isPointInRect(tr[-1][0], newRect):
goodTracksCount += 1
if self.frame_idx == self.detect_interval:
foregroundPointsNum = goodTracksCount
fgIndex = float(foregroundPointsNum) / (foregroundPointsNum + 1)
fgRate = float(goodTracksCount) / (len(self.tracks) + 1)
if self.frame_idx > 0:
self.assertGreater(fgIndex, 0.9)
self.assertGreater(fgRate, 0.2)
mask = np.zeros_like(frame_gray)
mask[:] = 255
for x, y in [np.int32(tr[-1][0]) for tr in self.tracks]:
cv.circle(mask, (x, y), 5, 0, -1)
p = cv.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
if p is not None:
for x, y in np.float32(p).reshape(-1, 2):
self.tracks.append([[(x, y), self.frame_idx]])
self.frame_idx += 1
self.prev_gray = frame_gray
if self.frame_idx > 300:
break
if __name__ == '__main__':
NewOpenCVTests.bootstrap()


@@ -0,0 +1,19 @@
#!/usr/bin/env python
import os
import numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests, unittest
class tracking_test(NewOpenCVTests):
def test_createTracker(self):
t = cv.TrackerMIL_create()
try:
t = cv.TrackerGOTURN_create()
except cv.error as e:
pass # may fail due to missing DL model files
if __name__ == '__main__':
NewOpenCVTests.bootstrap()