feat: switch backend to PaddleOCR-NCNN, switch project to CMake

1. The project backend has been migrated wholesale to the PaddleOCR-NCNN algorithm and has passed basic compatibility tests (see the illustrative sketch below).
2. The project is now organized with CMake; to better accommodate third-party libraries, a QMake project will no longer be provided.
3. The rights-declaration files and the code layout have been reorganized to minimize the risk of infringement.

Log: switch backend to PaddleOCR-NCNN, switch project to CMake
Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
wangzhengyang
2022-05-10 09:54:44 +08:00
parent ecdd171c6f
commit 718c41634f
10018 changed files with 3593797 additions and 186748 deletions
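For context, item 1 above replaces the OCR backend; a minimal sketch of the pipeline it adopts, written against the upstream PaddleOCR Python package purely for illustration (an assumption: the commit itself integrates the NCNN C++ port, and 'sample.jpg' is a placeholder path):

# Illustration only: the commit uses PaddleOCR-NCNN (C++); this sketch uses
# the upstream PaddleOCR Python package to show the same detect-then-recognize flow.
from paddleocr import PaddleOCR

ocr = PaddleOCR(use_angle_cls=True, lang='en')   # loads text detection + recognition models
result = ocr.ocr('sample.jpg', cls=True)         # placeholder image path
for box, (text, confidence) in result[0]:        # one entry per detected text line
    print(text, confidence)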


@@ -0,0 +1,92 @@
#!/usr/bin/env python
'''
face detection using haar cascades
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
def detect(img, cascade):
    # detectMultiScale returns rectangles as (x, y, w, h)
    rects = cascade.detectMultiScale(img, scaleFactor=1.275, minNeighbors=4, minSize=(30, 30),
                                     flags=cv.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    # convert (x, y, w, h) into corner coordinates (x1, y1, x2, y2)
    rects[:,2:] += rects[:,:2]
    return rects
from tests_common import NewOpenCVTests, intersectionRate
class facedetect_test(NewOpenCVTests):
def test_facedetect(self):
cascade_fn = self.repoPath + '/data/haarcascades/haarcascade_frontalface_alt.xml'
nested_fn = self.repoPath + '/data/haarcascades/haarcascade_eye.xml'
cascade = cv.CascadeClassifier(cascade_fn)
nested = cv.CascadeClassifier(nested_fn)
samples = ['samples/data/lena.jpg', 'cv/cascadeandhog/images/mona-lisa.png']
faces = []
eyes = []
testFaces = [
#lena
[[218, 200, 389, 371],
[ 244, 240, 294, 290],
[ 309, 246, 352, 289]],
#lisa
[[167, 119, 307, 259],
[188, 153, 229, 194],
[236, 153, 277, 194]]
]
for sample in samples:
            img = self.get_sample(sample)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
gray = cv.GaussianBlur(gray, (5, 5), 0)
rects = detect(gray, cascade)
faces.append(rects)
if not nested.empty():
for x1, y1, x2, y2 in rects:
roi = gray[y1:y2, x1:x2]
subrects = detect(roi.copy(), nested)
for rect in subrects:
rect[0] += x1
rect[2] += x1
rect[1] += y1
rect[3] += y1
eyes.append(subrects)
faces_matches = 0
eyes_matches = 0
eps = 0.8
for i in range(len(faces)):
for j in range(len(testFaces)):
if intersectionRate(faces[i][0], testFaces[j][0]) > eps:
faces_matches += 1
#check eyes
if len(eyes[i]) == 2:
                    if intersectionRate(eyes[i][0], testFaces[j][1]) > eps and intersectionRate(eyes[i][1], testFaces[j][2]) > eps:
                        eyes_matches += 1
                    elif intersectionRate(eyes[i][1], testFaces[j][1]) > eps and intersectionRate(eyes[i][0], testFaces[j][2]) > eps:
                        eyes_matches += 1
self.assertEqual(faces_matches, 2)
self.assertEqual(eyes_matches, 2)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
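The assertions above lean on tests_common.intersectionRate, whose definition is not part of this diff. A minimal sketch of such an overlap rate, assuming an intersection-over-union measure on (x1, y1, x2, y2) rectangles:

# Sketch under the assumption that intersectionRate is IoU-like;
# the real helper lives in tests_common and may differ in detail.
def iou_rate(a, b):
    ax1, ay1, ax2, ay2 = a
    bx1, by1, bx2, by2 = b
    iw = max(0, min(ax2, bx2) - max(ax1, bx1))   # overlap width (0 if disjoint)
    ih = max(0, min(ay2, by2) - max(ay1, by1))   # overlap height
    inter = iw * ih
    union = (ax2 - ax1) * (ay2 - ay1) + (bx2 - bx1) * (by2 - by1) - inter
    return inter / union if union else 0.0

With eps = 0.8, a detection counts as a match only when it overlaps the ground-truth rectangle almost completely.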


@@ -0,0 +1,65 @@
#!/usr/bin/env python
'''
example to detect upright people in images using HOG features
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
def inside(r, q):
    # True if rectangle r = (x, y, w, h) lies strictly inside rectangle q
    rx, ry, rw, rh = r
    qx, qy, qw, qh = q
    return rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh
from tests_common import NewOpenCVTests, intersectionRate
class peopledetect_test(NewOpenCVTests):
def test_peopledetect(self):
hog = cv.HOGDescriptor()
        hog.setSVMDetector(cv.HOGDescriptor_getDefaultPeopleDetector())
dirPath = 'samples/data/'
samples = ['basketball1.png', 'basketball2.png']
testPeople = [
[[23, 76, 164, 477], [440, 22, 637, 478]],
[[23, 76, 164, 477], [440, 22, 637, 478]]
]
eps = 0.5
for sample in samples:
img = self.get_sample(dirPath + sample, 0)
found, _w = hog.detectMultiScale(img, winStride=(8,8), padding=(32,32), scale=1.05)
found_filtered = []
            for ri, r in enumerate(found):
                for qi, q in enumerate(found):
                    if ri != qi and inside(r, q):
                        break
                else:
                    # for/else: runs only if no break occurred, i.e. r is not
                    # nested inside any other detection
                    found_filtered.append(r)
matches = 0
for i in range(len(found_filtered)):
for j in range(len(testPeople)):
found_rect = (found_filtered[i][0], found_filtered[i][1],
found_filtered[i][0] + found_filtered[i][2],
found_filtered[i][1] + found_filtered[i][3])
if intersectionRate(found_rect, testPeople[j][0]) > eps or intersectionRate(found_rect, testPeople[j][1]) > eps:
matches += 1
self.assertGreater(matches, 0)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
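For reference, the same HOG + linear-SVM people detector can be driven outside the test harness; a minimal standalone sketch ('people.png' is a placeholder path):

import cv2 as cv

hog = cv.HOGDescriptor()
hog.setSVMDetector(cv.HOGDescriptor_getDefaultPeopleDetector())

img = cv.imread('people.png')                    # placeholder path
# winStride/padding/scale mirror the values used in the test above
found, weights = hog.detectMultiScale(img, winStride=(8, 8), padding=(32, 32), scale=1.05)
for x, y, w, h in found:
    cv.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)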


@@ -0,0 +1,52 @@
#!/usr/bin/env python
'''
===============================================================================
QR code detect and decode pipeline.
===============================================================================
'''
import os
import numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests
class qrcode_detector_test(NewOpenCVTests):
def test_detect(self):
img = cv.imread(os.path.join(self.extraTestDataPath, 'cv/qrcode/link_ocv.jpg'))
self.assertFalse(img is None)
detector = cv.QRCodeDetector()
retval, points = detector.detect(img)
self.assertTrue(retval)
self.assertEqual(points.shape, (1, 4, 2))
def test_detect_and_decode(self):
img = cv.imread(os.path.join(self.extraTestDataPath, 'cv/qrcode/link_ocv.jpg'))
self.assertFalse(img is None)
detector = cv.QRCodeDetector()
retval, points, straight_qrcode = detector.detectAndDecode(img)
self.assertEqual(retval, "https://opencv.org/")
self.assertEqual(points.shape, (1, 4, 2))
def test_detect_multi(self):
img = cv.imread(os.path.join(self.extraTestDataPath, 'cv/qrcode/multiple/6_qrcodes.png'))
self.assertFalse(img is None)
detector = cv.QRCodeDetector()
retval, points = detector.detectMulti(img)
self.assertTrue(retval)
self.assertEqual(points.shape, (6, 4, 2))
def test_detect_and_decode_multi(self):
img = cv.imread(os.path.join(self.extraTestDataPath, 'cv/qrcode/multiple/6_qrcodes.png'))
self.assertFalse(img is None)
detector = cv.QRCodeDetector()
retval, decoded_data, points, straight_qrcode = detector.detectAndDecodeMulti(img)
self.assertTrue(retval)
self.assertEqual(len(decoded_data), 6)
self.assertEqual(decoded_data[0], "TWO STEPS FORWARD")
self.assertEqual(decoded_data[1], "EXTRA")
self.assertEqual(decoded_data[2], "SKIP")
self.assertEqual(decoded_data[3], "STEP FORWARD")
self.assertEqual(decoded_data[4], "STEP BACK")
self.assertEqual(decoded_data[5], "QUESTION")
self.assertEqual(points.shape, (6, 4, 2))
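Outside the test harness, cv.QRCodeDetector can be used directly; a minimal sketch ('qrcode.jpg' is a placeholder path):

import cv2 as cv

img = cv.imread('qrcode.jpg')                    # placeholder path
detector = cv.QRCodeDetector()
text, points, straight = detector.detectAndDecode(img)
if points is not None:
    print('decoded:', text)   # empty string means detected but not decoded
else:
    print('no QR code found')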