feat: switch backend to PaddleOCR-NCNN, switch project to CMake
1. The project backend has been fully migrated to the PaddleOCR-NCNN algorithm and has passed basic compatibility tests.
2. The project is now organized with CMake; to better accommodate third-party libraries, a QMake project will no longer be provided.
3. The rights/license declaration files and the code tree have been reorganized to minimize the risk of infringement.

Log: switch backend to PaddleOCR-NCNN, switch project to CMake
Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
@@ -0,0 +1,71 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2 as cv

def basicPanoramaStitching(img1Path, img2Path):
    img1 = cv.imread(cv.samples.findFile(img1Path))
    img2 = cv.imread(cv.samples.findFile(img2Path))

    # [camera-pose-from-Blender-at-location-1]
    c1Mo = np.array([[0.9659258723258972, 0.2588190734386444, 0.0, 1.5529145002365112],
                     [0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443],
                     [-0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654],
                     [0, 0, 0, 1]], dtype=np.float64)
    # [camera-pose-from-Blender-at-location-1]

    # [camera-pose-from-Blender-at-location-2]
    c2Mo = np.array([[0.9659258723258972, -0.2588190734386444, 0.0, -1.5529145002365112],
                     [-0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443],
                     [0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654],
                     [0, 0, 0, 1]], dtype=np.float64)
    # [camera-pose-from-Blender-at-location-2]

    # [camera-intrinsics-from-Blender]
    cameraMatrix = np.array([[700.0, 0.0, 320.0], [0.0, 700.0, 240.0], [0, 0, 1]], dtype=np.float32)
    # [camera-intrinsics-from-Blender]

    # [extract-rotation]
    R1 = c1Mo[0:3, 0:3]
    R2 = c2Mo[0:3, 0:3]
    # [extract-rotation]

    # [compute-rotation-displacement]
    # R_2to1 rotates points from camera-2 coordinates into camera-1 coordinates
    R2 = R2.transpose()
    R_2to1 = np.dot(R1, R2)
    # [compute-rotation-displacement]

    # [compute-homography]
    # For a purely rotating camera: H = K * R_2to1 * K^-1, normalized so H[2][2] = 1
    H = cameraMatrix.dot(R_2to1).dot(np.linalg.inv(cameraMatrix))
    H = H / H[2][2]
    # [compute-homography]

    # [stitch]
    img_stitch = cv.warpPerspective(img2, H, (img2.shape[1]*2, img2.shape[0]))
    img_stitch[0:img1.shape[0], 0:img1.shape[1]] = img1
    # [stitch]

    img_space = np.zeros((img1.shape[0], 50, 3), dtype=np.uint8)
    img_compare = cv.hconcat([img1, img_space, img2])

    cv.imshow("Final", img_compare)
    cv.imshow("Panorama", img_stitch)
    cv.waitKey(0)

def main():
    import argparse
    parser = argparse.ArgumentParser(description="Code for homography tutorial. Example 5: basic panorama stitching from a rotating camera.")
    parser.add_argument("-I1", "--image1", help="path to first image", default="Blender_Suzanne1.jpg")
    parser.add_argument("-I2", "--image2", help="path to second image", default="Blender_Suzanne2.jpg")
    args = parser.parse_args()
    print("Panorama Stitching Started")
    basicPanoramaStitching(args.image1, args.image2)
    print("Panorama Stitching Completed Successfully")

if __name__ == '__main__':
    main()
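A note on the model used above: for a camera that rotates about its optical center without translating, pixels in the two views are related by x1 ~ K * R_2to1 * K^-1 * x2, which is exactly the homography the [compute-homography] block builds. A minimal self-contained sketch of that relation (the rot_y helper and the 30-degree example rotation are illustrative assumptions, not part of the sample):

import numpy as np

# Intrinsics as in the tutorial: fx = fy = 700, principal point at the image center
K = np.array([[700.0, 0.0, 320.0],
              [0.0, 700.0, 240.0],
              [0.0, 0.0, 1.0]])

def rot_y(theta):
    # Rotation about the Y axis; stands in for the Blender camera poses
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, 0.0, s], [0.0, 1.0, 0.0], [-s, 0.0, c]])

R1 = rot_y(0.0)
R2 = rot_y(np.deg2rad(30.0))
R_2to1 = np.dot(R1, R2.T)  # same composition as in the sample

H = K.dot(R_2to1).dot(np.linalg.inv(K))
H = H / H[2, 2]

# Map one pixel of image 2 into image 1's frame
x2 = np.array([400.0, 250.0, 1.0])
x1 = H.dot(x2)
x1 = x1 / x1[2]
print(x1[:2])  # pixel coordinates in image 1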
@@ -0,0 +1,74 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2 as cv
import sys

def randomColor():
    color = np.random.randint(0, 255, (1, 3))
    return color[0].tolist()

def perspectiveCorrection(img1Path, img2Path, patternSize):
    img1 = cv.imread(cv.samples.findFile(img1Path))
    img2 = cv.imread(cv.samples.findFile(img2Path))

    # [find-corners]
    ret1, corners1 = cv.findChessboardCorners(img1, patternSize)
    ret2, corners2 = cv.findChessboardCorners(img2, patternSize)
    # [find-corners]

    if not ret1 or not ret2:
        print("Error, cannot find the chessboard corners in both images.")
        sys.exit(-1)

    # [estimate-homography]
    H, _ = cv.findHomography(corners1, corners2)
    print(H)
    # [estimate-homography]

    # [warp-chessboard]
    img1_warp = cv.warpPerspective(img1, H, (img1.shape[1], img1.shape[0]))
    # [warp-chessboard]

    img_draw_warp = cv.hconcat([img2, img1_warp])
    cv.imshow("Desired chessboard view / Warped source chessboard view", img_draw_warp)

    corners1 = corners1.tolist()
    corners1 = [a[0] for a in corners1]

    # [compute-transformed-corners]
    img_draw_matches = cv.hconcat([img1, img2])
    for i in range(len(corners1)):
        pt1 = np.array([corners1[i][0], corners1[i][1], 1])
        pt1 = pt1.reshape(3, 1)
        pt2 = np.dot(H, pt1)
        pt2 = pt2 / pt2[2]
        end = (int(img1.shape[1] + pt2[0]), int(pt2[1]))
        cv.line(img_draw_matches, tuple([int(j) for j in corners1[i]]), end, randomColor(), 2)

    cv.imshow("Draw matches", img_draw_matches)
    cv.waitKey(0)
    # [compute-transformed-corners]

def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-I1', "--image1", help="Path to the first image", default="left02.jpg")
    parser.add_argument('-I2', "--image2", help="Path to the second image", default="left01.jpg")
    # type=int so values supplied on the command line are not passed on as strings
    parser.add_argument('-H', "--height", help="Height of pattern size", type=int, default=6)
    parser.add_argument('-W', "--width", help="Width of pattern size", type=int, default=9)
    args = parser.parse_args()

    img1Path = args.image1
    img2Path = args.image2
    h = args.height
    w = args.width
    perspectiveCorrection(img1Path, img2Path, (w, h))

if __name__ == "__main__":
    main()
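Since findHomography above runs on all detected corners without RANSAC, a quick sanity check on the estimate is the mean reprojection error of the corners; sub-pixel values indicate a good fit. A small sketch (the helper name mean_reprojection_error is hypothetical, not part of the sample):

import numpy as np
import cv2 as cv

def mean_reprojection_error(H, corners_src, corners_dst):
    # corners_* are the Nx1x2 float32 arrays returned by cv.findChessboardCorners
    projected = cv.perspectiveTransform(corners_src, H)
    diffs = projected.reshape(-1, 2) - corners_dst.reshape(-1, 2)
    return float(np.mean(np.linalg.norm(diffs, axis=1)))

# e.g. print(mean_reprojection_error(H, corners1, corners2)) right after [estimate-homography]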
3rdparty/opencv-4.5.4/samples/python/tutorial_code/features2D/akaze_matching/AKAZE_match.py (vendored, new file, 81 lines)
@@ -0,0 +1,81 @@
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
from math import sqrt

## [load]
parser = argparse.ArgumentParser(description='Code for AKAZE local features matching tutorial.')
parser.add_argument('--input1', help='Path to input image 1.', default='graf1.png')
parser.add_argument('--input2', help='Path to input image 2.', default='graf3.png')
parser.add_argument('--homography', help='Path to the homography matrix.', default='H1to3p.xml')
args = parser.parse_args()

img1 = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE)
img2 = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE)
if img1 is None or img2 is None:
    print('Could not open or find the images!')
    exit(0)

fs = cv.FileStorage(cv.samples.findFile(args.homography), cv.FILE_STORAGE_READ)
homography = fs.getFirstTopLevelNode().mat()
## [load]

## [AKAZE]
akaze = cv.AKAZE_create()
kpts1, desc1 = akaze.detectAndCompute(img1, None)
kpts2, desc2 = akaze.detectAndCompute(img2, None)
## [AKAZE]

## [2-nn matching]
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_BRUTEFORCE_HAMMING)
nn_matches = matcher.knnMatch(desc1, desc2, 2)
## [2-nn matching]

## [ratio test filtering]
matched1 = []
matched2 = []
nn_match_ratio = 0.8  # Nearest neighbor matching ratio
for m, n in nn_matches:
    if m.distance < nn_match_ratio * n.distance:
        matched1.append(kpts1[m.queryIdx])
        matched2.append(kpts2[m.trainIdx])
## [ratio test filtering]

## [homography check]
inliers1 = []
inliers2 = []
good_matches = []
inlier_threshold = 2.5  # Distance threshold to identify inliers with homography check
for i, m in enumerate(matched1):
    col = np.ones((3, 1), dtype=np.float64)
    col[0:2, 0] = m.pt

    col = np.dot(homography, col)
    col /= col[2, 0]
    dist = sqrt(pow(col[0, 0] - matched2[i].pt[0], 2) +
                pow(col[1, 0] - matched2[i].pt[1], 2))

    if dist < inlier_threshold:
        good_matches.append(cv.DMatch(len(inliers1), len(inliers2), 0))
        inliers1.append(matched1[i])
        inliers2.append(matched2[i])
## [homography check]

## [draw final matches]
res = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img1, inliers1, img2, inliers2, good_matches, res)
cv.imwrite("akaze_result.png", res)

inlier_ratio = len(inliers1) / float(len(matched1))
print('A-KAZE Matching Results')
print('*******************************')
print('# Keypoints 1: \t', len(kpts1))
print('# Keypoints 2: \t', len(kpts2))
print('# Matches: \t', len(matched1))
print('# Inliers: \t', len(inliers1))
print('# Inliers Ratio: \t', inlier_ratio)

cv.imshow('result', res)
cv.waitKey()
## [draw final matches]
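The inlier check above relies on a ground-truth homography shipped with the tutorial data (H1to3p.xml). When no ground truth is available, a common alternative, sketched here as an assumption rather than part of the sample, is to estimate the homography from the ratio-test matches with RANSAC and use its inlier mask:

import numpy as np
import cv2 as cv

# Assuming matched1 / matched2 are the ratio-test keypoint lists built above
src_pts = np.float32([kp.pt for kp in matched1]).reshape(-1, 1, 2)
dst_pts = np.float32([kp.pt for kp in matched2]).reshape(-1, 1, 2)

H_est, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, ransacReprojThreshold=2.5)
print('RANSAC inliers:', int(mask.sum()), '/', len(matched1))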
@@ -0,0 +1,35 @@
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse

parser = argparse.ArgumentParser(description='Code for Feature Detection tutorial.')
parser.add_argument('--input1', help='Path to input image 1.', default='box.png')
parser.add_argument('--input2', help='Path to input image 2.', default='box_in_scene.png')
args = parser.parse_args()

img1 = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE)
img2 = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE)
if img1 is None or img2 is None:
    print('Could not open or find the images!')
    exit(0)

#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
minHessian = 400
detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints1, descriptors1 = detector.detectAndCompute(img1, None)
keypoints2, descriptors2 = detector.detectAndCompute(img2, None)

#-- Step 2: Matching descriptor vectors with a brute force matcher
# Since SURF is a floating-point descriptor NORM_L2 is used
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_BRUTEFORCE)
matches = matcher.match(descriptors1, descriptors2)

#-- Draw matches
img_matches = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches)

#-- Show detected matches
cv.imshow('Matches', img_matches)

cv.waitKey()
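Drawing every brute-force match tends to clutter the output; a common refinement (illustrative, not part of the sample) is to sort by descriptor distance and draw only the strongest pairs:

# Assuming matches, img1, keypoints1, img2, keypoints2 from the script above
best = sorted(matches, key=lambda m: m.distance)[:50]  # keep the 50 closest descriptor pairs
img_best = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1] + img2.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img1, keypoints1, img2, keypoints2, best, img_best)
cv.imshow('Best 50 matches', img_best)
cv.waitKey()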
@@ -0,0 +1,27 @@
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse

parser = argparse.ArgumentParser(description='Code for Feature Detection tutorial.')
parser.add_argument('--input', help='Path to input image.', default='box.png')
args = parser.parse_args()

src = cv.imread(cv.samples.findFile(args.input), cv.IMREAD_GRAYSCALE)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

#-- Step 1: Detect the keypoints using SURF Detector
minHessian = 400
detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints = detector.detect(src)

#-- Draw keypoints
img_keypoints = np.empty((src.shape[0], src.shape[1], 3), dtype=np.uint8)
cv.drawKeypoints(src, keypoints, img_keypoints)

#-- Show detected (drawn) keypoints
cv.imshow('SURF Keypoints', img_keypoints)

cv.waitKey()
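SURF lives in the non-free xfeatures2d module, so this sample requires an opencv-contrib build with OPENCV_ENABLE_NONFREE. On builds without it, the patent-free ORB detector is a drop-in substitute for this demo (a sketch; the detected keypoints will of course differ from SURF's):

import cv2 as cv
import numpy as np

src = cv.imread(cv.samples.findFile('box.png'), cv.IMREAD_GRAYSCALE)

detector = cv.ORB_create(nfeatures=500)  # ships with core OpenCV, no contrib needed
keypoints = detector.detect(src, None)

img_keypoints = np.empty((src.shape[0], src.shape[1], 3), dtype=np.uint8)
cv.drawKeypoints(src, keypoints, img_keypoints)
cv.imshow('ORB Keypoints', img_keypoints)
cv.waitKey()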
@@ -0,0 +1,42 @@
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse

parser = argparse.ArgumentParser(description='Code for Feature Matching with FLANN tutorial.')
parser.add_argument('--input1', help='Path to input image 1.', default='box.png')
parser.add_argument('--input2', help='Path to input image 2.', default='box_in_scene.png')
args = parser.parse_args()

img1 = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE)
img2 = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE)
if img1 is None or img2 is None:
    print('Could not open or find the images!')
    exit(0)

#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
minHessian = 400
detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints1, descriptors1 = detector.detectAndCompute(img1, None)
keypoints2, descriptors2 = detector.detectAndCompute(img2, None)

#-- Step 2: Matching descriptor vectors with a FLANN based matcher
# Since SURF is a floating-point descriptor NORM_L2 is used
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
knn_matches = matcher.knnMatch(descriptors1, descriptors2, 2)

#-- Filter matches using Lowe's ratio test
ratio_thresh = 0.7
good_matches = []
for m, n in knn_matches:
    if m.distance < ratio_thresh * n.distance:
        good_matches.append(m)

#-- Draw matches
img_matches = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img1, keypoints1, img2, keypoints2, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

#-- Show detected matches
cv.imshow('Good Matches', img_matches)

cv.waitKey()
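DescriptorMatcher_create(FLANNBASED) uses FLANN's default KD-tree index, which suits float descriptors such as SURF. The equivalent explicit construction, with commonly used parameter values shown purely as an illustration, looks like this:

import cv2 as cv

FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)  # more checks = more accurate but slower queries
matcher = cv.FlannBasedMatcher(index_params, search_params)
# then, as in the sample: knn_matches = matcher.knnMatch(descriptors1, descriptors2, 2)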
@@ -0,0 +1,77 @@
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse

parser = argparse.ArgumentParser(description='Code for Feature Matching with FLANN tutorial.')
parser.add_argument('--input1', help='Path to input image 1.', default='box.png')
parser.add_argument('--input2', help='Path to input image 2.', default='box_in_scene.png')
args = parser.parse_args()

img_object = cv.imread(cv.samples.findFile(args.input1), cv.IMREAD_GRAYSCALE)
img_scene = cv.imread(cv.samples.findFile(args.input2), cv.IMREAD_GRAYSCALE)
if img_object is None or img_scene is None:
    print('Could not open or find the images!')
    exit(0)

#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
minHessian = 400
detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
keypoints_obj, descriptors_obj = detector.detectAndCompute(img_object, None)
keypoints_scene, descriptors_scene = detector.detectAndCompute(img_scene, None)

#-- Step 2: Matching descriptor vectors with a FLANN based matcher
# Since SURF is a floating-point descriptor NORM_L2 is used
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
knn_matches = matcher.knnMatch(descriptors_obj, descriptors_scene, 2)

#-- Filter matches using Lowe's ratio test
ratio_thresh = 0.75
good_matches = []
for m, n in knn_matches:
    if m.distance < ratio_thresh * n.distance:
        good_matches.append(m)

#-- Draw matches
img_matches = np.empty((max(img_object.shape[0], img_scene.shape[0]), img_object.shape[1]+img_scene.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img_object, keypoints_obj, img_scene, keypoints_scene, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

#-- Localize the object
obj = np.empty((len(good_matches), 2), dtype=np.float32)
scene = np.empty((len(good_matches), 2), dtype=np.float32)
for i in range(len(good_matches)):
    #-- Get the keypoints from the good matches
    obj[i, 0] = keypoints_obj[good_matches[i].queryIdx].pt[0]
    obj[i, 1] = keypoints_obj[good_matches[i].queryIdx].pt[1]
    scene[i, 0] = keypoints_scene[good_matches[i].trainIdx].pt[0]
    scene[i, 1] = keypoints_scene[good_matches[i].trainIdx].pt[1]

H, _ = cv.findHomography(obj, scene, cv.RANSAC)

#-- Get the corners from the image_1 ( the object to be "detected" )
obj_corners = np.empty((4, 1, 2), dtype=np.float32)
obj_corners[0, 0, 0] = 0
obj_corners[0, 0, 1] = 0
obj_corners[1, 0, 0] = img_object.shape[1]
obj_corners[1, 0, 1] = 0
obj_corners[2, 0, 0] = img_object.shape[1]
obj_corners[2, 0, 1] = img_object.shape[0]
obj_corners[3, 0, 0] = 0
obj_corners[3, 0, 1] = img_object.shape[0]

scene_corners = cv.perspectiveTransform(obj_corners, H)

#-- Draw lines between the corners (the mapped object in the scene - image_2 )
cv.line(img_matches, (int(scene_corners[0, 0, 0] + img_object.shape[1]), int(scene_corners[0, 0, 1])),
        (int(scene_corners[1, 0, 0] + img_object.shape[1]), int(scene_corners[1, 0, 1])), (0, 255, 0), 4)
cv.line(img_matches, (int(scene_corners[1, 0, 0] + img_object.shape[1]), int(scene_corners[1, 0, 1])),
        (int(scene_corners[2, 0, 0] + img_object.shape[1]), int(scene_corners[2, 0, 1])), (0, 255, 0), 4)
cv.line(img_matches, (int(scene_corners[2, 0, 0] + img_object.shape[1]), int(scene_corners[2, 0, 1])),
        (int(scene_corners[3, 0, 0] + img_object.shape[1]), int(scene_corners[3, 0, 1])), (0, 255, 0), 4)
cv.line(img_matches, (int(scene_corners[3, 0, 0] + img_object.shape[1]), int(scene_corners[3, 0, 1])),
        (int(scene_corners[0, 0, 0] + img_object.shape[1]), int(scene_corners[0, 0, 1])), (0, 255, 0), 4)

#-- Show detected matches
cv.imshow('Good Matches & Object detection', img_matches)

cv.waitKey()
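Two optional refinements, sketched under the assumption that obj, scene, img_matches, img_object and scene_corners are the arrays built above: findHomography with cv.RANSAC also returns an inlier mask worth logging, and the four cv.line calls can collapse into a single cv.polylines call:

import numpy as np
import cv2 as cv

H, mask = cv.findHomography(obj, scene, cv.RANSAC)
print('RANSAC kept', int(mask.sum()), 'of', len(obj), 'matches')

# Draw the projected object outline as one closed polygon, offset into the scene half
quad = scene_corners.reshape(-1, 2) + np.array([img_object.shape[1], 0])
cv.polylines(img_matches, [np.int32(quad).reshape(-1, 1, 2)], True, (0, 255, 0), 4)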