feat: switch backend to PaddleOCR-NCNN, switch project to CMake

1. The project backend has been fully migrated to the PaddleOCR-NCNN algorithm and has passed basic compatibility tests.
2. The project is now organized with CMake; to better accommodate third-party libraries going forward, the QMake project is no longer provided.
3. The rights/notice files and the code tree have been reorganized to minimize the risk of copyright infringement.

Log: switch backend to PaddleOCR-NCNN, switch project to CMake
Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
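For context only, below is a minimal sketch (not part of this commit) of how a PaddleOCR detection model converted to ncnn might be loaded and run from C++. The file names ("ppocr_det.param"/"ppocr_det.bin"), blob names ("input"/"output"), input size, and normalization constants are placeholders and assumptions, not the values used by this project's actual wrapper code.

// Hypothetical sketch: run one forward pass of an ncnn-converted PaddleOCR
// detection model. Names and constants below are placeholders.
#include <opencv2/opencv.hpp>
#include "net.h"   // ncnn

int main()
{
    ncnn::Net det;
    if (det.load_param("ppocr_det.param") != 0 || det.load_model("ppocr_det.bin") != 0)
        return -1;   // model files missing or failed to parse

    cv::Mat bgr = cv::imread("sample.jpg");
    if (bgr.empty())
        return -1;

    // Resize to the network input size and convert to an ncnn::Mat (CHW, float).
    const int target_w = 640, target_h = 640;
    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR,
                                                 bgr.cols, bgr.rows, target_w, target_h);

    // Placeholder mean/scale values; real PaddleOCR preprocessing uses the
    // statistics the model was trained with.
    const float mean_vals[3] = {123.675f, 116.28f, 103.53f};
    const float norm_vals[3] = {1.f / 58.395f, 1.f / 57.12f, 1.f / 57.375f};
    in.substract_mean_normalize(mean_vals, norm_vals);

    ncnn::Extractor ex = det.create_extractor();
    ex.input("input", in);        // blob name depends on the converted model
    ncnn::Mat score_map;
    ex.extract("output", score_map);

    // score_map holds the text probability map; detection post-processing
    // binarizes it and turns connected regions into text boxes.
    return 0;
}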
3rdparty/opencv-4.5.4/modules/dnn/misc/python/pyopencv_dnn.hpp (new file, vendored, 219 lines)
@@ -0,0 +1,219 @@
#ifdef HAVE_OPENCV_DNN
typedef dnn::DictValue LayerId;
typedef std::vector<dnn::MatShape> vector_MatShape;
typedef std::vector<std::vector<dnn::MatShape> > vector_vector_MatShape;

template<>
bool pyopencv_to(PyObject *o, dnn::DictValue &dv, const ArgInfo& info)
{
    CV_UNUSED(info);
    if (!o || o == Py_None)
        return true; //Current state will be used
    else if (PyLong_Check(o))
    {
        dv = dnn::DictValue((int64)PyLong_AsLongLong(o));
        return true;
    }
    else if (PyInt_Check(o))
    {
        dv = dnn::DictValue((int64)PyInt_AS_LONG(o));
        return true;
    }
    else if (PyFloat_Check(o))
    {
        dv = dnn::DictValue(PyFloat_AsDouble(o));
        return true;
    }
    else
    {
        std::string str;
        if (getUnicodeString(o, str))
        {
            dv = dnn::DictValue(str);
            return true;
        }
    }
    return false;
}

template<typename T>
PyObject* pyopencv_from(const dnn::DictValue &dv)
{
    if (dv.size() > 1)
    {
        std::vector<T> vec(dv.size());
        for (int i = 0; i < dv.size(); ++i)
            vec[i] = dv.get<T>(i);
        return pyopencv_from_generic_vec(vec);
    }
    else
        return pyopencv_from(dv.get<T>());
}

template<>
PyObject* pyopencv_from(const dnn::DictValue &dv)
{
    if (dv.isInt()) return pyopencv_from<int>(dv);
    if (dv.isReal()) return pyopencv_from<float>(dv);
    if (dv.isString()) return pyopencv_from<String>(dv);
    CV_Error(Error::StsNotImplemented, "Unknown value type");
    return NULL;
}

template<>
PyObject* pyopencv_from(const dnn::LayerParams& lp)
{
    PyObject* dict = PyDict_New();
    for (std::map<String, dnn::DictValue>::const_iterator it = lp.begin(); it != lp.end(); ++it)
    {
        CV_Assert(!PyDict_SetItemString(dict, it->first.c_str(), pyopencv_from(it->second)));
    }
    return dict;
}

template<>
PyObject* pyopencv_from(const std::vector<dnn::Target> &t)
{
    return pyopencv_from(std::vector<int>(t.begin(), t.end()));
}

class pycvLayer CV_FINAL : public dnn::Layer
{
public:
    pycvLayer(const dnn::LayerParams &params, PyObject* pyLayer) : Layer(params)
    {
        PyGILState_STATE gstate;
        gstate = PyGILState_Ensure();

        PyObject* args = PyTuple_New(2);
        CV_Assert(!PyTuple_SetItem(args, 0, pyopencv_from(params)));
        CV_Assert(!PyTuple_SetItem(args, 1, pyopencv_from(params.blobs)));
        o = PyObject_CallObject(pyLayer, args);

        Py_DECREF(args);
        PyGILState_Release(gstate);
        if (!o)
            CV_Error(Error::StsError, "Failed to create an instance of custom layer");
    }

    static void registerLayer(const std::string& type, PyObject* o)
    {
        std::map<std::string, std::vector<PyObject*> >::iterator it = pyLayers.find(type);
        if (it != pyLayers.end())
            it->second.push_back(o);
        else
            pyLayers[type] = std::vector<PyObject*>(1, o);
    }

    static void unregisterLayer(const std::string& type)
    {
        std::map<std::string, std::vector<PyObject*> >::iterator it = pyLayers.find(type);
        if (it != pyLayers.end())
        {
            if (it->second.size() > 1)
                it->second.pop_back();
            else
                pyLayers.erase(it);
        }
    }

    static Ptr<dnn::Layer> create(dnn::LayerParams &params)
    {
        std::map<std::string, std::vector<PyObject*> >::iterator it = pyLayers.find(params.type);
        if (it == pyLayers.end())
            CV_Error(Error::StsNotImplemented, "Layer with a type \"" + params.type +
                                               "\" is not implemented");
        CV_Assert(!it->second.empty());
        return Ptr<dnn::Layer>(new pycvLayer(params, it->second.back()));
    }

    virtual bool getMemoryShapes(const std::vector<std::vector<int> > &inputs,
                                 const int,
                                 std::vector<std::vector<int> > &outputs,
                                 std::vector<std::vector<int> > &) const CV_OVERRIDE
    {
        PyGILState_STATE gstate;
        gstate = PyGILState_Ensure();

        PyObject* args = PyList_New(inputs.size());
        for(size_t i = 0; i < inputs.size(); ++i)
            PyList_SetItem(args, i, pyopencv_from_generic_vec(inputs[i]));

        PyObject* res = PyObject_CallMethodObjArgs(o, PyString_FromString("getMemoryShapes"), args, NULL);
        Py_DECREF(args);
        PyGILState_Release(gstate);
        if (!res)
            CV_Error(Error::StsNotImplemented, "Failed to call \"getMemoryShapes\" method");
        CV_Assert(pyopencv_to_generic_vec(res, outputs, ArgInfo("", 0)));
        return false;
    }

    virtual void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays) CV_OVERRIDE
    {
        PyGILState_STATE gstate;
        gstate = PyGILState_Ensure();

        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);

        PyObject* args = pyopencv_from(inputs);
        PyObject* res = PyObject_CallMethodObjArgs(o, PyString_FromString("forward"), args, NULL);
        Py_DECREF(args);
        if (!res)
            CV_Error(Error::StsNotImplemented, "Failed to call \"forward\" method");

        std::vector<Mat> pyOutputs;
        CV_Assert(pyopencv_to(res, pyOutputs, ArgInfo("", 0)));
        Py_DECREF(res);
        PyGILState_Release(gstate);

        CV_Assert(pyOutputs.size() == outputs.size());
        for (size_t i = 0; i < outputs.size(); ++i)
        {
            CV_Assert(pyOutputs[i].size == outputs[i].size);
            CV_Assert(pyOutputs[i].type() == outputs[i].type());
            pyOutputs[i].copyTo(outputs[i]);
        }
    }

private:
    // Map layers types to python classes.
    static std::map<std::string, std::vector<PyObject*> > pyLayers;
    PyObject* o;  // Instance of implemented python layer.
};

std::map<std::string, std::vector<PyObject*> > pycvLayer::pyLayers;

static PyObject *pyopencv_cv_dnn_registerLayer(PyObject*, PyObject *args, PyObject *kw)
{
    const char *keywords[] = { "type", "class", NULL };
    char* layerType;
    PyObject *classInstance;

    if (!PyArg_ParseTupleAndKeywords(args, kw, "sO", (char**)keywords, &layerType, &classInstance))
        return NULL;
    if (!PyCallable_Check(classInstance)) {
        PyErr_SetString(PyExc_TypeError, "class must be callable");
        return NULL;
    }

    pycvLayer::registerLayer(layerType, classInstance);
    dnn::LayerFactory::registerLayer(layerType, pycvLayer::create);
    Py_RETURN_NONE;
}

static PyObject *pyopencv_cv_dnn_unregisterLayer(PyObject*, PyObject *args, PyObject *kw)
{
    const char *keywords[] = { "type", NULL };
    char* layerType;

    if (!PyArg_ParseTupleAndKeywords(args, kw, "s", (char**)keywords, &layerType))
        return NULL;

    pycvLayer::unregisterLayer(layerType);
    dnn::LayerFactory::unregisterLayer(layerType);
    Py_RETURN_NONE;
}

#endif  // HAVE_OPENCV_DNN
3rdparty/opencv-4.5.4/modules/dnn/misc/python/test/test_dnn.py (new file, vendored, 415 lines)
@@ -0,0 +1,415 @@
#!/usr/bin/env python
import os
import cv2 as cv
import numpy as np

from tests_common import NewOpenCVTests, unittest

def normAssert(test, a, b, msg=None, lInf=1e-5):
    test.assertLess(np.max(np.abs(a - b)), lInf, msg)

def inter_area(box1, box2):
    x_min, x_max = max(box1[0], box2[0]), min(box1[2], box2[2])
    y_min, y_max = max(box1[1], box2[1]), min(box1[3], box2[3])
    return (x_max - x_min) * (y_max - y_min)

def area(box):
    return (box[2] - box[0]) * (box[3] - box[1])

def box2str(box):
    left, top = box[0], box[1]
    width, height = box[2] - left, box[3] - top
    return '[%f x %f from (%f, %f)]' % (width, height, left, top)

def normAssertDetections(test, refClassIds, refScores, refBoxes, testClassIds, testScores, testBoxes,
                         confThreshold=0.0, scores_diff=1e-5, boxes_iou_diff=1e-4):
    matchedRefBoxes = [False] * len(refBoxes)
    errMsg = ''
    for i in range(len(testBoxes)):
        testScore = testScores[i]
        if testScore < confThreshold:
            continue

        testClassId, testBox = testClassIds[i], testBoxes[i]
        matched = False
        for j in range(len(refBoxes)):
            if (not matchedRefBoxes[j]) and testClassId == refClassIds[j] and \
               abs(testScore - refScores[j]) < scores_diff:
                interArea = inter_area(testBox, refBoxes[j])
                iou = interArea / (area(testBox) + area(refBoxes[j]) - interArea)
                if abs(iou - 1.0) < boxes_iou_diff:
                    matched = True
                    matchedRefBoxes[j] = True
        if not matched:
            errMsg += '\nUnmatched prediction: class %d score %f box %s' % (testClassId, testScore, box2str(testBox))

    for i in range(len(refBoxes)):
        if (not matchedRefBoxes[i]) and refScores[i] > confThreshold:
            errMsg += '\nUnmatched reference: class %d score %f box %s' % (refClassIds[i], refScores[i], box2str(refBoxes[i]))
    if errMsg:
        test.fail(errMsg)

def printParams(backend, target):
    backendNames = {
        cv.dnn.DNN_BACKEND_OPENCV: 'OCV',
        cv.dnn.DNN_BACKEND_INFERENCE_ENGINE: 'DLIE'
    }
    targetNames = {
        cv.dnn.DNN_TARGET_CPU: 'CPU',
        cv.dnn.DNN_TARGET_OPENCL: 'OCL',
        cv.dnn.DNN_TARGET_OPENCL_FP16: 'OCL_FP16',
        cv.dnn.DNN_TARGET_MYRIAD: 'MYRIAD'
    }
    print('%s/%s' % (backendNames[backend], targetNames[target]))

def getDefaultThreshold(target):
    if target == cv.dnn.DNN_TARGET_OPENCL_FP16 or target == cv.dnn.DNN_TARGET_MYRIAD:
        return 4e-3
    else:
        return 1e-5

testdata_required = bool(os.environ.get('OPENCV_DNN_TEST_REQUIRE_TESTDATA', False))

g_dnnBackendsAndTargets = None

class dnn_test(NewOpenCVTests):

    def setUp(self):
        super(dnn_test, self).setUp()

        global g_dnnBackendsAndTargets
        if g_dnnBackendsAndTargets is None:
            g_dnnBackendsAndTargets = self.initBackendsAndTargets()
        self.dnnBackendsAndTargets = g_dnnBackendsAndTargets

    def initBackendsAndTargets(self):
        self.dnnBackendsAndTargets = [
            [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU],
        ]

        if self.checkIETarget(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_TARGET_CPU):
            self.dnnBackendsAndTargets.append([cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_TARGET_CPU])
        if self.checkIETarget(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_TARGET_MYRIAD):
            self.dnnBackendsAndTargets.append([cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_TARGET_MYRIAD])

        if cv.ocl.haveOpenCL() and cv.ocl.useOpenCL():
            self.dnnBackendsAndTargets.append([cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_OPENCL])
            self.dnnBackendsAndTargets.append([cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_OPENCL_FP16])
            if cv.ocl_Device.getDefault().isIntel():
                if self.checkIETarget(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_TARGET_OPENCL):
                    self.dnnBackendsAndTargets.append([cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_TARGET_OPENCL])
                if self.checkIETarget(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_TARGET_OPENCL_FP16):
                    self.dnnBackendsAndTargets.append([cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_TARGET_OPENCL_FP16])
        return self.dnnBackendsAndTargets

    def find_dnn_file(self, filename, required=True):
        if not required:
            required = testdata_required
        return self.find_file(filename, [os.environ.get('OPENCV_DNN_TEST_DATA_PATH', os.getcwd()),
                                         os.environ['OPENCV_TEST_DATA_PATH']],
                              required=required)

    def checkIETarget(self, backend, target):
        proto = self.find_dnn_file('dnn/layers/layer_convolution.prototxt')
        model = self.find_dnn_file('dnn/layers/layer_convolution.caffemodel')
        net = cv.dnn.readNet(proto, model)
        net.setPreferableBackend(backend)
        net.setPreferableTarget(target)
        inp = np.random.standard_normal([1, 2, 10, 11]).astype(np.float32)
        try:
            net.setInput(inp)
            net.forward()
        except BaseException as e:
            return False
        return True

    def test_getAvailableTargets(self):
        targets = cv.dnn.getAvailableTargets(cv.dnn.DNN_BACKEND_OPENCV)
        self.assertTrue(cv.dnn.DNN_TARGET_CPU in targets)

    def test_blobFromImage(self):
        np.random.seed(324)

        width = 6
        height = 7
        scale = 1.0/127.5
        mean = (10, 20, 30)

        # Test arguments names.
        img = np.random.randint(0, 255, [4, 5, 3]).astype(np.uint8)
        blob = cv.dnn.blobFromImage(img, scale, (width, height), mean, True, False)
        blob_args = cv.dnn.blobFromImage(img, scalefactor=scale, size=(width, height),
                                         mean=mean, swapRB=True, crop=False)
        normAssert(self, blob, blob_args)

        # Test values.
        target = cv.resize(img, (width, height), interpolation=cv.INTER_LINEAR)
        target = target.astype(np.float32)
        target = target[:,:,[2, 1, 0]]  # BGR2RGB
        target[:,:,0] -= mean[0]
        target[:,:,1] -= mean[1]
        target[:,:,2] -= mean[2]
        target *= scale
        target = target.transpose(2, 0, 1).reshape(1, 3, height, width)  # to NCHW
        normAssert(self, blob, target)


    def test_model(self):
        img_path = self.find_dnn_file("dnn/street.png")
        weights = self.find_dnn_file("dnn/MobileNetSSD_deploy.caffemodel", required=False)
        config = self.find_dnn_file("dnn/MobileNetSSD_deploy.prototxt", required=False)
        if weights is None or config is None:
            raise unittest.SkipTest("Missing DNN test files (dnn/MobileNetSSD_deploy.{prototxt/caffemodel}). Verify OPENCV_DNN_TEST_DATA_PATH configuration parameter.")

        frame = cv.imread(img_path)
        model = cv.dnn_DetectionModel(weights, config)
        model.setInputParams(size=(300, 300), mean=(127.5, 127.5, 127.5), scale=1.0/127.5)

        iouDiff = 0.05
        confThreshold = 0.0001
        nmsThreshold = 0
        scoreDiff = 1e-3

        classIds, confidences, boxes = model.detect(frame, confThreshold, nmsThreshold)

        refClassIds = (7, 15)
        refConfidences = (0.9998, 0.8793)
        refBoxes = ((328, 238, 85, 102), (101, 188, 34, 138))

        normAssertDetections(self, refClassIds, refConfidences, refBoxes,
                             classIds, confidences, boxes, confThreshold, scoreDiff, iouDiff)

        for box in boxes:
            cv.rectangle(frame, box, (0, 255, 0))
            cv.rectangle(frame, np.array(box), (0, 255, 0))
            cv.rectangle(frame, tuple(box), (0, 255, 0))
            cv.rectangle(frame, list(box), (0, 255, 0))


    def test_classification_model(self):
        img_path = self.find_dnn_file("dnn/googlenet_0.png")
        weights = self.find_dnn_file("dnn/squeezenet_v1.1.caffemodel", required=False)
        config = self.find_dnn_file("dnn/squeezenet_v1.1.prototxt")
        ref = np.load(self.find_dnn_file("dnn/squeezenet_v1.1_prob.npy"))
        if weights is None or config is None:
            raise unittest.SkipTest("Missing DNN test files (dnn/squeezenet_v1.1.{prototxt/caffemodel}). Verify OPENCV_DNN_TEST_DATA_PATH configuration parameter.")

        frame = cv.imread(img_path)
        model = cv.dnn_ClassificationModel(config, weights)
        model.setInputSize(227, 227)
        model.setInputCrop(True)

        out = model.predict(frame)
        normAssert(self, out, ref)


    def test_textdetection_model(self):
        img_path = self.find_dnn_file("dnn/text_det_test1.png")
        weights = self.find_dnn_file("dnn/onnx/models/DB_TD500_resnet50.onnx", required=False)
        if weights is None:
            raise unittest.SkipTest("Missing DNN test files (onnx/models/DB_TD500_resnet50.onnx). Verify OPENCV_DNN_TEST_DATA_PATH configuration parameter.")

        frame = cv.imread(img_path)
        scale = 1.0 / 255.0
        size = (736, 736)
        mean = (122.67891434, 116.66876762, 104.00698793)

        model = cv.dnn_TextDetectionModel_DB(weights)
        model.setInputParams(scale, size, mean)
        out, _ = model.detect(frame)

        self.assertTrue(type(out) == tuple, msg='actual type {}'.format(str(type(out))))
        self.assertTrue(np.array(out).shape == (2, 4, 2))


    def test_face_detection(self):
        proto = self.find_dnn_file('dnn/opencv_face_detector.prototxt')
        model = self.find_dnn_file('dnn/opencv_face_detector.caffemodel', required=False)
        if proto is None or model is None:
            raise unittest.SkipTest("Missing DNN test files (dnn/opencv_face_detector.{prototxt/caffemodel}). Verify OPENCV_DNN_TEST_DATA_PATH configuration parameter.")

        img = self.get_sample('gpu/lbpcascade/er.png')
        blob = cv.dnn.blobFromImage(img, mean=(104, 177, 123), swapRB=False, crop=False)

        ref = [[0, 1, 0.99520785, 0.80997437, 0.16379407, 0.87996572, 0.26685631],
               [0, 1, 0.9934696, 0.2831718, 0.50738752, 0.345781, 0.5985168],
               [0, 1, 0.99096733, 0.13629119, 0.24892329, 0.19756334, 0.3310290],
               [0, 1, 0.98977017, 0.23901358, 0.09084064, 0.29902688, 0.1769477],
               [0, 1, 0.97203469, 0.67965847, 0.06876482, 0.73999709, 0.1513494],
               [0, 1, 0.95097077, 0.51901293, 0.45863652, 0.5777427, 0.5347801]]

        print('\n')
        for backend, target in self.dnnBackendsAndTargets:
            printParams(backend, target)

            net = cv.dnn.readNet(proto, model)
            net.setPreferableBackend(backend)
            net.setPreferableTarget(target)
            net.setInput(blob)
            out = net.forward().reshape(-1, 7)

            scoresDiff = 4e-3 if target in [cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD] else 1e-5
            iouDiff = 2e-2 if target in [cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD] else 1e-4

            ref = np.array(ref, np.float32)
            refClassIds, testClassIds = ref[:, 1], out[:, 1]
            refScores, testScores = ref[:, 2], out[:, 2]
            refBoxes, testBoxes = ref[:, 3:], out[:, 3:]

            normAssertDetections(self, refClassIds, refScores, refBoxes, testClassIds,
                                 testScores, testBoxes, 0.5, scoresDiff, iouDiff)

    def test_async(self):
        timeout = 10*1000*10**6  # in nanoseconds (10 sec)
        proto = self.find_dnn_file('dnn/layers/layer_convolution.prototxt')
        model = self.find_dnn_file('dnn/layers/layer_convolution.caffemodel')
        if proto is None or model is None:
            raise unittest.SkipTest("Missing DNN test files (dnn/layers/layer_convolution.{prototxt/caffemodel}). Verify OPENCV_DNN_TEST_DATA_PATH configuration parameter.")

        print('\n')
        for backend, target in self.dnnBackendsAndTargets:
            if backend != cv.dnn.DNN_BACKEND_INFERENCE_ENGINE:
                continue

            printParams(backend, target)

            netSync = cv.dnn.readNet(proto, model)
            netSync.setPreferableBackend(backend)
            netSync.setPreferableTarget(target)

            netAsync = cv.dnn.readNet(proto, model)
            netAsync.setPreferableBackend(backend)
            netAsync.setPreferableTarget(target)

            # Generate inputs
            numInputs = 10
            inputs = []
            for _ in range(numInputs):
                inputs.append(np.random.standard_normal([2, 6, 75, 113]).astype(np.float32))

            # Run synchronously
            refs = []
            for i in range(numInputs):
                netSync.setInput(inputs[i])
                refs.append(netSync.forward())

            # Run asynchronously. To make test more robust, process inputs in the reversed order.
            outs = []
            for i in reversed(range(numInputs)):
                netAsync.setInput(inputs[i])
                outs.insert(0, netAsync.forwardAsync())

            for i in reversed(range(numInputs)):
                ret, result = outs[i].get(timeoutNs=float(timeout))
                self.assertTrue(ret)
                normAssert(self, refs[i], result, 'Index: %d' % i, 1e-10)

    def test_nms(self):
        confs = (1, 1)
        rects = ((0, 0, 0.4, 0.4), (0, 0, 0.2, 0.4))  # 0.5 overlap

        self.assertTrue(all(cv.dnn.NMSBoxes(rects, confs, 0, 0.6).ravel() == (0, 1)))

    def test_custom_layer(self):
        class CropLayer(object):
            def __init__(self, params, blobs):
                self.xstart = 0
                self.xend = 0
                self.ystart = 0
                self.yend = 0
            # Our layer receives two inputs. We need to crop the first input blob
            # to match a shape of the second one (keeping batch size and number of channels)
            def getMemoryShapes(self, inputs):
                inputShape, targetShape = inputs[0], inputs[1]
                batchSize, numChannels = inputShape[0], inputShape[1]
                height, width = targetShape[2], targetShape[3]
                self.ystart = (inputShape[2] - targetShape[2]) // 2
                self.xstart = (inputShape[3] - targetShape[3]) // 2
                self.yend = self.ystart + height
                self.xend = self.xstart + width
                return [[batchSize, numChannels, height, width]]
            def forward(self, inputs):
                return [inputs[0][:,:,self.ystart:self.yend,self.xstart:self.xend]]

        cv.dnn_registerLayer('CropCaffe', CropLayer)
        proto = '''
                name: "TestCrop"
                input: "input"
                input_shape
                {
                    dim: 1
                    dim: 2
                    dim: 5
                    dim: 5
                }
                input: "roi"
                input_shape
                {
                    dim: 1
                    dim: 2
                    dim: 3
                    dim: 3
                }
                layer {
                  name: "Crop"
                  type: "CropCaffe"
                  bottom: "input"
                  bottom: "roi"
                  top: "Crop"
                }'''

        net = cv.dnn.readNetFromCaffe(bytearray(proto.encode()))
        for backend, target in self.dnnBackendsAndTargets:
            if backend != cv.dnn.DNN_BACKEND_OPENCV:
                continue

            printParams(backend, target)

            net.setPreferableBackend(backend)
            net.setPreferableTarget(target)
            src_shape = [1, 2, 5, 5]
            dst_shape = [1, 2, 3, 3]
            inp = np.arange(0, np.prod(src_shape), dtype=np.float32).reshape(src_shape)
            roi = np.empty(dst_shape, dtype=np.float32)
            net.setInput(inp, "input")
            net.setInput(roi, "roi")
            out = net.forward()
            ref = inp[:, :, 1:4, 1:4]
            normAssert(self, out, ref)

        cv.dnn_unregisterLayer('CropCaffe')

    # check that dnn module can work with 3D tensor as input for network
    def test_input_3d(self):
        model = self.find_dnn_file('dnn/onnx/models/hidden_lstm.onnx')
        input_file = self.find_dnn_file('dnn/onnx/data/input_hidden_lstm.npy')
        output_file = self.find_dnn_file('dnn/onnx/data/output_hidden_lstm.npy')
        if model is None:
            raise unittest.SkipTest("Missing DNN test files (dnn/onnx/models/hidden_lstm.onnx). "
                                    "Verify OPENCV_DNN_TEST_DATA_PATH configuration parameter.")
        if input_file is None or output_file is None:
            raise unittest.SkipTest("Missing DNN test files (dnn/onnx/data/{input/output}_hidden_lstm.npy). "
                                    "Verify OPENCV_DNN_TEST_DATA_PATH configuration parameter.")

        input = np.load(input_file)
        # we have to expand the shape of input tensor because Python bindings cut 3D tensors to 2D
        # it should be fixed in future. see : https://github.com/opencv/opencv/issues/19091
        # please remove `expand_dims` after that
        input = np.expand_dims(input, axis=3)
        gold_output = np.load(output_file)

        for backend, target in self.dnnBackendsAndTargets:
            printParams(backend, target)

            net = cv.dnn.readNet(model)

            net.setPreferableBackend(backend)
            net.setPreferableTarget(target)

            net.setInput(input)
            real_output = net.forward()

            normAssert(self, real_output, gold_output, "", getDefaultThreshold(target))

if __name__ == '__main__':
    NewOpenCVTests.bootstrap()