feat: switch the backend to PaddleOCR-NCNN and the project to CMake

1. Migrated the project backend entirely to the PaddleOCR-NCNN algorithm; it has passed basic compatibility tests.
2. The project is now organized with CMake; to better accommodate third-party libraries, a QMake project will no longer be provided.
3. Reorganized the rights notice files and the code project to minimize the risk of infringement.

Log: switch the backend to PaddleOCR-NCNN and the project to CMake
Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
wangzhengyang
2022-05-10 09:54:44 +08:00
parent ecdd171c6f
commit 718c41634f
10018 changed files with 3593797 additions and 186748 deletions


@@ -0,0 +1,23 @@
from abc import ABC, ABCMeta, abstractmethod
class AbstractModel(ABC):
@abstractmethod
def get_prepared_models(self):
pass
class Framework(object):
in_blob_name = ''
out_blob_name = ''
__metaclass__ = ABCMeta
@abstractmethod
def get_name(self):
pass
@abstractmethod
def get_output(self, input_blob):
pass


@@ -0,0 +1,96 @@
import sys
import time
import numpy as np
from ...utils import get_final_summary_info
class ClsAccEvaluation:
log = sys.stdout
img_classes = {}
batch_size = 0
def __init__(self, log_path, img_classes_file, batch_size):
self.log = open(log_path, 'w')
self.img_classes = self.read_classes(img_classes_file)
self.batch_size = batch_size
# collect the accuracies for both models
self.general_quality_metric = []
self.general_inference_time = []
@staticmethod
def read_classes(img_classes_file):
result = {}
with open(img_classes_file) as file:
for l in file.readlines():
result[l.split()[0]] = int(l.split()[1])
return result
def get_correct_answers(self, img_list, net_output_blob):
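        # top-5 check: a sample counts as correct if its true class is among the five highest-scored outputs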
correct_answers = 0
for i in range(len(img_list)):
indexes = np.argsort(net_output_blob[i])[-5:]
correct_index = self.img_classes[img_list[i]]
if correct_index in indexes:
correct_answers += 1
return correct_answers
def process(self, frameworks, data_fetcher):
sorted_imgs_names = sorted(self.img_classes.keys())
correct_answers = [0] * len(frameworks)
samples_handled = 0
blobs_l1_diff = [0] * len(frameworks)
blobs_l1_diff_count = [0] * len(frameworks)
blobs_l_inf_diff = [sys.float_info.min] * len(frameworks)
inference_time = [0.0] * len(frameworks)
for x in range(0, len(sorted_imgs_names), self.batch_size):
sublist = sorted_imgs_names[x:x + self.batch_size]
batch = data_fetcher.get_batch(sublist)
samples_handled += len(sublist)
fw_accuracy = []
fw_time = []
frameworks_out = []
for i in range(len(frameworks)):
start = time.time()
out = frameworks[i].get_output(batch)
end = time.time()
correct_answers[i] += self.get_correct_answers(sublist, out)
fw_accuracy.append(100 * correct_answers[i] / float(samples_handled))
frameworks_out.append(out)
inference_time[i] += end - start
fw_time.append(inference_time[i] / samples_handled * 1000)
print(samples_handled, 'Accuracy for', frameworks[i].get_name() + ':', fw_accuracy[i], file=self.log)
print("Inference time, ms ", frameworks[i].get_name(), fw_time[i], file=self.log)
self.general_quality_metric.append(fw_accuracy)
self.general_inference_time.append(fw_time)
for i in range(1, len(frameworks)):
log_str = frameworks[0].get_name() + " vs " + frameworks[i].get_name() + ':'
diff = np.abs(frameworks_out[0] - frameworks_out[i])
l1_diff = np.sum(diff) / diff.size
print(samples_handled, "L1 difference", log_str, l1_diff, file=self.log)
blobs_l1_diff[i] += l1_diff
blobs_l1_diff_count[i] += 1
if np.max(diff) > blobs_l_inf_diff[i]:
blobs_l_inf_diff[i] = np.max(diff)
print(samples_handled, "L_INF difference", log_str, blobs_l_inf_diff[i], file=self.log)
self.log.flush()
for i in range(1, len(blobs_l1_diff)):
log_str = frameworks[0].get_name() + " vs " + frameworks[i].get_name() + ':'
print('Final l1 diff', log_str, blobs_l1_diff[i] / blobs_l1_diff_count[i], file=self.log)
print(
get_final_summary_info(
self.general_quality_metric,
self.general_inference_time,
"accuracy"
),
file=self.log
)


@@ -0,0 +1,87 @@
import os
from abc import ABCMeta, abstractmethod
import cv2
import numpy as np
from ...img_utils import read_rgb_img, get_pytorch_preprocess
from ...test.configs.default_preprocess_config import PYTORCH_RSZ_HEIGHT, PYTORCH_RSZ_WIDTH
class DataFetch(object):
imgs_dir = ''
frame_size = 0
bgr_to_rgb = False
__metaclass__ = ABCMeta
@abstractmethod
def preprocess(self, img):
pass
@staticmethod
def reshape_img(img):
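        # drop a possible alpha channel, reorder HWC -> CHW, and add a batch dimension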
img = img[:, :, 0:3].transpose(2, 0, 1)
return np.expand_dims(img, 0)
def center_crop(self, img):
cols = img.shape[1]
rows = img.shape[0]
y1 = round((rows - self.frame_size) / 2)
y2 = round(y1 + self.frame_size)
x1 = round((cols - self.frame_size) / 2)
x2 = round(x1 + self.frame_size)
return img[y1:y2, x1:x2]
def initial_preprocess(self, img):
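        # resize so the shortest side equals frame_size, then take a centered square crop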
min_dim = min(img.shape[-3], img.shape[-2])
resize_ratio = self.frame_size / float(min_dim)
img = cv2.resize(img, (0, 0), fx=resize_ratio, fy=resize_ratio)
img = self.center_crop(img)
return img
def get_preprocessed_img(self, img_path):
image_data = read_rgb_img(img_path, self.bgr_to_rgb)
image_data = self.preprocess(image_data)
return self.reshape_img(image_data)
def get_batch(self, img_names):
assert type(img_names) is list
batch = np.zeros((len(img_names), 3, self.frame_size, self.frame_size)).astype(np.float32)
for i in range(len(img_names)):
img_name = img_names[i]
img_file = os.path.join(self.imgs_dir, img_name)
assert os.path.exists(img_file)
batch[i] = self.get_preprocessed_img(img_file)
return batch
class PyTorchPreprocessedFetch(DataFetch):
def __init__(self, pytorch_cls_config, preprocess_input=None):
self.imgs_dir = pytorch_cls_config.img_root_dir
self.frame_size = pytorch_cls_config.frame_size
self.bgr_to_rgb = pytorch_cls_config.bgr_to_rgb
self.preprocess_input = preprocess_input
def preprocess(self, img):
img = cv2.resize(img, (PYTORCH_RSZ_WIDTH, PYTORCH_RSZ_HEIGHT))
img = self.center_crop(img)
if self.preprocess_input:
            return self.preprocess_input(img)
return get_pytorch_preprocess(img)
class TFPreprocessedFetch(DataFetch):
def __init__(self, tf_cls_config, preprocess_input):
self.imgs_dir = tf_cls_config.img_root_dir
self.frame_size = tf_cls_config.frame_size
self.bgr_to_rgb = tf_cls_config.bgr_to_rgb
self.preprocess_input = preprocess_input
def preprocess(self, img):
img = self.initial_preprocess(img)
return self.preprocess_input(img)


@@ -0,0 +1,19 @@
import cv2
import numpy as np
from .test.configs.default_preprocess_config import BASE_IMG_SCALE_FACTOR
def read_rgb_img(img_file, is_bgr_to_rgb=True):
img = cv2.imread(img_file, cv2.IMREAD_COLOR)
if is_bgr_to_rgb:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def get_pytorch_preprocess(img):
img = img.astype(np.float32)
img *= BASE_IMG_SCALE_FACTOR
img -= [0.485, 0.456, 0.406]
img /= [0.229, 0.224, 0.225]
return img


@@ -0,0 +1,60 @@
from .configs.test_config import TestClsConfig, TestClsModuleConfig
from .model_test_pipeline import ModelTestPipeline
from ..evaluation.classification.cls_accuracy_evaluator import ClsAccEvaluation
from ..utils import get_test_module
class ClsModelTestPipeline(ModelTestPipeline):
def __init__(
self,
network_model,
model_processor,
dnn_model_processor,
data_fetcher,
img_processor=None,
cls_args_parser=None,
default_input_blob_preproc=None
):
super(ClsModelTestPipeline, self).__init__(
network_model,
model_processor,
dnn_model_processor
)
if cls_args_parser:
self._parser = cls_args_parser
self.test_config = TestClsConfig()
parser_args = self._parser.parse_args()
if parser_args.test:
self._test_module_config = TestClsModuleConfig()
self._test_module = get_test_module(
self._test_module_config.test_module_name,
self._test_module_config.test_module_path
)
if parser_args.default_img_preprocess:
self._default_input_blob_preproc = default_input_blob_preproc
if parser_args.evaluate:
self._data_fetcher = data_fetcher(self.test_config, img_processor)
def _configure_test_module_params(self):
self._test_module_param_list.extend((
'--crop', self._test_module_config.crop,
'--std', *self._test_module_config.std
))
if self._test_module_config.rsz_height and self._test_module_config.rsz_width:
self._test_module_param_list.extend((
'--initial_height', self._test_module_config.rsz_height,
'--initial_width', self._test_module_config.rsz_width,
))
def _configure_acc_eval(self, log_path):
self._accuracy_evaluator = ClsAccEvaluation(
log_path,
self.test_config.img_cls_file,
self.test_config.batch_size
)


@@ -0,0 +1,37 @@
BASE_IMG_SCALE_FACTOR = 1 / 255.0
PYTORCH_RSZ_HEIGHT = 256
PYTORCH_RSZ_WIDTH = 256
pytorch_resize_input_blob = {
"mean": ["123.675", "116.28", "103.53"],
"scale": str(BASE_IMG_SCALE_FACTOR),
"std": ["0.229", "0.224", "0.225"],
"crop": "True",
"rgb": True,
"rsz_height": str(PYTORCH_RSZ_HEIGHT),
"rsz_width": str(PYTORCH_RSZ_WIDTH)
}
pytorch_input_blob = {
"mean": ["123.675", "116.28", "103.53"],
"scale": str(BASE_IMG_SCALE_FACTOR),
"std": ["0.229", "0.224", "0.225"],
"crop": "True",
"rgb": True
}
tf_input_blob = {
"scale": str(1 / 127.5),
"mean": ["127.5", "127.5", "127.5"],
"std": [],
"crop": "True",
"rgb": True
}
tf_model_blob_caffe_mode = {
"mean": ["103.939", "116.779", "123.68"],
"scale": "1.0",
"std": [],
"crop": "True",
"rgb": False
}


@@ -0,0 +1,40 @@
import os
from dataclasses import dataclass, field
from typing import List
@dataclass
class CommonConfig:
output_data_root_dir: str = "dnn_model_runner/dnn_conversion"
logs_dir: str = os.path.join(output_data_root_dir, "logs")
log_file_path: str = os.path.join(logs_dir, "{}_log.txt")
@dataclass
class TestClsConfig:
batch_size: int = 1
frame_size: int = 224
img_root_dir: str = "./ILSVRC2012_img_val"
# location of image-class matching
img_cls_file: str = "./val.txt"
bgr_to_rgb: bool = True
@dataclass
class TestClsModuleConfig:
cls_test_data_dir: str = "../data"
test_module_name: str = "classification"
test_module_path: str = "classification.py"
input_img: str = os.path.join(cls_test_data_dir, "squirrel_cls.jpg")
model: str = ""
frame_height: str = str(TestClsConfig.frame_size)
frame_width: str = str(TestClsConfig.frame_size)
scale: str = "1.0"
mean: List[str] = field(default_factory=lambda: ["0.0", "0.0", "0.0"])
std: List[str] = field(default_factory=list)
crop: str = "False"
rgb: str = "True"
rsz_height: str = ""
rsz_width: str = ""
classes: str = os.path.join(cls_test_data_dir, "dnn", "classification_classes_ILSVRC2012.txt")


@@ -0,0 +1,126 @@
import os
import numpy as np
from .configs.test_config import CommonConfig
from ..utils import create_parser, plot_acc
class ModelTestPipeline:
def __init__(
self,
network_model,
model_processor,
dnn_model_processor
):
self._net_model = network_model
self._model_processor = model_processor
self._dnn_model_processor = dnn_model_processor
self._parser = create_parser()
self._test_module = None
self._test_module_config = None
self._test_module_param_list = None
self.test_config = None
self._data_fetcher = None
self._default_input_blob_preproc = None
self._accuracy_evaluator = None
def init_test_pipeline(self):
cmd_args = self._parser.parse_args()
model_dict = self._net_model.get_prepared_models()
model_names = list(model_dict.keys())
print(
"The model {} was successfully obtained and converted to OpenCV {}".format(model_names[0], model_names[1])
)
if cmd_args.test:
if not self._test_module_config.model:
self._test_module_config.model = self._net_model.model_path["full_path"]
if cmd_args.default_img_preprocess:
self._test_module_config.scale = self._default_input_blob_preproc["scale"]
self._test_module_config.mean = self._default_input_blob_preproc["mean"]
self._test_module_config.std = self._default_input_blob_preproc["std"]
self._test_module_config.crop = self._default_input_blob_preproc["crop"]
if "rsz_height" in self._default_input_blob_preproc and "rsz_width" in self._default_input_blob_preproc:
self._test_module_config.rsz_height = self._default_input_blob_preproc["rsz_height"]
self._test_module_config.rsz_width = self._default_input_blob_preproc["rsz_width"]
self._test_module_param_list = [
'--model', self._test_module_config.model,
'--input', self._test_module_config.input_img,
'--width', self._test_module_config.frame_width,
'--height', self._test_module_config.frame_height,
'--scale', self._test_module_config.scale,
'--mean', *self._test_module_config.mean,
'--std', *self._test_module_config.std,
'--classes', self._test_module_config.classes,
]
if self._default_input_blob_preproc["rgb"]:
self._test_module_param_list.append('--rgb')
self._configure_test_module_params()
self._test_module.main(
self._test_module_param_list
)
if cmd_args.evaluate:
original_model_name = model_names[0]
dnn_model_name = model_names[1]
self.run_test_pipeline(
[
self._model_processor(model_dict[original_model_name], original_model_name),
self._dnn_model_processor(model_dict[dnn_model_name], dnn_model_name)
],
original_model_name.replace(" ", "_")
)
def run_test_pipeline(
self,
models_list,
formatted_exp_name,
is_plot_acc=True
):
log_path, logs_dir = self._configure_eval_log(formatted_exp_name)
print(
"===== Running evaluation of the model with the following params:\n"
"\t* val data location: {}\n"
"\t* log file location: {}\n".format(
self.test_config.img_root_dir,
log_path
)
)
os.makedirs(logs_dir, exist_ok=True)
self._configure_acc_eval(log_path)
self._accuracy_evaluator.process(models_list, self._data_fetcher)
if is_plot_acc:
plot_acc(
np.array(self._accuracy_evaluator.general_inference_time),
formatted_exp_name
)
print("===== End of the evaluation pipeline =====")
def _configure_acc_eval(self, log_path):
pass
def _configure_test_module_params(self):
pass
@staticmethod
def _configure_eval_log(formatted_exp_name):
common_test_config = CommonConfig()
return common_test_config.log_file_path.format(formatted_exp_name), common_test_config.logs_dir


@@ -0,0 +1,153 @@
import argparse
import importlib.util
import os
import random
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import torch
from .test.configs.test_config import CommonConfig
SEED_VAL = 42
DNN_LIB = "DNN"
# common path for model savings
MODEL_PATH_ROOT = os.path.join(CommonConfig().output_data_root_dir, "{}/models")
def get_full_model_path(lib_name, model_full_name):
model_path = MODEL_PATH_ROOT.format(lib_name)
return {
"path": model_path,
"full_path": os.path.join(model_path, model_full_name)
}
def plot_acc(data_list, experiment_name):
plt.figure(figsize=[8, 6])
plt.plot(data_list[:, 0], "r", linewidth=2.5, label="Original Model")
plt.plot(data_list[:, 1], "b", linewidth=2.5, label="Converted DNN Model")
plt.xlabel("Iterations ", fontsize=15)
plt.ylabel("Time (ms)", fontsize=15)
plt.title(experiment_name, fontsize=15)
plt.legend()
full_path_to_fig = os.path.join(CommonConfig().output_data_root_dir, experiment_name + ".png")
plt.savefig(full_path_to_fig, bbox_inches="tight")
def get_final_summary_info(general_quality_metric, general_inference_time, metric_name):
general_quality_metric = np.array(general_quality_metric)
general_inference_time = np.array(general_inference_time)
    summary_line = (
        "===== End of processing. General results:\n"
        "\t* mean {} for the original model: {}\t"
        "\t* mean time (min) for the original model inferences: {}\n"
        "\t* mean {} for the DNN model: {}\t"
        "\t* mean time (min) for the DNN model inferences: {}\n".format(
            metric_name, np.mean(general_quality_metric[:, 0]),
            np.mean(general_inference_time[:, 0]) / 60000,
            metric_name, np.mean(general_quality_metric[:, 1]),
            np.mean(general_inference_time[:, 1]) / 60000,
        )
    )
return summary_line
def set_common_reproducibility():
random.seed(SEED_VAL)
np.random.seed(SEED_VAL)
def set_pytorch_env():
set_common_reproducibility()
torch.manual_seed(SEED_VAL)
torch.set_printoptions(precision=10)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(SEED_VAL)
torch.backends.cudnn_benchmark_enabled = False
torch.backends.cudnn.deterministic = True
def set_tf_env(is_use_gpu=True):
set_common_reproducibility()
tf.random.set_seed(SEED_VAL)
os.environ["TF_DETERMINISTIC_OPS"] = "1"
if tf.config.list_physical_devices("GPU") and is_use_gpu:
gpu_devices = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_visible_devices(gpu_devices[0], "GPU")
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
os.environ["TF_USE_CUDNN"] = "1"
else:
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
def str_bool(input_val):
if input_val.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif input_val.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value was expected')
def get_formatted_model_list(model_list):
note_line = 'Please, choose the model from the below list:\n'
spaces_to_set = ' ' * (len(note_line) - 2)
return note_line + ''.join([spaces_to_set, '{} \n'] * len(model_list)).format(*model_list)
def model_str(model_list):
def type_model_list(input_val):
if input_val.lower() in model_list:
return input_val.lower()
else:
raise argparse.ArgumentTypeError(
'The model is currently unavailable for test.\n' +
get_formatted_model_list(model_list)
)
return type_model_list
def get_test_module(test_module_name, test_module_path):
module_spec = importlib.util.spec_from_file_location(test_module_name, test_module_path)
test_module = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(test_module)
return test_module
def create_parser():
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--test",
type=str_bool,
help="Define whether you'd like to run the model with OpenCV for testing.",
default=False
    )
parser.add_argument(
"--default_img_preprocess",
type=str_bool,
help="Define whether you'd like to preprocess the input image with defined"
" PyTorch or TF functions for model test with OpenCV.",
default=False
    )
parser.add_argument(
"--evaluate",
type=str_bool,
help="Define whether you'd like to run evaluation of the models (ex.: TF vs OpenCV networks).",
default=True
)
return parser
def create_extended_parser(model_list):
parser = create_parser()
parser.add_argument(
"--model_name",
type=model_str(model_list=model_list),
help="\nDefine the model name to test.\n" +
get_formatted_model_list(model_list),
required=True
)
return parser


@@ -0,0 +1,78 @@
# Run PaddlePaddle model using OpenCV
These two demonstrations show how to run inference on PaddlePaddle models using OpenCV.
## Environment Setup
```shell
pip install paddlepaddle-gpu
pip install paddlehub
pip install paddle2onnx
```
## 1. Run PaddlePaddle ResNet50 using OpenCV
### Run PaddlePaddle model demo
Run the code sample as follows:
```shell
python paddle_resnet50.py
```
There are three parts to the process:
1. Export the PaddlePaddle ResNet50 model to ONNX format.
2. Use `cv2.dnn.readNetFromONNX` to load the model file.
3. Preprocess the image file and run inference.
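The sketch below condenses steps 2 and 3. It is only a sketch: it assumes step 1 has already produced `resnet50.onnx`, and the `blobFromImage` parameters approximate the demo's `paddlehub` Normalize transform (mean/std values taken from `paddle_resnet50.py` below).

```python
import cv2
import numpy as np

# assumes step 1 already exported "resnet50.onnx"; test image from the demo
net = cv2.dnn.readNetFromONNX("resnet50.onnx")
img = cv2.imread("./data/cat.jpg")
# ImageNet-style preprocessing: subtract mean, scale to [0, 1], then divide by std
blob = cv2.dnn.blobFromImage(img,
                             scalefactor=1.0 / 255.0,
                             size=(224, 224),
                             mean=np.array([0.485, 0.456, 0.406]) * 255.0,
                             swapRB=True,  # BGR -> RGB
                             crop=True)    # center crop
blob[0] /= np.asarray([0.229, 0.224, 0.225], dtype=np.float32).reshape(3, 1, 1)
net.setInput(blob)
out = net.forward()
print("Predicted class ID:", int(np.argmax(out)))
```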
## 2. Run PaddleSeg Portrait Segmentation using OpenCV
### Convert to ONNX Model
#### 1. Get Paddle Inference model
For more details, please refer to [PaddleSeg](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.1/contrib/HumanSeg/README.md).
```shell
wget https://x2paddle.bj.bcebos.com/inference/models/humanseg_hrnet18_small_v1.zip
unzip humanseg_hrnet18_small_v1.zip
```
Notes:
* The exported model must have a fixed input shape; dynamic input shapes are not supported at the moment.
#### 2. Convert to ONNX model using paddle2onnx
To convert the model, use the following command:
```shell
paddle2onnx --model_dir humanseg_hrnet18_small_v1 \
--model_filename model.pdmodel \
--params_filename model.pdiparams \
--opset_version 11 \
--save_file humanseg_hrnet18_tiny.onnx
```
The converted model can be found in the current directory under the name `humanseg_hrnet18_tiny.onnx`.
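As a quick sanity check (a sketch; it assumes the conversion above succeeded), you can confirm that OpenCV is able to read the converted model:

```python
import cv2

net = cv2.dnn.readNetFromONNX("humanseg_hrnet18_tiny.onnx")
print("Model loaded, {} layers".format(len(net.getLayerNames())))
```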
### Run PaddleSeg Portrait Segmentation demo
Run the code sample as follows:
```shell
python paddle_humanseg.py
```
There are three parts to the process:
1. Use `cv2.dnn.readNetFromONNX` to load the model file.
2. Preprocess the image file and run inference.
3. Postprocess and visualize the result.
The resulting file can be found at `data/result_test_human.jpg`.
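A condensed sketch of these steps (paths and the output blob name follow `paddle_humanseg.py` below; the full demo adds color visualization):

```python
import cv2
import numpy as np

net = cv2.dnn.readNetFromONNX("humanseg_hrnet18_tiny.onnx")
img = cv2.imread("../../../../data/messi5.jpg")
# the demo's Normalize(mean=0.5, std=0.5) is equivalent to (pixel - 127.5) / 127.5
blob = cv2.dnn.blobFromImage(img, scalefactor=1.0 / 127.5, size=(192, 192),
                             mean=(127.5, 127.5, 127.5), swapRB=True)
net.setInput(blob)
scores = net.forward(['save_infer_model/scale_0.tmp_1'])
# argmax over the class axis yields a per-pixel label mask
mask = np.argmax(scores[0], axis=1).astype(np.uint8)[0]
mask = cv2.resize(mask, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_NEAREST)
print("mask shape:", mask.shape)
```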
### Portrait segmentation visualization
<img src="../../../../data/messi5.jpg" width="50%" height="50%"><img src="./data/result_test_human.jpg" width="50%" height="50%">

(Binary image files added: 126 KiB and 61 KiB; one large file diff suppressed.)


@@ -0,0 +1,112 @@
import os
import paddlehub.vision.transforms as T
import numpy as np
import cv2 as cv
def get_color_map_list(num_classes):
"""
Returns the color map for visualizing the segmentation mask,
    which supports an arbitrary number of classes.
Args:
num_classes (int): Number of classes.
Returns:
(list). The color map.
"""
num_classes += 1
color_map = num_classes * [0, 0, 0]
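    # spread the bits of each label index across the R, G and B channels (the PASCAL VOC palette scheme)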
for i in range(0, num_classes):
j = 0
lab = i
while lab:
color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))
color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))
color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))
j += 1
lab >>= 3
color_map = color_map[3:]
return color_map
def visualize(image, result, save_dir=None, weight=0.6):
"""
    Convert the prediction result to a color image and save the blended image.
Args:
        image (str): The path of the original image.
        result (np.ndarray): The prediction result of the image.
        save_dir (str): The directory for saving the visualized image. Default: None.
        weight (float): The weight of the original image in the blend; the result weight is (1 - weight). Default: 0.6
Returns:
vis_result (np.ndarray): If `save_dir` is None, return the visualized result.
"""
color_map = get_color_map_list(256)
color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)]
color_map = np.array(color_map).astype("uint8")
# Use OpenCV LUT for color mapping
c1 = cv.LUT(result, color_map[:, 0])
c2 = cv.LUT(result, color_map[:, 1])
c3 = cv.LUT(result, color_map[:, 2])
pseudo_img = np.dstack((c1, c2, c3))
im = cv.imread(image)
vis_result = cv.addWeighted(im, weight, pseudo_img, 1 - weight, 0)
if save_dir is not None:
if not os.path.exists(save_dir):
os.makedirs(save_dir)
image_name = os.path.split(image)[-1]
out_path = os.path.join(save_dir, image_name)
cv.imwrite(out_path, vis_result)
else:
return vis_result
def preprocess(image_path):
''' preprocess input image file to np.ndarray
Args:
image_path(str): Path of input image file
Returns:
ProcessedImage(numpy.ndarray): A numpy.ndarray
        whose shape is (1, 3, 192, 192)
'''
transforms = T.Compose([
T.Resize((192, 192)),
T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
],
to_rgb=True)
return np.expand_dims(transforms(image_path), axis=0)
if __name__ == '__main__':
img_path = "../../../../data/messi5.jpg"
# load PPSeg Model use cv.dnn
net = cv.dnn.readNetFromONNX('humanseg_hrnet18_tiny.onnx')
# read and preprocess image file
im = preprocess(img_path)
# inference
net.setInput(im)
result = net.forward(['save_infer_model/scale_0.tmp_1'])
# post process
image = cv.imread(img_path)
r, c, _ = image.shape
result = np.argmax(result[0], axis=1).astype(np.uint8)
result = cv.resize(result[0, :, :],
dsize=(c, r),
interpolation=cv.INTER_NEAREST)
print("grid_image.shape is: ", result.shape)
folder_path = "data"
if not os.path.exists(folder_path):
os.makedirs(folder_path)
file_path = os.path.join(folder_path, '%s.jpg' % "result_test_human")
result_color = visualize(img_path, result)
cv.imwrite(file_path, result_color)
print('%s saved' % file_path)


@@ -0,0 +1,61 @@
import paddle
import paddlehub as hub
import paddlehub.vision.transforms as T
import cv2 as cv
import numpy as np
def preprocess(image_path):
''' preprocess input image file to np.ndarray
Args:
image_path(str): Path of input image file
Returns:
ProcessedImage(numpy.ndarray): A numpy.ndarray
        whose shape is (1, 3, 224, 224)
'''
transforms = T.Compose([
T.Resize((256, 256)),
T.CenterCrop(224),
T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])],
to_rgb=True)
return np.expand_dims(transforms(image_path), axis=0)
def export_onnx_resnet50(save_path):
''' export PaddlePaddle model to ONNX format
Args:
save_path(str): Path to save exported ONNX model
Returns:
None
'''
model = hub.Module(name="resnet50_vd_imagenet_ssld")
input_spec = paddle.static.InputSpec(
[1, 3, 224, 224], "float32", "image")
paddle.onnx.export(model, save_path,
input_spec=[input_spec],
opset_version=10)
if __name__ == '__main__':
save_path = './resnet50'
image_file = './data/cat.jpg'
labels = open('./data/labels.txt').read().strip().split('\n')
    export_onnx_resnet50(save_path)
# load resnet50 use cv.dnn
net = cv.dnn.readNetFromONNX(save_path + '.onnx')
# read and preprocess image file
im = preprocess(image_file)
# inference
net.setInput(im)
result = net.forward(['save_infer_model/scale_0.tmp_0'])
# post process
class_id = np.argmax(result[0])
label = labels[class_id]
print("Image: {}".format(image_file))
print("Predict Category: {}".format(label))


@@ -0,0 +1,71 @@
from torchvision import models
from ..pytorch_model import (
PyTorchModelPreparer,
PyTorchModelProcessor,
PyTorchDnnModelProcessor
)
from ...common.evaluation.classification.cls_data_fetcher import PyTorchPreprocessedFetch
from ...common.test.cls_model_test_pipeline import ClsModelTestPipeline
from ...common.test.configs.default_preprocess_config import pytorch_resize_input_blob
from ...common.test.configs.test_config import TestClsConfig
from ...common.utils import set_pytorch_env, create_extended_parser
model_dict = {
"alexnet": models.alexnet,
"vgg11": models.vgg11,
"vgg13": models.vgg13,
"vgg16": models.vgg16,
"vgg19": models.vgg19,
"resnet18": models.resnet18,
"resnet34": models.resnet34,
"resnet50": models.resnet50,
"resnet101": models.resnet101,
"resnet152": models.resnet152,
"squeezenet1_0": models.squeezenet1_0,
"squeezenet1_1": models.squeezenet1_1,
"resnext50_32x4d": models.resnext50_32x4d,
"resnext101_32x8d": models.resnext101_32x8d,
"wide_resnet50_2": models.wide_resnet50_2,
"wide_resnet101_2": models.wide_resnet101_2
}
class PyTorchClsModel(PyTorchModelPreparer):
def __init__(self, height, width, model_name, original_model):
super(PyTorchClsModel, self).__init__(height, width, model_name, original_model)
def main():
set_pytorch_env()
parser = create_extended_parser(list(model_dict.keys()))
cmd_args = parser.parse_args()
model_name = cmd_args.model_name
cls_model = PyTorchClsModel(
height=TestClsConfig().frame_size,
width=TestClsConfig().frame_size,
model_name=model_name,
original_model=model_dict[model_name](pretrained=True)
)
pytorch_cls_pipeline = ClsModelTestPipeline(
network_model=cls_model,
model_processor=PyTorchModelProcessor,
dnn_model_processor=PyTorchDnnModelProcessor,
data_fetcher=PyTorchPreprocessedFetch,
cls_args_parser=parser,
default_input_blob_preproc=pytorch_resize_input_blob
)
pytorch_cls_pipeline.init_test_pipeline()
if __name__ == "__main__":
main()


@@ -0,0 +1,139 @@
import os
import cv2
import numpy as np
import torch
import torch.onnx
from torch.autograd import Variable
from torchvision import models
def get_pytorch_onnx_model(original_model):
    # define the directory where the converted model will be saved
    onnx_model_path = "models"
    # define the file name of the converted model
    onnx_model_name = "resnet50.onnx"
    # create the directory for the converted model
os.makedirs(onnx_model_path, exist_ok=True)
# get full path to the converted model
full_model_path = os.path.join(onnx_model_path, onnx_model_name)
# generate model input
generated_input = Variable(
torch.randn(1, 3, 224, 224)
)
# model export into ONNX format
torch.onnx.export(
original_model,
generated_input,
full_model_path,
verbose=True,
input_names=["input"],
output_names=["output"],
opset_version=11
)
return full_model_path
def get_preprocessed_img(img_path):
# read the image
input_img = cv2.imread(img_path, cv2.IMREAD_COLOR)
input_img = input_img.astype(np.float32)
input_img = cv2.resize(input_img, (256, 256))
# define preprocess parameters
mean = np.array([0.485, 0.456, 0.406]) * 255.0
scale = 1 / 255.0
std = [0.229, 0.224, 0.225]
# prepare input blob to fit the model input:
# 1. subtract mean
# 2. scale to set pixel values from 0 to 1
input_blob = cv2.dnn.blobFromImage(
image=input_img,
scalefactor=scale,
size=(224, 224), # img target size
mean=mean,
swapRB=True, # BGR -> RGB
crop=True # center crop
)
# 3. divide by std
input_blob[0] /= np.asarray(std, dtype=np.float32).reshape(3, 1, 1)
return input_blob
def get_imagenet_labels(labels_path):
with open(labels_path) as f:
imagenet_labels = [line.strip() for line in f.readlines()]
return imagenet_labels
def get_opencv_dnn_prediction(opencv_net, preproc_img, imagenet_labels):
# set OpenCV DNN input
opencv_net.setInput(preproc_img)
# OpenCV DNN inference
out = opencv_net.forward()
print("OpenCV DNN prediction: \n")
print("* shape: ", out.shape)
# get the predicted class ID
imagenet_class_id = np.argmax(out)
# get confidence
confidence = out[0][imagenet_class_id]
print("* class ID: {}, label: {}".format(imagenet_class_id, imagenet_labels[imagenet_class_id]))
print("* confidence: {:.4f}".format(confidence))
def get_pytorch_dnn_prediction(original_net, preproc_img, imagenet_labels):
original_net.eval()
preproc_img = torch.FloatTensor(preproc_img)
# inference
with torch.no_grad():
out = original_net(preproc_img)
print("\nPyTorch model prediction: \n")
print("* shape: ", out.shape)
# get the predicted class ID
imagenet_class_id = torch.argmax(out, axis=1).item()
print("* class ID: {}, label: {}".format(imagenet_class_id, imagenet_labels[imagenet_class_id]))
# get confidence
confidence = out[0][imagenet_class_id]
print("* confidence: {:.4f}".format(confidence.item()))
def main():
# initialize PyTorch ResNet-50 model
original_model = models.resnet50(pretrained=True)
    # get the path to the PyTorch model converted to ONNX
full_model_path = get_pytorch_onnx_model(original_model)
# read converted .onnx model with OpenCV API
opencv_net = cv2.dnn.readNetFromONNX(full_model_path)
print("OpenCV model was successfully read. Layer IDs: \n", opencv_net.getLayerNames())
# get preprocessed image
input_img = get_preprocessed_img("../data/squirrel_cls.jpg")
# get ImageNet labels
imagenet_labels = get_imagenet_labels("../data/dnn/classification_classes_ILSVRC2012.txt")
# obtain OpenCV DNN predictions
get_opencv_dnn_prediction(opencv_net, input_img, imagenet_labels)
# obtain original PyTorch ResNet50 predictions
get_pytorch_dnn_prediction(original_model, input_img, imagenet_labels)
if __name__ == "__main__":
main()


@@ -0,0 +1,50 @@
import os
import torch
import torch.onnx
from torch.autograd import Variable
from torchvision import models
def get_pytorch_onnx_model(original_model):
    # define the directory where the converted model will be saved
    onnx_model_path = "models"
    # define the file name of the converted model
    onnx_model_name = "resnet50.onnx"
    # create the directory for the converted model
os.makedirs(onnx_model_path, exist_ok=True)
# get full path to the converted model
full_model_path = os.path.join(onnx_model_path, onnx_model_name)
# generate model input
generated_input = Variable(
torch.randn(1, 3, 224, 224)
)
# model export into ONNX format
torch.onnx.export(
original_model,
generated_input,
full_model_path,
verbose=True,
input_names=["input"],
output_names=["output"],
opset_version=11
)
return full_model_path
def main():
# initialize PyTorch ResNet-50 model
original_model = models.resnet50(pretrained=True)
    # get the path to the PyTorch model converted to ONNX
full_model_path = get_pytorch_onnx_model(original_model)
print("PyTorch ResNet-50 model was successfully converted: ", full_model_path)
if __name__ == "__main__":
main()


@@ -0,0 +1,98 @@
import os
import cv2
import torch.onnx
from torch.autograd import Variable
from ..common.abstract_model import AbstractModel, Framework
from ..common.utils import DNN_LIB, get_full_model_path
CURRENT_LIB = "PyTorch"
MODEL_FORMAT = ".onnx"
class PyTorchModelPreparer(AbstractModel):
def __init__(
self,
height,
width,
model_name="default",
original_model=object,
batch_size=1,
default_input_name="input",
default_output_name="output"
):
self._height = height
self._width = width
self._model_name = model_name
self._original_model = original_model
self._batch_size = batch_size
self._default_input_name = default_input_name
self._default_output_name = default_output_name
self.model_path = self._set_model_path()
self._dnn_model = self._set_dnn_model()
def _set_dnn_model(self):
generated_input = Variable(torch.randn(
self._batch_size, 3, self._height, self._width)
)
os.makedirs(self.model_path["path"], exist_ok=True)
torch.onnx.export(
self._original_model,
generated_input,
self.model_path["full_path"],
verbose=True,
input_names=[self._default_input_name],
output_names=[self._default_output_name],
opset_version=11
)
return cv2.dnn.readNetFromONNX(self.model_path["full_path"])
def _set_model_path(self):
model_to_save = self._model_name + MODEL_FORMAT
return get_full_model_path(CURRENT_LIB.lower(), model_to_save)
def get_prepared_models(self):
return {
CURRENT_LIB + " " + self._model_name: self._original_model,
DNN_LIB + " " + self._model_name: self._dnn_model
}
class PyTorchModelProcessor(Framework):
def __init__(self, prepared_model, model_name):
self._prepared_model = prepared_model
self._name = model_name
def get_output(self, input_blob):
tensor = torch.FloatTensor(input_blob)
self._prepared_model.eval()
with torch.no_grad():
model_out = self._prepared_model(tensor)
# segmentation case
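        # (torchvision segmentation models return an OrderedDict with 'out' and 'aux' entries)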
if len(model_out) == 2:
model_out = model_out['out']
out = model_out.detach().numpy()
return out
def get_name(self):
return self._name
class PyTorchDnnModelProcessor(Framework):
def __init__(self, prepared_dnn_model, model_name):
self._prepared_dnn_model = prepared_dnn_model
self._name = model_name
def get_output(self, input_blob):
self._prepared_dnn_model.setInput(input_blob, '')
return self._prepared_dnn_model.forward()
def get_name(self):
return self._name


@@ -0,0 +1,15 @@
# Python 3.7.5
onnx>=1.7.0
numpy>=1.19.1
torch>=1.5.1
torchvision>=0.6.1
tensorflow>=2.1.0
tensorflow-gpu>=2.1.0
paddlepaddle>=2.0.0
paddlepaddle-gpu>=2.0.0
paddlehub>=2.1.0
paddle2onnx>=0.5.1
paddleseg>=2.0.0


@@ -0,0 +1,104 @@
from tensorflow.keras.applications import (
VGG16, vgg16,
VGG19, vgg19,
ResNet50, resnet,
ResNet101,
ResNet152,
DenseNet121, densenet,
DenseNet169,
DenseNet201,
InceptionResNetV2, inception_resnet_v2,
InceptionV3, inception_v3,
MobileNet, mobilenet,
MobileNetV2, mobilenet_v2,
NASNetLarge, nasnet,
NASNetMobile,
Xception, xception
)
from ..tf_model import TFModelPreparer
from ..tf_model import (
TFModelProcessor,
TFDnnModelProcessor
)
from ...common.evaluation.classification.cls_data_fetcher import TFPreprocessedFetch
from ...common.test.cls_model_test_pipeline import ClsModelTestPipeline
from ...common.test.configs.default_preprocess_config import (
tf_input_blob,
pytorch_input_blob,
tf_model_blob_caffe_mode
)
from ...common.utils import set_tf_env, create_extended_parser
model_dict = {
"vgg16": [VGG16, vgg16, tf_model_blob_caffe_mode],
"vgg19": [VGG19, vgg19, tf_model_blob_caffe_mode],
"resnet50": [ResNet50, resnet, tf_model_blob_caffe_mode],
"resnet101": [ResNet101, resnet, tf_model_blob_caffe_mode],
"resnet152": [ResNet152, resnet, tf_model_blob_caffe_mode],
"densenet121": [DenseNet121, densenet, pytorch_input_blob],
"densenet169": [DenseNet169, densenet, pytorch_input_blob],
"densenet201": [DenseNet201, densenet, pytorch_input_blob],
"inceptionresnetv2": [InceptionResNetV2, inception_resnet_v2, tf_input_blob],
"inceptionv3": [InceptionV3, inception_v3, tf_input_blob],
"mobilenet": [MobileNet, mobilenet, tf_input_blob],
"mobilenetv2": [MobileNetV2, mobilenet_v2, tf_input_blob],
"nasnetlarge": [NASNetLarge, nasnet, tf_input_blob],
"nasnetmobile": [NASNetMobile, nasnet, tf_input_blob],
"xception": [Xception, xception, tf_input_blob]
}
CNN_CLASS_ID = 0
CNN_UTILS_ID = 1
DEFAULT_BLOB_PARAMS_ID = 2
class TFClsModel(TFModelPreparer):
def __init__(self, model_name, original_model):
super(TFClsModel, self).__init__(model_name, original_model)
def main():
set_tf_env()
parser = create_extended_parser(list(model_dict.keys()))
cmd_args = parser.parse_args()
model_name = cmd_args.model_name
model_name_val = model_dict[model_name]
cls_model = TFClsModel(
model_name=model_name,
original_model=model_name_val[CNN_CLASS_ID](
include_top=True,
weights="imagenet"
)
)
tf_cls_pipeline = ClsModelTestPipeline(
network_model=cls_model,
model_processor=TFModelProcessor,
dnn_model_processor=TFDnnModelProcessor,
data_fetcher=TFPreprocessedFetch,
img_processor=model_name_val[CNN_UTILS_ID].preprocess_input,
cls_args_parser=parser,
default_input_blob_preproc=model_name_val[DEFAULT_BLOB_PARAMS_ID]
)
tf_cls_pipeline.init_test_pipeline()
if __name__ == "__main__":
main()


@@ -0,0 +1,142 @@
import os
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications import MobileNet
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from ...common.utils import set_tf_env
def get_tf_model_proto(tf_model):
# define the directory for .pb model
pb_model_path = "models"
# define the name of .pb model
pb_model_name = "mobilenet.pb"
# create directory for further converted model
os.makedirs(pb_model_path, exist_ok=True)
# get model TF graph
tf_model_graph = tf.function(lambda x: tf_model(x))
# get concrete function
tf_model_graph = tf_model_graph.get_concrete_function(
tf.TensorSpec(tf_model.inputs[0].shape, tf_model.inputs[0].dtype))
# obtain frozen concrete function
frozen_tf_func = convert_variables_to_constants_v2(tf_model_graph)
# get frozen graph
frozen_tf_func.graph.as_graph_def()
# save full tf model
tf.io.write_graph(graph_or_graph_def=frozen_tf_func.graph,
logdir=pb_model_path,
name=pb_model_name,
as_text=False)
return os.path.join(pb_model_path, pb_model_name)
def get_preprocessed_img(img_path):
# read the image
input_img = cv2.imread(img_path, cv2.IMREAD_COLOR)
input_img = input_img.astype(np.float32)
# define preprocess parameters
mean = np.array([1.0, 1.0, 1.0]) * 127.5
scale = 1 / 127.5
# prepare input blob to fit the model input:
# 1. subtract mean
# 2. scale to set pixel values from 0 to 1
input_blob = cv2.dnn.blobFromImage(
image=input_img,
scalefactor=scale,
size=(224, 224), # img target size
mean=mean,
swapRB=True, # BGR -> RGB
crop=True # center crop
)
print("Input blob shape: {}\n".format(input_blob.shape))
return input_blob
def get_imagenet_labels(labels_path):
with open(labels_path) as f:
imagenet_labels = [line.strip() for line in f.readlines()]
return imagenet_labels
def get_opencv_dnn_prediction(opencv_net, preproc_img, imagenet_labels):
# set OpenCV DNN input
opencv_net.setInput(preproc_img)
# OpenCV DNN inference
out = opencv_net.forward()
print("OpenCV DNN prediction: \n")
print("* shape: ", out.shape)
# get the predicted class ID
imagenet_class_id = np.argmax(out)
# get confidence
confidence = out[0][imagenet_class_id]
print("* class ID: {}, label: {}".format(imagenet_class_id, imagenet_labels[imagenet_class_id]))
print("* confidence: {:.4f}\n".format(confidence))
def get_tf_dnn_prediction(original_net, preproc_img, imagenet_labels):
# inference
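    # OpenCV blobs are NCHW; the TF model expects NHWC, hence the transpose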
preproc_img = preproc_img.transpose(0, 2, 3, 1)
print("TF input blob shape: {}\n".format(preproc_img.shape))
out = original_net(preproc_img)
print("\nTensorFlow model prediction: \n")
print("* shape: ", out.shape)
# get the predicted class ID
imagenet_class_id = np.argmax(out)
print("* class ID: {}, label: {}".format(imagenet_class_id, imagenet_labels[imagenet_class_id]))
# get confidence
confidence = out[0][imagenet_class_id]
print("* confidence: {:.4f}".format(confidence))
def main():
# configure TF launching
set_tf_env()
# initialize TF MobileNet model
original_tf_model = MobileNet(
include_top=True,
weights="imagenet"
)
# get TF frozen graph path
full_pb_path = get_tf_model_proto(original_tf_model)
# read frozen graph with OpenCV API
opencv_net = cv2.dnn.readNetFromTensorflow(full_pb_path)
print("OpenCV model was successfully read. Model layers: \n", opencv_net.getLayerNames())
# get preprocessed image
input_img = get_preprocessed_img("../data/squirrel_cls.jpg")
# get ImageNet labels
imagenet_labels = get_imagenet_labels("../data/dnn/classification_classes_ILSVRC2012.txt")
# obtain OpenCV DNN predictions
get_opencv_dnn_prediction(opencv_net, input_img, imagenet_labels)
# obtain TF model predictions
get_tf_dnn_prediction(original_tf_model, input_img, imagenet_labels)
if __name__ == "__main__":
main()


@@ -0,0 +1,45 @@
import os
import tarfile
import urllib.request
DETECTION_MODELS_URL = 'http://download.tensorflow.org/models/object_detection/'
def extract_tf_frozen_graph(model_name, extracted_model_path):
# define model archive name
tf_model_tar = model_name + '.tar.gz'
# define link to retrieve model archive
model_link = DETECTION_MODELS_URL + tf_model_tar
tf_frozen_graph_name = 'frozen_inference_graph'
try:
urllib.request.urlretrieve(model_link, tf_model_tar)
except Exception:
print("TF {} was not retrieved: {}".format(model_name, model_link))
return
print("TF {} was retrieved.".format(model_name))
tf_model_tar = tarfile.open(tf_model_tar)
frozen_graph_path = ""
for model_tar_elem in tf_model_tar.getmembers():
if tf_frozen_graph_name in os.path.basename(model_tar_elem.name):
tf_model_tar.extract(model_tar_elem, extracted_model_path)
frozen_graph_path = os.path.join(extracted_model_path, model_tar_elem.name)
break
tf_model_tar.close()
return frozen_graph_path
def main():
tf_model_name = 'ssd_mobilenet_v1_coco_2017_11_17'
graph_extraction_dir = "./"
frozen_graph_path = extract_tf_frozen_graph(tf_model_name, graph_extraction_dir)
print("Frozen graph path for {}: {}".format(tf_model_name, frozen_graph_path))
if __name__ == "__main__":
main()


@@ -0,0 +1,112 @@
import cv2
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from ..common.abstract_model import AbstractModel, Framework
from ..common.utils import DNN_LIB, get_full_model_path
CURRENT_LIB = "TF"
MODEL_FORMAT = ".pb"
class TFModelPreparer(AbstractModel):
""" Class for the preparation of the TF models: original and converted OpenCV Net.
Args:
model_name: TF model name
original_model: TF configured model object or session
is_ready_graph: indicates whether ready .pb file already exists
tf_model_graph_path: path to the existing frozen TF graph
"""
def __init__(
self,
model_name="default",
original_model=None,
is_ready_graph=False,
tf_model_graph_path=""
):
self._model_name = model_name
self._original_model = original_model
self._model_to_save = ""
self._is_ready_to_transfer_graph = is_ready_graph
self.model_path = self._set_model_path(tf_model_graph_path)
self._dnn_model = self._set_dnn_model()
def _set_dnn_model(self):
if not self._is_ready_to_transfer_graph:
# get model TF graph
tf_model_graph = tf.function(lambda x: self._original_model(x))
tf_model_graph = tf_model_graph.get_concrete_function(
tf.TensorSpec(self._original_model.inputs[0].shape, self._original_model.inputs[0].dtype))
# obtain frozen concrete function
frozen_tf_func = convert_variables_to_constants_v2(tf_model_graph)
frozen_tf_func.graph.as_graph_def()
# save full TF model
tf.io.write_graph(graph_or_graph_def=frozen_tf_func.graph,
logdir=self.model_path["path"],
name=self._model_to_save,
as_text=False)
return cv2.dnn.readNetFromTensorflow(self.model_path["full_path"])
def _set_model_path(self, tf_pb_file_path):
""" Method for setting model paths.
Args:
tf_pb_file_path: path to the existing TF .pb
Returns:
dictionary, where full_path key means saved model path and its full name.
"""
model_paths_dict = {
"path": "",
"full_path": tf_pb_file_path
}
if not self._is_ready_to_transfer_graph:
self._model_to_save = self._model_name + MODEL_FORMAT
model_paths_dict = get_full_model_path(CURRENT_LIB.lower(), self._model_to_save)
return model_paths_dict
def get_prepared_models(self):
original_lib_name = CURRENT_LIB + " " + self._model_name
configured_model_dict = {
original_lib_name: self._original_model,
DNN_LIB + " " + self._model_name: self._dnn_model
}
return configured_model_dict
class TFModelProcessor(Framework):
def __init__(self, prepared_model, model_name):
self._prepared_model = prepared_model
self._name = model_name
def get_output(self, input_blob):
assert len(input_blob.shape) == 4
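        # NCHW (OpenCV blob) -> NHWC (TensorFlow) layout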
batch_tf = input_blob.transpose(0, 2, 3, 1)
out = self._prepared_model(batch_tf)
return out
def get_name(self):
return CURRENT_LIB
class TFDnnModelProcessor(Framework):
def __init__(self, prepared_dnn_model, model_name):
self._prepared_dnn_model = prepared_dnn_model
self._name = model_name
def get_output(self, input_blob):
self._prepared_dnn_model.setInput(input_blob)
ret_val = self._prepared_dnn_model.forward()
return ret_val
def get_name(self):
return DNN_LIB