// This file is written based on the following file:
// https://github.com/Tencent/ncnn/blob/master/examples/yolov5.cpp
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// ------------------------------------------------------------------------------
// Copyright (C) 2020-2021, Megvii Inc. All rights reserved.

#include "layer.h"
#include "net.h"

#if defined(USE_NCNN_SIMPLEOCV)
#include "simpleocv.h"
#else
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#endif
#include <algorithm> // std::max / std::min
#include <float.h>
#include <stdio.h>
#include <vector>

#define YOLOX_NMS_THRESH  0.45 // nms threshold
#define YOLOX_CONF_THRESH 0.25 // threshold of bounding box prob
#define YOLOX_TARGET_SIZE 640  // target image size after resize, might use 416 for small model

// YOLOX uses the same Focus layer as yolov5
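// The Focus slice is a space-to-depth rearrangement: every 2x2 pixel block is
// split across four channels, so a w x h x c blob becomes (w/2) x (h/2) x 4c.
// For example (illustrative numbers), a 640x640x3 input becomes 320x320x12
// before the first convolution.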
class YoloV5Focus : public ncnn::Layer
{
public:
    YoloV5Focus()
    {
        one_blob_only = true;
    }

    virtual int forward(const ncnn::Mat& bottom_blob, ncnn::Mat& top_blob, const ncnn::Option& opt) const
    {
        int w = bottom_blob.w;
        int h = bottom_blob.h;
        int channels = bottom_blob.c;

        int outw = w / 2;
        int outh = h / 2;
        int outc = channels * 4;

        top_blob.create(outw, outh, outc, 4u, 1, opt.blob_allocator);
        if (top_blob.empty())
            return -100;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outc; p++)
        {
            const float* ptr = bottom_blob.channel(p % channels).row((p / channels) % 2) + ((p / channels) / 2);
            float* outptr = top_blob.channel(p);

            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    *outptr = *ptr;

                    outptr += 1;
                    ptr += 2;
                }

                ptr += w;
            }
        }

        return 0;
    }
};

DEFINE_LAYER_CREATOR(YoloV5Focus)

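// DEFINE_LAYER_CREATOR(YoloV5Focus) is an ncnn macro that generates the
// YoloV5Focus_layer_creator function; detect_yolox() below passes it to
// Net::register_custom_layer() so the network can instantiate this custom
// layer when the param file references it.
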
struct Object
{
    cv::Rect_<float> rect;
    int label;
    float prob;
};

struct GridAndStride
{
    int grid0;
    int grid1;
    int stride;
};

static inline float intersection_area(const Object& a, const Object& b)
{
    cv::Rect_<float> inter = a.rect & b.rect;
    return inter.area();
}

static void qsort_descent_inplace(std::vector<Object>& faceobjects, int left, int right)
{
    int i = left;
    int j = right;
    float p = faceobjects[(left + right) / 2].prob;

    while (i <= j)
    {
        while (faceobjects[i].prob > p)
            i++;

        while (faceobjects[j].prob < p)
            j--;

        if (i <= j)
        {
            // swap
            std::swap(faceobjects[i], faceobjects[j]);

            i++;
            j--;
        }
    }

    #pragma omp parallel sections
    {
        #pragma omp section
        {
            if (left < j) qsort_descent_inplace(faceobjects, left, j);
        }
        #pragma omp section
        {
            if (i < right) qsort_descent_inplace(faceobjects, i, right);
        }
    }
}

static void qsort_descent_inplace(std::vector<Object>& objects)
{
    if (objects.empty())
        return;

    qsort_descent_inplace(objects, 0, objects.size() - 1);
}

static void nms_sorted_bboxes(const std::vector<Object>& faceobjects, std::vector<int>& picked, float nms_threshold)
{
    picked.clear();

    const int n = faceobjects.size();

    std::vector<float> areas(n);
    for (int i = 0; i < n; i++)
    {
        areas[i] = faceobjects[i].rect.area();
    }

    for (int i = 0; i < n; i++)
    {
        const Object& a = faceobjects[i];

        int keep = 1;
        for (int j = 0; j < (int)picked.size(); j++)
        {
            const Object& b = faceobjects[picked[j]];

            // intersection over union
            float inter_area = intersection_area(a, b);
            float union_area = areas[i] + areas[picked[j]] - inter_area;
            // float IoU = inter_area / union_area
            if (inter_area / union_area > nms_threshold)
                keep = 0;
        }

        if (keep)
            picked.push_back(i);
    }
}

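// Greedy NMS over boxes already sorted by score: a candidate is kept only if
// its IoU with every previously kept box stays at or below nms_threshold.
// Worked example (illustrative numbers): boxes [0,0,100x100] and [50,0,100x100]
// overlap in a 50x100 strip, so IoU = 5000 / (10000 + 10000 - 5000) = 1/3,
// which is below the 0.45 threshold, and both boxes are kept.
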
static void generate_grids_and_stride(const int target_size, std::vector<int>& strides, std::vector<GridAndStride>& grid_strides)
{
    for (int i = 0; i < (int)strides.size(); i++)
    {
        int stride = strides[i];
        int num_grid = target_size / stride;
        for (int g1 = 0; g1 < num_grid; g1++)
        {
            for (int g0 = 0; g0 < num_grid; g0++)
            {
                GridAndStride gs;
                gs.grid0 = g0;
                gs.grid1 = g1;
                gs.stride = stride;
                grid_strides.push_back(gs);
            }
        }
    }
}

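// With YOLOX_TARGET_SIZE = 640 and strides {8, 16, 32} this produces
// 80x80 + 40x40 + 20x20 = 8400 grid cells, one per row of the detection
// output decoded in generate_yolox_proposals() below.
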
static void generate_yolox_proposals(std::vector<GridAndStride> grid_strides, const ncnn::Mat& feat_blob, float prob_threshold, std::vector<Object>& objects)
{
    const int num_grid = feat_blob.h;
    const int num_class = feat_blob.w - 5;
    const int num_anchors = grid_strides.size();

    const float* feat_ptr = feat_blob.channel(0);
    for (int anchor_idx = 0; anchor_idx < num_anchors; anchor_idx++)
    {
        const int grid0 = grid_strides[anchor_idx].grid0;
        const int grid1 = grid_strides[anchor_idx].grid1;
        const int stride = grid_strides[anchor_idx].stride;

        // yolox/models/yolo_head.py decode logic
        // outputs[..., :2] = (outputs[..., :2] + grids) * strides
        // outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * strides
        float x_center = (feat_ptr[0] + grid0) * stride;
        float y_center = (feat_ptr[1] + grid1) * stride;
        float w = exp(feat_ptr[2]) * stride;
        float h = exp(feat_ptr[3]) * stride;
        float x0 = x_center - w * 0.5f;
        float y0 = y_center - h * 0.5f;

        float box_objectness = feat_ptr[4];
        for (int class_idx = 0; class_idx < num_class; class_idx++)
        {
            float box_cls_score = feat_ptr[5 + class_idx];
            float box_prob = box_objectness * box_cls_score;
            if (box_prob > prob_threshold)
            {
                Object obj;
                obj.rect.x = x0;
                obj.rect.y = y0;
                obj.rect.width = w;
                obj.rect.height = h;
                obj.label = class_idx;
                obj.prob = box_prob;

                objects.push_back(obj);
            }

        } // class loop
        feat_ptr += feat_blob.w;

    } // point anchor loop
}

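// Each row of feat_blob holds [cx, cy, w, h, objectness, class scores...],
// so feat_blob.w = 5 + num_class. Worked decode example (illustrative raw
// values): at grid (2, 3) with stride 32, raw outputs (0.5, -0.25, 0.1, 0.2)
// give x_center = (0.5 + 2) * 32 = 80, y_center = (-0.25 + 3) * 32 = 88,
// w = exp(0.1) * 32 ~= 35.4 and h = exp(0.2) * 32 ~= 39.1.
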
static int detect_yolox(const cv::Mat& bgr, std::vector<Object>& objects)
{
    ncnn::Net yolox;

    yolox.opt.use_vulkan_compute = true;
    // yolox.opt.use_bf16_storage = true;

    // Focus in yolov5
    yolox.register_custom_layer("YoloV5Focus", YoloV5Focus_layer_creator);

    // original pretrained model from https://github.com/Megvii-BaseDetection/YOLOX
    // ncnn model param: https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_s_ncnn.tar.gz
    // NOTE that the newest version of YOLOX removes the normalization step (subtract mean, then divide by std)
    // from the model, which might turn your model outputs into a total mess, so please check carefully.
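    // If detections look like garbage, verify whether your converted model expects
    // normalized input; this sample feeds raw 0-255 BGR pixels and never calls
    // substract_mean_normalize on the input Mat.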
    yolox.load_param("yolox.param");
    yolox.load_model("yolox.bin");

    int img_w = bgr.cols;
    int img_h = bgr.rows;

    int w = img_w;
    int h = img_h;
    float scale = 1.f;
    if (w > h)
    {
        scale = (float)YOLOX_TARGET_SIZE / w;
        w = YOLOX_TARGET_SIZE;
        h = h * scale;
    }
    else
    {
        scale = (float)YOLOX_TARGET_SIZE / h;
        h = YOLOX_TARGET_SIZE;
        w = w * scale;
    }
    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR, img_w, img_h, w, h);

    // pad to YOLOX_TARGET_SIZE rectangle
    int wpad = YOLOX_TARGET_SIZE - w;
    int hpad = YOLOX_TARGET_SIZE - h;
    ncnn::Mat in_pad;
    // different from yolov5, yolox only pads on the bottom and right side,
    // which means users don't need extra padding info to decode the box coordinates.
    ncnn::copy_make_border(in, in_pad, 0, hpad, 0, wpad, ncnn::BORDER_CONSTANT, 114.f);

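    // Worked example (illustrative numbers): a 1920x1080 input gives
    // scale = 640 / 1920 = 1/3, so the resized image is 640x360 and
    // copy_make_border appends hpad = 280 rows of value 114 at the bottom
    // (wpad = 0).
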
    ncnn::Extractor ex = yolox.create_extractor();

    ex.input("images", in_pad);

    std::vector<Object> proposals;

    {
        ncnn::Mat out;
        ex.extract("output", out);

        static const int stride_arr[] = {8, 16, 32}; // might have stride=64 in YOLOX
        std::vector<int> strides(stride_arr, stride_arr + sizeof(stride_arr) / sizeof(stride_arr[0]));
        std::vector<GridAndStride> grid_strides;
        generate_grids_and_stride(YOLOX_TARGET_SIZE, strides, grid_strides);
        generate_yolox_proposals(grid_strides, out, YOLOX_CONF_THRESH, proposals);
    }

    // sort all proposals by score from highest to lowest
    qsort_descent_inplace(proposals);

    // apply nms with nms_threshold
    std::vector<int> picked;
    nms_sorted_bboxes(proposals, picked, YOLOX_NMS_THRESH);

    int count = picked.size();

    objects.resize(count);
    for (int i = 0; i < count; i++)
    {
        objects[i] = proposals[picked[i]];

        // adjust offset to the original unpadded image
        float x0 = (objects[i].rect.x) / scale;
        float y0 = (objects[i].rect.y) / scale;
        float x1 = (objects[i].rect.x + objects[i].rect.width) / scale;
        float y1 = (objects[i].rect.y + objects[i].rect.height) / scale;

        // clip
        x0 = std::max(std::min(x0, (float)(img_w - 1)), 0.f);
        y0 = std::max(std::min(y0, (float)(img_h - 1)), 0.f);
        x1 = std::max(std::min(x1, (float)(img_w - 1)), 0.f);
        y1 = std::max(std::min(y1, (float)(img_h - 1)), 0.f);

        objects[i].rect.x = x0;
        objects[i].rect.y = y0;
        objects[i].rect.width = x1 - x0;
        objects[i].rect.height = y1 - y0;
    }

    return 0;
}

static void draw_objects(const cv::Mat& bgr, const std::vector<Object>& objects)
{
    static const char* class_names[] = {
        "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
        "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
        "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
        "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
        "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
        "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
        "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
        "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
        "hair drier", "toothbrush"
    };

    cv::Mat image = bgr.clone();

    for (size_t i = 0; i < objects.size(); i++)
    {
        const Object& obj = objects[i];

        fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob,
                obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height);

        cv::rectangle(image, obj.rect, cv::Scalar(255, 0, 0));

        char text[256];
        sprintf(text, "%s %.1f%%", class_names[obj.label], obj.prob * 100);

        int baseLine = 0;
        cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);

        int x = obj.rect.x;
        int y = obj.rect.y - label_size.height - baseLine;
        if (y < 0)
            y = 0;
        if (x + label_size.width > image.cols)
            x = image.cols - label_size.width;

        cv::rectangle(image, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)),
                      cv::Scalar(255, 255, 255), -1);

        cv::putText(image, text, cv::Point(x, y + label_size.height),
                    cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
    }

    cv::imshow("image", image);
    cv::waitKey(0);
}

int main(int argc, char** argv)
{
    if (argc != 2)
    {
        fprintf(stderr, "Usage: %s [imagepath]\n", argv[0]);
        return -1;
    }

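    // Example invocation, assuming the demo is built as an executable named
    // "yolox" and that yolox.param / yolox.bin sit in the working directory
    // (the relative paths used by load_param/load_model above):
    //     ./yolox image.jpg
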
    const char* imagepath = argv[1];

    cv::Mat m = cv::imread(imagepath, 1);
    if (m.empty())
    {
        fprintf(stderr, "cv::imread %s failed\n", imagepath);
        return -1;
    }

    std::vector<Object> objects;
    detect_yolox(m, objects);

    draw_objects(m, objects);

    return 0;
}