feat: switch backend to PaddleOCR-NCNN, switch project to CMake

1. The project backend has been fully migrated to the PaddleOCR-NCNN algorithm and has passed basic compatibility tests (see the ncnn sketch below)
2. The project is now organized with CMake; to better accommodate third-party libraries going forward, a QMake project is no longer provided
3. Rework the copyright/notice files and restructure the code tree to minimize the risk of license infringement
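
A minimal sketch of the plain ncnn inference flow the new PaddleOCR-NCNN backend builds on; the file and blob names ("det.param", "det.bin", "input", "output") are placeholders, not this project's actual model artifacts:

    #include <net.h>                    // ncnn (header location depends on the install)
    #include <opencv2/imgcodecs.hpp>

    int main()
    {
        ncnn::Net net;
        net.load_param("det.param");    // placeholder: network structure
        net.load_model("det.bin");      // placeholder: network weights

        cv::Mat bgr = cv::imread("input.jpg");
        if (bgr.empty()) return 1;
        ncnn::Mat in = ncnn::Mat::from_pixels(bgr.data, ncnn::Mat::PIXEL_BGR,
                                              bgr.cols, bgr.rows);

        ncnn::Extractor ex = net.create_extractor();
        ex.input("input", in);          // placeholder input blob name
        ncnn::Mat out;
        ex.extract("output", out);      // placeholder output blob name; runs inference
        return 0;
    }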

Log: switch backend to PaddleOCR-NCNN, switch project to CMake
Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
Author: wangzhengyang
Date:   2022-05-10 09:54:44 +08:00
Parent: ecdd171c6f
Commit: 718c41634f
10018 changed files with 3593797 additions and 186748 deletions


@@ -0,0 +1,410 @@
#include "opencv2/opencv_modules.hpp"
#if defined(HAVE_OPENCV_GAPI)
#include <chrono>
#include <iomanip>
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/gapi.hpp"
#include "opencv2/gapi/core.hpp"
#include "opencv2/gapi/imgproc.hpp"
#include "opencv2/gapi/infer.hpp"
#include "opencv2/gapi/infer/ie.hpp"
#include "opencv2/gapi/cpu/gcpukernel.hpp"
#include "opencv2/gapi/streaming/cap.hpp"
namespace {
const std::string about =
"This is an OpenCV-based version of Security Barrier Camera example";
const std::string keys =
"{ h help | | print this help message }"
"{ input | | Path to an input video file }"
"{ fdm | | IE face detection model IR }"
"{ fdw | | IE face detection model weights }"
"{ fdd | | IE face detection device }"
"{ agem | | IE age/gender recognition model IR }"
"{ agew | | IE age/gender recognition model weights }"
"{ aged | | IE age/gender recognition model device }"
"{ emom | | IE emotions recognition model IR }"
"{ emow | | IE emotions recognition model weights }"
"{ emod | | IE emotions recognition model device }"
"{ pure | | When set, no output is displayed. Useful for benchmarking }"
"{ ser | | Run serially (no pipelining involved). Useful for benchmarking }";
struct Avg {
struct Elapsed {
explicit Elapsed(double ms) : ss(ms/1000.), mm(static_cast<int>(ss)/60) {}
const double ss;
const int mm;
};
using MS = std::chrono::duration<double, std::ratio<1, 1000>>;
using TS = std::chrono::time_point<std::chrono::high_resolution_clock>;
TS started;
void start() { started = now(); }
TS now() const { return std::chrono::high_resolution_clock::now(); }
double tick() const { return std::chrono::duration_cast<MS>(now() - started).count(); }
Elapsed elapsed() const { return Elapsed{tick()}; }
double fps(std::size_t n) const { return static_cast<double>(n) / (tick() / 1000.); }
};
std::ostream& operator<<(std::ostream &os, const Avg::Elapsed &e) {
os << e.mm << ':' << (e.ss - 60*e.mm);
return os;
}
} // namespace
namespace custom {
// Describe networks we use in our program.
// In G-API, topologies act like "operations". Here we define our
// topologies as operations which have inputs and outputs.
// Every network requires three parameters to define:
// 1) Network's TYPE name - this TYPE is then used as a template
// parameter to generic functions like cv::gapi::infer<>(),
// and is used to define network's configuration (per-backend).
// 2) Network's SIGNATURE - a std::function<>-like record which defines
// networks' input and output parameters (its API)
// 3) Network's IDENTIFIER - a string defining what the network is.
// Must be unique within the pipeline.
// Note: these definitions are neutral to _how_ the networks are
// executed. The _how_ is defined at graph compilation stage (via parameters),
// not at the graph construction stage.
//! [G_API_NET]
// Face detector: takes one Mat, returns another Mat
G_API_NET(Faces, <cv::GMat(cv::GMat)>, "face-detector");
// Age/Gender recognition - takes one Mat, returns two:
// one for Age and one for Gender. In G-API, multiple-return-value operations
// are defined using std::tuple<>.
using AGInfo = std::tuple<cv::GMat, cv::GMat>;
G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "age-gender-recoginition");
// Emotion recognition - takes one Mat, returns another.
G_API_NET(Emotions, <cv::GMat(cv::GMat)>, "emotions-recognition");
//! [G_API_NET]
//! [Postproc]
// SSD Post-processing function - this is not a network but a kernel.
// The kernel body is declared separately, this is just an interface.
// This operation takes two Mats (detections and the source image),
// and returns a vector of ROI (filtered by a default threshold).
// Threshold (or a class to select) may become a parameter, but since
// this kernel is custom, it doesn't make a lot of sense.
G_API_OP(PostProc, <cv::GArray<cv::Rect>(cv::GMat, cv::GMat)>, "custom.fd_postproc") {
static cv::GArrayDesc outMeta(const cv::GMatDesc &, const cv::GMatDesc &) {
// This function is required for G-API engine to figure out
// what the output format is, given the input parameters.
// Since the output is an array (with a specific type),
// there's nothing to describe.
return cv::empty_array_desc();
}
};
// OpenCV-based implementation of the above kernel.
GAPI_OCV_KERNEL(OCVPostProc, PostProc) {
static void run(const cv::Mat &in_ssd_result,
const cv::Mat &in_frame,
std::vector<cv::Rect> &out_faces) {
const int MAX_PROPOSALS = 200;
const int OBJECT_SIZE = 7;
const cv::Size upscale = in_frame.size();
const cv::Rect surface({0,0}, upscale);
out_faces.clear();
const float *data = in_ssd_result.ptr<float>();
for (int i = 0; i < MAX_PROPOSALS; i++) {
const float image_id = data[i * OBJECT_SIZE + 0]; // batch id
const float confidence = data[i * OBJECT_SIZE + 2];
const float rc_left = data[i * OBJECT_SIZE + 3];
const float rc_top = data[i * OBJECT_SIZE + 4];
const float rc_right = data[i * OBJECT_SIZE + 5];
const float rc_bottom = data[i * OBJECT_SIZE + 6];
if (image_id < 0.f) { // indicates end of detections
break;
}
if (confidence < 0.5f) { // hard-coded confidence threshold
continue;
}
// Convert floating-point coordinates to the absolute image
// frame coordinates; clip by the source image boundaries.
cv::Rect rc;
rc.x = static_cast<int>(rc_left * upscale.width);
rc.y = static_cast<int>(rc_top * upscale.height);
rc.width = static_cast<int>(rc_right * upscale.width) - rc.x;
rc.height = static_cast<int>(rc_bottom * upscale.height) - rc.y;
out_faces.push_back(rc & surface);
}
}
};
//! [Postproc]
} // namespace custom
namespace labels {
const std::string genders[] = {
"Female", "Male"
};
const std::string emotions[] = {
"neutral", "happy", "sad", "surprise", "anger"
};
namespace {
void DrawResults(cv::Mat &frame,
const std::vector<cv::Rect> &faces,
const std::vector<cv::Mat> &out_ages,
const std::vector<cv::Mat> &out_genders,
const std::vector<cv::Mat> &out_emotions) {
CV_Assert(faces.size() == out_ages.size());
CV_Assert(faces.size() == out_genders.size());
CV_Assert(faces.size() == out_emotions.size());
for (auto it = faces.begin(); it != faces.end(); ++it) {
const auto idx = std::distance(faces.begin(), it);
const auto &rc = *it;
const float *ages_data = out_ages[idx].ptr<float>();
const float *genders_data = out_genders[idx].ptr<float>();
const float *emotions_data = out_emotions[idx].ptr<float>();
const auto gen_id = std::max_element(genders_data, genders_data + 2) - genders_data;
const auto emo_id = std::max_element(emotions_data, emotions_data + 5) - emotions_data;
std::stringstream ss;
ss << static_cast<int>(ages_data[0]*100)
<< ' '
<< genders[gen_id]
<< ' '
<< emotions[emo_id];
const int ATTRIB_OFFSET = 15;
cv::rectangle(frame, rc, {0, 255, 0}, 4);
cv::putText(frame, ss.str(),
cv::Point(rc.x, rc.y - ATTRIB_OFFSET),
cv::FONT_HERSHEY_COMPLEX_SMALL,
1,
cv::Scalar(0, 0, 255));
}
}
void DrawFPS(cv::Mat &frame, std::size_t n, double fps) {
std::ostringstream out;
out << "FRAME " << n << ": "
<< std::fixed << std::setprecision(2) << fps
<< " FPS (AVG)";
cv::putText(frame, out.str(),
cv::Point(0, frame.rows),
cv::FONT_HERSHEY_SIMPLEX,
1,
cv::Scalar(0, 255, 0),
2);
}
} // anonymous namespace
} // namespace labels
int main(int argc, char *argv[])
{
cv::CommandLineParser cmd(argc, argv, keys);
cmd.about(about);
if (cmd.has("help")) {
cmd.printMessage();
return 0;
}
const std::string input = cmd.get<std::string>("input");
const bool no_show = cmd.get<bool>("pure");
const bool be_serial = cmd.get<bool>("ser");
// Express our processing pipeline. Lambda-based constructor
// is used to keep all temporary objects in a dedicated scope.
//! [GComputation]
cv::GComputation pp([]() {
// Declare an empty GMat - the beginning of the pipeline.
cv::GMat in;
// Run face detection on the input frame. Result is a single GMat,
// internally representing a 1x1x200x7 SSD output.
// This is a single-patch version of infer:
// - Inference is running on the whole input image;
// - Image is converted and resized to the network's expected format
// automatically.
cv::GMat detections = cv::gapi::infer<custom::Faces>(in);
// Parse SSD output to a list of ROI (rectangles) using
// a custom kernel. Note: parsing SSD may become a "standard" kernel.
cv::GArray<cv::Rect> faces = custom::PostProc::on(detections, in);
// Now run Age/Gender model on every detected face. This model has two
// outputs (for age and gender respectively).
// A special ROI-list-oriented form of infer<>() is used here:
// - First input argument is the list of rectangles to process,
// - Second one is the image to take ROIs from;
// - Crop/Resize/Layout conversion happens automatically for every image patch
// from the list
// - Inference results are also returned in form of list (GArray<>)
// - Since there are two outputs, infer<> returns two arrays (via std::tuple).
cv::GArray<cv::GMat> ages;
cv::GArray<cv::GMat> genders;
std::tie(ages, genders) = cv::gapi::infer<custom::AgeGender>(faces, in);
// Recognize emotions on every face.
// ROI-list-oriented infer<>() is used here as well.
// Since the custom::Emotions network produces a single output, only one
// GArray<> is returned here.
cv::GArray<cv::GMat> emotions = cv::gapi::infer<custom::Emotions>(faces, in);
// Return the decoded frame as a result as well.
// Input matrix can't be specified as output one, so use copy() here
// (this copy will be optimized out in the future).
cv::GMat frame = cv::gapi::copy(in);
// Now specify the computation's boundaries - our pipeline consumes
// one image and produces five outputs.
return cv::GComputation(cv::GIn(in),
cv::GOut(frame, faces, ages, genders, emotions));
});
//! [GComputation]
// Note: it might be very useful to have dimensions loaded at this point!
// After our computation is defined, specify how it should be executed.
// Execution is defined by inference backends and kernel backends we use to
// compile the pipeline (it is a different step).
// Declare IE parameters for the FaceDetection network. Note here custom::Faces
// is the type name we specified in G_API_NET() previously.
// cv::gapi::ie::Params<> is a generic configuration description which is
// specialized to every particular network we use.
//
// OpenCV DNN backend will have its own parameter structure with settings
// relevant to OpenCV DNN module. Same applies to other possible inference
// backends...
//! [Param_Cfg]
auto det_net = cv::gapi::ie::Params<custom::Faces> {
cmd.get<std::string>("fdm"), // read cmd args: path to topology IR
cmd.get<std::string>("fdw"), // read cmd args: path to weights
cmd.get<std::string>("fdd"), // read cmd args: device specifier
};
auto age_net = cv::gapi::ie::Params<custom::AgeGender> {
cmd.get<std::string>("agem"), // read cmd args: path to topology IR
cmd.get<std::string>("agew"), // read cmd args: path to weights
cmd.get<std::string>("aged"), // read cmd args: device specifier
}.cfgOutputLayers({ "age_conv3", "prob" });
auto emo_net = cv::gapi::ie::Params<custom::Emotions> {
cmd.get<std::string>("emom"), // read cmd args: path to topology IR
cmd.get<std::string>("emow"), // read cmd args: path to weights
cmd.get<std::string>("emod"), // read cmd args: device specifier
};
//! [Param_Cfg]
//! [Compile]
// Form a kernel package (with a single OpenCV-based implementation of our
// post-processing) and a network package (holding our three networks).
auto kernels = cv::gapi::kernels<custom::OCVPostProc>();
auto networks = cv::gapi::networks(det_net, age_net, emo_net);
// Compile our pipeline and pass our kernels & networks as
// parameters. This is the place where G-API learns which
// networks & kernels we're actually operating with (the graph
// description itself knows nothing about that).
auto cc = pp.compileStreaming(cv::compile_args(kernels, networks));
//! [Compile]
Avg avg;
std::size_t frames = 0u; // Frame counter (not produced by the graph)
std::cout << "Reading " << input << std::endl;
// Duplicate huge portions of the code in if/else branches for the sake of
// better documentation snippets
if (!be_serial) {
//! [Source]
auto in_src = cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(input);
cc.setSource(cv::gin(in_src));
//! [Source]
avg.start();
//! [Run]
// After data source is specified, start the execution
cc.start();
// Declare data objects we will be receiving from the pipeline.
cv::Mat frame; // The captured frame itself
std::vector<cv::Rect> faces; // Array of detected faces
std::vector<cv::Mat> out_ages; // Array of inferred ages (one blob per face)
std::vector<cv::Mat> out_genders; // Array of inferred genders (one blob per face)
std::vector<cv::Mat> out_emotions; // Array of classified emotions (one blob per face)
// Implement different execution policies depending on the display option
// for the best performance.
while (cc.running()) {
auto out_vector = cv::gout(frame, faces, out_ages, out_genders, out_emotions);
if (no_show) {
// This is purely video processing. No need to balance it
// with UI rendering. Use a blocking pull() to obtain
// data. Break the loop if the stream is over.
if (!cc.pull(std::move(out_vector)))
break;
} else if (!cc.try_pull(std::move(out_vector))) {
// Use a non-blocking try_pull() to obtain data.
// If there's no data, let UI refresh (and handle keypress)
if (cv::waitKey(1) >= 0) break;
else continue;
}
// At this point we have data for sure (obtained in either
// blocking or non-blocking way).
frames++;
labels::DrawResults(frame, faces, out_ages, out_genders, out_emotions);
labels::DrawFPS(frame, frames, avg.fps(frames));
if (!no_show) cv::imshow("Out", frame);
}
//! [Run]
} else { // (serial flag)
//! [Run_Serial]
cv::VideoCapture cap(input);
cv::Mat in_frame, frame; // The captured frame itself
std::vector<cv::Rect> faces; // Array of detected faces
std::vector<cv::Mat> out_ages; // Array of inferred ages (one blob per face)
std::vector<cv::Mat> out_genders; // Array of inferred genders (one blob per face)
std::vector<cv::Mat> out_emotions; // Array of classified emotions (one blob per face)
while (cap.read(in_frame)) {
pp.apply(cv::gin(in_frame),
cv::gout(frame, faces, out_ages, out_genders, out_emotions),
cv::compile_args(kernels, networks));
labels::DrawResults(frame, faces, out_ages, out_genders, out_emotions);
frames++;
if (frames == 1u) {
// Start timer only after 1st frame processed -- compilation
// happens on-the-fly here
avg.start();
} else {
// Measure & draw FPS for all other frames
labels::DrawFPS(frame, frames, avg.fps(frames-1));
}
if (!no_show) {
cv::imshow("Out", frame);
if (cv::waitKey(1) >= 0) break;
}
}
//! [Run_Serial]
}
std::cout << "Processed " << frames << " frames in " << avg.elapsed()
<< " (" << avg.fps(frames) << " FPS)" << std::endl;
return 0;
}
#else
#include <iostream>
int main()
{
std::cerr << "This tutorial code requires G-API module "
"with Inference Engine backend to run"
<< std::endl;
return 1;
}
#endif // HAVE_OPENCV_GAPI


@@ -0,0 +1,905 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level
// directory of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018-2019 Intel Corporation
#include "opencv2/opencv_modules.hpp"
#if defined(HAVE_OPENCV_GAPI)
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/core.hpp>
#include <opencv2/gapi/imgproc.hpp>
#include <opencv2/gapi/fluid/core.hpp>
#include <opencv2/gapi/infer.hpp>
#include <opencv2/gapi/infer/ie.hpp>
#include <opencv2/gapi/cpu/gcpukernel.hpp>
#include <opencv2/gapi/streaming/cap.hpp>
#include <opencv2/highgui.hpp> // windows
#include <chrono>              // std::chrono (used by the Avg helper below)
#include <cmath>               // std::lround
#include <iostream>            // std::cout
namespace config
{
constexpr char kWinFaceBeautification[] = "FaceBeautificator";
constexpr char kWinInput[] = "Input";
constexpr char kParserAbout[] =
"Use this script to run the face beautification algorithm with G-API.";
constexpr char kParserOptions[] =
"{ help h || print the help message. }"
"{ facepath f || a path to a Face detection model file (.xml).}"
"{ facedevice |GPU| the face detection computation device.}"
"{ landmpath l || a path to a Landmarks detection model file (.xml).}"
"{ landmdevice |CPU| the landmarks detection computation device.}"
"{ input i || a path to an input. Skip to capture from a camera.}"
"{ boxes b |false| set true to draw face Boxes in the \"Input\" window.}"
"{ landmarks m |false| set true to draw landMarks in the \"Input\" window.}"
"{ streaming s |true| set false to disable stream pipelining.}"
"{ performance p |false| set true to disable output displaying.}";
const cv::Scalar kClrWhite (255, 255, 255);
const cv::Scalar kClrGreen ( 0, 255, 0);
const cv::Scalar kClrYellow( 0, 255, 255);
constexpr float kConfThresh = 0.7f;
const cv::Size kGKernelSize(5, 5);
constexpr double kGSigma = 0.0;
constexpr int kBSize = 9;
constexpr double kBSigmaCol = 30.0;
constexpr double kBSigmaSp = 30.0;
constexpr int kUnshSigma = 3;
constexpr float kUnshStrength = 0.7f;
constexpr int kAngDelta = 1;
constexpr bool kClosedLine = true;
} // namespace config
namespace
{
//! [vec_ROI]
using VectorROI = std::vector<cv::Rect>;
//! [vec_ROI]
using GArrayROI = cv::GArray<cv::Rect>;
using Contour = std::vector<cv::Point>;
using Landmarks = std::vector<cv::Point>;
// Wrapper function
template<typename Tp> inline int toIntRounded(const Tp x)
{
return static_cast<int>(std::lround(x));
}
//! [toDbl]
template<typename Tp> inline double toDouble(const Tp x)
{
return static_cast<double>(x);
}
//! [toDbl]
struct Avg {
struct Elapsed {
explicit Elapsed(double ms) : ss(ms / 1000.),
mm(toIntRounded(ss / 60)) {}
const double ss;
const int mm;
};
using MS = std::chrono::duration<double, std::ratio<1, 1000>>;
using TS = std::chrono::time_point<std::chrono::high_resolution_clock>;
TS started;
void start() { started = now(); }
TS now() const { return std::chrono::high_resolution_clock::now(); }
double tick() const { return std::chrono::duration_cast<MS>(now() - started).count(); }
Elapsed elapsed() const { return Elapsed{tick()}; }
double fps(std::size_t n) const { return static_cast<double>(n) / (tick() / 1000.); }
};
std::ostream& operator<<(std::ostream &os, const Avg::Elapsed &e) {
os << e.mm << ':' << (e.ss - 60*e.mm);
return os;
}
std::string getWeightsPath(const std::string &mdlXMLPath) // mdlXMLPath =
// "The/Full/Path.xml"
{
size_t size = mdlXMLPath.size();
CV_Assert(mdlXMLPath.substr(size - 4, size) // The last 4 symbols
== ".xml"); // must be ".xml"
std::string mdlBinPath(mdlXMLPath);
return mdlBinPath.replace(size - 3, 3, "bin"); // return
// "The/Full/Path.bin"
}
} // anonymous namespace
namespace custom
{
using TplPtsFaceElements_Jaw = std::tuple<cv::GArray<Landmarks>,
cv::GArray<Contour>>;
// Wrapper-functions
inline int getLineInclinationAngleDegrees(const cv::Point &ptLeft,
const cv::Point &ptRight);
inline Contour getForeheadEllipse(const cv::Point &ptJawLeft,
const cv::Point &ptJawRight,
const cv::Point &ptJawMiddle);
inline Contour getEyeEllipse(const cv::Point &ptLeft,
const cv::Point &ptRight);
inline Contour getPatchedEllipse(const cv::Point &ptLeft,
const cv::Point &ptRight,
const cv::Point &ptUp,
const cv::Point &ptDown);
// Networks
//! [net_decl]
G_API_NET(FaceDetector, <cv::GMat(cv::GMat)>, "face_detector");
G_API_NET(LandmDetector, <cv::GMat(cv::GMat)>, "landm_detector");
//! [net_decl]
// Function kernels
G_TYPED_KERNEL(GBilatFilter, <cv::GMat(cv::GMat,int,double,double)>,
"custom.faceb12n.bilateralFilter")
{
static cv::GMatDesc outMeta(cv::GMatDesc in, int,double,double)
{
return in;
}
};
G_TYPED_KERNEL(GLaplacian, <cv::GMat(cv::GMat,int)>,
"custom.faceb12n.Laplacian")
{
static cv::GMatDesc outMeta(cv::GMatDesc in, int)
{
return in;
}
};
G_TYPED_KERNEL(GFillPolyGContours, <cv::GMat(cv::GMat,cv::GArray<Contour>)>,
"custom.faceb12n.fillPolyGContours")
{
static cv::GMatDesc outMeta(cv::GMatDesc in, cv::GArrayDesc)
{
return in.withType(CV_8U, 1);
}
};
G_TYPED_KERNEL(GPolyLines, <cv::GMat(cv::GMat,cv::GArray<Contour>,bool,
cv::Scalar)>,
"custom.faceb12n.polyLines")
{
static cv::GMatDesc outMeta(cv::GMatDesc in, cv::GArrayDesc,bool,cv::Scalar)
{
return in;
}
};
G_TYPED_KERNEL(GRectangle, <cv::GMat(cv::GMat,GArrayROI,cv::Scalar)>,
"custom.faceb12n.rectangle")
{
static cv::GMatDesc outMeta(cv::GMatDesc in, cv::GArrayDesc,cv::Scalar)
{
return in;
}
};
G_TYPED_KERNEL(GFacePostProc, <GArrayROI(cv::GMat,cv::GMat,float)>,
"custom.faceb12n.faceDetectPostProc")
{
static cv::GArrayDesc outMeta(const cv::GMatDesc&,const cv::GMatDesc&,float)
{
return cv::empty_array_desc();
}
};
G_TYPED_KERNEL_M(GLandmPostProc, <TplPtsFaceElements_Jaw(cv::GArray<cv::GMat>,
GArrayROI)>,
"custom.faceb12n.landmDetectPostProc")
{
static std::tuple<cv::GArrayDesc,cv::GArrayDesc> outMeta(
const cv::GArrayDesc&,const cv::GArrayDesc&)
{
return std::make_tuple(cv::empty_array_desc(), cv::empty_array_desc());
}
};
//! [kern_m_decl]
using TplFaces_FaceElements = std::tuple<cv::GArray<Contour>, cv::GArray<Contour>>;
G_TYPED_KERNEL_M(GGetContours, <TplFaces_FaceElements (cv::GArray<Landmarks>, cv::GArray<Contour>)>,
"custom.faceb12n.getContours")
{
static std::tuple<cv::GArrayDesc,cv::GArrayDesc> outMeta(const cv::GArrayDesc&,const cv::GArrayDesc&)
{
return std::make_tuple(cv::empty_array_desc(), cv::empty_array_desc());
}
};
//! [kern_m_decl]
// OCV_Kernels
// This kernel applies Bilateral filter to an input src with default
// "cv::bilateralFilter" border argument
GAPI_OCV_KERNEL(GCPUBilateralFilter, custom::GBilatFilter)
{
static void run(const cv::Mat &src,
const int diameter,
const double sigmaColor,
const double sigmaSpace,
cv::Mat &out)
{
cv::bilateralFilter(src, out, diameter, sigmaColor, sigmaSpace);
}
};
// This kernel applies Laplace operator to an input src with default
// "cv::Laplacian" arguments
GAPI_OCV_KERNEL(GCPULaplacian, custom::GLaplacian)
{
static void run(const cv::Mat &src,
const int ddepth,
cv::Mat &out)
{
cv::Laplacian(src, out, ddepth);
}
};
// This kernel draws given white filled contours "cnts" on a clear Mat "out"
// (defined by a Scalar(0)) with standard "cv::fillPoly" arguments.
// It should be used to create a mask.
// The input Mat seems unused inside the function "run", but it is used deeper
// in the kernel to define an output size.
GAPI_OCV_KERNEL(GCPUFillPolyGContours, custom::GFillPolyGContours)
{
static void run(const cv::Mat &,
const std::vector<Contour> &cnts,
cv::Mat &out)
{
out = cv::Scalar(0);
cv::fillPoly(out, cnts, config::kClrWhite);
}
};
// This kernel draws given contours on an input src with default "cv::polylines"
// arguments
GAPI_OCV_KERNEL(GCPUPolyLines, custom::GPolyLines)
{
static void run(const cv::Mat &src,
const std::vector<Contour> &cnts,
const bool isClosed,
const cv::Scalar &color,
cv::Mat &out)
{
src.copyTo(out);
cv::polylines(out, cnts, isClosed, color);
}
};
// This kernel draws given rectangles on an input src with default
// "cv::rectangle" arguments
GAPI_OCV_KERNEL(GCPURectangle, custom::GRectangle)
{
static void run(const cv::Mat &src,
const VectorROI &vctFaceBoxes,
const cv::Scalar &color,
cv::Mat &out)
{
src.copyTo(out);
for (const cv::Rect &box : vctFaceBoxes)
{
cv::rectangle(out, box, color);
}
}
};
// A face detector outputs a blob with the shape: [1, 1, N, 7], where N is
// the number of detected bounding boxes. Structure of an output for every
// detected face is the following:
// [image_id, label, conf, x_min, y_min, x_max, y_max], all the seven elements
// are floating point. For more details please visit:
// https://github.com/opencv/open_model_zoo/blob/master/intel_models/face-detection-adas-0001
// This kernel is the face detection output blob parsing that returns a vector
// of detected faces' rects:
//! [fd_pp]
GAPI_OCV_KERNEL(GCPUFacePostProc, GFacePostProc)
{
static void run(const cv::Mat &inDetectResult,
const cv::Mat &inFrame,
const float faceConfThreshold,
VectorROI &outFaces)
{
const int kObjectSize = 7;
const int imgCols = inFrame.size().width;
const int imgRows = inFrame.size().height;
const cv::Rect borders({0, 0}, inFrame.size());
outFaces.clear();
const int numOfDetections = inDetectResult.size[2];
const float *data = inDetectResult.ptr<float>();
for (int i = 0; i < numOfDetections; i++)
{
const float faceId = data[i * kObjectSize + 0];
if (faceId < 0.f) // indicates the end of detections
{
break;
}
const float faceConfidence = data[i * kObjectSize + 2];
// We can cut detections by the `conf` field
// to avoid mistakes of the detector.
if (faceConfidence > faceConfThreshold)
{
const float left = data[i * kObjectSize + 3];
const float top = data[i * kObjectSize + 4];
const float right = data[i * kObjectSize + 5];
const float bottom = data[i * kObjectSize + 6];
// These are normalized coordinates and are between 0 and 1;
// to get the real pixel coordinates we should multiply it by
// the image sizes respectively to the directions:
cv::Point tl(toIntRounded(left * imgCols),
toIntRounded(top * imgRows));
cv::Point br(toIntRounded(right * imgCols),
toIntRounded(bottom * imgRows));
outFaces.push_back(cv::Rect(tl, br) & borders);
}
}
}
};
//! [fd_pp]
// This kernel is the facial landmarks detection output Mat parsing for every
// detected face; returns a tuple containing a vector of vectors of
// face elements' Points and a vector of vectors of jaw's Points:
// There are 35 landmarks given by the default detector for each face
// in a frame; the first 18 of them are face elements (eyes, eyebrows,
// a nose, a mouth) and the last 17 - a jaw contour. The detector gives
// floating-point values for the landmarks' normalized coordinates relative
// to the input ROI (not the original frame).
// For more details please visit:
// https://github.com/opencv/open_model_zoo/blob/master/intel_models/facial-landmarks-35-adas-0002
GAPI_OCV_KERNEL(GCPULandmPostProc, GLandmPostProc)
{
static void run(const std::vector<cv::Mat> &vctDetectResults,
const VectorROI &vctRects,
std::vector<Landmarks> &vctPtsFaceElems,
std::vector<Contour> &vctCntJaw)
{
static constexpr int kNumFaceElems = 18;
static constexpr int kNumTotal = 35;
const size_t numFaces = vctRects.size();
CV_Assert(vctPtsFaceElems.size() == 0ul);
CV_Assert(vctCntJaw.size() == 0ul);
vctPtsFaceElems.reserve(numFaces);
vctCntJaw.reserve(numFaces);
Landmarks ptsFaceElems;
Contour cntJaw;
ptsFaceElems.reserve(kNumFaceElems);
cntJaw.reserve(kNumTotal - kNumFaceElems);
for (size_t i = 0; i < numFaces; i++)
{
const float *data = vctDetectResults[i].ptr<float>();
// The face elements points:
ptsFaceElems.clear();
for (int j = 0; j < kNumFaceElems * 2; j += 2)
{
cv::Point pt = cv::Point(toIntRounded(data[j] * vctRects[i].width),
toIntRounded(data[j+1] * vctRects[i].height)) + vctRects[i].tl();
ptsFaceElems.push_back(pt);
}
vctPtsFaceElems.push_back(ptsFaceElems);
// The jaw contour points:
cntJaw.clear();
for(int j = kNumFaceElems * 2; j < kNumTotal * 2; j += 2)
{
cv::Point pt = cv::Point(toIntRounded(data[j] * vctRects[i].width),
toIntRounded(data[j+1] * vctRects[i].height)) + vctRects[i].tl();
cntJaw.push_back(pt);
}
vctCntJaw.push_back(cntJaw);
}
}
};
// This kernel is the facial landmarks detection post-processing for every face
// detected before; output is a tuple of vectors of detected face contours and
// facial elements contours:
//! [ld_pp_cnts]
//! [kern_m_impl]
GAPI_OCV_KERNEL(GCPUGetContours, GGetContours)
{
static void run(const std::vector<Landmarks> &vctPtsFaceElems, // 18 landmarks of the facial elements
const std::vector<Contour> &vctCntJaw, // 17 landmarks of a jaw
std::vector<Contour> &vctElemsContours,
std::vector<Contour> &vctFaceContours)
{
//! [kern_m_impl]
size_t numFaces = vctCntJaw.size();
CV_Assert(numFaces == vctPtsFaceElems.size());
CV_Assert(vctElemsContours.size() == 0ul);
CV_Assert(vctFaceContours.size() == 0ul);
// vctElemsContours will store all the face elements' contours found
// in an input image, namely 4 elements (two eyes, nose, mouth) for every detected face:
vctElemsContours.reserve(numFaces * 4);
// vctFaceContours will store all the faces' contours found in an input image:
vctFaceContours.reserve(numFaces);
Contour cntFace, cntLeftEye, cntRightEye, cntNose, cntMouth;
cntNose.reserve(4);
for (size_t i = 0ul; i < numFaces; i++)
{
// The face elements contours
// A left eye:
// Approximating the lower eye contour by half-ellipse (using eye points) and storing in cntLeftEye:
cntLeftEye = getEyeEllipse(vctPtsFaceElems[i][1], vctPtsFaceElems[i][0]);
// Pushing the left eyebrow clock-wise:
cntLeftEye.insert(cntLeftEye.end(), {vctPtsFaceElems[i][12], vctPtsFaceElems[i][13],
vctPtsFaceElems[i][14]});
// A right eye:
// Approximating the lower eye contour by half-ellipse (using eye points) and storing in cntRightEye:
cntRightEye = getEyeEllipse(vctPtsFaceElems[i][2], vctPtsFaceElems[i][3]);
// Pushing the right eyebrow clock-wise:
cntRightEye.insert(cntRightEye.end(), {vctPtsFaceElems[i][15], vctPtsFaceElems[i][16],
vctPtsFaceElems[i][17]});
// A nose:
// Storing the nose points clock-wise
cntNose.clear();
cntNose.insert(cntNose.end(), {vctPtsFaceElems[i][4], vctPtsFaceElems[i][7],
vctPtsFaceElems[i][5], vctPtsFaceElems[i][6]});
// A mouth:
// Approximating the mouth contour by two half-ellipses (using mouth points) and storing in cntMouth:
cntMouth = getPatchedEllipse(vctPtsFaceElems[i][8], vctPtsFaceElems[i][9],
vctPtsFaceElems[i][10], vctPtsFaceElems[i][11]);
// Storing all the elements in a vector:
vctElemsContours.insert(vctElemsContours.end(), {cntLeftEye, cntRightEye, cntNose, cntMouth});
// The face contour:
// Approximating the forehead contour by half-ellipse (using jaw points) and storing in cntFace:
cntFace = getForeheadEllipse(vctCntJaw[i][0], vctCntJaw[i][16], vctCntJaw[i][8]);
// The ellipse is drawn clockwise, but the jaw contour points go the opposite way, so it's necessary to push
// cntJaw from the end to the beginning using a reverse iterator:
std::copy(vctCntJaw[i].crbegin(), vctCntJaw[i].crend(), std::back_inserter(cntFace));
// Storing the face contour in another vector:
vctFaceContours.push_back(cntFace);
}
}
};
//! [ld_pp_cnts]
// GAPI subgraph functions
inline cv::GMat unsharpMask(const cv::GMat &src,
const int sigma,
const float strength);
inline cv::GMat mask3C(const cv::GMat &src,
const cv::GMat &mask);
} // namespace custom
// Functions implementation:
// Returns an angle (in degrees) between a line given by two Points and
// the horizon. Note that the result depends on the argument order:
//! [ld_pp_incl]
inline int custom::getLineInclinationAngleDegrees(const cv::Point &ptLeft, const cv::Point &ptRight)
{
const cv::Point residual = ptRight - ptLeft;
if (residual.y == 0 && residual.x == 0)
return 0;
else
return toIntRounded(atan2(toDouble(residual.y), toDouble(residual.x)) * 180.0 / CV_PI);
}
//! [ld_pp_incl]
// Approximates a forehead by a half-ellipse using jaw points and some geometry
// and then returns the points of the contour (the caller appends the jaw points
// to it afterwards).
//! [ld_pp_fhd]
inline Contour custom::getForeheadEllipse(const cv::Point &ptJawLeft,
const cv::Point &ptJawRight,
const cv::Point &ptJawLower)
{
Contour cntForehead;
// The point amid the top two points of a jaw:
const cv::Point ptFaceCenter((ptJawLeft + ptJawRight) / 2);
// This will be the center of the ellipse.
// The angle between the jaw and the vertical:
const int angFace = getLineInclinationAngleDegrees(ptJawLeft, ptJawRight);
// This will be the inclination of the ellipse
// Counting the half-axis of the ellipse:
const double jawWidth = cv::norm(ptJawLeft - ptJawRight);
// A forehead width equals the jaw width, and we need a half-axis:
const int axisX = toIntRounded(jawWidth / 2.0);
const double jawHeight = cv::norm(ptFaceCenter - ptJawLower);
// According to research, on average a forehead is approximately 2/3 of
// a jaw:
const int axisY = toIntRounded(jawHeight * 2 / 3.0);
// We need the upper part of an ellipse:
static constexpr int kAngForeheadStart = 180;
static constexpr int kAngForeheadEnd = 360;
cv::ellipse2Poly(ptFaceCenter, cv::Size(axisX, axisY), angFace, kAngForeheadStart, kAngForeheadEnd,
config::kAngDelta, cntForehead);
return cntForehead;
}
//! [ld_pp_fhd]
// Approximates the lower eye contour by half-ellipse using eye points and some
// geometry and then returns points of the contour.
//! [ld_pp_eye]
inline Contour custom::getEyeEllipse(const cv::Point &ptLeft, const cv::Point &ptRight)
{
Contour cntEyeBottom;
const cv::Point ptEyeCenter((ptRight + ptLeft) / 2);
const int angle = getLineInclinationAngleDegrees(ptLeft, ptRight);
const int axisX = toIntRounded(cv::norm(ptRight - ptLeft) / 2.0);
// According to research, on average the Y axis of an eye is approximately
// 1/3 of an X one.
const int axisY = axisX / 3;
// We need the lower part of an ellipse:
static constexpr int kAngEyeStart = 0;
static constexpr int kAngEyeEnd = 180;
cv::ellipse2Poly(ptEyeCenter, cv::Size(axisX, axisY), angle, kAngEyeStart, kAngEyeEnd, config::kAngDelta,
cntEyeBottom);
return cntEyeBottom;
}
//! [ld_pp_eye]
// This function approximates an object (a mouth) by two half-ellipses using
// 4 points of the axes' ends and then returns points of the contour:
inline Contour custom::getPatchedEllipse(const cv::Point &ptLeft,
const cv::Point &ptRight,
const cv::Point &ptUp,
const cv::Point &ptDown)
{
// Shared characteristics for both half-ellipses:
const cv::Point ptMouthCenter((ptLeft + ptRight) / 2);
const int angMouth = getLineInclinationAngleDegrees(ptLeft, ptRight);
const int axisX = toIntRounded(cv::norm(ptRight - ptLeft) / 2.0);
// The top half-ellipse:
Contour cntMouthTop;
const int axisYTop = toIntRounded(cv::norm(ptMouthCenter - ptUp));
// We need the upper part of an ellipse:
static constexpr int angTopStart = 180;
static constexpr int angTopEnd = 360;
cv::ellipse2Poly(ptMouthCenter, cv::Size(axisX, axisYTop), angMouth, angTopStart, angTopEnd, config::kAngDelta, cntMouthTop);
// The bottom half-ellipse:
Contour cntMouth;
const int axisYBot = toIntRounded(cv::norm(ptMouthCenter - ptDown));
// We need the lower part of an ellipse:
static constexpr int angBotStart = 0;
static constexpr int angBotEnd = 180;
cv::ellipse2Poly(ptMouthCenter, cv::Size(axisX, axisYBot), angMouth, angBotStart, angBotEnd, config::kAngDelta, cntMouth);
// Appending the upper (top) half-ellipse to cntMouth
std::copy(cntMouthTop.cbegin(), cntMouthTop.cend(), std::back_inserter(cntMouth));
return cntMouth;
}
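// unsharpMask() builds a G-API subgraph which sharpens src: the Laplacian of a
// median-blurred copy, scaled by 'strength', is subtracted from the source,
// which emphasizes edges.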
//! [unsh]
inline cv::GMat custom::unsharpMask(const cv::GMat &src,
const int sigma,
const float strength)
{
cv::GMat blurred = cv::gapi::medianBlur(src, sigma);
cv::GMat laplacian = custom::GLaplacian::on(blurred, CV_8U);
return (src - (laplacian * strength));
}
//! [unsh]
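// mask3C() applies a single-channel mask to a 3-channel image: the channels are
// split, masked one by one and merged back, since cv::gapi::mask() accepts
// single-channel inputs only.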
inline cv::GMat custom::mask3C(const cv::GMat &src,
const cv::GMat &mask)
{
std::tuple<cv::GMat,cv::GMat,cv::GMat> tplIn = cv::gapi::split3(src);
cv::GMat masked0 = cv::gapi::mask(std::get<0>(tplIn), mask);
cv::GMat masked1 = cv::gapi::mask(std::get<1>(tplIn), mask);
cv::GMat masked2 = cv::gapi::mask(std::get<2>(tplIn), mask);
return cv::gapi::merge3(masked0, masked1, masked2);
}
int main(int argc, char** argv)
{
cv::namedWindow(config::kWinFaceBeautification, cv::WINDOW_NORMAL);
cv::namedWindow(config::kWinInput, cv::WINDOW_NORMAL);
cv::CommandLineParser parser(argc, argv, config::kParserOptions);
parser.about(config::kParserAbout);
if (argc == 1 || parser.has("help"))
{
parser.printMessage();
return 0;
}
// Parsing input arguments
const std::string faceXmlPath = parser.get<std::string>("facepath");
const std::string faceBinPath = getWeightsPath(faceXmlPath);
const std::string faceDevice = parser.get<std::string>("facedevice");
const std::string landmXmlPath = parser.get<std::string>("landmpath");
const std::string landmBinPath = getWeightsPath(landmXmlPath);
const std::string landmDevice = parser.get<std::string>("landmdevice");
// Declaring a graph
// The version of a pipeline expression with a lambda-based
// constructor is used to keep all temporary objects in a dedicated scope.
//! [ppl]
cv::GComputation pipeline([=]()
{
//! [net_usg_fd]
cv::GMat gimgIn; // input
cv::GMat faceOut = cv::gapi::infer<custom::FaceDetector>(gimgIn);
//! [net_usg_fd]
GArrayROI garRects = custom::GFacePostProc::on(faceOut, gimgIn, config::kConfThresh); // post-proc
//! [net_usg_ld]
cv::GArray<cv::GMat> landmOut = cv::gapi::infer<custom::LandmDetector>(garRects, gimgIn);
//! [net_usg_ld]
cv::GArray<Landmarks> garElems; // |
cv::GArray<Contour> garJaws; // |output arrays
std::tie(garElems, garJaws) = custom::GLandmPostProc::on(landmOut, garRects); // post-proc
cv::GArray<Contour> garElsConts; // face elements
cv::GArray<Contour> garFaceConts; // whole faces
std::tie(garElsConts, garFaceConts) = custom::GGetContours::on(garElems, garJaws); // interpolation
//! [msk_ppline]
cv::GMat mskSharp = custom::GFillPolyGContours::on(gimgIn, garElsConts); // |
cv::GMat mskSharpG = cv::gapi::gaussianBlur(mskSharp, config::kGKernelSize, // |
config::kGSigma); // |
cv::GMat mskBlur = custom::GFillPolyGContours::on(gimgIn, garFaceConts); // |
cv::GMat mskBlurG = cv::gapi::gaussianBlur(mskBlur, config::kGKernelSize, // |
config::kGSigma); // |draw masks
// The first argument in mask() is Blur as we want to subtract from // |
// BlurG in the next step: // |
cv::GMat mskBlurFinal = mskBlurG - cv::gapi::mask(mskBlurG, mskSharpG); // |
cv::GMat mskFacesGaussed = mskBlurFinal + mskSharpG; // |
cv::GMat mskFacesWhite = cv::gapi::threshold(mskFacesGaussed, 0, 255, cv::THRESH_BINARY); // |
cv::GMat mskNoFaces = cv::gapi::bitwise_not(mskFacesWhite); // |
//! [msk_ppline]
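// At this point mskSharpG covers the facial features to be sharpened,
// mskBlurFinal covers the rest of the face area to be smoothed, and mskNoFaces
// covers the background that is left untouched; the correspondingly masked
// images are summed below into the beautified frame.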
cv::GMat gimgBilat = custom::GBilatFilter::on(gimgIn, config::kBSize,
config::kBSigmaCol, config::kBSigmaSp);
cv::GMat gimgSharp = custom::unsharpMask(gimgIn, config::kUnshSigma,
config::kUnshStrength);
// Applying the masks
// Custom function mask3C() should be used instead of just gapi::mask()
// as mask() supports a CV_8UC1 source only (and we have CV_8UC3)
cv::GMat gimgBilatMasked = custom::mask3C(gimgBilat, mskBlurFinal);
cv::GMat gimgSharpMasked = custom::mask3C(gimgSharp, mskSharpG);
cv::GMat gimgInMasked = custom::mask3C(gimgIn, mskNoFaces);
cv::GMat gimgBeautif = gimgBilatMasked + gimgSharpMasked + gimgInMasked;
return cv::GComputation(cv::GIn(gimgIn), cv::GOut(gimgBeautif,
cv::gapi::copy(gimgIn),
garFaceConts,
garElsConts,
garRects));
});
//! [ppl]
// Declaring IE params for networks
//! [net_param]
auto faceParams = cv::gapi::ie::Params<custom::FaceDetector>
{
/*std::string*/ faceXmlPath,
/*std::string*/ faceBinPath,
/*std::string*/ faceDevice
};
auto landmParams = cv::gapi::ie::Params<custom::LandmDetector>
{
/*std::string*/ landmXmlPath,
/*std::string*/ landmBinPath,
/*std::string*/ landmDevice
};
//! [net_param]
//! [netw]
auto networks = cv::gapi::networks(faceParams, landmParams);
//! [netw]
// Declaring the custom and Fluid kernels to be used:
//! [kern_pass_1]
auto customKernels = cv::gapi::kernels<custom::GCPUBilateralFilter,
custom::GCPULaplacian,
custom::GCPUFillPolyGContours,
custom::GCPUPolyLines,
custom::GCPURectangle,
custom::GCPUFacePostProc,
custom::GCPULandmPostProc,
custom::GCPUGetContours>();
auto kernels = cv::gapi::combine(cv::gapi::core::fluid::kernels(),
customKernels);
//! [kern_pass_1]
Avg avg;
size_t frames = 0;
// The flags for drawing/not drawing face boxes and/or landmarks in the
// \"Input\" window:
const bool flgBoxes = parser.get<bool>("boxes");
const bool flgLandmarks = parser.get<bool>("landmarks");
// The flag to involve stream pipelining:
const bool flgStreaming = parser.get<bool>("streaming");
// The flag to display the output images or not:
const bool flgPerformance = parser.get<bool>("performance");
// Now we are ready to compile the pipeline to a stream with specified
// kernels, networks and image format expected to process
if (flgStreaming == true)
{
//! [str_comp]
cv::GStreamingCompiled stream = pipeline.compileStreaming(cv::compile_args(kernels, networks));
//! [str_comp]
// Setting the source for the stream:
//! [str_src]
if (parser.has("input"))
{
stream.setSource(cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(parser.get<cv::String>("input")));
}
//! [str_src]
else
{
stream.setSource(cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(0));
}
// Declaring output variables
// Streaming:
cv::Mat imgShow;
cv::Mat imgBeautif;
std::vector<Contour> vctFaceConts, vctElsConts;
VectorROI vctRects;
if (flgPerformance == true)
{
auto out_vector = cv::gout(imgBeautif, imgShow, vctFaceConts,
vctElsConts, vctRects);
stream.start();
avg.start();
while (stream.running())
{
stream.pull(std::move(out_vector));
frames++;
}
}
else // flgPerformance == false
{
//! [str_loop]
auto out_vector = cv::gout(imgBeautif, imgShow, vctFaceConts,
vctElsConts, vctRects);
stream.start();
avg.start();
while (stream.running())
{
if (!stream.try_pull(std::move(out_vector)))
{
// Use a try_pull() to obtain data.
// If there's no data, let UI refresh (and handle keypress)
if (cv::waitKey(1) >= 0) break;
else continue;
}
frames++;
// Drawing face boxes and landmarks if necessary:
if (flgLandmarks == true)
{
cv::polylines(imgShow, vctFaceConts, config::kClosedLine,
config::kClrYellow);
cv::polylines(imgShow, vctElsConts, config::kClosedLine,
config::kClrYellow);
}
if (flgBoxes == true)
for (auto rect : vctRects)
cv::rectangle(imgShow, rect, config::kClrGreen);
cv::imshow(config::kWinInput, imgShow);
cv::imshow(config::kWinFaceBeautification, imgBeautif);
}
//! [str_loop]
}
std::cout << "Processed " << frames << " frames in " << avg.elapsed()
<< " (" << avg.fps(frames) << " FPS)" << std::endl;
}
else // serial mode:
{
//! [bef_cap]
#include <opencv2/videoio.hpp>
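// Note: opencv2/videoio.hpp has effectively already been included by the headers
// above, so its include guard makes this directive a no-op here; it is repeated
// only so that it appears in the [bef_cap] documentation snippet.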
cv::GCompiled cc;
cv::VideoCapture cap;
if (parser.has("input"))
{
cap.open(parser.get<cv::String>("input"));
}
//! [bef_cap]
else if (!cap.open(0))
{
std::cout << "No input available" << std::endl;
return 1;
}
if (flgPerformance == true)
{
while (true)
{
cv::Mat img;
cv::Mat imgShow;
cv::Mat imgBeautif;
std::vector<Contour> vctFaceConts, vctElsConts;
VectorROI vctRects;
cap >> img;
if (img.empty())
{
break;
}
frames++;
if (!cc)
{
cc = pipeline.compile(cv::descr_of(img), cv::compile_args(kernels, networks));
avg.start();
}
cc(cv::gin(img), cv::gout(imgBeautif, imgShow, vctFaceConts,
vctElsConts, vctRects));
}
}
else // flgPerformance == false
{
//! [bef_loop]
while (cv::waitKey(1) < 0)
{
cv::Mat img;
cv::Mat imgShow;
cv::Mat imgBeautif;
std::vector<Contour> vctFaceConts, vctElsConts;
VectorROI vctRects;
cap >> img;
if (img.empty())
{
cv::waitKey();
break;
}
frames++;
//! [apply]
pipeline.apply(cv::gin(img), cv::gout(imgBeautif, imgShow,
vctFaceConts,
vctElsConts, vctRects),
cv::compile_args(kernels, networks));
//! [apply]
if (frames == 1)
{
// Start timer only after 1st frame processed -- compilation
// happens on-the-fly here
avg.start();
}
// Drawing face boxes and landmarks if necessary:
if (flgLandmarks == true)
{
cv::polylines(imgShow, vctFaceConts, config::kClosedLine,
config::kClrYellow);
cv::polylines(imgShow, vctElsConts, config::kClosedLine,
config::kClrYellow);
}
if (flgBoxes == true)
for (auto rect : vctRects)
cv::rectangle(imgShow, rect, config::kClrGreen);
cv::imshow(config::kWinInput, imgShow);
cv::imshow(config::kWinFaceBeautification, imgBeautif);
}
}
//! [bef_loop]
std::cout << "Processed " << frames << " frames in " << avg.elapsed()
<< " (" << avg.fps(frames) << " FPS)" << std::endl;
}
return 0;
}
#else
#include <iostream>
int main()
{
std::cerr << "This tutorial code requires G-API module "
"with Inference Engine backend to run"
<< std::endl;
return 1;
}
#endif // HAVE_OPENCV_GAPI


@@ -0,0 +1,107 @@
/**
* @brief You will learn how to port an existing algorithm to G-API
* @author Dmitry Matveev, dmitry.matveev@intel.com, based
* on sample by Karpushin Vladislav, karpushin@ngs.ru
*/
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_GAPI
//! [full_sample]
#include <iostream>
#include <utility>
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/gapi.hpp"
#include "opencv2/gapi/core.hpp"
#include "opencv2/gapi/imgproc.hpp"
//! [calcGST_proto]
void calcGST(const cv::GMat& inputImg, cv::GMat& imgCoherencyOut, cv::GMat& imgOrientationOut, int w);
//! [calcGST_proto]
int main()
{
int W = 52; // window size is WxW
double C_Thr = 0.43; // threshold for coherency
int LowThr = 35; // threshold1 for orientation, it ranges from 0 to 180
int HighThr = 57; // threshold2 for orientation, it ranges from 0 to 180
cv::Mat imgIn = cv::imread("input.jpg", cv::IMREAD_GRAYSCALE);
if (imgIn.empty()) //check whether the image is loaded or not
{
std::cout << "ERROR : Image cannot be loaded..!!" << std::endl;
return -1;
}
//! [main]
// Calculate Gradient Structure Tensor and post-process it for output with G-API
cv::GMat in;
cv::GMat imgCoherency, imgOrientation;
calcGST(in, imgCoherency, imgOrientation, W);
cv::GMat imgCoherencyBin = imgCoherency > C_Thr;
cv::GMat imgOrientationBin = cv::gapi::inRange(imgOrientation, LowThr, HighThr);
cv::GMat imgBin = imgCoherencyBin & imgOrientationBin;
cv::GMat out = cv::gapi::addWeighted(in, 0.5, imgBin, 0.5, 0.0);
// Normalize extra outputs
cv::GMat imgCoherencyNorm = cv::gapi::normalize(imgCoherency, 0, 255, cv::NORM_MINMAX);
cv::GMat imgOrientationNorm = cv::gapi::normalize(imgOrientation, 0, 255, cv::NORM_MINMAX);
// Capture the graph into object segm
cv::GComputation segm(cv::GIn(in), cv::GOut(out, imgCoherencyNorm, imgOrientationNorm));
// Define cv::Mats for output data
cv::Mat imgOut, imgOutCoherency, imgOutOrientation;
// Run the graph
segm.apply(cv::gin(imgIn), cv::gout(imgOut, imgOutCoherency, imgOutOrientation));
cv::imwrite("result.jpg", imgOut);
cv::imwrite("Coherency.jpg", imgOutCoherency);
cv::imwrite("Orientation.jpg", imgOutOrientation);
//! [main]
return 0;
}
//! [calcGST]
//! [calcGST_header]
void calcGST(const cv::GMat& inputImg, cv::GMat& imgCoherencyOut, cv::GMat& imgOrientationOut, int w)
{
auto img = cv::gapi::convertTo(inputImg, CV_32F);
auto imgDiffX = cv::gapi::Sobel(img, CV_32F, 1, 0, 3);
auto imgDiffY = cv::gapi::Sobel(img, CV_32F, 0, 1, 3);
auto imgDiffXY = cv::gapi::mul(imgDiffX, imgDiffY);
//! [calcGST_header]
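// J11, J22 and J12 computed below are the components of the gradient structure
// tensor J = [J11 J12; J12 J22], each averaged over a w x w window by a box
// filter. Its eigenvalues, lambda1,2 = (J11 + J22) +/- sqrt((J11 - J22)^2 + 4*J12^2)
// up to a factor of 1/2 that cancels in the ratio, give the coherency
// (lambda1 - lambda2) / (lambda1 + lambda2); the orientation is
// 0.5 * phase(J22 - J11, 2*J12).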
auto imgDiffXX = cv::gapi::mul(imgDiffX, imgDiffX);
auto imgDiffYY = cv::gapi::mul(imgDiffY, imgDiffY);
auto J11 = cv::gapi::boxFilter(imgDiffXX, CV_32F, cv::Size(w, w));
auto J22 = cv::gapi::boxFilter(imgDiffYY, CV_32F, cv::Size(w, w));
auto J12 = cv::gapi::boxFilter(imgDiffXY, CV_32F, cv::Size(w, w));
auto tmp1 = J11 + J22;
auto tmp2 = J11 - J22;
auto tmp22 = cv::gapi::mul(tmp2, tmp2);
auto tmp3 = cv::gapi::mul(J12, J12);
auto tmp4 = cv::gapi::sqrt(tmp22 + 4.0*tmp3);
auto lambda1 = tmp1 + tmp4;
auto lambda2 = tmp1 - tmp4;
imgCoherencyOut = (lambda1 - lambda2) / (lambda1 + lambda2);
imgOrientationOut = 0.5*cv::gapi::phase(J22 - J11, 2.0*J12, true);
}
//! [calcGST]
//! [full_sample]
#else
#include <iostream>
int main()
{
std::cerr << "This tutorial code requires G-API module to run" << std::endl;
}
#endif // HAVE_OPENCV_GAPI


@@ -0,0 +1,127 @@
/**
* @brief You will learn how to port an existing algorithm to G-API
* @author Dmitry Matveev, dmitry.matveev@intel.com, based
* on sample by Karpushin Vladislav, karpushin@ngs.ru
*/
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_GAPI
//! [full_sample]
#include <iostream>
#include <utility>
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/gapi.hpp"
#include "opencv2/gapi/core.hpp"
#include "opencv2/gapi/imgproc.hpp"
//! [fluid_includes]
#include "opencv2/gapi/fluid/core.hpp" // Fluid Core kernel library
#include "opencv2/gapi/fluid/imgproc.hpp" // Fluid ImgProc kernel library
//! [fluid_includes]
#include "opencv2/gapi/fluid/gfluidkernel.hpp" // Fluid user kernel API
//! [calcGST_proto]
void calcGST(const cv::GMat& inputImg, cv::GMat& imgCoherencyOut, cv::GMat& imgOrientationOut, int w);
//! [calcGST_proto]
int main()
{
int W = 52; // window size is WxW
double C_Thr = 0.43; // threshold for coherency
int LowThr = 35; // threshold1 for orientation, it ranges from 0 to 180
int HighThr = 57; // threshold2 for orientation, it ranges from 0 to 180
cv::Mat imgIn = cv::imread("input.jpg", cv::IMREAD_GRAYSCALE);
if (imgIn.empty()) //check whether the image is loaded or not
{
std::cout << "ERROR : Image cannot be loaded..!!" << std::endl;
return -1;
}
//! [main]
// Calculate Gradient Structure Tensor and post-process it for output with G-API
cv::GMat in;
cv::GMat imgCoherency, imgOrientation;
calcGST(in, imgCoherency, imgOrientation, W);
auto imgCoherencyBin = imgCoherency > C_Thr;
auto imgOrientationBin = cv::gapi::inRange(imgOrientation, LowThr, HighThr);
auto imgBin = imgCoherencyBin & imgOrientationBin;
cv::GMat out = cv::gapi::addWeighted(in, 0.5, imgBin, 0.5, 0.0);
// Normalize extra outputs
cv::GMat imgCoherencyNorm = cv::gapi::normalize(imgCoherency, 0, 255, cv::NORM_MINMAX);
cv::GMat imgOrientationNorm = cv::gapi::normalize(imgOrientation, 0, 255, cv::NORM_MINMAX);
// Capture the graph into object segm
cv::GComputation segm(cv::GIn(in), cv::GOut(out, imgCoherencyNorm, imgOrientationNorm));
// Define cv::Mats for output data
cv::Mat imgOut, imgOutCoherency, imgOutOrientation;
//! [kernel_pkg_proper]
//! [kernel_pkg]
// Prepare the kernel package and run the graph
cv::gapi::GKernelPackage fluid_kernels = cv::gapi::combine // Define a custom kernel package:
(cv::gapi::core::fluid::kernels(), // ...with Fluid Core kernels
cv::gapi::imgproc::fluid::kernels()); // ...and Fluid ImgProc kernels
//! [kernel_pkg]
//! [kernel_hotfix]
fluid_kernels.remove<cv::gapi::imgproc::GBoxFilter>(); // Remove Fluid Box filter as unsuitable,
// G-API will fall-back to OpenCV there.
//! [kernel_hotfix]
//! [kernel_pkg_use]
segm.apply(cv::gin(imgIn), // Input data vector
cv::gout(imgOut, imgOutCoherency, imgOutOrientation), // Output data vector
cv::compile_args(fluid_kernels)); // Kernel package to use
//! [kernel_pkg_use]
//! [kernel_pkg_proper]
cv::imwrite("result.jpg", imgOut);
cv::imwrite("Coherency.jpg", imgOutCoherency);
cv::imwrite("Orientation.jpg", imgOutOrientation);
//! [main]
return 0;
}
//! [calcGST]
//! [calcGST_header]
void calcGST(const cv::GMat& inputImg, cv::GMat& imgCoherencyOut, cv::GMat& imgOrientationOut, int w)
{
auto img = cv::gapi::convertTo(inputImg, CV_32F);
auto imgDiffX = cv::gapi::Sobel(img, CV_32F, 1, 0, 3);
auto imgDiffY = cv::gapi::Sobel(img, CV_32F, 0, 1, 3);
auto imgDiffXY = cv::gapi::mul(imgDiffX, imgDiffY);
//! [calcGST_header]
auto imgDiffXX = cv::gapi::mul(imgDiffX, imgDiffX);
auto imgDiffYY = cv::gapi::mul(imgDiffY, imgDiffY);
auto J11 = cv::gapi::boxFilter(imgDiffXX, CV_32F, cv::Size(w, w));
auto J22 = cv::gapi::boxFilter(imgDiffYY, CV_32F, cv::Size(w, w));
auto J12 = cv::gapi::boxFilter(imgDiffXY, CV_32F, cv::Size(w, w));
auto tmp1 = J11 + J22;
auto tmp2 = J11 - J22;
auto tmp22 = cv::gapi::mul(tmp2, tmp2);
auto tmp3 = cv::gapi::mul(J12, J12);
auto tmp4 = cv::gapi::sqrt(tmp22 + 4.0*tmp3);
auto lambda1 = tmp1 + tmp4;
auto lambda2 = tmp1 - tmp4;
imgCoherencyOut = (lambda1 - lambda2) / (lambda1 + lambda2);
imgOrientationOut = 0.5*cv::gapi::phase(J22 - J11, 2.0*J12, true);
}
//! [calcGST]
//! [full_sample]
#else
#include <iostream>
int main()
{
std::cerr << "This tutorial code requires G-API module to run" << std::endl;
}
#endif // HAVE_OPENCV_GAPI


@@ -0,0 +1,351 @@
#include "opencv2/opencv_modules.hpp"
#include <iostream>
#if defined(HAVE_OPENCV_GAPI)
#include <chrono>
#include <iomanip>
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/gapi.hpp"
#include "opencv2/gapi/core.hpp"
#include "opencv2/gapi/imgproc.hpp"
#include "opencv2/gapi/infer.hpp"
#include "opencv2/gapi/infer/ie.hpp"
#include "opencv2/gapi/cpu/gcpukernel.hpp"
#include "opencv2/gapi/streaming/cap.hpp"
#include "opencv2/highgui.hpp"
const std::string about =
"This is an OpenCV-based version of Security Barrier Camera example";
const std::string keys =
"{ h help | | print this help message }"
"{ input | | Path to an input video file }"
"{ detm | | IE vehicle/license plate detection model IR }"
"{ detw | | IE vehicle/license plate detection model weights }"
"{ detd | | IE vehicle/license plate detection model device }"
"{ vehm | | IE vehicle attributes model IR }"
"{ vehw | | IE vehicle attributes model weights }"
"{ vehd | | IE vehicle attributes model device }"
"{ lprm | | IE license plate recognition model IR }"
"{ lprw | | IE license plate recognition model weights }"
"{ lprd | | IE license plate recognition model device }"
"{ pure | | When set, no output is displayed. Useful for benchmarking }"
"{ ser | | When set, runs a regular (serial) pipeline }";
namespace {
struct Avg {
struct Elapsed {
explicit Elapsed(double ms) : ss(ms/1000.), mm(static_cast<int>(ss)/60) {}
const double ss;
const int mm;
};
using MS = std::chrono::duration<double, std::ratio<1, 1000>>;
using TS = std::chrono::time_point<std::chrono::high_resolution_clock>;
TS started;
void start() { started = now(); }
TS now() const { return std::chrono::high_resolution_clock::now(); }
double tick() const { return std::chrono::duration_cast<MS>(now() - started).count(); }
Elapsed elapsed() const { return Elapsed{tick()}; }
double fps(std::size_t n) const { return static_cast<double>(n) / (tick() / 1000.); }
};
std::ostream& operator<<(std::ostream &os, const Avg::Elapsed &e) {
os << e.mm << ':' << (e.ss - 60*e.mm);
return os;
}
} // namespace
namespace custom {
G_API_NET(VehicleLicenseDetector, <cv::GMat(cv::GMat)>, "vehicle-license-plate-detector");
using Attrs = std::tuple<cv::GMat, cv::GMat>;
G_API_NET(VehicleAttributes, <Attrs(cv::GMat)>, "vehicle-attributes");
G_API_NET(LPR, <cv::GMat(cv::GMat)>, "license-plate-recognition");
using GVehiclesPlates = std::tuple< cv::GArray<cv::Rect>
, cv::GArray<cv::Rect> >;
G_API_OP_M(ProcessDetections,
<GVehiclesPlates(cv::GMat, cv::GMat)>,
"custom.security_barrier.detector.postproc") {
static std::tuple<cv::GArrayDesc,cv::GArrayDesc>
outMeta(const cv::GMatDesc &, const cv::GMatDesc) {
// FIXME: Need to get rid of this - literally there's nothing useful
return std::make_tuple(cv::empty_array_desc(), cv::empty_array_desc());
}
};
GAPI_OCV_KERNEL(OCVProcessDetections, ProcessDetections) {
static void run(const cv::Mat &in_ssd_result,
const cv::Mat &in_frame,
std::vector<cv::Rect> &out_vehicles,
std::vector<cv::Rect> &out_plates) {
const int MAX_PROPOSALS = 200;
const int OBJECT_SIZE = 7;
const cv::Size upscale = in_frame.size();
const cv::Rect surface({0,0}, upscale);
out_vehicles.clear();
out_plates.clear();
const float *data = in_ssd_result.ptr<float>();
for (int i = 0; i < MAX_PROPOSALS; i++) {
const float image_id = data[i * OBJECT_SIZE + 0]; // batch id
const float label = data[i * OBJECT_SIZE + 1];
const float confidence = data[i * OBJECT_SIZE + 2];
const float rc_left = data[i * OBJECT_SIZE + 3];
const float rc_top = data[i * OBJECT_SIZE + 4];
const float rc_right = data[i * OBJECT_SIZE + 5];
const float rc_bottom = data[i * OBJECT_SIZE + 6];
if (image_id < 0.f) { // indicates end of detections
break;
}
if (confidence < 0.5f) { // fixme: hard-coded confidence threshold
continue;
}
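// Convert the normalized SSD coordinates to absolute pixel coordinates
// in the source frame; the rectangle is clipped by the frame borders on push.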
cv::Rect rc;
rc.x = static_cast<int>(rc_left * upscale.width);
rc.y = static_cast<int>(rc_top * upscale.height);
rc.width = static_cast<int>(rc_right * upscale.width) - rc.x;
rc.height = static_cast<int>(rc_bottom * upscale.height) - rc.y;
using PT = cv::Point;
using SZ = cv::Size;
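// Label 1 is a vehicle, label 2 is a license plate; plate boxes are expanded
// by a 15-pixel margin on every side before clipping to the frame.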
switch (static_cast<int>(label)) {
case 1: out_vehicles.push_back(rc & surface); break;
case 2: out_plates.emplace_back((rc-PT(15,15)+SZ(30,30)) & surface); break;
default: CV_Assert(false && "Unknown object class");
}
}
}
};
} // namespace custom
namespace labels {
const std::string colors[] = {
"white", "gray", "yellow", "red", "green", "blue", "black"
};
const std::string types[] = {
"car", "van", "truck", "bus"
};
const std::vector<std::string> license_text = {
"0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
"<Anhui>", "<Beijing>", "<Chongqing>", "<Fujian>",
"<Gansu>", "<Guangdong>", "<Guangxi>", "<Guizhou>",
"<Hainan>", "<Hebei>", "<Heilongjiang>", "<Henan>",
"<HongKong>", "<Hubei>", "<Hunan>", "<InnerMongolia>",
"<Jiangsu>", "<Jiangxi>", "<Jilin>", "<Liaoning>",
"<Macau>", "<Ningxia>", "<Qinghai>", "<Shaanxi>",
"<Shandong>", "<Shanghai>", "<Shanxi>", "<Sichuan>",
"<Tianjin>", "<Tibet>", "<Xinjiang>", "<Yunnan>",
"<Zhejiang>", "<police>",
"A", "B", "C", "D", "E", "F", "G", "H", "I", "J",
"K", "L", "M", "N", "O", "P", "Q", "R", "S", "T",
"U", "V", "W", "X", "Y", "Z"
};
namespace {
void DrawResults(cv::Mat &frame,
const std::vector<cv::Rect> &vehicles,
const std::vector<cv::Mat> &out_colors,
const std::vector<cv::Mat> &out_types,
const std::vector<cv::Rect> &plates,
const std::vector<cv::Mat> &out_numbers) {
CV_Assert(vehicles.size() == out_colors.size());
CV_Assert(vehicles.size() == out_types.size());
CV_Assert(plates.size() == out_numbers.size());
for (auto it = vehicles.begin(); it != vehicles.end(); ++it) {
const auto idx = std::distance(vehicles.begin(), it);
const auto &rc = *it;
const float *colors_data = out_colors[idx].ptr<float>();
const float *types_data = out_types [idx].ptr<float>();
const auto color_id = std::max_element(colors_data, colors_data + 7) - colors_data;
const auto type_id = std::max_element(types_data, types_data + 4) - types_data;
const int ATTRIB_OFFSET = 25;
cv::rectangle(frame, rc, {0, 255, 0}, 4);
cv::putText(frame, labels::colors[color_id],
cv::Point(rc.x + 5, rc.y + ATTRIB_OFFSET),
cv::FONT_HERSHEY_COMPLEX_SMALL,
1,
cv::Scalar(255, 0, 0));
cv::putText(frame, labels::types[type_id],
cv::Point(rc.x + 5, rc.y + ATTRIB_OFFSET * 2),
cv::FONT_HERSHEY_COMPLEX_SMALL,
1,
cv::Scalar(255, 0, 0));
}
for (auto it = plates.begin(); it != plates.end(); ++it) {
const int MAX_LICENSE = 88;
const int LPR_OFFSET = 50;
const auto &rc = *it;
const auto idx = std::distance(plates.begin(), it);
std::string result;
const auto *lpr_data = out_numbers[idx].ptr<float>();
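// The LPR output is a sequence of class indices into labels::license_text,
// terminated by -1.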
for (int i = 0; i < MAX_LICENSE; i++) {
if (lpr_data[i] == -1) break;
result += labels::license_text[static_cast<size_t>(lpr_data[i])];
}
const int y_pos = std::max(0, rc.y + rc.height - LPR_OFFSET);
cv::rectangle(frame, rc, {0, 0, 255}, 4);
cv::putText(frame, result,
cv::Point(rc.x, y_pos),
cv::FONT_HERSHEY_COMPLEX_SMALL,
1,
cv::Scalar(0, 0, 255));
}
}
void DrawFPS(cv::Mat &frame, std::size_t n, double fps) {
std::ostringstream out;
out << "FRAME " << n << ": "
<< std::fixed << std::setprecision(2) << fps
<< " FPS (AVG)";
cv::putText(frame, out.str(),
cv::Point(0, frame.rows),
cv::FONT_HERSHEY_SIMPLEX,
1,
cv::Scalar(0, 0, 0),
2);
}
} // anonymous namespace
} // namespace labels
int main(int argc, char *argv[])
{
cv::CommandLineParser cmd(argc, argv, keys);
cmd.about(about);
if (cmd.has("help")) {
cmd.printMessage();
return 0;
}
const std::string input = cmd.get<std::string>("input");
const bool no_show = cmd.get<bool>("pure");
cv::GComputation pp([]() {
cv::GMat in;
cv::GMat detections = cv::gapi::infer<custom::VehicleLicenseDetector>(in);
cv::GArray<cv::Rect> vehicles;
cv::GArray<cv::Rect> plates;
std::tie(vehicles, plates) = custom::ProcessDetections::on(detections, in);
cv::GArray<cv::GMat> colors;
cv::GArray<cv::GMat> types;
std::tie(colors, types) = cv::gapi::infer<custom::VehicleAttributes>(vehicles, in);
cv::GArray<cv::GMat> numbers = cv::gapi::infer<custom::LPR>(plates, in);
cv::GMat frame = cv::gapi::copy(in); // pass-through the input frame
return cv::GComputation(cv::GIn(in),
cv::GOut(frame, vehicles, colors, types, plates, numbers));
});
// Note: it might be very useful to have dimensions loaded at this point!
auto det_net = cv::gapi::ie::Params<custom::VehicleLicenseDetector> {
cmd.get<std::string>("detm"), // path to topology IR
cmd.get<std::string>("detw"), // path to weights
cmd.get<std::string>("detd"), // device specifier
};
auto attr_net = cv::gapi::ie::Params<custom::VehicleAttributes> {
cmd.get<std::string>("vehm"), // path to topology IR
cmd.get<std::string>("vehw"), // path to weights
cmd.get<std::string>("vehd"), // device specifier
}.cfgOutputLayers({ "color", "type" });
// Fill a special LPR input (seq_ind) with a predefined value
// First element is 0.f, the rest 87 are 1.f
const std::vector<int> lpr_seq_dims = {88,1};
cv::Mat lpr_seq(lpr_seq_dims, CV_32F, cv::Scalar(1.f));
lpr_seq.ptr<float>()[0] = 0.f;
auto lpr_net = cv::gapi::ie::Params<custom::LPR> {
cmd.get<std::string>("lprm"), // path to topology IR
cmd.get<std::string>("lprw"), // path to weights
cmd.get<std::string>("lprd"), // device specifier
}.constInput("seq_ind", lpr_seq);
auto kernels = cv::gapi::kernels<custom::OCVProcessDetections>();
auto networks = cv::gapi::networks(det_net, attr_net, lpr_net);
Avg avg;
cv::Mat frame;
std::vector<cv::Rect> vehicles, plates;
std::vector<cv::Mat> out_colors;
std::vector<cv::Mat> out_types;
std::vector<cv::Mat> out_numbers;
std::size_t frames = 0u;
std::cout << "Reading " << input << std::endl;
if (cmd.get<bool>("ser")) {
std::cout << "Going serial..." << std::endl;
cv::VideoCapture cap(input);
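// Pre-compile the graph for a fixed input format: 8-bit, 3-channel, 1920x1080 frames.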
auto cc = pp.compile(cv::GMatDesc{CV_8U,3,cv::Size(1920,1080)},
cv::compile_args(kernels, networks));
avg.start();
while (cv::waitKey(1) < 0) {
cap >> frame;
if (frame.empty()) break;
cc(cv::gin(frame),
cv::gout(frame, vehicles, out_colors, out_types, plates, out_numbers));
frames++;
labels::DrawResults(frame, vehicles, out_colors, out_types, plates, out_numbers);
labels::DrawFPS(frame, frames, avg.fps(frames));
if (!no_show) cv::imshow("Out", frame);
}
} else {
std::cout << "Going pipelined..." << std::endl;
auto cc = pp.compileStreaming(cv::GMatDesc{CV_8U,3,cv::Size(1920,1080)},
cv::compile_args(kernels, networks));
cc.setSource(cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(input));
avg.start();
cc.start();
// Implement different execution policies depending on the display option
// for the best performance.
while (cc.running()) {
auto out_vector = cv::gout(frame, vehicles, out_colors, out_types, plates, out_numbers);
if (no_show) {
// This is purely video processing. No need to balance it with UI rendering.
// Use a blocking pull() to obtain data. Break the loop if the stream is over.
if (!cc.pull(std::move(out_vector)))
break;
} else if (!cc.try_pull(std::move(out_vector))) {
// Use a non-blocking try_pull() to obtain data.
// If there's no data, let UI refresh (and handle keypress)
if (cv::waitKey(1) >= 0) break;
else continue;
}
// At this point we have data for sure (obtained in either blocking or non-blocking way).
frames++;
labels::DrawResults(frame, vehicles, out_colors, out_types, plates, out_numbers);
labels::DrawFPS(frame, frames, avg.fps(frames));
if (!no_show) cv::imshow("Out", frame);
}
cc.stop();
}
std::cout << "Processed " << frames << " frames in " << avg.elapsed() << std::endl;
return 0;
}
#else
int main()
{
std::cerr << "This tutorial code requires G-API module "
"with Inference Engine backend to run"
<< std::endl;
return 1;
}
#endif // HAVE_OPENCV_GAPI