feat: switch backend to PaddleOCR-NCNN, switch the project to CMake

1. The project backend has been fully migrated to the PaddleOCR-NCNN algorithm and has passed basic compatibility tests.
2. The project is now organized with CMake; to better accommodate third-party libraries going forward, a QMake project is no longer provided.
3. Reorganized the rights/notice files and the code tree to minimize the risk of infringement.

Log: switch backend to PaddleOCR-NCNN, switch the project to CMake
Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
wangzhengyang
2022-05-10 09:54:44 +08:00
parent ecdd171c6f
commit 718c41634f
10018 changed files with 3593797 additions and 186748 deletions


@ -0,0 +1,43 @@
if(NCNN_PIXEL)
    if(NOT NCNN_SIMPLEOCV)
        find_package(OpenCV QUIET COMPONENTS opencv_world)
        # for opencv 2.4 on ubuntu 16.04, there is no opencv_world but OpenCV_FOUND will be TRUE
        if("${OpenCV_LIBS}" STREQUAL "")
            set(OpenCV_FOUND FALSE)
        endif()
        if(NOT OpenCV_FOUND)
            find_package(OpenCV QUIET COMPONENTS core highgui imgproc imgcodecs)
        endif()
        if(NOT OpenCV_FOUND)
            find_package(OpenCV QUIET COMPONENTS core highgui imgproc)
        endif()
    endif()

    if(OpenCV_FOUND)
        add_executable(ncnn2table ncnn2table.cpp)
        target_include_directories(ncnn2table PRIVATE ${OpenCV_INCLUDE_DIRS})
        target_link_libraries(ncnn2table PRIVATE ncnn ${OpenCV_LIBS})
    elseif(NCNN_SIMPLEOCV)
        add_executable(ncnn2table ncnn2table.cpp)
        target_compile_definitions(ncnn2table PUBLIC USE_NCNN_SIMPLEOCV)
        target_link_libraries(ncnn2table PRIVATE ncnn)
    else()
        add_executable(ncnn2table ncnn2table.cpp imreadwrite.cpp)
        target_compile_definitions(ncnn2table PUBLIC USE_LOCAL_IMREADWRITE)
        target_link_libraries(ncnn2table PRIVATE ncnn)
    endif()

    # add ncnn2table tool to a virtual project group
    set_property(TARGET ncnn2table PROPERTY FOLDER "tools/optimization")
endif()

add_executable(ncnn2int8 ncnn2int8.cpp)
target_link_libraries(ncnn2int8 PRIVATE ncnn)

# add ncnn2int8 tool to a virtual project group
set_property(TARGET ncnn2int8 PROPERTY FOLDER "tools/optimization")

ncnn_install_tool(ncnn2table)
ncnn_install_tool(ncnn2int8)
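For reference, a minimal configure sketch of the three paths selected above (assuming a standard ncnn checkout; NCNN_PIXEL and NCNN_SIMPLEOCV are the options tested in this file):

cmake ..                        # ncnn2table links OpenCV if any find_package() attempt succeeds
cmake -DNCNN_SIMPLEOCV=ON ..    # build against ncnn's bundled simpleocv instead
# if neither is available, the local imreadwrite.cpp fallback is compiled in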


@ -0,0 +1 @@
see [quantized-int8-inference](../../docs/how-to-use-and-FAQ/quantized-int8-inference.md)
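The linked guide covers the two-step workflow; a hedged sketch of the commands it describes (flags, model names, and values here are illustrative):

# 1. derive per-layer scales from a set of calibration images
./ncnn2table squeezenet.param squeezenet.bin imagelist.txt squeezenet.table mean=[104,117,123] norm=[0.017,0.017,0.017] shape=[224,224,3] pixel=BGR method=kl
# 2. write the int8 param/bin using that table (argument order matches ncnn2int8's usage string below)
./ncnn2int8 squeezenet.param squeezenet.bin squeezenet-int8.param squeezenet-int8.bin squeezenet.table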


@ -0,0 +1,212 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "imreadwrite.h"
#include <stdio.h>
#define STB_IMAGE_IMPLEMENTATION
#define STBI_NO_THREAD_LOCALS
#define STBI_ONLY_JPEG
#define STBI_ONLY_PNG
#define STBI_ONLY_BMP
#define STBI_ONLY_PNM
#include "../../src/stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "../../src/stb_image_write.h"
namespace cv {

Mat imread(const std::string& path, int flags)
{
    int desired_channels = 0;
    if (flags == IMREAD_UNCHANGED)
    {
        desired_channels = 0;
    }
    else if (flags == IMREAD_GRAYSCALE)
    {
        desired_channels = 1;
    }
    else if (flags == IMREAD_COLOR)
    {
        desired_channels = 3;
    }
    else
    {
        // unknown flags
        return Mat();
    }

    int w;
    int h;
    int c;
    unsigned char* pixeldata = stbi_load(path.c_str(), &w, &h, &c, desired_channels);
    if (!pixeldata)
    {
        // load failed
        return Mat();
    }

    if (desired_channels)
    {
        c = desired_channels;
    }

    // copy pixeldata to Mat
    Mat img;
    if (c == 1)
    {
        img.create(h, w, CV_8UC1);
    }
    else if (c == 3)
    {
        img.create(h, w, CV_8UC3);
    }
    else if (c == 4)
    {
        img.create(h, w, CV_8UC4);
    }
    else
    {
        // unexpected channels
        stbi_image_free(pixeldata);
        return Mat();
    }

    memcpy(img.data, pixeldata, w * h * c);

    stbi_image_free(pixeldata);

    // // resolve exif orientation
    // {
    //     std::ifstream ifs;
    //     ifs.open(filename.c_str(), std::ifstream::in);
    //
    //     if (ifs.good())
    //     {
    //         ExifReader exif_reader(ifs);
    //         if (exif_reader.parse())
    //         {
    //             ExifEntry_t e = exif_reader.getTag(ORIENTATION);
    //             int orientation = e.field_u16;
    //             if (orientation >= 1 && orientation <= 8)
    //                 rotate_by_orientation(img, img, orientation);
    //         }
    //     }
    //
    //     ifs.close();
    // }

    // rgb to bgr
    if (c == 3)
    {
        uchar* p = img.data;
        for (int i = 0; i < w * h; i++)
        {
            std::swap(p[0], p[2]);
            p += 3;
        }
    }
    if (c == 4)
    {
        uchar* p = img.data;
        for (int i = 0; i < w * h; i++)
        {
            std::swap(p[0], p[2]);
            p += 4;
        }
    }

    return img;
}

bool imwrite(const std::string& path, const Mat& m, const std::vector<int>& params)
{
    const char* _ext = strrchr(path.c_str(), '.');
    if (!_ext)
    {
        // missing extension
        return false;
    }

    std::string ext = _ext;
    Mat img = m.clone();

    // bgr to rgb
    int c = 0;
    if (img.type() == CV_8UC1)
    {
        c = 1;
    }
    else if (img.type() == CV_8UC3)
    {
        c = 3;
        uchar* p = img.data;
        for (int i = 0; i < img.cols * img.rows; i++)
        {
            std::swap(p[0], p[2]);
            p += 3;
        }
    }
    else if (img.type() == CV_8UC4)
    {
        c = 4;
        uchar* p = img.data;
        for (int i = 0; i < img.cols * img.rows; i++)
        {
            std::swap(p[0], p[2]);
            p += 4;
        }
    }
    else
    {
        // unexpected image channels
        return false;
    }

    bool success = false;

    if (ext == ".jpg" || ext == ".jpeg" || ext == ".JPG" || ext == ".JPEG")
    {
        int quality = 95;
        for (size_t i = 0; i < params.size(); i += 2)
        {
            if (params[i] == IMWRITE_JPEG_QUALITY)
            {
                quality = params[i + 1];
                break;
            }
        }
        success = stbi_write_jpg(path.c_str(), img.cols, img.rows, c, img.data, quality);
    }
    else if (ext == ".png" || ext == ".PNG")
    {
        success = stbi_write_png(path.c_str(), img.cols, img.rows, c, img.data, 0);
    }
    else if (ext == ".bmp" || ext == ".BMP")
    {
        success = stbi_write_bmp(path.c_str(), img.cols, img.rows, c, img.data);
    }
    else
    {
        // unknown extension type
        return false;
    }

    return success;
}

} // namespace cv
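For a quick sanity check, a minimal round-trip through this shim might look as follows (file names are hypothetical, and the program must link against ncnn for fastMalloc/fastFree):

#include <vector>
#include "imreadwrite.h"

int main()
{
    // decode to 3-channel BGR, as with OpenCV's default flag
    cv::Mat img = cv::imread("in.jpg", cv::IMREAD_COLOR);
    if (img.empty())
        return -1;

    // re-encode as JPEG at quality 90 via the single supported write flag
    std::vector<int> params;
    params.push_back(cv::IMWRITE_JPEG_QUALITY);
    params.push_back(90);
    return cv::imwrite("out.jpg", img, params) ? 0 : -1;
}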


@ -0,0 +1,200 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef IMREADWRITE_H
#define IMREADWRITE_H
#include <limits.h>
#include <string.h>
#include "allocator.h"
#include "mat.h"
#ifndef NCNN_XADD
using ncnn::NCNN_XADD;
#endif
typedef unsigned char uchar;
enum
{
    CV_LOAD_IMAGE_UNCHANGED = -1,
    CV_LOAD_IMAGE_GRAYSCALE = 0,
    CV_LOAD_IMAGE_COLOR = 1,
};

enum
{
    CV_IMWRITE_JPEG_QUALITY = 1
};

// minimal opencv style data structure implementation
namespace cv {

#define CV_8UC1 1
#define CV_8UC3 3
#define CV_8UC4 4
#define CV_32FC1 4

struct Mat
{
    Mat()
        : data(0), refcount(0), rows(0), cols(0), c(0)
    {
    }

    Mat(int _rows, int _cols, int flags)
        : data(0), refcount(0)
    {
        create(_rows, _cols, flags);
    }

    // copy
    Mat(const Mat& m)
        : data(m.data), refcount(m.refcount)
    {
        if (refcount)
            NCNN_XADD(refcount, 1);

        rows = m.rows;
        cols = m.cols;
        c = m.c;
    }

    Mat(int _rows, int _cols, int flags, void* _data)
        : data((unsigned char*)_data), refcount(0)
    {
        rows = _rows;
        cols = _cols;
        c = flags;
    }

    ~Mat()
    {
        release();
    }

    // assign
    Mat& operator=(const Mat& m)
    {
        if (this == &m)
            return *this;

        if (m.refcount)
            NCNN_XADD(m.refcount, 1);

        release();

        data = m.data;
        refcount = m.refcount;

        rows = m.rows;
        cols = m.cols;
        c = m.c;

        return *this;
    }

    void create(int _rows, int _cols, int flags)
    {
        release();

        rows = _rows;
        cols = _cols;
        c = flags;

        if (total() > 0)
        {
            // refcount address must be aligned, so we expand totalsize here
            size_t totalsize = (total() + 3) >> 2 << 2;
            data = (uchar*)ncnn::fastMalloc(totalsize + (int)sizeof(*refcount));
            refcount = (int*)(((uchar*)data) + totalsize);
            *refcount = 1;
        }
    }

    void release()
    {
        if (refcount && NCNN_XADD(refcount, -1) == 1)
            ncnn::fastFree(data);

        data = 0;
        rows = 0;
        cols = 0;
        c = 0;
        refcount = 0;
    }

    Mat clone() const
    {
        if (empty())
            return Mat();

        Mat m(rows, cols, c);
        if (total() > 0)
        {
            memcpy(m.data, data, total());
        }
        return m;
    }

    bool empty() const
    {
        return data == 0 || total() == 0;
    }

    int type() const
    {
        return c;
    }

    size_t total() const
    {
        return cols * rows * c;
    }

    uchar* data;

    // pointer to the reference counter;
    // when points to user-allocated data, the pointer is NULL
    int* refcount;

    int rows;
    int cols;
    int c;
};

enum ImreadModes
{
    IMREAD_UNCHANGED = -1,
    IMREAD_GRAYSCALE = 0,
    IMREAD_COLOR = 1
};

Mat imread(const std::string& path, int flags = IMREAD_COLOR);

enum ImwriteFlags
{
    IMWRITE_JPEG_QUALITY = 1
};

bool imwrite(const std::string& path, const Mat& m, const std::vector<int>& params = std::vector<int>());

} // namespace cv

#endif // IMREADWRITE_H
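A short sketch of the ownership rules this Mat implements: plain copies share the pixel buffer through the trailing reference counter, while clone() deep-copies.

#include "imreadwrite.h"

int main()
{
    cv::Mat a(4, 4, CV_8UC3); // allocates 4*4*3 bytes plus the trailing refcount
    cv::Mat b = a;            // shallow copy: b.data == a.data, *refcount is now 2
    cv::Mat c = a.clone();    // deep copy: independent buffer and refcount
    return 0;                 // a and b free the shared buffer once both release
}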


@ -0,0 +1,564 @@
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifdef _MSC_VER
#define _CRT_SECURE_NO_DEPRECATE
#endif
#include <cstdio>
#include <cstring>
#include <map>
#include <set>
#include <vector>
// ncnn public header
#include "datareader.h"
#include "layer.h"
#include "layer_type.h"
#include "net.h"
// ncnn private header
#include "../modelwriter.h"
class DataReaderFromEmpty : public ncnn::DataReader
{
public:
    virtual int scan(const char* format, void* p) const
    {
        return 0;
    }

    virtual size_t read(void* buf, size_t size) const
    {
        memset(buf, 0, size);
        return size;
    }
};
static bool read_int8scale_table(const char* filepath, std::map<std::string, ncnn::Mat>& blob_int8scale_table, std::map<std::string, ncnn::Mat>& weight_int8scale_table)
{
    blob_int8scale_table.clear();
    weight_int8scale_table.clear();

    FILE* fp = fopen(filepath, "rb");
    if (!fp)
    {
        fprintf(stderr, "Open %s failed.\n", filepath);
        return false;
    }

    std::string key_str;
    std::vector<float> scales;

    std::vector<char> line(10240000);
    char* pch = NULL;

    while (!feof(fp))
    {
        char* s = fgets(line.data(), (int)line.size(), fp);
        if (!s)
            break;

        float scale = 1.f;
        char key[256];
        line[strcspn(line.data(), "\r\n")] = 0;

        pch = strtok(line.data(), " ");
        if (pch == NULL)
            break;

        bool is_key = true;
        while (pch != NULL)
        {
            if (is_key)
            {
                sscanf(pch, "%255s", key);
                key_str = key;
                is_key = false;
            }
            else
            {
                sscanf(pch, "%f", &scale);
                scales.push_back(scale);
            }

            pch = strtok(NULL, " ");
        }

        // XYZ_param_N pattern
        if (strstr(key_str.c_str(), "_param_"))
        {
            weight_int8scale_table[key_str] = ncnn::Mat((int)scales.size(), (void*)scales.data()).clone();
        }
        else
        {
            blob_int8scale_table[key_str] = ncnn::Mat((int)scales.size(), (void*)scales.data()).clone();
        }

        key_str.clear();
        scales.clear();
    }

    fclose(fp);

    return true;
}
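// A calibration table is plain text: one key per line followed by one or more
// scale floats (the values below are illustrative):
//   conv1_param_0 156.639
//   conv1 0.841264
// Keys containing "_param_" carry weight scales; all other keys carry the
// input blob scales for the layer of that name.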
class NetQuantize : public ModelWriter
{
public:
    NetQuantize();

    std::map<std::string, ncnn::Mat> blob_int8scale_table;
    std::map<std::string, ncnn::Mat> weight_int8scale_table;

public:
    int quantize_convolution();
    int quantize_convolutiondepthwise();
    int quantize_innerproduct();

    int fuse_requantize();
};

NetQuantize::NetQuantize()
    : ModelWriter()
{
}
int NetQuantize::quantize_convolution()
{
    const int layer_count = static_cast<int>(layers.size());
    for (int i = 0; i < layer_count; i++)
    {
        // find convolution layer
        if (layers[i]->type != "Convolution")
            continue;

        // find the input blob scale for this layer
        std::map<std::string, ncnn::Mat>::iterator iter_data = blob_int8scale_table.find(layers[i]->name);
        if (iter_data == blob_int8scale_table.end())
            continue;

        char key[256];
        sprintf(key, "%s_param_0", layers[i]->name.c_str());

        std::map<std::string, ncnn::Mat>::iterator iter = weight_int8scale_table.find(key);
        if (iter == weight_int8scale_table.end())
        {
            fprintf(stderr, "this layer needs to be quantized, but no scale param!\n");
            return -1;
        }

        // Convolution - quantize weight from fp32 to int8
        ncnn::Convolution* convolution = (ncnn::Convolution*)layers[i];

        ncnn::Mat bottom_blob_int8_scales = iter_data->second;
        ncnn::Mat weight_data_int8_scales = iter->second;

        fprintf(stderr, "quantize_convolution %s\n", convolution->name.c_str());

        {
            const int maxk = convolution->kernel_w * convolution->kernel_h;
            const int num_input = convolution->weight_data_size / convolution->num_output / maxk;

            ncnn::Mat weight_data_r2 = convolution->weight_data.reshape(maxk, num_input, convolution->num_output);

            ncnn::Mat weight_data_int8;

            ncnn::Option opt_q = opt;
            opt_q.blob_allocator = convolution->weight_data.allocator;
            opt_q.use_packing_layout = false;
            ncnn::quantize_to_int8(weight_data_r2, weight_data_int8, weight_data_int8_scales, opt_q);
            if (weight_data_int8.empty())
                return -100;

            convolution->weight_data = weight_data_int8.reshape(convolution->weight_data_size);
        }

        convolution->int8_scale_term = 2;
        convolution->weight_data_int8_scales = weight_data_int8_scales;
        convolution->bottom_blob_int8_scales = bottom_blob_int8_scales;
    }

    return 0;
}
int NetQuantize::quantize_convolutiondepthwise()
{
    const int layer_count = static_cast<int>(layers.size());
    for (int i = 0; i < layer_count; i++)
    {
        // find convolutiondepthwise layer
        if (layers[i]->type != "ConvolutionDepthWise")
            continue;

        // find the input blob scale for this layer
        std::map<std::string, ncnn::Mat>::iterator iter_data = blob_int8scale_table.find(layers[i]->name);
        if (iter_data == blob_int8scale_table.end())
            continue;

        char key[256];
        sprintf(key, "%s_param_0", layers[i]->name.c_str());

        std::map<std::string, ncnn::Mat>::iterator iter = weight_int8scale_table.find(key);
        if (iter == weight_int8scale_table.end())
        {
            fprintf(stderr, "this layer needs to be quantized, but no scale param!\n");
            return -1;
        }

        // ConvolutionDepthWise - quantize weight from fp32 to int8, group by group
        ncnn::ConvolutionDepthWise* convdw = (ncnn::ConvolutionDepthWise*)layers[i];

        ncnn::Mat bottom_blob_int8_scales = iter_data->second;
        ncnn::Mat weight_data_int8_scales = iter->second;

        fprintf(stderr, "quantize_convolutiondepthwise %s\n", convdw->name.c_str());

        {
            ncnn::Mat int8_weight_data(convdw->weight_data_size, (size_t)1u);
            if (int8_weight_data.empty())
                return -100;

            const int weight_data_size_g = convdw->weight_data_size / convdw->group;

            for (int g = 0; g < convdw->group; g++)
            {
                ncnn::Option opt_q = opt;
                opt_q.blob_allocator = int8_weight_data.allocator;
                opt_q.use_packing_layout = false;

                const ncnn::Mat weight_data_g = convdw->weight_data.range(weight_data_size_g * g, weight_data_size_g);
                ncnn::Mat int8_weight_data_g = int8_weight_data.range(weight_data_size_g * g, weight_data_size_g);
                const ncnn::Mat weight_data_int8_scales_g = weight_data_int8_scales.range(g, 1);
                ncnn::quantize_to_int8(weight_data_g, int8_weight_data_g, weight_data_int8_scales_g, opt_q);
            }

            convdw->weight_data = int8_weight_data;
        }

        convdw->int8_scale_term = 1;
        convdw->weight_data_int8_scales = weight_data_int8_scales;
        convdw->bottom_blob_int8_scales = bottom_blob_int8_scales;
    }

    return 0;
}
int NetQuantize::quantize_innerproduct()
{
    const int layer_count = static_cast<int>(layers.size());
    for (int i = 0; i < layer_count; i++)
    {
        // find innerproduct layer
        if (layers[i]->type != "InnerProduct")
            continue;

        // find the input blob scale for this layer
        std::map<std::string, ncnn::Mat>::iterator iter_data = blob_int8scale_table.find(layers[i]->name);
        if (iter_data == blob_int8scale_table.end())
            continue;

        char key[256];
        sprintf(key, "%s_param_0", layers[i]->name.c_str());

        std::map<std::string, ncnn::Mat>::iterator iter = weight_int8scale_table.find(key);
        if (iter == weight_int8scale_table.end())
        {
            fprintf(stderr, "this layer needs to be quantized, but no scale param!\n");
            return -1;
        }

        // InnerProduct - quantize weight from fp32 to int8
        ncnn::InnerProduct* fc = (ncnn::InnerProduct*)layers[i];

        ncnn::Mat bottom_blob_int8_scales = iter_data->second;
        ncnn::Mat weight_data_int8_scales = iter->second;

        fprintf(stderr, "quantize_innerproduct %s\n", fc->name.c_str());

        {
            const int num_input = fc->weight_data_size / fc->num_output;

            ncnn::Mat weight_data_r2 = fc->weight_data.reshape(num_input, fc->num_output);

            ncnn::Mat weight_data_int8;

            ncnn::Option opt_q = opt;
            opt_q.use_packing_layout = false;
            ncnn::quantize_to_int8(weight_data_r2, weight_data_int8, weight_data_int8_scales, opt_q);
            if (weight_data_int8.empty())
                return -100;

            fc->weight_data = weight_data_int8.reshape(fc->weight_data_size);
        }

        fc->int8_scale_term = 2;
        fc->weight_data_int8_scales = weight_data_int8_scales;
        fc->bottom_blob_int8_scales = bottom_blob_int8_scales;
    }

    return 0;
}
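// fuse_requantize() below looks for int8 Convolution/ConvolutionDepthWise
// pairs that feed each other, either directly or through a Split whose
// consumers are all int8 convolutions. Adding 100 to int8_scale_term marks the
// producer to emit a requantized int8 top blob using the consumer's
// bottom_blob_int8_scales, skipping the intermediate fp32 round trip.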
int NetQuantize::fuse_requantize()
{
    const size_t layer_count = layers.size();
    for (size_t i = 0; i < layer_count; i++)
    {
        if (layers[i]->type != "Convolution" && layers[i]->type != "ConvolutionDepthWise")
            continue;

        // Convolution/ConvolutionDepthWise - Convolution/ConvolutionDepthWise
        int top_blob_index = layers[i]->tops[0];

        size_t j = i + 1;
        for (; j < layer_count; j++)
        {
            if (layers[j]->type != "Convolution" && layers[j]->type != "ConvolutionDepthWise")
                continue;

            if (layers[j]->bottoms.size() != 1)
                continue;

            if (layers[j]->bottoms[0] == top_blob_index)
                break;
        }

        if (j == layer_count)
            continue;

        // fuse requantize
        fprintf(stderr, "fuse_requantize %s %s\n", layers[i]->name.c_str(), layers[j]->name.c_str());

        if (layers[i]->type == "Convolution" && layers[j]->type == "Convolution")
        {
            ncnn::Convolution* convolution1 = (ncnn::Convolution*)layers[i];
            ncnn::Convolution* convolution2 = (ncnn::Convolution*)layers[j];

            if (convolution1->weight_data.elemsize != 1u || convolution2->weight_data.elemsize != 1u)
                continue;

            convolution1->int8_scale_term += 100;
            convolution1->top_blob_int8_scales = convolution2->bottom_blob_int8_scales;
        }
        if (layers[i]->type == "Convolution" && layers[j]->type == "ConvolutionDepthWise")
        {
            ncnn::Convolution* convolution1 = (ncnn::Convolution*)layers[i];
            ncnn::ConvolutionDepthWise* convolution2 = (ncnn::ConvolutionDepthWise*)layers[j];

            if (convolution1->weight_data.elemsize != 1u || convolution2->weight_data.elemsize != 1u)
                continue;

            convolution1->int8_scale_term += 100;
            convolution1->top_blob_int8_scales = convolution2->bottom_blob_int8_scales;
        }
        if (layers[i]->type == "ConvolutionDepthWise" && layers[j]->type == "Convolution")
        {
            ncnn::ConvolutionDepthWise* convolution1 = (ncnn::ConvolutionDepthWise*)layers[i];
            ncnn::Convolution* convolution2 = (ncnn::Convolution*)layers[j];

            if (convolution1->weight_data.elemsize != 1u || convolution2->weight_data.elemsize != 1u)
                continue;

            convolution1->int8_scale_term += 100;
            convolution1->top_blob_int8_scales = convolution2->bottom_blob_int8_scales;
        }
        if (layers[i]->type == "ConvolutionDepthWise" && layers[j]->type == "ConvolutionDepthWise")
        {
            ncnn::ConvolutionDepthWise* convolution1 = (ncnn::ConvolutionDepthWise*)layers[i];
            ncnn::ConvolutionDepthWise* convolution2 = (ncnn::ConvolutionDepthWise*)layers[j];

            if (convolution1->weight_data.elemsize != 1u || convolution2->weight_data.elemsize != 1u)
                continue;

            convolution1->int8_scale_term += 100;
            convolution1->top_blob_int8_scales = convolution2->bottom_blob_int8_scales;
        }
    }

    for (size_t i = 0; i < layer_count; i++)
    {
        if (layers[i]->type != "Convolution" && layers[i]->type != "ConvolutionDepthWise")
            continue;

        // Convolution/ConvolutionDepthWise - Split - Convolution/ConvolutionDepthWise
        int top_blob_index = layers[i]->tops[0];

        size_t j = i + 1;
        for (; j < layer_count; j++)
        {
            if (layers[j]->type != "Split")
                continue;

            if (layers[j]->bottoms.size() != 1)
                continue;

            if (layers[j]->bottoms[0] == top_blob_index)
                break;
        }

        if (j == layer_count)
            continue;

        ncnn::Split* split = (ncnn::Split*)layers[j];

        bool all_conv = true;
        for (size_t p = 0; p < split->tops.size(); p++)
        {
            int split_top_blob_index = split->tops[p];

            size_t k = j + 1;
            for (; k < layer_count; k++)
            {
                if (layers[k]->type != "Convolution" && layers[k]->type != "ConvolutionDepthWise")
                    continue;

                if (layers[k]->bottoms.size() != 1)
                    continue;

                if (layers[k]->bottoms[0] == split_top_blob_index)
                    break;
            }

            if (k == layer_count)
            {
                all_conv = false;
                break;
            }

            if (layers[k]->type == "Convolution")
            {
                ncnn::Convolution* convolution = (ncnn::Convolution*)layers[k];
                if (convolution->weight_data.elemsize != 1u)
                {
                    all_conv = false;
                    break;
                }
            }
            if (layers[k]->type == "ConvolutionDepthWise")
            {
                ncnn::ConvolutionDepthWise* convolution = (ncnn::ConvolutionDepthWise*)layers[k];
                if (convolution->weight_data.elemsize != 1u)
                {
                    all_conv = false;
                    break;
                }
            }
        }

        if (!all_conv)
            continue;

        j = blobs[split->tops[0]].consumer;

        // fuse requantize
        fprintf(stderr, "fuse_requantize %s %s\n", layers[i]->name.c_str(), split->name.c_str());

        if (layers[i]->type == "Convolution" && layers[j]->type == "Convolution")
        {
            ncnn::Convolution* convolution1 = (ncnn::Convolution*)layers[i];
            ncnn::Convolution* convolution2 = (ncnn::Convolution*)layers[j];

            if (convolution1->weight_data.elemsize != 1u || convolution2->weight_data.elemsize != 1u)
                continue;

            convolution1->int8_scale_term += 100;
            convolution1->top_blob_int8_scales = convolution2->bottom_blob_int8_scales;
        }
        if (layers[i]->type == "Convolution" && layers[j]->type == "ConvolutionDepthWise")
        {
            ncnn::Convolution* convolution1 = (ncnn::Convolution*)layers[i];
            ncnn::ConvolutionDepthWise* convolution2 = (ncnn::ConvolutionDepthWise*)layers[j];

            if (convolution1->weight_data.elemsize != 1u || convolution2->weight_data.elemsize != 1u)
                continue;

            convolution1->int8_scale_term += 100;
            convolution1->top_blob_int8_scales = convolution2->bottom_blob_int8_scales;
        }
        if (layers[i]->type == "ConvolutionDepthWise" && layers[j]->type == "Convolution")
        {
            ncnn::ConvolutionDepthWise* convolution1 = (ncnn::ConvolutionDepthWise*)layers[i];
            ncnn::Convolution* convolution2 = (ncnn::Convolution*)layers[j];

            if (convolution1->weight_data.elemsize != 1u || convolution2->weight_data.elemsize != 1u)
                continue;

            convolution1->int8_scale_term += 100;
            convolution1->top_blob_int8_scales = convolution2->bottom_blob_int8_scales;
        }
        if (layers[i]->type == "ConvolutionDepthWise" && layers[j]->type == "ConvolutionDepthWise")
        {
            ncnn::ConvolutionDepthWise* convolution1 = (ncnn::ConvolutionDepthWise*)layers[i];
            ncnn::ConvolutionDepthWise* convolution2 = (ncnn::ConvolutionDepthWise*)layers[j];

            if (convolution1->weight_data.elemsize != 1u || convolution2->weight_data.elemsize != 1u)
                continue;

            convolution1->int8_scale_term += 100;
            convolution1->top_blob_int8_scales = convolution2->bottom_blob_int8_scales;
        }
    }

    return 0;
}
int main(int argc, char** argv)
{
    if (argc != 6)
    {
        fprintf(stderr, "usage: %s [inparam] [inbin] [outparam] [outbin] [calibration table]\n", argv[0]);
        return -1;
    }

    const char* inparam = argv[1];
    const char* inbin = argv[2];
    const char* outparam = argv[3];
    const char* outbin = argv[4];
    const char* int8scale_table_path = argv[5];

    NetQuantize quantizer;

    // parse the calibration scale table
    if (int8scale_table_path)
    {
        bool s2 = read_int8scale_table(int8scale_table_path, quantizer.blob_int8scale_table, quantizer.weight_int8scale_table);
        if (!s2)
        {
            fprintf(stderr, "read_int8scale_table failed\n");
            return -1;
        }
    }

    quantizer.load_param(inparam);

    if (strcmp(inbin, "null") == 0)
    {
        DataReaderFromEmpty dr;
        quantizer.load_model(dr);
        quantizer.gen_random_weight = true;
    }
    else
        quantizer.load_model(inbin);

    quantizer.quantize_convolution();
    quantizer.quantize_convolutiondepthwise();
    quantizer.quantize_innerproduct();

    quantizer.fuse_requantize();

    quantizer.save(outparam, outbin);

    return 0;
}

(Remaining file diff suppressed because it is too large.)