ed52fddf by 乔峰昇

submmit

0 parents
# Build a single demo executable linking OpenCV and a locally built MNN.
cmake_minimum_required(VERSION 3.10)
project(main)
set(CMAKE_CXX_STANDARD 11)
find_package(OpenCV REQUIRED)
# NOTE(review): hard-coded developer path — pass -DMNN_DIR=... or edit per machine.
set(MNN_DIR /home/situ/MNN/MNN1.0/MNN)
include_directories(${MNN_DIR}/include)
LINK_DIRECTORIES(${MNN_DIR}/build)
# NOTE(review): only main.cpp + faceLandmarks.cpp are compiled here; the
# facerecognize/retinaface sources in this repo are not part of this target.
add_executable(main main.cpp faceLandmarks.cpp)
# add_executable(main z.cpp)
target_link_libraries(main -lMNN ${OpenCV_LIBS})
#include "faceLandmarks.h"
// Detect 106 facial landmarks on the image at `image_path`.
// Returns 106 {x, y} pairs in original-image pixel coordinates
// (empty vector if the file cannot be read).
vector<vector<float>> FaceLandmarks::detect_landmarks(std::string image_path){
    vector<vector<float>> landmarks;
    Mat input_data_ = cv::imread(image_path);
    if (input_data_.empty()) {
        // Unreadable path: return no landmarks instead of crashing below.
        return landmarks;
    }
    // Ratios mapping 112x112 network coordinates back to the source image.
    float w_r = float(input_data_.cols) / 112.0f;
    float h_r = float(input_data_.rows) / 112.0f;
    Mat input_data;
    cv::resize(input_data_, input_data, Size2d(112, 112));
    input_data.convertTo(input_data, CV_32F);
    input_data = input_data / 256.0f;  // NOTE(review): /256 (not /255) kept — presumably matches training
    // NHWC -> NCHW: split channels and copy each plane contiguously.
    std::vector<cv::Mat> rgbChannels(3);
    cv::split(input_data, rgbChannels);
    const int nPlaneSize = 112 * 112;
    std::vector<float> pvData(3 * nPlaneSize);  // RAII buffer: fixes malloc() leak in original
    for (int c = 0; c < 3; ++c) {
        memcpy(pvData.data() + c * nPlaneSize, rgbChannels[c].data,
               nPlaneSize * sizeof(float));
    }
    auto inTensor = net->getSessionInput(session, NULL);
    net->resizeTensor(inTensor, {1, 3, 112, 112});
    net->resizeSession(session);
    // Stage host data in a CAFFE(NCHW)-layout tensor, then upload to the input.
    Tensor nchwTensor(inTensor, Tensor::CAFFE);  // stack object: fixes `new` leak in original
    ::memcpy(nchwTensor.host<float>(), pvData.data(), pvData.size() * sizeof(float));
    inTensor->copyFromHostTensor(&nchwTensor);
    // inference
    net->runSession(session);
    auto output = net->getSessionOutput(session, NULL);
    MNN::Tensor feat_tensor(output, output->getDimensionType());
    output->copyToHostTensor(&feat_tensor);
    // Output is (x, y) * 106 in 112x112 space; rescale to the source image.
    const float *feat = feat_tensor.host<float>();
    for (int idx = 0; idx < 106; ++idx) {
        landmarks.push_back({feat[2 * idx] * w_r, feat[2 * idx + 1] * h_r});
    }
    return landmarks;
}
// Detect 106 facial landmarks on an in-memory image.
// Returns 106 {x, y} pairs in the coordinates of `image`
// (empty vector if `image` is empty).
vector<vector<float>> FaceLandmarks::detect_image_landmarks(Mat image){
    vector<vector<float>> landmarks;
    Mat input_data_ = image;
    if (input_data_.empty()) {
        // Nothing to process.
        return landmarks;
    }
    // Ratios mapping 112x112 network coordinates back to the source image.
    float w_r = float(input_data_.cols) / 112.0f;
    float h_r = float(input_data_.rows) / 112.0f;
    Mat input_data;
    cv::resize(input_data_, input_data, Size2d(112, 112));
    input_data.convertTo(input_data, CV_32F);
    input_data = input_data / 256.0f;  // NOTE(review): /256 (not /255) kept — presumably matches training
    // NHWC -> NCHW: split channels and copy each plane contiguously.
    std::vector<cv::Mat> rgbChannels(3);
    cv::split(input_data, rgbChannels);
    const int nPlaneSize = 112 * 112;
    std::vector<float> pvData(3 * nPlaneSize);  // RAII buffer: fixes malloc() leak in original
    for (int c = 0; c < 3; ++c) {
        memcpy(pvData.data() + c * nPlaneSize, rgbChannels[c].data,
               nPlaneSize * sizeof(float));
    }
    auto inTensor = net->getSessionInput(session, NULL);
    net->resizeTensor(inTensor, {1, 3, 112, 112});
    net->resizeSession(session);
    // Stage host data in a CAFFE(NCHW)-layout tensor, then upload to the input.
    Tensor nchwTensor(inTensor, Tensor::CAFFE);  // stack object: fixes `new` leak in original
    ::memcpy(nchwTensor.host<float>(), pvData.data(), pvData.size() * sizeof(float));
    inTensor->copyFromHostTensor(&nchwTensor);
    // inference
    net->runSession(session);
    auto output = net->getSessionOutput(session, NULL);
    MNN::Tensor feat_tensor(output, output->getDimensionType());
    output->copyToHostTensor(&feat_tensor);
    // Output is (x, y) * 106 in 112x112 space; rescale to the source image.
    const float *feat = feat_tensor.host<float>();
    for (int idx = 0; idx < 106; ++idx) {
        landmarks.push_back({feat[2 * idx] * w_r, feat[2 * idx + 1] * h_r});
    }
    return landmarks;
}
\ No newline at end of file
#ifndef FACELANDMARKS_H
#define FACELANDMARKS_H
#include <opencv2/opencv.hpp>
#include<MNN/Interpreter.hpp>
#include<MNN/ImageProcess.hpp>
#include<iostream>
using namespace std;
using namespace cv;
using namespace MNN;
// 106-point facial landmark detector backed by an MNN model.
// Construct with a .mnn model path, then call detect_landmarks /
// detect_image_landmarks; results are in original-image coordinates.
class FaceLandmarks{
private:
// Network input resolution; the detect_* implementations assume 112x112.
vector<float> input_size={112,112};
std::shared_ptr<MNN::Interpreter> net;
Session *session = nullptr;
ScheduleConfig config;
public:
FaceLandmarks(){};
FaceLandmarks(string model_path){
// NOTE(review): createFromFile may return null for a bad path — not checked here.
net = std::shared_ptr<MNN::Interpreter>(MNN::Interpreter::createFromFile(model_path.c_str()));//create interpreter
config.numThread = 8;
config.type = MNN_FORWARD_CPU;
session = net->createSession(config);//create session
}
// Detect landmarks from an image file path; returns 106 {x, y} pairs.
vector<vector<float>> detect_landmarks(string image_path);
// Detect landmarks from an in-memory image.
vector<vector<float>> detect_image_landmarks(cv::Mat image);
};
#endif
\ No newline at end of file
#include "facerecognize.h"
// Column-wise (axis-0) mean of a CV_32F matrix.
// Returns a 1 x src.cols row vector of per-column averages.
cv::Mat FaceRecognize::meanAxis0(const cv::Mat &src)
{
    const int rows = src.rows;
    const int cols = src.cols;
    cv::Mat mean_row(1, cols, CV_32F);
    for (int col = 0; col < cols; ++col)
    {
        float acc = 0.0f;
        for (int row = 0; row < rows; ++row)
            acc += src.at<float>(row, col);
        mean_row.at<float>(0, col) = acc / rows;
    }
    return mean_row;
}
// Subtract the row vector B (1 x cols) from every row of A, element-wise.
// A and B must have the same number of columns; returns a matrix shaped like A.
cv::Mat FaceRecognize::elementwiseMinus(const cv::Mat &A, const cv::Mat &B)
{
    cv::Mat diff(A.rows, A.cols, A.type());
    assert(B.cols == A.cols);
    if (A.cols == B.cols)
    {
        for (int r = 0; r < A.rows; ++r)
        {
            for (int c = 0; c < A.cols; ++c)
                diff.at<float>(r, c) = A.at<float>(r, c) - B.at<float>(0, c);
        }
    }
    return diff;
}
// Column-wise (axis-0) variance: mean of squared deviations from the column mean.
cv::Mat FaceRecognize::varAxis0(const cv::Mat &src)
{
    // Fix: the original wrote `cv:Mat temp_` — a stray `cv:` label plus an
    // unqualified `Mat` that only compiled because of `using namespace cv`.
    cv::Mat temp_ = elementwiseMinus(src, meanAxis0(src));
    cv::multiply(temp_, temp_, temp_);  // square each deviation in place
    return meanAxis0(temp_);
}
// Numerical rank of M: the number of singular values above a small tolerance.
int FaceRecognize::MatrixRank(cv::Mat M)
{
    cv::Mat w, u, vt;
    cv::SVD::compute(M, w, u, vt);
    // Threshold the singular values; > 1e-4 counts as non-zero.
    cv::Mat1b above_tol = (w > 0.0001);
    return cv::countNonZero(above_tol);
}
// Estimate a 2-D similarity transform (uniform scale * rotation + translation)
// mapping `src` points onto `dst` points, returned as a (dim+1)x(dim+1)
// homogeneous matrix.  This is a C++ port of the Umeyama algorithm (as in
// skimage.transform.SimilarityTransform); inline numpy comments are from the
// Python original.  Kept byte-identical — sign handling below is delicate.
cv::Mat FaceRecognize::similarTransform(cv::Mat src,cv::Mat dst) {
int num = src.rows;
int dim = src.cols;
// Demean both point sets.
cv::Mat src_mean = meanAxis0(src);
cv::Mat dst_mean = meanAxis0(dst);
cv::Mat src_demean = elementwiseMinus(src, src_mean);
cv::Mat dst_demean = elementwiseMinus(dst, dst_mean);
// Covariance-like matrix A = dst_demean.T @ src_demean / num.
cv::Mat A = (dst_demean.t() * src_demean) / static_cast<float>(num);
// d selects the reflection-free solution: last entry flips if det(A) < 0.
cv::Mat d(dim, 1, CV_32F);
d.setTo(1.0f);
if (cv::determinant(A) < 0) {
d.at<float>(dim - 1, 0) = -1;
}
Mat T = cv::Mat::eye(dim + 1, dim + 1, CV_32F);
cv::Mat U, S, V;
SVD::compute(A, S,U, V);
// the SVD function in opencv differ from scipy .
// (OpenCV's V is already transposed relative to numpy's svd output.)
int rank = MatrixRank(A);
if (rank == 0) {
// Degenerate input (e.g. coincident points): T is left as identity.
// NOTE(review): assert(rank == 0) is trivially true here — looks like a
// leftover from the Python port.
assert(rank == 0);
} else if (rank == dim - 1) {
if (cv::determinant(U) * cv::determinant(V) > 0) {
T.rowRange(0, dim).colRange(0, dim) = U * V;
} else {
// Temporarily force a reflection via d, rebuild the rotation, then restore.
// NOTE(review): `s` receives -1 (result of the chained assignment), so the
// restore below re-writes -1; kept byte-identical to the original.
int s = d.at<float>(dim - 1, 0) = -1;
d.at<float>(dim - 1, 0) = -1;
T.rowRange(0, dim).colRange(0, dim) = U * V;
cv::Mat diag_ = cv::Mat::diag(d);
cv::Mat twp = diag_*V; //np.dot(np.diag(d), V.T)
// NOTE(review): B and C below are never used — dead code from the port.
Mat B = Mat::zeros(3, 3, CV_8UC1);
Mat C = B.diag(0);
T.rowRange(0, dim).colRange(0, dim) = U* twp;
d.at<float>(dim - 1, 0) = s;
}
}
else{
cv::Mat diag_ = cv::Mat::diag(d);
cv::Mat twp = diag_*V.t(); //np.dot(np.diag(d), V.T)
cv::Mat res = U* twp; // U
// NOTE(review): the negations here and at the scale step below appear to
// compensate for OpenCV-vs-numpy SVD sign conventions — confirm against
// the Python reference before touching.
T.rowRange(0, dim).colRange(0, dim) = -U.t()* twp;
}
// scale = sum(S * d) / var(src)  (Umeyama eq. 41-43).
cv::Mat var_ = varAxis0(src_demean);
float val = cv::sum(var_).val[0];
cv::Mat res;
cv::multiply(d,S,res);
float scale = 1.0/val*cv::sum(res).val[0];
T.rowRange(0, dim).colRange(0, dim) = - T.rowRange(0, dim).colRange(0, dim).t();
cv::Mat temp1 = T.rowRange(0, dim).colRange(0, dim); // T[:dim, :dim]
cv::Mat temp2 = src_mean.t(); //src_mean.T
cv::Mat temp3 = temp1*temp2; // np.dot(T[:dim, :dim], src_mean.T)
cv::Mat temp4 = scale*temp3;
// Translation column: dst_mean - scale * R @ src_mean.
T.rowRange(0, dim).colRange(dim, dim+1)= -(temp4 - dst_mean.t()) ;
T.rowRange(0, dim).colRange(0, dim) *= scale;
return T;
}
// Align a face to the 112x112 ArcFace template using 5 landmark points
// (`land` is in original-image coordinates: eyes, nose, mouth corners).
// Returns the warped 112x112 face crop.
Mat FaceRecognize::preprocess_face(Mat image, vector<vector<float>> land){
    Mat out;
    cv::resize(image, out, Size(112, 112));
    // Canonical 5-point destination template for a 112x112 ArcFace crop.
    float default1[5][2] = {
        {38.2946f, 51.6963f},
        {73.5318f, 51.6963f},
        {56.0252f, 71.7366f},
        {41.5493f, 92.3655f},
        {70.7299f, 92.3655f}
    };
    // Landmarks rescaled from the original image into the 112x112 crop.
    float lands[5][2] = {
        {float(land[0][0]*112.0)/float(image.cols), float(land[0][1]*112.0)/float(image.rows)},
        {float(land[1][0]*112.0)/float(image.cols), float(land[1][1]*112.0)/float(image.rows)},
        {float(land[2][0]*112.0)/float(image.cols), float(land[2][1]*112.0)/float(image.rows)},
        {float(land[3][0]*112.0)/float(image.cols), float(land[3][1]*112.0)/float(image.rows)},
        {float(land[4][0]*112.0)/float(image.cols), float(land[4][1]*112.0)/float(image.rows)}
    };
    // These Mats wrap the stack arrays directly — no copy needed.
    // Fix: the original additionally memcpy'd each array onto itself
    // (src.data == default1), which is an overlapping memcpy (UB) and a no-op.
    cv::Mat src(5, 2, CV_32FC1, default1);
    cv::Mat dst(5, 2, CV_32FC1, lands);
    // Similarity transform mapping detected points onto the template.
    cv::Mat M = similarTransform(dst, src);
    // Top 2x3 of the 3x3 homogeneous matrix, as required by warpAffine.
    float M_[2][3] = {
        {M.at<float>(0,0), M.at<float>(0,1), M.at<float>(0,2)},
        {M.at<float>(1,0), M.at<float>(1,1), M.at<float>(1,2)},
    };
    cv::Mat M__(2, 3, CV_32FC1, M_);
    cv::Mat align_image;
    cv::warpAffine(out, align_image, M__, Size(112, 112));
    return align_image;
}
// Euclidean (L2) norm of vec: sqrt(sum of squares).
double FaceRecognize::getMold(const vector<double>& vec)
{
    double sum_sq = 0.0;
    for (const double v : vec)
        sum_sq += v * v;
    return sqrt(sum_sq);
}
// Cosine similarity between two equal-length vectors:
// dot(base, target) / (|base| * |target|).  Range [-1, 1]; higher = more similar.
double FaceRecognize::cos_distance(const vector<double>& base, const vector<double>& target)
{
    int n = base.size();
    assert(n == target.size());
    double dot = 0.0;
    for (int i = 0; i < n; ++i)
        dot += base[i] * target[i];
    return dot / (getMold(base) * getMold(target));
}
// Cosine similarity between the embeddings of two (already cropped/aligned)
// face images, computed by running the same network through two MNN sessions.
// Returns a value in [-1, 1]; higher means more similar.
double FaceRecognize::get_samilar(Mat image1, Mat image2){
    cv::resize(image1, image1, Size2d(input_size[0], input_size[1]));
    cv::resize(image2, image2, Size2d(input_size[0], input_size[1]));
    image1.convertTo(image1, CV_32F);
    image2.convertTo(image2, CV_32F);
    // Normalize: (pixel - 127.5) / 127.5 -> roughly [-1, 1].
    image1 = (image1 - mean) * scale;
    image2 = (image2 - mean) * scale;
    const int nPlaneSize = input_size[0] * input_size[1];
    // --- image 1: NHWC -> NCHW, upload, run ---
    std::vector<cv::Mat> rgbChannels1(3);
    cv::split(image1, rgbChannels1);
    std::vector<float> pvData1(3 * nPlaneSize);  // RAII buffer: fixes malloc() leak
    for (int c = 0; c < 3; ++c)
        memcpy(pvData1.data() + c * nPlaneSize, rgbChannels1[c].data,
               nPlaneSize * sizeof(float));
    auto inTensor1 = net->getSessionInput(session1, NULL);
    net->resizeTensor(inTensor1, {1, 3, input_size[1], input_size[0]});
    net->resizeSession(session1);
    Tensor nchwTensor1(inTensor1, Tensor::CAFFE);  // stack object: fixes `new` leak
    ::memcpy(nchwTensor1.host<float>(), pvData1.data(), pvData1.size() * sizeof(float));
    inTensor1->copyFromHostTensor(&nchwTensor1);
    net->runSession(session1);
    auto output1 = net->getSessionOutput(session1, NULL);
    // --- image 2: same pipeline on the second session ---
    std::vector<cv::Mat> rgbChannels2(3);
    cv::split(image2, rgbChannels2);
    std::vector<float> pvData2(3 * nPlaneSize);
    for (int c = 0; c < 3; ++c)
        memcpy(pvData2.data() + c * nPlaneSize, rgbChannels2[c].data,
               nPlaneSize * sizeof(float));
    auto inTensor2 = net->getSessionInput(session2, NULL);
    net->resizeTensor(inTensor2, {1, 3, input_size[1], input_size[0]});
    net->resizeSession(session2);
    Tensor nchwTensor2(inTensor2, Tensor::CAFFE);
    ::memcpy(nchwTensor2.host<float>(), pvData2.data(), pvData2.size() * sizeof(float));
    inTensor2->copyFromHostTensor(&nchwTensor2);
    net->runSession(session2);
    auto output2 = net->getSessionOutput(session2, NULL);
    // Copy both embeddings to host-side tensors.
    MNN::Tensor feat_tensor1(output1, MNN::Tensor::CAFFE);
    MNN::Tensor feat_tensor2(output2, MNN::Tensor::CAFFE);
    output1->copyToHostTensor(&feat_tensor1);
    output2->copyToHostTensor(&feat_tensor2);
    auto feature1 = feat_tensor1.host<float>();
    auto feature2 = feat_tensor2.host<float>();
    // Tensor::size() is in bytes here (the original divided by the magic 4);
    // use sizeof(float) to make the intent explicit.
    const int feat_len = int(feat_tensor1.size() / sizeof(float));
    vector<double> v1, v2;
    v1.reserve(feat_len);
    v2.reserve(feat_len);
    for (int i = 0; i < feat_len; i++){
        v1.push_back((double)feature1[i]);
        v2.push_back((double)feature2[i]);
    }
    return cos_distance(v1, v2);
}
#ifndef FACERECOGNIZE_H
#define FACERECOGNIZE_H
#include<opencv2/opencv.hpp>
#include<MNN/Interpreter.hpp>
#include<MNN/ImageProcess.hpp>
#include<iostream>
using namespace MNN;
using namespace std;
using namespace cv;
// Face embedding + comparison: aligns a face via a 5-point similarity
// transform, runs an MNN recognition model, and compares embeddings by
// cosine similarity.
class FaceRecognize{
private:
// Network input resolution (112x112).
vector<float> input_size={112,112};
std::shared_ptr<MNN::Interpreter> net;
// Two sessions on the same interpreter, one per image in get_samilar.
Session *session1 = nullptr;
Session *session2 = nullptr;
ScheduleConfig config;
// Normalization constants: (pixel - 127.5) / 127.5 maps to roughly [-1, 1].
Scalar mean=Scalar(127.5f,127.5f,127.5f);
float scale = 1.0f/127.5f;
public:
FaceRecognize(){};
FaceRecognize(string model_path){
// NOTE(review): createFromFile may return null for a bad path — not checked here.
net = std::shared_ptr<MNN::Interpreter>(MNN::Interpreter::createFromFile(model_path.c_str()));//create interpreter
config.numThread = 8;
config.type = MNN_FORWARD_CPU;
session1 = net->createSession(config);//create session
session2 = net->createSession(config);//create session
}
// preprocessing helpers (Umeyama-style similarity transform and alignment)
cv::Mat meanAxis0(const cv::Mat &src);              // column-wise mean
cv::Mat elementwiseMinus(const cv::Mat &A,const cv::Mat &B); // A minus row vector B
cv::Mat varAxis0(const cv::Mat &src);               // column-wise variance
int MatrixRank(cv::Mat M);                          // numerical rank via SVD
cv::Mat similarTransform(cv::Mat src,cv::Mat dst);  // similarity transform src -> dst
Mat preprocess_face(Mat image,vector<vector<float>> land); // align face to 112x112
double getMold(const vector<double>& vec);          // L2 norm
double cos_distance(const vector<double>& base, const vector<double>& target); // cosine similarity
// inference
double get_samilar(Mat image1,Mat image2);
};
#endif
\ No newline at end of file
#include "faceLandmarks.h"
// Demo entry point: run the 106-point landmark detector over every image in a
// directory and display each result.  Model and image paths are hard-coded to
// the author's machine — adjust before running.
int main(){
FaceLandmarks face_landmarks1 = FaceLandmarks("/home/situ/qfs/sdk_project/gitlab_demo/face_recognize_mnn/model/det_landmarks_106_v0.0.1.mnn");
vector<string> filenames;
// Collect all image paths in the directory (non-recursive).
cv::glob("/home/situ/图片/img3", filenames, false);
for(auto path:filenames){
// cout<<path<<endl;
Mat img1 =cv::imread(path);
// Landmarks come back in original-image coordinates; draw them directly.
auto landmarks1 = face_landmarks1.detect_landmarks(path);
for(auto landm:landmarks1){
cv::circle(img1,Point2d(landm[0],landm[1]),2,Scalar(255,0,0));
}
// Show each annotated image; any key advances to the next one.
cv::imshow("img",img1);
cv::waitKey(0);
}
// Leftover example of the full detection + recognition pipeline:
// Mat image1 = cv::imread("/home/situ/图片/4.jpg");
// Mat image2 = cv::imread("/home/situ/图片/img3/1.jpg");
// string face_det_model = "/home/situ/qfs/sdk_project/face_recognize_mnn/model/mnn/det_face_retina_mnn_1.0.0_v0.1.1.mnn";
// string face_landm_model = "/home/situ/qfs/sdk_project/face_recognize_mnn/model/mnn/det_landmarks_106_v0.0.1.mnn";
// string face_rec_model = "/home/situ/qfs/mobile_face_recognize/models/cls_face_mnn_1.0.0_v0.1.0.mnn";
// FaceComparison face_rec = FaceComparison(face_det_model,face_landm_model,face_rec_model);
// bool result = face_rec.face_compare("/home/situ/图片/2.png","/home/situ/图片/2.png");
// cout<<result<<endl;
}
No preview for this file type
No preview for this file type
No preview for this file type
#include "retinaface.h"
// 生成anchors
// Generate the fixed set of normalized anchor boxes {cx, cy, w, h} for an
// input of `image_size` — standard RetinaFace layout: feature-map strides
// 8/16/32 with two min-sizes each.  All values are normalized to [0, 1].
vector<vector<float>> RetinaFace::priorBox(vector<float> image_size){
    vector<vector<int>> min_sizes_ = {{16, 32}, {64, 128}, {256, 512}};
    vector<int> steps = {8, 16, 32};
    // Feature-map grid size for each stride.
    vector<vector<int>> feature_maps;
    for(int &step : steps){
        vector<int> fm(2, 0);
        fm[0] = ceil(image_size[0] / step);
        fm[1] = ceil(image_size[1] / step);
        feature_maps.push_back(fm);
    }
    vector<vector<float>> anchors;
    for(size_t k = 0; k < feature_maps.size(); k++){
        vector<int> min_sizes = min_sizes_[k];
        for(int i = 0; i < feature_maps[k][0]; i++){
            for(int j = 0; j < feature_maps[k][1]; j++){
                for(int &min_size : min_sizes){
                    float s_kx = float(min_size) / float(image_size[1]);
                    float s_ky = float(min_size) / float(image_size[0]);
                    float dense_cx = float((float(j) + 0.5f) * steps[k]) / float(image_size[1]);
                    // Fix: cy must be normalized by image_size[0] (the same
                    // dimension s_ky uses); the original divided by
                    // image_size[1].  Identical output for the square 640x640
                    // default, correct for non-square inputs.
                    float dense_cy = float((float(i) + 0.5f) * steps[k]) / float(image_size[0]);
                    anchors.push_back({dense_cx, dense_cy, s_kx, s_ky});
                }
            }
        }
    }
    return anchors;
}
// 解析bounding box 包含置信度
// Decode raw network outputs into Bbox structs.
//   loc   : 4 floats per prior  (box center/size offsets)
//   score : 2 floats per prior  (background, face) — index 2*i+1 is the face score
//   pre   : 10 floats per prior (5 landmark (x, y) offsets)
// Offsets are applied to the normalized priors with the usual SSD variance
// scheme, then scaled by input_size into pixel coordinates.
vector<Bbox> RetinaFace::decode(float *loc, float *score, float *pre, vector<vector<float>> priors, vector<float> variances){
    vector<float> input_size = {640, 640};
    float resize_scale = 1.0;
    vector<Bbox> boxes;
    for(size_t i = 0; i < priors.size(); ++i){
        // Box: decode center/size, then convert to corner form (still normalized).
        float b1 = priors[i][0] + loc[4*i]   * variances[0] * priors[i][2];
        float b2 = priors[i][1] + loc[4*i+1] * variances[0] * priors[i][3];
        float b3 = priors[i][2] * exp(loc[4*i+2] * variances[1]);
        float b4 = priors[i][3] * exp(loc[4*i+3] * variances[1]);
        b1 = b1 - b3 / float(2);
        b2 = b2 - b4 / float(2);
        b3 = b3 + b1;
        b4 = b4 + b2;
        // Five landmark points: even indices are x (prior cx/w), odd are y (cy/h).
        float l[10];
        for(int k = 0; k < 10; ++k){
            int axis = k % 2;
            l[k] = priors[i][axis] + pre[10*i + k] * variances[0] * priors[i][2 + axis];
        }
        // Fix: the original wrote `b1>0?b1:0;` etc. as value-discarding
        // statements, so the intended clamp never took effect.  Apply it.
        // (NOTE(review): the upper bound of 640 on still-normalized coords is a
        // no-op — presumably meant to be 1.0; kept as written pending confirmation.)
        b1 = std::max(b1, 0.0f);
        b2 = std::max(b2, 0.0f);
        b3 = std::min(b3, 640.0f);
        b4 = std::min(b4, 640.0f);
        // Scale everything into input pixel space.
        Bbox tmp_box;
        tmp_box.xmin  = b1 * input_size[0] / resize_scale;
        tmp_box.ymin  = b2 * input_size[1] / resize_scale;
        tmp_box.xmax  = b3 * input_size[0] / resize_scale;
        tmp_box.ymax  = b4 * input_size[1] / resize_scale;
        tmp_box.score = score[2*i + 1];
        tmp_box.x1 = (l[0] * input_size[0]) / resize_scale;
        tmp_box.y1 = l[1] * input_size[1] / resize_scale;
        tmp_box.x2 = l[2] * input_size[0] / resize_scale;
        tmp_box.y2 = l[3] * input_size[1] / resize_scale;
        tmp_box.x3 = l[4] * input_size[0] / resize_scale;
        tmp_box.y3 = l[5] * input_size[1] / resize_scale;
        tmp_box.x4 = l[6] * input_size[0] / resize_scale;
        tmp_box.y4 = l[7] * input_size[1] / resize_scale;
        tmp_box.x5 = l[8] * input_size[0] / resize_scale;
        tmp_box.y5 = l[9] * input_size[1] / resize_scale;
        boxes.push_back(tmp_box);
    }
    return boxes;
}
//NMS
void RetinaFace::nms_cpu(std::vector<Bbox> &bboxes, float threshold){
if (bboxes.empty()){
return ;
}
// 1.之前需要按照score排序
std::sort(bboxes.begin(), bboxes.end(), [&](Bbox b1, Bbox b2){return b1.score>b2.score;});
// 2.先求出所有bbox自己的大小
std::vector<float> area(bboxes.size());
for (int i=0; i<bboxes.size(); ++i){
area[i] = (bboxes[i].xmax - bboxes[i].xmin + 1) * (bboxes[i].ymax - bboxes[i].ymin + 1);
}
// 3.循环
for (int i=0; i<bboxes.size(); ++i){
for (int j=i+1; j<bboxes.size(); ){
float left = std::max(bboxes[i].xmin, bboxes[j].xmin);
float right = std::min(bboxes[i].xmax, bboxes[j].xmax);
float top = std::max(bboxes[i].ymin, bboxes[j].ymin);
float bottom = std::min(bboxes[i].ymax, bboxes[j].ymax);
float width = std::max(right - left + 1, 0.f);
float height = std::max(bottom - top + 1, 0.f);
float u_area = height * width;
float iou = (u_area) / (area[i] + area[j] - u_area);
if (iou>=threshold){
bboxes.erase(bboxes.begin()+j);
area.erase(area.begin()+j);
}else{
++j;
}
}
}
}
// 根据阈值筛选
// Keep only detections whose score reaches `threshold`, rescaling every
// coordinate from network-input space back to the original image
// (x values divided by w_r, y values by h_r).
vector<Bbox> RetinaFace::select_score(vector<Bbox> bboxes, float threshold, float w_r, float h_r){
    vector<Bbox> kept;
    for(Bbox &candidate : bboxes){
        if (float(candidate.score) < threshold){
            continue;  // below threshold: drop
        }
        // Rescale the box corners.
        candidate.xmin /= w_r;
        candidate.ymin /= h_r;
        candidate.xmax /= w_r;
        candidate.ymax /= h_r;
        // Rescale the five landmark points.
        candidate.x1 /= w_r;  candidate.y1 /= h_r;
        candidate.x2 /= w_r;  candidate.y2 /= h_r;
        candidate.x3 /= w_r;  candidate.y3 /= h_r;
        candidate.x4 /= w_r;  candidate.y4 /= h_r;
        candidate.x5 /= w_r;  candidate.y5 /= h_r;
        kept.push_back(candidate);
    }
    return kept;
}
// 数据后处理
// Post-process detections: widen each box by 15% of the face width on both
// sides, extend the bottom by 15% of the face height, clip the box to the
// frame, and clamp landmark coordinates at zero.
vector<Bbox> RetinaFace::bbox_process(vector<Bbox> bboxes, float frame_w, float frame_h){
    vector<Bbox> processed;
    for(Bbox &src_box : bboxes){
        const float face_w = src_box.xmax - src_box.xmin;
        const float face_h = src_box.ymax - src_box.ymin;
        Bbox out_box;
        // Expand horizontally on both sides and downward only (top unchanged).
        out_box.xmin = src_box.xmin - face_w * 0.15;
        out_box.xmax = src_box.xmax + face_w * 0.15;
        out_box.ymin = src_box.ymin;
        out_box.ymax = src_box.ymax + face_h * 0.15;
        // Clip the expanded box to the frame.
        if (out_box.xmin < 0) out_box.xmin = 0;
        if (out_box.ymin < 0) out_box.ymin = 0;
        if (out_box.xmax > frame_w) out_box.xmax = frame_w;
        if (out_box.ymax > frame_h) out_box.ymax = frame_h;
        out_box.score = src_box.score;
        // Landmarks: clamp any negative coordinate to zero.
        out_box.x1 = src_box.x1 > 0 ? src_box.x1 : 0;
        out_box.y1 = src_box.y1 > 0 ? src_box.y1 : 0;
        out_box.x2 = src_box.x2 > 0 ? src_box.x2 : 0;
        out_box.y2 = src_box.y2 > 0 ? src_box.y2 : 0;
        out_box.x3 = src_box.x3 > 0 ? src_box.x3 : 0;
        out_box.y3 = src_box.y3 > 0 ? src_box.y3 : 0;
        out_box.x4 = src_box.x4 > 0 ? src_box.x4 : 0;
        out_box.y4 = src_box.y4 > 0 ? src_box.y4 : 0;
        out_box.x5 = src_box.x5 > 0 ? src_box.x5 : 0;
        out_box.y5 = src_box.y5 > 0 ? src_box.y5 : 0;
        processed.push_back(out_box);
    }
    return processed;
}
// 推理
// Run face detection on an image file.  Returns detections (boxes + 5
// landmarks each) in original-image coordinates; empty if the file is unreadable.
vector<Bbox> RetinaFace::detect(string image_path){
    Mat image = cv::imread(image_path);
    if (image.empty()){
        // Unreadable path: nothing to detect.
        return vector<Bbox>();
    }
    // Ratios from the original image to the 640x640 network input.
    float w_r = float(input_size[0]) / float(image.cols);
    float h_r = float(input_size[1]) / float(image.rows);
    Mat input_data;
    cv::resize(image, input_data, Size(input_size[0], input_size[1]));
    // Fix: convert to float BEFORE subtracting the mean.  The original
    // subtracted the Scalar from a CV_8U Mat, which saturates at 0 and
    // destroys all below-mean channel values.
    input_data.convertTo(input_data, CV_32F);
    input_data = input_data - mean;
    // NHWC -> NCHW: split channels and copy each plane contiguously.
    std::vector<cv::Mat> rgbChannels(3);
    cv::split(input_data, rgbChannels);
    const int nPlaneSize = input_size[0] * input_size[1];
    std::vector<float> pvData(3 * nPlaneSize);  // RAII buffer: fixes malloc() leak
    for (int c = 0; c < 3; ++c)
    {
        memcpy(pvData.data() + c * nPlaneSize, rgbChannels[c].data,
               nPlaneSize * sizeof(float));
    }
    auto inTensor = net->getSessionInput(session, NULL);
    net->resizeTensor(inTensor, {1, 3, input_size[1], input_size[0]});
    net->resizeSession(session);
    Tensor nchwTensor(inTensor, Tensor::CAFFE);  // stack object: fixes `new` leak
    ::memcpy(nchwTensor.host<float>(), pvData.data(), pvData.size() * sizeof(float));
    inTensor->copyFromHostTensor(&nchwTensor);
    // inference
    net->runSession(session);
    auto output0 = net->getSessionOutput(session, "output0");  // box offsets
    auto output1 = net->getSessionOutput(session, "output1");  // class scores
    auto output2 = net->getSessionOutput(session, "output2");  // landmark offsets
    MNN::Tensor feat_tensor0(output0, MNN::Tensor::CAFFE);
    MNN::Tensor feat_tensor1(output1, MNN::Tensor::CAFFE);
    MNN::Tensor feat_tensor2(output2, MNN::Tensor::CAFFE);
    output0->copyToHostTensor(&feat_tensor0);
    output1->copyToHostTensor(&feat_tensor1);
    output2->copyToHostTensor(&feat_tensor2);
    auto loc = feat_tensor0.host<float>();
    auto score = feat_tensor1.host<float>();
    auto landm = feat_tensor2.host<float>();
    // Decode against the precomputed anchors, filter by score, then NMS.
    vector<Bbox> result_boxes = decode(loc, score, landm, anchors, variances);
    vector<Bbox> results = select_score(result_boxes, confidence_threshold, w_r, h_r);
    nms_cpu(results, nms_threshold);
    if (is_bbox_process){
        // Optionally expand the boxes and clip them to the frame.
        return bbox_process(results, image.cols, image.rows);
    }
    return results;
}
// Run face detection on an in-memory image.  Returns detections (boxes + 5
// landmarks each) in the coordinates of `image`; empty if `image` is empty.
vector<Bbox> RetinaFace::detect_image(Mat image){
    if (image.empty()){
        // Nothing to process.
        return vector<Bbox>();
    }
    // Ratios from the original image to the 640x640 network input.
    float w_r = float(input_size[0]) / float(image.cols);
    float h_r = float(input_size[1]) / float(image.rows);
    Mat input_data;
    cv::resize(image, input_data, Size(input_size[0], input_size[1]));
    // Fix: convert to float BEFORE subtracting the mean.  The original
    // subtracted the Scalar from a CV_8U Mat, which saturates at 0 and
    // destroys all below-mean channel values.
    input_data.convertTo(input_data, CV_32F);
    input_data = input_data - mean;
    // NHWC -> NCHW: split channels and copy each plane contiguously.
    std::vector<cv::Mat> rgbChannels(3);
    cv::split(input_data, rgbChannels);
    const int nPlaneSize = input_size[0] * input_size[1];
    std::vector<float> pvData(3 * nPlaneSize);  // RAII buffer: fixes malloc() leak
    for (int c = 0; c < 3; ++c)
    {
        memcpy(pvData.data() + c * nPlaneSize, rgbChannels[c].data,
               nPlaneSize * sizeof(float));
    }
    auto inTensor = net->getSessionInput(session, NULL);
    net->resizeTensor(inTensor, {1, 3, input_size[1], input_size[0]});
    net->resizeSession(session);
    Tensor nchwTensor(inTensor, Tensor::CAFFE);  // stack object: fixes `new` leak
    ::memcpy(nchwTensor.host<float>(), pvData.data(), pvData.size() * sizeof(float));
    inTensor->copyFromHostTensor(&nchwTensor);
    // inference
    net->runSession(session);
    auto output0 = net->getSessionOutput(session, "output0");  // box offsets
    auto output1 = net->getSessionOutput(session, "output1");  // class scores
    auto output2 = net->getSessionOutput(session, "output2");  // landmark offsets
    MNN::Tensor feat_tensor0(output0, MNN::Tensor::CAFFE);
    MNN::Tensor feat_tensor1(output1, MNN::Tensor::CAFFE);
    MNN::Tensor feat_tensor2(output2, MNN::Tensor::CAFFE);
    output0->copyToHostTensor(&feat_tensor0);
    output1->copyToHostTensor(&feat_tensor1);
    output2->copyToHostTensor(&feat_tensor2);
    auto loc = feat_tensor0.host<float>();
    auto score = feat_tensor1.host<float>();
    auto landm = feat_tensor2.host<float>();
    // Decode against the precomputed anchors, filter by score, then NMS.
    vector<Bbox> result_boxes = decode(loc, score, landm, anchors, variances);
    vector<Bbox> results = select_score(result_boxes, confidence_threshold, w_r, h_r);
    nms_cpu(results, nms_threshold);
    if (is_bbox_process){
        // Optionally expand the boxes and clip them to the frame.
        return bbox_process(results, image.cols, image.rows);
    }
    return results;
}
\ No newline at end of file
#ifndef RETINAFACE_H
#define RETINAFACE_H
#include<opencv2/opencv.hpp>
#include<MNN/Interpreter.hpp>
#include<MNN/ImageProcess.hpp>
#include<iostream>
using namespace MNN;
using namespace std;
using namespace cv;
// One face detection: bounding box corners, confidence score, and five
// landmark points (x1..x5 / y1..y5 — presumably eyes, nose tip, mouth
// corners in that order; confirm against the model).
struct Bbox{
// box corners
float xmin;
float ymin;
float xmax;
float ymax;
// detection confidence
float score;
// five landmark points, (x, y) each
float x1;
float y1;
float x2;
float y2;
float x3;
float y3;
float x4;
float y4;
float x5;
float y5;
};
// RetinaFace face detector backed by an MNN model: generates SSD-style
// anchors once at construction, then decodes boxes + 5 landmarks per face.
class RetinaFace{
public:
// Minimum score for a detection to be kept.
float confidence_threshold = 0.5;
// When true, detect()/detect_image() expand and clip boxes via bbox_process.
bool is_bbox_process=true;
private:
// NOTE(review): declared but never read in the visible code — CPU is always used.
bool use_gpu=true;
// Network input resolution (640x640).
vector<float> input_size={640,640};
// SSD variance terms used when decoding offsets.
vector<float> variances={0.1,0.2};
// Per-channel mean subtracted during preprocessing.
Scalar mean = Scalar(104.0f, 117.0f, 123.0f);
// NOTE(review): declared but unused in the visible code.
float keep_top_k = 100;
float nms_threshold = 0.4;
float resize_scale = 1.0;
std::shared_ptr<MNN::Interpreter> net;
Session *session = nullptr;
ScheduleConfig config;
// Anchor set precomputed by priorBox() in the constructor.
vector<vector<float>> anchors;
private:
// generate anchors
vector<vector<float>> priorBox(vector<float> image_size);
// decode bounding boxes + landmarks (with confidence)
vector<Bbox> decode(float *loc,float *score,float *pre,vector<vector<float>> priors,vector<float> variances);
// decode landmarks
// vector<vector<float>> decode_landm(vector<vector<float>> pre,vector<vector<float>> priors,vector<float> variances);
//NMS
void nms_cpu(std::vector<Bbox> &bboxes, float threshold);
// filter by score threshold (also rescales to original image)
vector<Bbox> select_score(vector<Bbox> bboxes,float threshold,float w_r,float h_r);
// post-process boxes (expand + clip to frame)
vector<Bbox> bbox_process(vector<Bbox> bboxes,float frame_w,float frame_h);
public:
RetinaFace(){};
RetinaFace(string model_path){
// NOTE(review): createFromFile may return null for a bad path — not checked here.
net = std::shared_ptr<MNN::Interpreter>(MNN::Interpreter::createFromFile(model_path.c_str()));//create interpreter
config.numThread = 8;
config.type = MNN_FORWARD_CPU;
session = net->createSession(config);//create session
anchors=priorBox(input_size);
}
// inference
vector<Bbox> detect(string image_path);
vector<Bbox> detect_image(Mat image);
};
#endif
\ No newline at end of file
Styling with Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!