cmake_minimum_required(VERSION 3.10)
project(main)
set(CMAKE_CXX_STANDARD 11)
find_package(OpenCV REQUIRED)
set(MNN_DIR /home/situ/MNN/MNN1.0/MNN)
include_directories(${MNN_DIR}/include)
LINK_DIRECTORIES(${MNN_DIR}/build)
add_executable(main main.cpp faceLandmarks.cpp)
# add_executable(main z.cpp)
target_link_libraries(main -lMNN ${OpenCV_LIBS})
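# Note: MNN_DIR above is hard-coded to a local MNN checkout (headers under
# ${MNN_DIR}/include, the built libMNN under ${MNN_DIR}/build). A minimal sketch
# of making it overridable -- an assumption, not part of the original build --
# would replace the plain set() with:
#   if(NOT DEFINED MNN_DIR)
#       set(MNN_DIR /home/situ/MNN/MNN1.0/MNN)
#   endif()
# after which `cmake -DMNN_DIR=/path/to/MNN ..` selects a different install.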
#include "faceLandmarks.h"
vector<vector<float>> FaceLandmarks::detect_landmarks(std::string image_path){
Mat input_data_=cv::imread(image_path);
float w_r=float(input_data_.cols)/112.0f;
float h_r=float(input_data_.rows)/112.0f;
Mat input_data;
cv::resize(input_data_,input_data,Size2d(112,112));
input_data.convertTo(input_data, CV_32F);
input_data = input_data /256.0f;
std::vector<std::vector<cv::Mat>> nChannels;
std::vector<cv::Mat> rgbChannels(3);
cv::split(input_data, rgbChannels);
nChannels.push_back(rgbChannels); // NHWC to NCHW
auto *pvData = malloc(1 * 3 * 112 * 112 *sizeof(float));
int nPlaneSize = 112 * 112;
for (int c = 0; c < 3; ++c)
{
cv::Mat matPlane = nChannels[0][c];
memcpy((float *)(pvData) + c * nPlaneSize,\
matPlane.data, nPlaneSize * sizeof(float));
}
auto inTensor = net->getSessionInput(session, NULL);
net->resizeTensor(inTensor, {1, 3, 112,112});
net->resizeSession(session);
auto nchwTensor = new Tensor(inTensor, Tensor::CAFFE);
::memcpy(nchwTensor->host<float>(), pvData, nPlaneSize * 3 * sizeof(float));
inTensor->copyFromHostTensor(nchwTensor);
free(pvData);      // release the staging buffer once it has been copied
delete nchwTensor; // the host tensor is no longer needed either
// inference
net->runSession(session);
auto output= net->getSessionOutput(session, NULL);
MNN::Tensor feat_tensor(output, output->getDimensionType());
output->copyToHostTensor(&feat_tensor);
vector<vector<float>> landmarks;
for(int idx =0;idx<106;++idx){
float x_= *(feat_tensor.host<float>()+2*idx)*w_r;
float y_= *(feat_tensor.host<float>()+2*idx+1)*h_r;
vector<float> tmp={x_,y_};
landmarks.push_back(tmp);
}
return landmarks;
}
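// Illustrative sketch only, not called anywhere in this project: the manual
// split()+memcpy above repacks the HWC image into the contiguous NCHW float
// buffer expected by the CAFFE-layout MNN input tensor. cv::dnn::blobFromImage
// does the same packing in one call (resize to 112x112, scale by 1/256, keep
// the BGR channel order, output a 1x3x112x112 CV_32F blob); this assumes
// OpenCV was built with the dnn module.
#ifdef HAVE_OPENCV_DNN
#include <opencv2/dnn.hpp>
static cv::Mat pack_nchw_blob_example(const cv::Mat &bgr)
{
    return cv::dnn::blobFromImage(bgr, 1.0 / 256.0, cv::Size(112, 112),
                                  cv::Scalar(), /*swapRB=*/false, /*crop=*/false);
}
#endif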
vector<vector<float>> FaceLandmarks::detect_image_landmarks(Mat image){
Mat input_data_=image;
float w_r=float(input_data_.cols)/112.0f;
float h_r=float(input_data_.rows)/112.0f;
Mat input_data;
cv::resize(input_data_,input_data,Size2d(112,112));
input_data.convertTo(input_data, CV_32F);
input_data = input_data /256.0f;
std::vector<std::vector<cv::Mat>> nChannels;
std::vector<cv::Mat> rgbChannels(3);
cv::split(input_data, rgbChannels);
nChannels.push_back(rgbChannels); // NHWC to NCHW
auto *pvData = malloc(1 * 3 * 112 * 112 *sizeof(float));
int nPlaneSize = 112 * 112;
for (int c = 0; c < 3; ++c)
{
cv::Mat matPlane = nChannels[0][c];
memcpy((float *)(pvData) + c * nPlaneSize,\
matPlane.data, nPlaneSize * sizeof(float));
}
auto inTensor = net->getSessionInput(session, NULL);
net->resizeTensor(inTensor, {1, 3, 112,112});
net->resizeSession(session);
auto nchwTensor = new Tensor(inTensor, Tensor::CAFFE);
::memcpy(nchwTensor->host<float>(), pvData, nPlaneSize * 3 * sizeof(float));
inTensor->copyFromHostTensor(nchwTensor);
free(pvData);      // release the staging buffer once it has been copied
delete nchwTensor; // the host tensor is no longer needed either
// inference
net->runSession(session);
auto output= net->getSessionOutput(session, NULL);
MNN::Tensor feat_tensor(output, output->getDimensionType());
output->copyToHostTensor(&feat_tensor);
vector<vector<float>> landmarks;
for(int idx =0;idx<106;++idx){
float x_= *(feat_tensor.host<float>()+2*idx)*w_r;
float y_= *(feat_tensor.host<float>()+2*idx+1)*h_r;
vector<float> tmp={x_,y_};
landmarks.push_back(tmp);
}
return landmarks;
}
#ifndef FACELANDMARKS_H
#define FACELANDMARKS_H
#include <opencv2/opencv.hpp>
#include<MNN/Interpreter.hpp>
#include<MNN/ImageProcess.hpp>
#include<iostream>
using namespace std;
using namespace cv;
using namespace MNN;
class FaceLandmarks{
private:
vector<float> input_size={112,112};
std::shared_ptr<MNN::Interpreter> net;
Session *session = nullptr;
ScheduleConfig config;
public:
FaceLandmarks(){};
FaceLandmarks(string model_path){
net = std::shared_ptr<MNN::Interpreter>(MNN::Interpreter::createFromFile(model_path.c_str())); // create the interpreter
config.numThread = 8;
config.type = MNN_FORWARD_CPU;
session = net->createSession(config); // create the session
}
vector<vector<float>> detect_landmarks(string image_path);
vector<vector<float>> detect_image_landmarks(cv::Mat image);
};
#endif
#include "facerecognize.h"
cv::Mat FaceRecognize::meanAxis0(const cv::Mat &src)
{
int num = src.rows;
int dim = src.cols;
cv::Mat output(1,dim,CV_32F);
for(int i = 0 ; i < dim; i ++)
{
float sum = 0 ;
for(int j = 0 ; j < num ; j++)
{
sum+=src.at<float>(j,i);
}
output.at<float>(0,i) = sum/num;
}
return output;
}
cv::Mat FaceRecognize::elementwiseMinus(const cv::Mat &A,const cv::Mat &B)
{
cv::Mat output(A.rows,A.cols,A.type());
assert(B.cols == A.cols);
if(B.cols == A.cols)
{
for(int i = 0 ; i < A.rows; i ++)
{
for(int j = 0 ; j < B.cols; j++)
{
output.at<float>(i,j) = A.at<float>(i,j) - B.at<float>(0,j);
}
}
}
return output;
}
cv::Mat FaceRecognize::varAxis0(const cv::Mat &src)
{
cv::Mat temp_ = elementwiseMinus(src,meanAxis0(src));
cv::multiply(temp_ ,temp_ ,temp_ );
return meanAxis0(temp_);
}
int FaceRecognize::MatrixRank(cv::Mat M)
{
Mat w, u, vt;
SVD::compute(M, w, u, vt);
Mat1b nonZeroSingularValues = w > 0.0001;
int rank = countNonZero(nonZeroSingularValues);
return rank;
}
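// similarTransform below follows the Umeyama (1991) least-squares similarity
// estimate (the same construction as skimage.transform.SimilarityTransform):
// given source points X and destination points Y it finds scale s, rotation R
// and translation t minimising sum_i ||y_i - (s*R*x_i + t)||^2 from the SVD of
// the demeaned cross-covariance A = (Y - mean(Y))^T (X - mean(X)) / N, using a
// sign vector d to guard against reflections when det(A) < 0. Descriptive
// comment only; note that cv::SVD::compute already returns the right singular
// vectors transposed.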
cv::Mat FaceRecognize::similarTransform(cv::Mat src,cv::Mat dst) {
int num = src.rows;
int dim = src.cols;
cv::Mat src_mean = meanAxis0(src);
cv::Mat dst_mean = meanAxis0(dst);
cv::Mat src_demean = elementwiseMinus(src, src_mean);
cv::Mat dst_demean = elementwiseMinus(dst, dst_mean);
cv::Mat A = (dst_demean.t() * src_demean) / static_cast<float>(num);
cv::Mat d(dim, 1, CV_32F);
d.setTo(1.0f);
if (cv::determinant(A) < 0) {
d.at<float>(dim - 1, 0) = -1;
}
Mat T = cv::Mat::eye(dim + 1, dim + 1, CV_32F);
cv::Mat U, S, V;
SVD::compute(A, S,U, V);
// note: the SVD convention in OpenCV differs from scipy's.
int rank = MatrixRank(A);
if (rank == 0) {
// degenerate case: all source points coincide, so no transform can be estimated
assert(rank == 0);
} else if (rank == dim - 1) {
if (cv::determinant(U) * cv::determinant(V) > 0) {
T.rowRange(0, dim).colRange(0, dim) = U * V;
} else {
float s = d.at<float>(dim - 1, 0); // save the current reflection sign
d.at<float>(dim - 1, 0) = -1;
cv::Mat diag_ = cv::Mat::diag(d);
cv::Mat twp = diag_*V; //np.dot(np.diag(d), V.T)
T.rowRange(0, dim).colRange(0, dim) = U* twp;
d.at<float>(dim - 1, 0) = s; // restore it for the scale computation below
}
}
else{
cv::Mat diag_ = cv::Mat::diag(d);
cv::Mat twp = diag_*V.t(); //np.dot(np.diag(d), V.T)
cv::Mat res = U* twp; // U
T.rowRange(0, dim).colRange(0, dim) = -U.t()* twp;
}
cv::Mat var_ = varAxis0(src_demean);
float val = cv::sum(var_).val[0];
cv::Mat res;
cv::multiply(d,S,res);
float scale = 1.0/val*cv::sum(res).val[0];
T.rowRange(0, dim).colRange(0, dim) = - T.rowRange(0, dim).colRange(0, dim).t();
cv::Mat temp1 = T.rowRange(0, dim).colRange(0, dim); // T[:dim, :dim]
cv::Mat temp2 = src_mean.t(); //src_mean.T
cv::Mat temp3 = temp1*temp2; // np.dot(T[:dim, :dim], src_mean.T)
cv::Mat temp4 = scale*temp3;
T.rowRange(0, dim).colRange(dim, dim+1)= -(temp4 - dst_mean.t()) ;
T.rowRange(0, dim).colRange(0, dim) *= scale;
return T;
}
Mat FaceRecognize::preprocess_face(Mat image,vector<vector<float>> land){
Mat out;
cv::resize(image,out,Size(112,112));
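// default1 below holds the canonical five reference points (eye centres, nose
// tip, mouth corners) commonly used for 112x112 ArcFace-style alignment; lands
// rescales the detected landmarks into the same 112x112 frame so that a
// similarity transform between the two point sets can be estimated.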
float default1[5][2] = {
{38.2946f, 51.6963f},
{73.5318f, 51.6963f},
{56.0252f, 71.7366f},
{41.5493f, 92.3655f},
{70.7299f, 92.3655f}
};
float lands[5][2]={
{float(land[0][0]*112.0)/float(image.cols),float(land[0][1]*112.0)/float(image.rows)},
{float(land[1][0]*112.0)/float(image.cols),float(land[1][1]*112.0)/float(image.rows)},
{float(land[2][0]*112.0)/float(image.cols),float(land[2][1]*112.0)/float(image.rows)},
{float(land[3][0]*112.0)/float(image.cols),float(land[3][1]*112.0)/float(image.rows)},
{float(land[4][0]*112.0)/float(image.cols),float(land[4][1]*112.0)/float(image.rows)}
};
cv::Mat src(5,2,CV_32FC1,default1);
memcpy(src.data, default1, 2 * 5 * sizeof(float));
cv::Mat dst(5,2,CV_32FC1,lands);
memcpy(dst.data, lands, 2 * 5 * sizeof(float));
cv::Mat M = similarTransform(dst, src);
float M_[2][3]={
{M.at<float>(0,0),M.at<float>(0,1),M.at<float>(0,2)},
{M.at<float>(1,0),M.at<float>(1,1),M.at<float>(1,2)},
};
cv::Mat M__(2,3,CV_32FC1,M_);
cv::Mat align_image;
cv::warpAffine(out,align_image,M__,Size(112, 112));
return align_image;
}
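// getMold returns the Euclidean (L2) norm of a vector, and cos_distance uses it
// to compute the cosine similarity cos(a, b) = (a . b) / (|a| * |b|); larger
// values mean the two embedding vectors point in more similar directions.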
double FaceRecognize::getMold(const vector<double>& vec)
{
int n = vec.size();
double sum = 0.0;
for (int i = 0; i < n; ++i)
sum += vec[i] * vec[i];
return sqrt(sum);
}
double FaceRecognize::cos_distance(const vector<double>& base, const vector<double>& target)
{
int n = base.size();
assert(n == target.size());
double tmp = 0.0;
for (int i = 0; i < n; ++i)
tmp += base[i] * target[i];
double simility = tmp / (getMold(base)*getMold(target));
return simility;
}
double FaceRecognize::get_samilar(Mat image1,Mat image2){
cv::resize(image1,image1,Size2d(input_size[0],input_size[1]));
cv::resize(image2,image2,Size2d(input_size[0],input_size[1]));
image1.convertTo(image1, CV_32F);
image2.convertTo(image2, CV_32F);
image1 = (image1-mean)*scale;
image2 = (image2-mean)*scale;
std::vector<std::vector<cv::Mat>> nChannels1;
std::vector<cv::Mat> rgbChannels1(3);
cv::split(image1, rgbChannels1);
nChannels1.push_back(rgbChannels1); // NHWC to NCHW
auto *pvData1 = malloc(1 * 3 * input_size[1] * input_size[0] *sizeof(float));
int nPlaneSize = input_size[0] * input_size[1];
for (int c = 0; c < 3; ++c)
{
cv::Mat matPlane1 = nChannels1[0][c];
memcpy((float *)(pvData1) + c * nPlaneSize,\
matPlane1.data, nPlaneSize * sizeof(float));
}
auto inTensor1 = net->getSessionInput(session1, NULL);
net->resizeTensor(inTensor1, {1, 3, int(input_size[1]), int(input_size[0])}); // explicit casts avoid a narrowing float->int conversion in the braced list
net->resizeSession(session1);
auto nchwTensor1 = new Tensor(inTensor1, Tensor::CAFFE);
::memcpy(nchwTensor1->host<float>(), pvData1, nPlaneSize * 3 * sizeof(float));
inTensor1->copyFromHostTensor(nchwTensor1);
free(pvData1);      // release the staging buffer once it has been copied
delete nchwTensor1;
// inference
net->runSession(session1);
auto output1= net->getSessionOutput(session1, NULL);
std::vector<std::vector<cv::Mat>> nChannels2;
std::vector<cv::Mat> rgbChannels2(3);
cv::split(image2, rgbChannels2);
nChannels2.push_back(rgbChannels2); // NHWC to NCHW
auto *pvData2 = malloc(1 * 3 * input_size[1] * input_size[0] *sizeof(float));
for (int c = 0; c < 3; ++c)
{
cv::Mat matPlane2 = nChannels2[0][c];
memcpy((float *)(pvData2) + c * nPlaneSize,\
matPlane2.data, nPlaneSize * sizeof(float));
}
auto inTensor2 = net->getSessionInput(session2, NULL);
net->resizeTensor(inTensor2, {1, 3, int(input_size[1]), int(input_size[0])});
net->resizeSession(session2);
auto nchwTensor2 = new Tensor(inTensor2, Tensor::CAFFE);
::memcpy(nchwTensor2->host<float>(), pvData2, nPlaneSize * 3 * sizeof(float));
inTensor2->copyFromHostTensor(nchwTensor2);
free(pvData2);      // release the staging buffer once it has been copied
delete nchwTensor2;
// inference
net->runSession(session2);
auto output2= net->getSessionOutput(session2, NULL);
MNN::Tensor feat_tensor1(output1, MNN::Tensor::CAFFE);
MNN::Tensor feat_tensor2(output2, MNN::Tensor::CAFFE);
output1->copyToHostTensor(&feat_tensor1);
output2->copyToHostTensor(&feat_tensor2);
auto feature1 = feat_tensor1.host<float>();
auto feature2 = feat_tensor2.host<float>();
vector<double> v1,v2;
for(int i=0;i<int(feat_tensor1.size()/4);i++){ // Tensor::size() is in bytes, so /4 gives the number of floats
v1.push_back((double)feature1[i]);
v2.push_back((double)feature2[i]);
}
double cos_score=cos_distance(v1,v2);
return cos_score;
}
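// Minimal usage sketch (illustrative only; the model path and image files are
// hypothetical):
//   FaceRecognize rec("model/cls_face_mnn_1.0.0_v0.1.0.mnn");
//   cv::Mat a = cv::imread("a.jpg"), b = cv::imread("b.jpg");
//   // optionally align each face first via preprocess_face() with 5 detected landmarks
//   double score = rec.get_samilar(a, b); // cosine similarity of the two embeddings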
#ifndef FACERECOGNIZE_H
#define FACERECOGNIZE_H
#include<opencv2/opencv.hpp>
#include<MNN/Interpreter.hpp>
#include<MNN/ImageProcess.hpp>
#include<iostream>
using namespace MNN;
using namespace std;
using namespace cv;
class FaceRecognize{
private:
vector<float> input_size={112,112};
std::shared_ptr<MNN::Interpreter> net;
Session *session1 = nullptr;
Session *session2 = nullptr;
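// note: two sessions are created on the same interpreter so that the output
// tensors of both images can stay valid at the same time inside get_samilar()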
ScheduleConfig config;
Scalar mean=Scalar(127.5f,127.5f,127.5f);
float scale = 1.0f/127.5f;
public:
FaceRecognize(){};
FaceRecognize(string model_path){
net = std::shared_ptr<MNN::Interpreter>(MNN::Interpreter::createFromFile(model_path.c_str())); // create the interpreter
config.numThread = 8;
config.type = MNN_FORWARD_CPU;
session1 = net->createSession(config); // create one session per input image
session2 = net->createSession(config);
}
// preprocessing
cv::Mat meanAxis0(const cv::Mat &src);
cv::Mat elementwiseMinus(const cv::Mat &A,const cv::Mat &B);
cv::Mat varAxis0(const cv::Mat &src);
int MatrixRank(cv::Mat M);
cv::Mat similarTransform(cv::Mat src,cv::Mat dst);
Mat preprocess_face(Mat image,vector<vector<float>> land);
double getMold(const vector<double>& vec);
double cos_distance(const vector<double>& base, const vector<double>& target);
// inference
double get_samilar(Mat image1,Mat image2);
};
#endif
#include "faceLandmarks.h"
int main(){
FaceLandmarks face_landmarks1 = FaceLandmarks("/home/situ/qfs/sdk_project/gitlab_demo/face_recognize_mnn/model/det_landmarks_106_v0.0.1.mnn");
vector<string> filenames;
cv::glob("/home/situ/图片/img3", filenames, false);
for(auto path:filenames){
// cout<<path<<endl;
Mat img1 =cv::imread(path);
auto landmarks1 = face_landmarks1.detect_landmarks(path);
for(auto landm:landmarks1){
cv::circle(img1,Point2d(landm[0],landm[1]),2,Scalar(255,0,0));
}
cv::imshow("img",img1);
cv::waitKey(0);
}
// Mat image1 = cv::imread("/home/situ/图片/4.jpg");
// Mat image2 = cv::imread("/home/situ/图片/img3/1.jpg");
// string face_det_model = "/home/situ/qfs/sdk_project/face_recognize_mnn/model/mnn/det_face_retina_mnn_1.0.0_v0.1.1.mnn";
// string face_landm_model = "/home/situ/qfs/sdk_project/face_recognize_mnn/model/mnn/det_landmarks_106_v0.0.1.mnn";
// string face_rec_model = "/home/situ/qfs/mobile_face_recognize/models/cls_face_mnn_1.0.0_v0.1.0.mnn";
// FaceComparison face_rec = FaceComparison(face_det_model,face_landm_model,face_rec_model);
// bool result = face_rec.face_compare("/home/situ/图片/2.png","/home/situ/图片/2.png");
// cout<<result<<endl;
}
#ifndef RETINAFACE_H
#define RETINAFACE_H
#include<opencv2/opencv.hpp>
#include<MNN/Interpreter.hpp>
#include<MNN/ImageProcess.hpp>
#include<iostream>
using namespace MNN;
using namespace std;
using namespace cv;
struct Bbox{
float xmin;
float ymin;
float xmax;
float ymax;
float score;
float x1;
float y1;
float x2;
float y2;
float x3;
float y3;
float x4;
float y4;
float x5;
float y5;
};
class RetinaFace{
public:
float confidence_threshold = 0.5;
bool is_bbox_process=true;
private:
bool use_gpu=true;
vector<float> input_size={640,640};
vector<float> variances={0.1,0.2};
Scalar mean = Scalar(104.0f, 117.0f, 123.0f);
float keep_top_k = 100;
float nms_threshold = 0.4;
float resize_scale = 1.0;
std::shared_ptr<MNN::Interpreter> net;
Session *session = nullptr;
ScheduleConfig config;
vector<vector<float>> anchors;
private:
// generate anchors
vector<vector<float>> priorBox(vector<float> image_size);
// decode bounding boxes and landmarks (with confidence scores)
vector<Bbox> decode(float *loc,float *score,float *pre,vector<vector<float>> priors,vector<float> variances);
// decode landmarks
// vector<vector<float>> decode_landm(vector<vector<float>> pre,vector<vector<float>> priors,vector<float> variances);
//NMS
void nms_cpu(std::vector<Bbox> &bboxes, float threshold);
// filter boxes by score threshold
vector<Bbox> select_score(vector<Bbox> bboxes,float threshold,float w_r,float h_r);
// post-process the detections
vector<Bbox> bbox_process(vector<Bbox> bboxes,float frame_w,float frame_h);
public:
RetinaFace(){};
RetinaFace(string model_path){
net = std::shared_ptr<MNN::Interpreter>(MNN::Interpreter::createFromFile(model_path.c_str())); // create the interpreter
config.numThread = 8;
config.type = MNN_FORWARD_CPU;
session = net->createSession(config); // create the session
anchors=priorBox(input_size);
}
// inference
vector<Bbox> detect(string image_path);
vector<Bbox> detect_image(Mat image);
};
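// Minimal usage sketch (illustrative only; the model path is hypothetical):
//   RetinaFace det("det_face_retina_mnn_1.0.0_v0.1.1.mnn");
//   std::vector<Bbox> faces = det.detect_image(frame);
//   // each Bbox carries the face rectangle (xmin..ymax), its score, and the
//   // five landmark points (x1,y1)..(x5,y5)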
#endif