栏目分类:
子分类:
返回
名师互学网用户登录
快速导航关闭
当前搜索
当前分类
子分类
实用工具
热门搜索
名师互学网 > IT > 软件开发 > 后端开发 > Python

opencv surf代码备份

Python 更新时间: 发布时间: IT归档 最新发布 模块sitemap 名妆网 法律咨询 聚返吧 英语巴士网 伯小乐 网商动力

opencv surf代码备份

别人的

import cv2 as cv
import numpy as np
import pdb

# Locate a known template (a button cropped from a reference screenshot)
# inside a device-interface image using SURF features, Lowe's ratio test,
# and a RANSAC homography; draw the projected template outline.
buttons = cv.imread('/home/pi/Desktop/myfile/python/surf/buttons.jpg')
interface = cv.imread('/home/pi/Desktop/myfile/python/surf/device1.png')
if buttons is None or interface is None:
    raise SystemExit('failed to load input images')
# Crop of the button template from the reference screenshot (rows 26:86, cols 66:134).
button1 = buttons[26:86, 66:134, :]
surf = cv.xfeatures2d.SURF_create(100)  # Hessian threshold 100
kp1, des1 = surf.detectAndCompute(button1, None)
kp2, des2 = surf.detectAndCompute(interface, None)
bf = cv.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)
# Lowe's ratio test: keep a match only when it is clearly better than the runner-up.
good = [m for m, n in matches if m.distance < 0.7 * n.distance]
# findHomography needs at least 4 point correspondences.
if len(good) < 4:
    raise SystemExit('not enough good matches (%d) for a homography' % len(good))
src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
if M is None:
    raise SystemExit('homography estimation failed')
matchesMask = mask.ravel().tolist()
# Project the template's corner rectangle into the interface image.
h, w, _ = button1.shape
pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
dst = cv.perspectiveTransform(pts, M)
img2 = cv.polylines(interface, [np.int32(dst)], True, 255, 3, cv.LINE_AA)
cv.imshow('sp', img2)
cv.waitKey(0)
cv.destroyAllWindows()

自己的

import numpy as np
import cv2 as cv

def GuassianKernel(sigma, dim):
    """Return a dim x dim 2-D Gaussian kernel with standard deviation sigma.

    G(x, y) = 1 / (2*pi*sigma^2) * exp(-(x^2 + y^2) / (2*sigma^2)),
    with (x, y) = 0 at the kernel center.

    Parameters
    ----------
    sigma : float
        Standard deviation of the Gaussian (> 0).
    dim : int
        Kernel side length; should be odd so the kernel has a true center.

    Returns
    -------
    np.ndarray of shape (dim, dim)
        The sampled kernel. For dim >~ 6*sigma its sum is close to 1.
    """
    # Coordinates relative to the kernel center.
    axis = np.arange(dim) - dim // 2
    xx, yy = np.meshgrid(axis, axis)
    # Fix of the original: use both axes and divide the exponent by 2*sigma^2
    # (the original used a single axis, omitted the divisor, and returned an
    # undefined name `result` -> NameError).
    kernel = (0.5 / (np.pi * sigma * sigma)) * np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma * sigma))
    return kernel

def get_keypoints(img, sigma0=1.52, n=3):
    """Build the Gaussian scale-space pyramid used for SIFT-style keypoint
    detection. NOTE(review): the implementation is unfinished — DoG
    construction and extrema search were never written; this returns only
    the Gaussian pyramid.

    Parameters
    ----------
    img : np.ndarray
        Input image; only shape[:2] is used for pyramid sizing.
    sigma0 : float
        Base blur of the first scale of the first octave (default 1.52,
        the original hard-coded value).
    n : int
        Scales searched per octave; each octave stores n + 3 blurred images.

    Returns
    -------
    list[list[np.ndarray]]
        GuassianPyramid[octave][stack] — the blurred images.
    """
    Stack = n + 3
    # Stop subdividing octaves when the subsampled image would be ~8 px.
    # (Original fix: the typo `Octive` never matched the `Octave` it used.)
    Octave = int(np.log2(min(img.shape[:2]))) - 3
    k = 2 ** (1 / n)
    # sigma[o][s]: absolute blur of stack s in octave o (doubles per octave).
    # (Original fix: `sigma0` was undefined and `sigma` shadowed itself.)
    sigma = [[(k ** s) * sigma0 * (1 << o) for s in range(Stack)] for o in range(Octave)]
    # Each octave operates on the image subsampled by 2**o.
    samplePyramid = [img[::(1 << o), ::(1 << o)] for o in range(Octave)]
    GuassianPyramid = []
    for i in range(Octave):
        GuassianPyramid.append([])
        for j in range(Stack):
            # Kernel radius ~3*sigma, rounded, forced odd.
            dim = 2 * int(3 * sigma[i][j] + 0.5) + 1
            kernel = GuassianKernel(sigma[i][j], dim)
            # NOTE(review): `convolve` is not defined anywhere in this file —
            # TODO supply a 2-D convolution (e.g. scipy.ndimage / cv2.filter2D).
            conv = convolve(kernel, samplePyramid[i], [dim // 2] * 4, [1, 1])
            GuassianPyramid[i].append(conv)  # original discarded the result
    return GuassianPyramid

def surfmatch(img1, img2, Hessian):
    """Unfinished stub for SURF-based matching of img1 against img2.

    Currently only touches ``img1.shape`` and returns None; ``img2`` and
    ``Hessian`` are unused placeholders for the intended implementation.
    """
    has_dims = bool(len(img1.shape))
    if has_dims:
        pass

surf测试test.cpp

#include <iostream>
#include "opencv2/opencv.hpp"
#include "surfmatch.hpp"
using namespace std;
using namespace cv;

// Offline test: load a template (hand.png) and a captured scene, run the
// SURF Detector on the right half of the scene, and display the result.
int main()
{
    cout << "Locate 3D test." << CV_VERSION << endl;
    Mat imgshow;
    Mat box = imread("/home/pi/Desktop/myfile/hand.png");
    Mat scene = imread("/home/pi/Desktop/myfile/camera.png");
    // imread returns an empty Mat on failure; the Rect ROI below would
    // otherwise abort with a cryptic OpenCV assertion.
    if (box.empty() || scene.empty()) {
        cerr << "failed to load test images" << endl;
        return 1;
    }
    Detector detector(box);
    // Right half of a 1280x480 side-by-side stereo capture.
    scene = scene(Rect(640, 0, 640, 480));
    detector.find_known_object(scene, imgshow);
    imshow("test1", imgshow);
    waitKey(0);
    return 0;
}

摄像头检测camera.cpp

#include <iostream>
#include "opencv2/opencv.hpp"
#include "surfmatch.hpp"
using namespace std;
using namespace cv;

// Live test: grab 1280x480 side-by-side stereo frames from camera 0, run the
// SURF Detector on each half, and display both annotated halves until 'q'.
int main()
{
    cout << "Locate 3D test." << CV_VERSION << endl;
    Mat frame, Lcap, Rcap, imgshowL, imgshowR;
    Mat device = imread("/home/pi/Desktop/myfile/device2.png");
    if (device.empty()) {
        cerr << "failed to load template image" << endl;
        return 1;
    }
    // Template region of the device panel within the reference screenshot.
    Mat box = device(Rect(275, 108, 340, 222));
    Detector detector(box);
    VideoCapture cap(0);
    // Fix: the constants are CAP_PROP_FRAME_WIDTH/HEIGHT — the original
    // lowercase CAP_PROP_frame_* identifiers do not exist in OpenCV.
    cap.set(CAP_PROP_FRAME_WIDTH, 1280);
    cap.set(CAP_PROP_FRAME_HEIGHT, 480);
    if (!cap.isOpened()) return 1;
    char ch;
    while (1) {
        if (!cap.read(frame)) break;
        Lcap = frame(Rect(0, 0, 640, 480));
        Rcap = frame(Rect(640, 0, 640, 480));
        // find_known_object returns non-zero on failure; skip display then.
        if (detector.find_known_object(Lcap, imgshowL)) continue;
        if (detector.find_known_object(Rcap, imgshowR)) continue;
        imshow("test1", imgshowL);
        imshow("test2", imgshowR);
        ch = (char)waitKey(10);
        if (ch == 'q') break;
    }
    cap.release();
    return 0;
}

检测器类detector.hpp

#pragma once
#include <vector>
#include "opencv2/opencv.hpp"
#include "opencv2/xfeatures2d.hpp"

/// Finds a known template image inside arbitrary scenes using SURF features,
/// FLANN matching and a homography; keypoints/descriptors of the template are
/// computed once at construction.
/// NOTE(review): the template arguments below were stripped by HTML scraping
/// in the original (`cv::Ptr detector`, `std::vector objKeypoints`) — restored
/// to the types the constructor and find_known_object actually use.
class Detector {
public:
	explicit Detector(const cv::Mat& _box);
	/// Searches `src` for the template; draws the detected outline into `dst`.
	/// Returns 0 on success, non-zero on a failed stage (1 = match, 2 =
	/// homography, 3 = perspective transform).
	int find_known_object(const cv::Mat& src, cv::Mat& dst);
private:
	cv::Mat box;                                  // private copy of the template
	cv::Ptr<cv::xfeatures2d::SURF> detector;      // SURF feature extractor
	std::vector<cv::KeyPoint> objKeypoints;       // template keypoints
	cv::Mat objDescriptors;                       // template descriptors
};

// Construct the detector around one template image: keep a private copy and
// precompute its SURF keypoints/descriptors once, so every later call to
// find_known_object only has to process the scene side.
Detector::Detector(const cv::Mat& _box)
{
	box = _box.clone();
	// Hessian threshold 400, passed directly at construction (equivalent to
	// create() followed by setHessianThreshold(400)).
	detector = cv::xfeatures2d::SURF::create(400);
	detector->detectAndCompute(box, cv::Mat(), objKeypoints, objDescriptors);
}


/// Locate the template inside `src` and draw its projected outline into `dst`
/// (a clone of `src`). Returns 0 on success; 1 if descriptor matching failed,
/// 2 if the homography could not be estimated, 3 if the corner projection
/// failed.
int Detector::find_known_object(const cv::Mat& src, cv::Mat& dst)
{
	using namespace cv;
	using namespace cv::xfeatures2d;
	using namespace std;
	std::vector<KeyPoint> keypoints;
	Mat descriptors;
	// Fix: class is FlannBasedMatcher (capital B) — FlannbasedMatcher does
	// not exist. Template arguments stripped by scraping are also restored.
	FlannBasedMatcher matcher;
	vector<DMatch> matches, goodMatches;
	dst = src.clone();
	detector->detectAndCompute(src, Mat(), keypoints, descriptors);
	// FLANN throws when descriptors are empty/invalid (e.g. featureless frame).
	try { matcher.match(objDescriptors, descriptors, matches); }
	catch (const exception&) { return 1; }
	// Distance of the best match; bound the filter loops by matches.size()
	// (the original indexed matches[] with objDescriptors.rows, which can
	// overrun when fewer matches were produced).
	double mindist = 100;
	for (size_t i = 0; i < matches.size(); i++)
		if (matches[i].distance < mindist)
			mindist = matches[i].distance;
	// Keep matches within 3x the best distance (floor of 0.08).
	for (size_t i = 0; i < matches.size(); i++)
		if (matches[i].distance <= max(3 * mindist, 0.08))
			goodMatches.push_back(matches[i]);
	vector<Point2f> vecobj, vecsce;
	for (size_t i = 0; i < goodMatches.size(); i++) {
		vecobj.push_back(objKeypoints[goodMatches[i].queryIdx].pt);
		vecsce.push_back(keypoints[goodMatches[i].trainIdx].pt);
	}
	// findHomography needs at least 4 correspondences and may still return
	// an empty matrix; either way there is nothing to draw.
	if (vecobj.size() < 4) return 2;
	cv::Mat H;
	try { H = findHomography(vecobj, vecsce, RHO); }
	catch (const exception&) { return 2; }
	if (H.empty()) return 2;
	// Project the template's corners into the scene and draw the outline.
	vector<Point2f> objCorners(4);
	objCorners[0] = Point2f(0, 0);
	objCorners[1] = Point2f((float)box.cols, 0);
	objCorners[2] = Point2f((float)box.cols, (float)box.rows);
	objCorners[3] = Point2f(0, (float)box.rows);
	vector<Point2f> sceneCorners(4);
	try { perspectiveTransform(objCorners, sceneCorners, H); }
	catch (const exception&) { return 3; }
	line(dst, sceneCorners[0], sceneCorners[1], Scalar(0, 255, 0), 1);
	line(dst, sceneCorners[1], sceneCorners[2], Scalar(0, 255, 0), 1);
	line(dst, sceneCorners[2], sceneCorners[3], Scalar(0, 255, 0), 1);
	line(dst, sceneCorners[3], sceneCorners[0], Scalar(0, 255, 0), 1);
	return 0;
}
转载请注明:文章转载自 www.mshxw.com
本文地址:https://www.mshxw.com/it/341894.html
我们一直用心在做
关于我们 文章归档 网站地图 联系我们

版权所有 (c)2021-2022 MSHXW.COM

ICP备案号:晋ICP备2021003244-6号