在计算机视觉中,平面的单应性被定义为一个平面到另外一个平面的投影映射。这个投影映射由一个转换矩阵刻画。
该矩阵的计算需要在源和目标图像上寻找四个对应点。
利用OpenCV,整个流程的大致步骤如下:
- 提取特征点 (SURF or SIFT or ORB or others…)
- 特征点匹配 (FLANN or BruteForce…)
- 寻找几何转换 (RANSAC or LMeds…)
# Configure Matplotlib so Chinese (CJK) text in titles/labels renders
# correctly. NOTE: the original used `from pylab import mpl`; pylab is a
# deprecated convenience namespace — importing matplotlib directly gives
# the same `mpl` module and the same rcParams.
import matplotlib as mpl

# Use the FangSong font for CJK glyphs.
mpl.rcParams['font.sans-serif'] = ['FangSong']
# Keep the ASCII minus sign so negative axis ticks are not drawn as boxes
# when a CJK font is active.
mpl.rcParams['axes.unicode_minus'] = False
# Third-party libraries used throughout the script.
import cv2                       # OpenCV: feature detection, matching, warping
import numpy as np               # numeric arrays for keypoint coordinates
import matplotlib.pyplot as plt  # figure display
Plot Image
def read_image_rgb(path):
    """Load *path* with OpenCV and return it as an RGB ndarray.

    cv2.imread signals failure by returning None (it does not raise), so a
    missing/unreadable file is reported here with a clear FileNotFoundError
    instead of a cryptic cvtColor error later.
    """
    img = cv2.imread(path, cv2.IMREAD_COLOR)
    if img is None:
        raise FileNotFoundError(f"could not read image: {path}")
    # OpenCV loads BGR; convert once so Matplotlib displays true colors.
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# Reference (target) image that the second image will be registered onto.
refFilename = "mine1.jpg"
print("Reading 目标影像 : ", refFilename)
im1 = read_image_rgb(refFilename)

# Image to be corrected / registered.
imFilename = "mine2.jpg"
print("Reading 待纠正影像 : ", imFilename)
im2 = read_image_rgb(imFilename)
Reading 目标影像 : mine1.jpg
Reading 待纠正影像 : mine2.jpg
# Show the two input images side by side.
plt.figure(figsize=[20, 10])
for panel, image, caption in ((121, im1, "目标影像"), (122, im2, "待纠正影像")):
    plt.subplot(panel)
    plt.axis('off')
    plt.imshow(image)
    plt.title(caption)
计算特征点
# im1/im2 were converted to RGB when loaded, so the correct grayscale
# conversion code here is COLOR_RGB2GRAY. The original used
# COLOR_BGR2GRAY, which weights the channels as if red and blue were
# swapped and produces a slightly wrong grayscale image.
im1_gray = cv2.cvtColor(im1, cv2.COLOR_RGB2GRAY)
im2_gray = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)

# Detect up to MAX_NUM_FEATURES ORB keypoints on each image and compute
# their binary descriptors in one pass.
MAX_NUM_FEATURES = 500
orb = cv2.ORB_create(MAX_NUM_FEATURES)
keypoints1, descriptors1 = orb.detectAndCompute(im1_gray, None)
keypoints2, descriptors2 = orb.detectAndCompute(im2_gray, None)

# Draw the keypoints for display; the RICH_KEYPOINTS flag also shows each
# keypoint's size and orientation.
im1_display = cv2.drawKeypoints(im1, keypoints1, outImage=np.array([]),
                                color=(255, 255, 0),
                                flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
im2_display = cv2.drawKeypoints(im2, keypoints2, outImage=np.array([]),
                                color=(255, 255, 0),
                                flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Visualise the detected keypoints on both images, side by side.
plt.figure(figsize=[20, 10])
panels = [(121, im1_display, "目标影像"), (122, im2_display, "待纠正影像")]
for position, image, caption in panels:
    plt.subplot(position)
    plt.axis('off')
    plt.imshow(image)
    plt.title(caption)
匹配特征点
# Brute-force matcher with Hamming distance — the appropriate metric for
# ORB's binary descriptors.
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
matches = matcher.match(descriptors1, descriptors2, None)

# Recent OpenCV Python bindings return the matches as a tuple, which has
# no .sort() method; sorted() handles both list and tuple results.
# Ascending distance = best matches first.
matches = sorted(matches, key=lambda m: m.distance)

# Keep only the best 10% of matches (smallest descriptor distance) to
# feed RANSAC with mostly-correct correspondences.
numGoodMatches = int(len(matches) * 0.1)
matches = matches[:numGoodMatches]
# Render the retained correspondences between the two images.
im_matches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches,
                             outImg=None, matchColor=(255, 255, 0))

plt.figure(figsize=[40, 10])
plt.imshow(im_matches)
plt.axis('off')
plt.title("Key points matches")
影像纠正(计算转换系数)
# Gather the matched pixel coordinates: queryIdx indexes keypoints1 (the
# target image), trainIdx indexes keypoints2 (the image to correct).
points1 = np.float32([keypoints1[m.queryIdx].pt for m in matches])
points2 = np.float32([keypoints2[m.trainIdx].pt for m in matches])

# Estimate the homography that maps im2 coordinates onto im1. RANSAC
# rejects outlier correspondences; mask flags which matches are inliers.
h, mask = cv2.findHomography(points2, points1, cv2.RANSAC)
# Warp im2 into im1's frame using the estimated homography; the output
# canvas matches the target image's size.
height, width, channels = im1.shape
im2_reg = cv2.warpPerspective(im2, h, (width, height))

# Compare target, registered, and original images side by side.
plt.figure(figsize=[20, 10])
panels = ((131, im1, "原始图像"), (132, im2_reg, "纠正后图像"), (133, im2, "待纠正图像"))
for position, image, caption in panels:
    plt.subplot(position)
    plt.imshow(image)
    plt.axis('off')
    plt.title(caption)