图像单应性变换

文章目录
  1. 1. 计算特征点
  2. 2. 匹配特征点
  3. 3. 影像纠正(计算转换系数)

在计算机视觉中,平面的单应性被定义为一个平面到另外一个平面的投影映射。这个投影映射由一个转换矩阵刻画。

该矩阵的计算需要在源和目标图像上寻找四个对应点。

利用OpenCV,整个流程的大致步骤如下：

  1. 提取特征点 (SURF or SIFT or ORB or others…)
  2. 特征点匹配 (FLANN or BruteForce…)
  3. 寻找几何转换 (RANSAC or LMeds…)
from pylab import mpl

# Use FangSong as the default sans-serif font so Chinese labels render in
# figures, and keep the minus sign from showing as a box in saved images.
mpl.rcParams.update({
    'font.sans-serif': ['FangSong'],
    'axes.unicode_minus': False,
})
# Imports
import cv2
import numpy as np
import matplotlib.pyplot as plt

Plot Image

def _load_rgb(path):
    """Read the image at *path* with OpenCV and return it as an RGB array."""
    bgr = cv2.imread(path, cv2.IMREAD_COLOR)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)


refFilename = "mine1.jpg"  # reference (target) image
print("Reading 目标影像 : ", refFilename)
im1 = _load_rgb(refFilename)

imFilename = "mine2.jpg"  # image to be rectified
print("Reading 待纠正影像 : ", imFilename)
im2 = _load_rgb(imFilename)
Reading 目标影像 :  mine1.jpg
Reading 待纠正影像 :  mine2.jpg
# Show the target and the to-be-rectified image side by side.
plt.figure(figsize=[20, 10])
for position, image, label in [(121, im1, "目标影像"), (122, im2, "待纠正影像")]:
    plt.subplot(position)
    plt.axis('off')
    plt.imshow(image)
    plt.title(label)

png

计算特征点

# Convert images to grayscale.
# NOTE: im1/im2 were converted BGR -> RGB right after loading, so the
# correct conversion code here is COLOR_RGB2GRAY. The original used
# COLOR_BGR2GRAY, which swaps the red/blue luminance weights and yields
# slightly wrong grayscale intensities.
im1_gray = cv2.cvtColor(im1, cv2.COLOR_RGB2GRAY)
im2_gray = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)

# Detect ORB features and compute descriptors (ORB is free, fast, and
# produces binary descriptors suited to Hamming-distance matching).
MAX_NUM_FEATURES = 500
orb = cv2.ORB_create(MAX_NUM_FEATURES)
keypoints1, descriptors1 = orb.detectAndCompute(im1_gray, None)
keypoints2, descriptors2 = orb.detectAndCompute(im2_gray, None)

# Draw rich keypoints (circle = scale, line = orientation) for display.
im1_display = cv2.drawKeypoints(im1, keypoints1, outImage=np.array([]), color=(255, 255, 0), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
im2_display = cv2.drawKeypoints(im2, keypoints2, outImage=np.array([]), color=(255, 255, 0), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Show the detected keypoints overlaid on both images.
plt.figure(figsize=[20, 10])
for position, image, label in [(121, im1_display, "目标影像"), (122, im2_display, "待纠正影像")]:
    plt.subplot(position)
    plt.axis('off')
    plt.imshow(image)
    plt.title(label)


png

匹配特征点

# Match features with a brute-force Hamming-distance matcher (the right
# metric for ORB's binary descriptors).
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
matches = matcher.match(descriptors1, descriptors2, None)

# Sort matches by score (smaller distance = better match).
# NOTE: recent OpenCV Python bindings return a tuple here, which has no
# .sort() method — the original `matches.sort(...)` raises
# AttributeError on those versions, so use sorted() instead.
matches = sorted(matches, key=lambda m: m.distance)

# Keep only the best 10% of matches; the rest are likely outliers.
numGoodMatches = int(len(matches) * 0.1)
matches = matches[:numGoodMatches]
# Draw the retained matches between the two images and display them.
im_matches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, outImg=None, matchColor=(255, 255, 0))

plt.figure(figsize=[40, 10])
plt.imshow(im_matches)
plt.axis('off')
plt.title("Key points matches")


png

影像纠正(计算转换系数)

# Extract the pixel locations of the good matches in each image.
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)

# NOTE: the loop body lost its indentation in the original paste, which
# is a SyntaxError in Python; it is restored here.
for i, match in enumerate(matches):
    points1[i, :] = keypoints1[match.queryIdx].pt  # location in im1 (query)
    points2[i, :] = keypoints2[match.trainIdx].pt  # location in im2 (train)

# Find the homography mapping im2's points onto im1's; RANSAC rejects
# outlier correspondences (mask flags the inliers).
h, mask = cv2.findHomography(points2, points1, cv2.RANSAC)
# Warp the to-be-rectified image into the target image's frame using the
# estimated homography, then show all three images side by side.
height, width, channels = im1.shape
im2_reg = cv2.warpPerspective(im2, h, (width, height))

plt.figure(figsize=[20, 10])
panels = [(131, im1, "原始图像"), (132, im2_reg, "纠正后图像"), (133, im2, "待纠正图像")]
for position, image, label in panels:
    plt.subplot(position)
    plt.imshow(image)
    plt.axis('off')
    plt.title(label)

png