The Fundamental Matrix and the Epipolar-Line Constraint in Epipolar Geometry

1. Capture the left and right views.

2. Detect keypoints with SIFT.

3. Find matching keypoints with KNN.

4. Filter the keypoint pairs.

5. Estimate the fundamental matrix F from at least 8 keypoint pairs.

6. Use the epipolar constraint x'.T * F * x = 0 to compute, for any point x = (x, y, 1), its epipolar line l = (a, b, c) in the other view; every candidate match (u, v, 1) on that line satisfies a*u + b*v + c = 0 (see the derivation after this list).

7. Substitute a range of x-coordinates into the line equation to obtain all candidate matching points in the other view.

8. Match the candidates and compute the matching cost.

9. Compute depth from the difference of the x-coordinates, i.e. the disparity (a sketch of steps 8–9 follows the code).

10. Finished.
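A quick derivation for steps 6–7, using the same convention as the find_epiline function below (the query point x comes from the right view, and the line lies in the left view):

    l = F.T * x = (a, b, c)        # the epiline in the left view
    a*u + b*v + c = 0              # every candidate match (u, v) lies on l
    =>  v = -(a*u + c) / b         # solve for v, valid when b != 0

Sweeping u over a range of column coordinates gives the candidate points of step 7; this is exactly what find_point and find_candidates compute.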

The Python code for steps 1–7 is attached below.

import cv2
import numpy as np
from PIL import Image
import glob


def to_uint8(data):
    # maximum pixel
    latch = np.zeros_like(data)
    latch[:] = 255
    # minimum pixel
    zeros = np.zeros_like(data)

    # unrolled to illustrate steps
    d = np.maximum(zeros, data)
    d = np.minimum(latch, d)

    # cast to uint8
    return np.asarray(d, dtype="uint8")


# x is a homogeneous point (x, y, 1) from the right view
# F is the fundamental matrix
# returns the corresponding epiline l = F.T * x in the left view
def find_epiline(x, F):
    return (F.T.dot(x.T)).T


# x is the x-coordinate at which to evaluate the epiline
# L = (a, b, c) is the epiline; solve a*x + b*y + c = 0 for y
def find_point(x, L):
    assert np.abs(L[1]) > 0
    return (int(x), int((1.0/L[1]) * (-L[2] - L[0] * x)))


# find all candidate matches in the left view for a point in the right view
# p: the homogeneous pixel (x, y, 1) in the right view
# r: the search range (in pixels) along the x-axis
# F: the fundamental matrix
def find_candidates(p, r, F):
    L = find_epiline(p, F)
    cand = []
    for i in range(-r, r):
        cand.append(find_point(p[0] + i, L))
    return cand


def BGR2RGB(bgr):
    return np.stack([bgr[:, :, 2], bgr[:, :, 1], bgr[:, :, 0]], axis=2)


def resize_image(input_, ratio_):
    # ratio_ > 0: upscale by ratio_ with bilinear interpolation
    # ratio_ < 0: downscale by |ratio_| with area averaging (i.e. average pooling)
    h, w = input_.shape[:2]
    if ratio_ > 0:
        return cv2.resize(input_, (w * ratio_, h * ratio_), interpolation=cv2.INTER_LINEAR)
    return cv2.resize(input_, (w // -ratio_, h // -ratio_), interpolation=cv2.INTER_AREA)


#dir_ = 'F:/WorkFiles/BackgroundBlur/2.TCLStereoDepthAlg-Final/2.TCLStereoDepthAlg/TCLStereoDepthAlg/test0104/phone2'
dir_ = 'F:/WorkFiles/BackgroundBlur/Test-3-19'

# sort so that left/right files pair up by index (glob order is not guaranteed)
views_left = sorted(glob.glob(dir_ + '/*aux_dump.jpg'))
views_right = sorted(glob.glob(dir_ + '/*main_dump.jpg'))

assert len(views_left) == len(views_right)

# img1 = to_uint8(cv2.pyrDown(cv2.imread(views_left[0], cv2.IMREAD_COLOR)))
# img2 = to_uint8(cv2.pyrDown(cv2.imread(views_right[0], cv2.IMREAD_COLOR)))

# note: cv2.imread takes an IMREAD_* flag; cv2.COLOR_BGR2GRAY is a cvtColor
# code, not a valid imread flag, so the images are loaded in color here
img1 = to_uint8(cv2.imread(views_left[0], cv2.IMREAD_COLOR))
img2 = to_uint8(cv2.imread(views_right[0], cv2.IMREAD_COLOR))

img1 = resize_image(img1, -3)
img2 = resize_image(img2, -8)

sift = cv2.SIFT_create()  # on older OpenCV builds: cv2.xfeatures2d.SIFT_create()

# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

# FLANN parameters
FLANN_INDEX_KDTREE = 1  # 1 is FLANN's kd-tree index (0 is the linear index)
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)

flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)

good = []
pts1 = []
pts2 = []

# Lowe's ratio test (0.3 is much stricter than the 0.7-0.8 suggested in the paper)
for i, (m, n) in enumerate(matches):
    if m.distance < 0.3 * n.distance:
        good.append(m)
        pts2.append(kp2[m.trainIdx].pt)
        pts1.append(kp1[m.queryIdx].pt)

pts1 = np.int32(pts1)
pts2 = np.int32(pts2)
F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)
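# keep only the inlier pairs selected by LMedS (mask is an Nx1 array of 0/1);
# this is the standard OpenCV pattern for reusing the filtered points
pts1 = pts1[mask.ravel() == 1]
pts2 = pts2[mask.ravel() == 1]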

# visualize the keypoints on both images
r = 1
img1 = BGR2RGB(img1)
for x, y in pts1:
    # draw a red square of radius r around each keypoint
    img1[y - r:y + r + 1, x - r:x + r + 1] = (200, 0, 0)
Image.fromarray(img1).show()

img2 = BGR2RGB(img2)
for x, y in pts2:
    img2[y - r:y + r + 1, x - r:x + r + 1] = (200, 0, 0)
Image.fromarray(img2).show()

# stop here to inspect the keypoint matches; remove this line to run the
# epiline search below
exit(0)

n = len(views_left)
for i in range(min(5, n)):
    img1 = to_uint8(cv2.imread(views_left[i], cv2.COLOR_BGR2GRAY))
    img2 = to_uint8(cv2.imread(views_right[i], cv2.COLOR_BGR2GRAY))

    img1 = resize_image(img1, -3)
    img2 = resize_image(img2, -8)

    # use F to search for matching points along the epiline
    y = 50
    x = 100
    h, w = img1.shape[0], img1.shape[1]  # bounds of the left view, where the candidates live
    p = np.array([x, y, 1.0])
    # lines1 = cv2.computeCorrespondEpilines(np.array([[x, y]]), 2, F)
    # print(lines1)
    # lines = find_epiline(p, F)
    # print(lines1[0]/lines)
    candidates = find_candidates(p, 200, F)
    #print(candidates)

    r = 1
    img2 = BGR2RGB(img2)
    img2[y - r:y + r + 1, x - r:x + r + 1] = (200, 0, 0)
    Image.fromarray(img2).show()

    img1 = BGR2RGB(img1)
    for c in candidates:
        if 0 <= c[0] < w and 0 <= c[1] < h:
            img1[c[1], c[0]] = (200, 0, 0)  # row is y = c[1], column is x = c[0]
    Image.fromarray(img1).show()
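Steps 8 and 9 are not covered by the code above. Below is a minimal sketch of both: it scores each candidate with a sum-of-absolute-differences (SAD) cost over a small window, keeps the cheapest one, and converts the x-coordinate difference (the disparity) into depth via Z = f * B / d. The names focal_px and baseline_m are hypothetical placeholders that must come from calibration, and the depth formula assumes a rectified pair at equal scale, which the differently resized views above do not satisfy without extra bookkeeping.

def sad_cost(im_l, im_r, pl, pr, w=5):
    # sum of absolute differences between two (2w+1) x (2w+1) patches
    xl, yl = pl
    xr, yr = pr
    a = im_l[yl - w:yl + w + 1, xl - w:xl + w + 1].astype(np.float32)
    b = im_r[yr - w:yr + w + 1, xr - w:xr + w + 1].astype(np.float32)
    if a.shape != b.shape or a.size == 0:  # patch clipped at an image border
        return np.inf
    return float(np.abs(a - b).sum())


def match_and_depth(im_l, im_r, p, candidates, focal_px, baseline_m):
    # step 8: pick the candidate with the lowest matching cost
    costs = [sad_cost(im_l, im_r, c, (int(p[0]), int(p[1]))) for c in candidates]
    best = candidates[int(np.argmin(costs))]
    # step 9: disparity is the difference of the x-coordinates; depth = f * B / d
    d = abs(best[0] - int(p[0]))
    depth = focal_px * baseline_m / d if d > 0 else float('inf')
    return best, depth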

 
