
Please credit the source when reposting: 小锋学长生活大爆炸 [xfxuezhang.cn]

Online perspective transform matrix calculator:
http://jlouthan.github.io/perspective-transform/examples/test-ui/index.html
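
If you would rather compute the matrix in code than with the tool above, OpenCV's cv2.getPerspectiveTransform builds the same kind of 3x3 matrix from four point correspondences. A minimal sketch; the corner coordinates and the test.jpg filename below are placeholder values for illustration:

import cv2
import numpy as np

# Four corner correspondences: source quadrilateral -> destination rectangle.
# The coordinates here are made-up example values.
src_pts = np.float32([[56, 65], [368, 52], [28, 387], [389, 390]])
dst_pts = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])

M = cv2.getPerspectiveTransform(src_pts, dst_pts)  # 3x3 perspective (homography) matrix
print(M)

img = cv2.imread('test.jpg')                       # any test image
warped = cv2.warpPerspective(img, M, (300, 300))   # apply the transform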

Demo

Take this image as an example:

The function to call is:

def warpImage(src, theta, phi, gamma, scale, fovy, corners=None):
    '''
    src: the input image
    theta, phi, gamma: the three rotation angles
    scale: scaling factor
    fovy: the camera's field of view (in degrees)
    corners: coordinates of the four corners of the target region in the image (not used yet)
    '''
    pass

With theta=30, phi=0, gamma=0, scale=1.0, fovy=83:
(result image: lbrp2k8s.png)

With theta=0, phi=30, gamma=0, scale=1.0, fovy=83:
(result image: lbrp92at.png)

With theta=30, phi=30, gamma=0, scale=1.0, fovy=83:
(result image: lbrp9opd.png)

Further combinations are not shown here.
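
The three results above can also be generated in one pass by looping over the parameter sets and calling warpImage (defined in the Python code section below). A small sketch, assuming test.jpg is the sample image:

import cv2
import matplotlib.pyplot as plt

# The three (theta, phi, gamma) combinations shown above; scale=1.0, fovy=83 throughout
params = [(30, 0, 0), (0, 30, 0), (30, 30, 0)]

src = cv2.imread('test.jpg')[..., ::-1]  # BGR -> RGB for matplotlib
fig, axes = plt.subplots(1, len(params), figsize=(12, 4))
for ax, (theta, phi, gamma) in zip(axes, params):
    ax.imshow(warpImage(src, theta, phi, gamma, 1.0, 83))
    ax.set_title('theta={}, phi={}, gamma={}'.format(theta, phi, gamma))
    ax.axis('off')
plt.show()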

{dotted startColor="#ff6c6c" endColor="#1989fa"/}

Python Code

{hide}

import cv2
import matplotlib.pyplot as plt
import numpy as np
from functools import reduce

# Build a homogeneous 4x4 rotation matrix from a Rodrigues rotation vector.
# Not used by warpMatrix below (which uses getRotationMatrixManual); kept as an alternative.
def construct_RotationMatrixHomogenous(rotation_angles):
    assert type(rotation_angles) == list and len(rotation_angles) == 3
    RH = np.eye(4, 4)
    # cv2.Rodrigues returns (rotation matrix, jacobian); copy the 3x3 block into RH
    RH[0:3, 0:3], _ = cv2.Rodrigues(np.array(rotation_angles, dtype=np.float64).reshape(3, 1))
    return RH

# https://en.wikipedia.org/wiki/Rotation_matrix
def getRotationMatrixManual(rotation_angles):
    rotation_angles = list(map(lambda x: np.deg2rad(x), rotation_angles))

    phi = rotation_angles[0]  # around x
    gamma = rotation_angles[1]  # around y
    theta = rotation_angles[2]  # around z

    # X rotation
    Rphi = np.eye(4, 4)
    sp = np.sin(phi)
    cp = np.cos(phi)
    Rphi[1, 1] = cp
    Rphi[2, 2] = Rphi[1, 1]
    Rphi[1, 2] = -sp
    Rphi[2, 1] = sp

    # Y rotation
    Rgamma = np.eye(4, 4)
    sg = np.sin(gamma)
    cg = np.cos(gamma)
    Rgamma[0, 0] = cg
    Rgamma[2, 2] = Rgamma[0, 0]
    Rgamma[0, 2] = sg
    Rgamma[2, 0] = -sg

    # Z rotation (in-image-plane)
    Rtheta = np.eye(4, 4)
    st = np.sin(theta)
    ct = np.cos(theta)
    Rtheta[0, 0] = ct
    Rtheta[1, 1] = Rtheta[0, 0]
    Rtheta[0, 1] = -st
    Rtheta[1, 0] = st

    R = reduce(lambda x, y: np.matmul(x, y), [Rphi, Rgamma, Rtheta])
    return R

# Convert the 3D input corners and their projected outputs into 2D pixel coordinates for
# cv2.getPerspectiveTransform: inputs are shifted from centred coordinates to image coordinates,
# outputs are mapped from the [-1, 1] range onto a sidelength x sidelength canvas.
def getPoints_for_PerspectiveTranformEstimation(ptsIn, ptsOut, W, H, sidelength):
    ptsIn2D = ptsIn[0, :]
    ptsOut2D = ptsOut[0, :]
    ptsOut2Dlist = []
    ptsIn2Dlist = []

    for i in range(0, 4):
        ptsOut2Dlist.append([ptsOut2D[i, 0], ptsOut2D[i, 1]])
        ptsIn2Dlist.append([ptsIn2D[i, 0], ptsIn2D[i, 1]])

    pin = np.array(ptsIn2Dlist) + [W / 2., H / 2.]
    pout = (np.array(ptsOut2Dlist) + [1., 1.]) * (0.5 * sidelength)
    pin = pin.astype(np.float32)
    pout = pout.astype(np.float32)

    return pin, pout

def warpMatrix(W, H, theta, phi, gamma, scale, fV):
    fVhalf = np.deg2rad(fV / 2.)
    d = np.sqrt(W * W + H * H)               # image diagonal
    sideLength = scale * d / np.cos(fVhalf)  # side of the square output canvas
    h = d / (2.0 * np.sin(fVhalf))           # camera distance so the diagonal fills the FOV
    n = h - (d / 2.0)                        # near clipping plane
    f = h + (d / 2.0)                        # far clipping plane

    # Translation along Z-axis by -h
    T = np.eye(4, 4)
    T[2, 3] = -h

    # Rotation matrices around x,y,z
    R = getRotationMatrixManual([phi, gamma, theta])

    # Projection Matrix 
    P = np.eye(4, 4)
    P[0, 0] = 1.0 / np.tan(fVhalf)
    P[1, 1] = P[0, 0]
    P[2, 2] = -(f + n) / (f - n)
    P[2, 3] = -(2.0 * f * n) / (f - n)
    P[3, 2] = -1.0

    # pythonic matrix multiplication
    F = reduce(lambda x, y: np.matmul(x, y), [P, T, R])

    # ptsIn and ptsOut must have shape (1, 4, 3), because that is the layout
    # cv2.perspectiveTransform() expects; in C++ this corresponds to Mat ptsIn(1, 4, CV_64FC3).
    ptsIn = np.array([[
        [-W / 2., H / 2., 0.], [W / 2., H / 2., 0.], [W / 2., -H / 2., 0.], [-W / 2., -H / 2., 0.]
    ]])
    ptsOut = cv2.perspectiveTransform(ptsIn, F)

    ptsInPt2f, ptsOutPt2f = getPoints_for_PerspectiveTranformEstimation(ptsIn, ptsOut, W, H, sideLength)

    # check float32 otherwise OpenCV throws an error
    assert (ptsInPt2f.dtype == np.float32)
    assert (ptsOutPt2f.dtype == np.float32)
    M33 = cv2.getPerspectiveTransform(ptsInPt2f, ptsOutPt2f)

    return M33, sideLength

def warpImage(src, theta, phi, gamma, scale, fovy, corners=None):
    '''
    src: the input image
    theta, phi, gamma: the three rotation angles
    scale: scaling factor
    fovy: the camera's field of view (in degrees)
    corners: coordinates of the four corners of the target region in the image (not used yet)
    '''
    H, W, Nc = src.shape
    M, sl = warpMatrix(W, H, theta, phi, gamma, scale, fovy)  # compute the warp matrix
    sl = int(sl)
    print('Output image dimension = {}'.format(sl))
    print('Output M = {}'.format(M))
    dst = cv2.warpPerspective(src, M, (sl, sl))  # warp the image
    return dst


src = cv2.imread('test.jpg')
src = src[..., ::-1]  # convert BGR to RGB for matplotlib
imgwarped = warpImage(src, 30, 30, 0, 1., 83)
plt.imshow(imgwarped)
plt.show()

{/hide}