import numpy as np
import cv2


def pixel2robot(p_x, p_y, Zc=380):
    """
    Convert a pixel coordinate to a coordinate in the robot base frame.

    Args:
        p_x (int): x coordinate in the pixel (image) frame
        p_y (int): y coordinate in the pixel (image) frame
        Zc (float): assumed depth of the point along the camera optical axis (default 380)

    Returns:
        base_coords (numpy.ndarray): coordinates in the robot base frame

    Example:
        >>> pixel2robot(100, 200)
        array([-51.464, -41.334, 774.52])
    """
    # Camera intrinsic matrix
    camera_matrix = np.array([[1461.09955384437, 0, 963.862411991572],
                              [0, 1461.89533915958, 545.053909593624],
                              [0, 0, 1]])
    # Previously hard-coded extrinsics, kept for reference:
    # R = np.array([[-0.67004953, -0.74115346, 0.04153518],
    #               [0.73884813, -0.67127662, -0.05908594],
    #               [0.07167334, -0.00890232, 0.99738843]])
    # T = np.array([0.06106497, -0.06272417, 0.7103077])

    # Extrinsics (rotation R and translation T) loaded from the calibration file
    npzfile = np.load('camera_params.npz')
    R = npzfile['R']
    T = np.reshape(npzfile['T'], (3,))

    # Back-project the pixel (u, v) to camera coordinates at the assumed depth Zc
    # (test pixel: 336, 174)
    u, v = p_x, p_y
    Xc = (u - camera_matrix[0, 2]) * Zc / camera_matrix[0, 0]
    Yc = (v - camera_matrix[1, 2]) * Zc / camera_matrix[1, 1]

    # Transform from the camera frame to the calibration-board frame
    camera_coords = np.array([Xc, Yc, Zc])
    base_coords = np.dot(R, camera_coords) + T

    # Fixed transform into the robot base frame: flip the Y axis, discard the
    # computed Z component (the all-zero last row of taR), then add the offset taT
    taR = np.array([[1, 0, 0],
                    [0, -1, 0],
                    [0, 0, 0]])
    taT = np.array([-1.5, -216, -30])
    base_coords = np.dot(taR, base_coords) + taT
    return base_coords


# Alternative kept for reference: undistort the pixel first, then scale by depth.
# camera_matrix = np.array([[1461.09955384437, 0, 963.862411991572],
#                           [0, 1461.89533915958, 545.053909593624],
#                           [0, 0, 1]])
# dist_coeffs = np.array([0.123539164198816, -0.224109122219162, 0, 0, 0])
#
# def covert(point=[[0.0, 0.0]], z=260):
#     point = np.array(point, dtype=float)
#     pts_uv = cv2.undistortPoints(point, camera_matrix, dist_coeffs) * z
#     return pts_uv[0][0]
#
# print(covert([993, 10], 260))
# print(covert([554, 559], 260))

# 604, 109 [-115, -150.94, -25]

if __name__ == '__main__':
    # print(pixel2robot(468, 254))
    print(pixel2robot(469, 254))
    # print(pixel2robot(956, 448))
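

# --- Hedged sketch (not part of the original script) -------------------------
# pixel2robot() loads the extrinsics R and T from 'camera_params.npz', but the
# calibration step that produces that file is not shown here. The function
# below is a minimal sketch of how such a file could be generated with
# cv2.solvePnP. `object_points`, `image_points`, and the function name itself
# are hypothetical; the original calibration procedure and its frame
# conventions may differ (the pose is saved exactly as returned by solvePnP).

def save_camera_extrinsics(object_points, image_points, camera_matrix,
                           dist_coeffs, path='camera_params.npz'):
    """Estimate a camera pose from 3D-2D correspondences and save R, T to .npz."""
    object_points = np.asarray(object_points, dtype=np.float64)  # (N, 3) board points
    image_points = np.asarray(image_points, dtype=np.float64)    # (N, 2) pixel points
    ok, rvec, tvec = cv2.solvePnP(object_points, image_points,
                                  camera_matrix, dist_coeffs)
    if not ok:
        raise RuntimeError('solvePnP failed to find a pose')
    R, _ = cv2.Rodrigues(rvec)  # rotation vector -> 3x3 rotation matrix
    np.savez(path, R=R, T=tvec.reshape(3))
    return R, tvec.reshape(3)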