Transformation of coordinates of ArUco markers
import numpy as np
import cv2
ARUCO_DICT = {
"DICT_4X4_50": cv2.aruco.DICT_4X4_50,
"DICT_4X4_100": cv2.aruco.DICT_4X4_100,
"DICT_4X4_250": cv2.aruco.DICT_4X4_250,
"DICT_4X4_1000": cv2.aruco.DICT_4X4_1000,
"DICT_5X5_50": cv2.aruco.DICT_5X5_50,
"DICT_5X5_100": cv2.aruco.DICT_5X5_100,
"DICT_5X5_250": cv2.aruco.DICT_5X5_250,
"DICT_5X5_1000": cv2.aruco.DICT_5X5_1000,
"DICT_6X6_50": cv2.aruco.DICT_6X6_50,
"DICT_6X6_100": cv2.aruco.DICT_6X6_100,
"DICT_6X6_250": cv2.aruco.DICT_6X6_250,
"DICT_6X6_1000": cv2.aruco.DICT_6X6_1000,
"DICT_7X7_50": cv2.aruco.DICT_7X7_50,
"DICT_7X7_100": cv2.aruco.DICT_7X7_100,
"DICT_7X7_250": cv2.aruco.DICT_7X7_250,
"DICT_7X7_1000": cv2.aruco.DICT_7X7_1000,
"DICT_ARUCO_ORIGINAL": cv2.aruco.DICT_ARUCO_ORIGINAL,
"DICT_APRILTAG_16h5": cv2.aruco.DICT_APRILTAG_16h5,
"DICT_APRILTAG_25h9": cv2.aruco.DICT_APRILTAG_25h9,
"DICT_APRILTAG_36h10": cv2.aruco.DICT_APRILTAG_36h10,
"DICT_APRILTAG_36h11": cv2.aruco.DICT_APRILTAG_36h11
}
# Global dictionary storing the latest tvec (translation vector) for each detected marker ID
latest_tvecs = {}
def bilinear_interpolation(x, y, points):
'''Interpolate (x,y) from values associated with four points.
The four points are a list of four triplets: (x, y, value).
The four points can be in any order. They should form a rectangle.
Return the interpolated value at (x,y)
'''
points = sorted(points) # order points by x, then by y
(x1, y1, q11), (_x1, y2, q12), (x2, _y1, q21), (_x2, _y2, q22) = points
# if x1 != _x1 or x2 != _x2 or y1 != _y1 or y2 != _y2:
# raise ValueError('points do not form a rectangle')
# if not x1 <= x <= x2 or not y1 <= y <= y2:
# raise ValueError('(x, y) not within the rectangle')
return (q11 * (x2 - x) * (y2 - y) +
q21 * (x - x1) * (y2 - y) +
q12 * (x2 - x) * (y - y1) +
q22 * (x - x1) * (y - y1)
) / ((x2 - x1) * (y2 - y1) + 0.0)
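# Example with hypothetical values: bilinear_interpolation(1.5, 1.5,
#     [(1, 1, 0), (2, 1, 1), (1, 2, 1), (2, 2, 2)]) returns 1.0,
# the interpolated value at the centre of the square spanned by the four samples.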
def aruco_display(corners, ids, rejected, image):
if len(corners) > 0:
ids = ids.flatten()
for (markerCorner, markerID) in zip(corners, ids):
            marker_corners = markerCorner.reshape((4, 2))
            (topLeft, topRight, bottomRight, bottomLeft) = marker_corners
topRight = (int(topRight[0]), int(topRight[1]))
bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
topLeft = (int(topLeft[0]), int(topLeft[1]))
cv2.line(image, topLeft, topRight, (0, 255, 0), 2)
cv2.line(image, topRight, bottomRight, (0, 255, 0), 2)
cv2.line(image, bottomRight, bottomLeft, (0, 255, 0), 2)
cv2.line(image, bottomLeft, topLeft, (0, 255, 0), 2)
cX = int((topLeft[0] + bottomRight[0]) / 2.0)
cY = int((topLeft[1] + bottomRight[1]) / 2.0)
cv2.circle(image, (cX, cY), 4, (0, 0, 255), -1)
cv2.putText(image, str(markerID), (topLeft[0], topLeft[1] - 10), cv2.FONT_HERSHEY_SIMPLEX,
0.5, (0, 255, 0), 2)
print(f"[Inference] ArUco marker ID: {markerID}")
return image
def pose_estimation(frame, aruco_dict_type, matrix_coefficients, distortion_coefficients):
global latest_tvecs
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
aruco_dict = cv2.aruco.getPredefinedDictionary(aruco_dict_type)
parameters = cv2.aruco.DetectorParameters()
detector = cv2.aruco.ArucoDetector(aruco_dict, parameters)
    corners, ids, rejected_img_points = detector.detectMarkers(gray)
if len(corners) > 0:
cv2.aruco.drawDetectedMarkers(frame, corners, ids)
ids = ids.flatten()
centroids = []
for i, corner in enumerate(corners):
            markerSize = 5.292  # physical marker side length, in the same units as the reported tvec (e.g. cm)
objPoints = np.array([
[-markerSize / 2, markerSize / 2, 0],
[markerSize / 2, markerSize / 2, 0],
[markerSize / 2, -markerSize / 2, 0],
[-markerSize / 2, -markerSize / 2, 0]
], dtype=np.float32)
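            # 3-D marker corner positions in the marker's own frame (z = 0), listed in the same
            # top-left, top-right, bottom-right, bottom-left order that detectMarkers returns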
imgPoints = corner.reshape((4, 2))
success, rvec, tvec = cv2.solvePnP(objPoints, imgPoints, matrix_coefficients, distortion_coefficients)
if success:
marker_id = ids[i]
latest_tvecs[marker_id] = tvec
                cv2.drawFrameAxes(frame, matrix_coefficients, distortion_coefficients, rvec, tvec, markerSize / 2)  # axis length in the same units as the marker
# Calculate centroid
centroid = np.mean(corner[0], axis=0)
centroids.append(centroid)
# Calculate pixel coordinates
cx, cy = int(centroid[0]), int(centroid[1])
# Display pixel coordinates above the marker in red
coord_text = f"({cx}, {cy})"
cv2.putText(frame, coord_text, (cx, cy - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
# Create a separate text area for each marker
text_y = 30 + i * 120
                cv2.rectangle(frame, (10, text_y - 20), (300, text_y + 100), (255, 255, 255), -1)  # filled white background for the text
cv2.rectangle(frame, (10, text_y - 20), (300, text_y + 100), (0, 0, 0), 1)
tvec_str = f"tvec: ({tvec[0][0]:.2f}, {tvec[1][0]:.2f}, {tvec[2][0]:.2f})"
cv2.putText(frame, tvec_str, (15, text_y + 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
cv2.putText(frame, f"Marker ID: {marker_id}", (15, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
if len(centroids) == 4:
centroids = np.array(centroids, dtype=np.int32)
            # Sort centroids clockwise around their mean; for an upright layout this yields
            # top-left, top-right, bottom-right, bottom-left, so index 0 becomes the origin marker
center = np.mean(centroids, axis=0)
sorted_centroids = sorted(centroids, key=lambda p: np.arctan2(p[1] - center[1], p[0] - center[0]))
sorted_centroids = np.array(sorted_centroids, dtype=np.int32)
# Draw the quadrilateral
cv2.polylines(frame, [sorted_centroids], True, (255, 0, 0), 2) # Blue quadrilateral
# The origin is the first marker in the sorted list
origin = sorted_centroids[0]
# Calculate and display relative positions
            for idx, centroid in enumerate(sorted_centroids, 1):
                relative_pos = centroid - origin
                pos_text = f"Marker {idx}: ({relative_pos[0]}, {relative_pos[1]})"
                cv2.putText(frame, pos_text, (int(centroid[0]) + 10, int(centroid[1]) + 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
specific_point = (637, 423)
cv2.circle(frame, specific_point, 5, (0, 255, 255), -1) # Yellow dot
cv2.putText(frame, f"({specific_point[0]}, {specific_point[1]})",
(specific_point[0] + 25, specific_point[1] - 25),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
            # Bilinearly interpolate the point's fractional position (0..1) between the
            # left/right and top/bottom marker pairs
try:
interpolated_x = bilinear_interpolation(specific_point[0], specific_point[1], [
(sorted_centroids[0][0], sorted_centroids[0][1], 0),
(sorted_centroids[1][0], sorted_centroids[1][1], 1),
(sorted_centroids[2][0], sorted_centroids[2][1], 1),
(sorted_centroids[3][0], sorted_centroids[3][1], 0)
])
interpolated_y = bilinear_interpolation(specific_point[0], specific_point[1], [
(sorted_centroids[0][0], sorted_centroids[0][1], 0),
(sorted_centroids[1][0], sorted_centroids[1][1], 0),
(sorted_centroids[2][0], sorted_centroids[2][1], 1),
(sorted_centroids[3][0], sorted_centroids[3][1], 1)
])
# Calculate the relative position to the origin
relative_x = interpolated_x * (sorted_centroids[1][0] - origin[0])
relative_y = interpolated_y * (sorted_centroids[3][1] - origin[1])
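                    # Note: this scaling assumes the four marker centroids form a roughly
                    # axis-aligned rectangle in the image; a strongly tilted view will distort it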
interpolated_pos = f"Relative: ({relative_x:.2f}, {relative_y:.2f})"
cv2.putText(frame, interpolated_pos,
(specific_point[0] + 10, specific_point[1] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 1)
except ValueError as e:
print(f"Interpolation error: {e}")
return frame
aruco_type = "DICT_6X6_250"
# These are camera intrinsic properties and need to be changed for the camera used.
intrinsic_camera = np.array(((933.15867, 0, 657.59), (0, 933.1586, 400.36993), (0, 0, 1)))
distortion = np.array((-0.43948, 0.18514, 0, 0))
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
while cap.isOpened():
    ret, img = cap.read()
    if not ret:
        break
output = pose_estimation(img, ARUCO_DICT[aruco_type], intrinsic_camera, distortion)
cv2.imshow('Estimated Pose', output)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
When I move the camera, the relative positions of the ArUco markers change. I want the relative coordinates of the markers to stay the same no matter where the camera is located.
What I want is for the topmost corner of the graph paper to be treated as the origin, with the relative positions of the ArUco markers expressed with respect to that origin.
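One way to make the coordinates independent of the camera is to map image pixels onto the plane of the graph paper with a homography, using the four marker centroids as point correspondences. Below is a minimal sketch of that idea, not a drop-in fix: board_coordinates, board_w and board_h are hypothetical names, and the width/height values are the real distances between the marker centres that you would measure on your own sheet.

import numpy as np
import cv2

def board_coordinates(pixel_point, sorted_centroids, board_w, board_h):
    # Pixel positions of the four markers, ordered top-left, top-right, bottom-right, bottom-left
    src = np.array(sorted_centroids, dtype=np.float32)
    # The same four markers expressed on the paper, with the topmost (top-left) marker as origin;
    # shift these values by a measured offset if the paper's own corner should be (0, 0) instead
    dst = np.array([[0, 0],
                    [board_w, 0],
                    [board_w, board_h],
                    [0, board_h]], dtype=np.float32)
    # Homography from image pixels to paper coordinates
    H = cv2.getPerspectiveTransform(src, dst)
    pt = np.array([[pixel_point]], dtype=np.float32)  # shape (1, 1, 2), as cv2.perspectiveTransform expects
    mapped = cv2.perspectiveTransform(pt, H)
    return float(mapped[0][0][0]), float(mapped[0][0][1])

Calling this inside pose_estimation, e.g. board_coordinates(specific_point, sorted_centroids, 20.0, 20.0) with your own measured distances, should return roughly the same paper coordinates for a fixed point on the sheet no matter where the camera is, as long as all four markers are detected and the point lies on the paper plane.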