Enhance tracking point management in VideoEditor and MotionTracker with dual coordinate storage

This commit introduces a TrackingPoint class that stores both the original frame coordinates and the display coordinates of each tracking point, making point transformations more accurate and consistent. VideoEditor now uses this structure when drawing and adding tracking points, the logging around adding and processing points has been extended, and the redundant round-trip coordinate verification in the mouse callback has been removed, since both coordinate systems are stored directly.
2025-09-16 21:33:28 +02:00
parent 33a553c092
commit cd86cfc9f2
2 changed files with 73 additions and 51 deletions

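Note: the TrackingPoint class itself is not visible in this diff. As a rough sketch of what a dual-coordinate container with the `original` and `display` attributes used below might look like (the attribute names come from the diff; the dataclass layout is an assumption, not the actual implementation):

from dataclasses import dataclass
from typing import Optional, Tuple


@dataclass
class TrackingPoint:
    # Sketch only: one tracking point stored in both coordinate systems.
    original: Tuple[int, int]                   # (x, y) in original frame pixels
    display: Optional[Tuple[int, int]] = None   # (x, y) in display pixels, if captured

Storing both values lets draw_tracking_points reuse the display position captured at click time instead of re-deriving it on every redraw.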

@@ -1545,27 +1545,34 @@ class VideoEditor:
             cv2.line(canvas, (bl_x, bl_y), (tl_x, tl_y), (255, 0, 255), 1)
         # Process each tracking point
-        for i, point in enumerate(tracking_points):
-            print(f"draw_tracking_points: processing point {i}: {point}")
+        for i, tracking_point in enumerate(tracking_points):
+            # Get the original coordinates
+            orig_x, orig_y = tracking_point.original
+            print(f"draw_tracking_points: processing point {i}: original={tracking_point.original}")
             # Check if the point is within the frame bounds
-            is_in_frame = (0 <= point[0] < frame_width and 0 <= point[1] < frame_height)
+            is_in_frame = (0 <= orig_x < frame_width and 0 <= orig_y < frame_height)
             print(f"draw_tracking_points: point {i} is {'inside' if is_in_frame else 'outside'} frame bounds")
             # Check if the point is within the crop area (if cropping is active)
             is_in_crop = True
             if self.crop_rect:
                 crop_x, crop_y, crop_w, crop_h = self.crop_rect
-                is_in_crop = (crop_x <= point[0] < crop_x + crop_w and
-                              crop_y <= point[1] < crop_y + crop_h)
+                is_in_crop = (crop_x <= orig_x < crop_x + crop_w and
+                              crop_y <= orig_y < crop_y + crop_h)
             print(f"draw_tracking_points: point {i} is {'inside' if is_in_crop else 'outside'} crop area")
-            # Transform point from original frame coordinates to display coordinates
-            display_point = self.transform_point(point)
+            # Get the display coordinates - either from stored value or transform now
+            if tracking_point.display:
+                # Use stored display coordinates
+                display_point = tracking_point.display
+                print(f"draw_tracking_points: using stored display coordinates {display_point}")
+            else:
+                # Transform point from original frame coordinates to display coordinates
+                display_point = self.transform_point(tracking_point.original)
+                print(f"draw_tracking_points: transformed to display coordinates {display_point}")
             if display_point is not None:
                 print(f"draw_tracking_points: point {i} transformed to {display_point}")
                 # Scale and offset the point to match the canvas
                 x = int(offset_x + display_point[0] * scale)
                 y = int(offset_y + display_point[1] * scale)
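The final scale-and-offset step above is the only arithmetic between display coordinates and canvas pixels. A standalone sketch of that mapping and its inverse, assuming the same `scale`, `offset_x`, and `offset_y` used in the hunk (these helper functions are illustrative and not part of VideoEditor):

from typing import Tuple


def display_to_canvas(point: Tuple[float, float], scale: float,
                      offset_x: float, offset_y: float) -> Tuple[int, int]:
    # Same formula as the hunk above: scale the point, then offset it onto the canvas.
    return int(offset_x + point[0] * scale), int(offset_y + point[1] * scale)


def canvas_to_display(x: float, y: float, scale: float,
                      offset_x: float, offset_y: float) -> Tuple[float, float]:
    # Inverse mapping, e.g. for turning a canvas mouse click back into display coordinates.
    return (x - offset_x) / scale, (y - offset_y) / scale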
@@ -2213,33 +2220,17 @@ class VideoEditor:
             if removed:
                 print(f"mouse_callback: removed tracking point at {original_point}")
             else:
-                # If no point was removed, add a new tracking point
+                # Add a new tracking point with both original and display coordinates
+                # This is the key change - we store both coordinate systems
                 self.motion_tracker.add_tracking_point(
                     self.current_frame,
                     original_point[0],
-                    original_point[1]
+                    original_point[1],
+                    display_coords=(display_x, display_y)  # Store the display coordinates directly
                 )
-                print(f"mouse_callback: added tracking point at {original_point}")
+                print(f"mouse_callback: added tracking point at {original_point} (display: {display_x}, {display_y})")
-                # Perform a round-trip verification to ensure our coordinate system is consistent
-                verification_point = self.transform_point(original_point)
-                if verification_point:
-                    print(f"mouse_callback: verification - point transforms back to {verification_point}")
-                    # Calculate expected canvas position for verification
-                    expected_x = int(start_x + verification_point[0] * scale)
-                    expected_y = int(start_y + verification_point[1] * scale)
-                    print(f"mouse_callback: verification - expected canvas position: ({expected_x}, {expected_y}), actual: ({x}, {y})")
-                    # Calculate the error between click and expected position
-                    error_x = abs(expected_x - x)
-                    error_y = abs(expected_y - y)
-                    print(f"mouse_callback: verification - position error: ({error_x}, {error_y}) pixels")
-                    # If error is significant, print a warning
-                    if error_x > 2 or error_y > 2:
-                        print(f"WARNING: Significant coordinate transformation error detected!")
-                        print(f"This may indicate a problem with the transform/untransform functions.")
+                # No need for verification - we're storing both coordinate systems directly
             # Save state when tracking points change
             self.save_state()
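The MotionTracker side of the new display_coords keyword is not part of this commit view. A minimal sketch of how add_tracking_point might store both coordinate systems, reusing the TrackingPoint sketch above (the per-frame dict layout and method body are assumptions, not the actual MotionTracker code):

from typing import Dict, List, Optional, Tuple


class MotionTracker:
    def __init__(self) -> None:
        # Tracking points keyed by frame index.
        self.tracking_points: Dict[int, List[TrackingPoint]] = {}

    def add_tracking_point(self, frame: int, x: int, y: int,
                           display_coords: Optional[Tuple[int, int]] = None) -> None:
        # Keep the original frame coordinates together with the display
        # coordinates captured at click time, so no round-trip transform
        # is needed when drawing.
        point = TrackingPoint(original=(x, y), display=display_coords)
        self.tracking_points.setdefault(frame, []).append(point)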