Refine VideoEditor point transformation and crop handling with enhanced logging

This commit improves the point transformation methods in the VideoEditor class: the crop is now centered on the interpolated tracking position for the current frame when one is available, and point coordinates are transformed in the same order the frame is processed in apply_crop_zoom_and_rotation. Detailed logging tracks crop-center adjustments and round-trip verification of transformed points, making the state of the editing pipeline easier to debug. Additionally, untransformed points are now clamped to the original frame dimensions.
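
For context, the forward mapping follows the same crop -> rotation -> zoom order that apply_crop_zoom_and_rotation applies to the frame. The sketch below is a minimal, standalone illustration of that order, not the actual method: the function name and parameters are hypothetical (the real transform_point reads crop_rect, rotation_angle and zoom_factor from the editor instance), and it assumes rotation uses the post-crop dimensions.

def transform_point_sketch(point, frame_size, crop_rect=None,
                           rotation_angle=0, zoom_factor=1.0):
    """Map a point from original-frame coords to display coords (sketch only)."""
    if point is None:
        return None
    x, y = float(point[0]), float(point[1])
    frame_w, frame_h = float(frame_size[0]), float(frame_size[1])

    # Step 1: crop -- shift the point relative to the crop origin.
    if crop_rect:
        crop_x, crop_y, crop_w, crop_h = crop_rect
        x -= crop_x
        y -= crop_y
        # Assumption: rotation operates on the cropped dimensions.
        frame_w, frame_h = float(crop_w), float(crop_h)

    # Step 2: rotation -- same mappings as in the diff below.
    if rotation_angle == 90:
        x, y = y, frame_w - x
    elif rotation_angle == 180:
        x, y = frame_w - x, frame_h - y
    elif rotation_angle == 270:
        x, y = frame_h - y, x

    # Step 3: zoom -- scale into display coordinates.
    if zoom_factor != 1.0:
        x *= zoom_factor
        y *= zoom_factor
    return (x, y)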
2025-09-16 20:38:16 +02:00
parent cb097c55f1
commit 2979dca40a


@@ -1124,9 +1124,18 @@ class VideoEditor:
center_x = x + w // 2
center_y = y + h // 2
# Apply offset to center
# Get the interpolated position for the current frame
current_pos = self.motion_tracker.get_interpolated_position(self.current_frame)
if current_pos:
# If we have a current position, center the crop directly on it
new_center_x = int(current_pos[0])
new_center_y = int(current_pos[1])
print(f"apply_crop_zoom_and_rotation: centering crop on interpolated position {current_pos}")
else:
# Otherwise use the tracking offset
new_center_x = center_x + int(tracking_offset[0])
new_center_y = center_y + int(tracking_offset[1])
print(f"apply_crop_zoom_and_rotation: applying offset to crop center: ({center_x}, {center_y}) -> ({new_center_x}, {new_center_y})")
# Calculate new top-left corner
x = new_center_x - w // 2
@@ -1196,10 +1205,14 @@ class VideoEditor:
1. Crop
2. Rotation
3. Zoom
The key insight is that we need to handle coordinates in the same way
the frame is processed in apply_crop_zoom_and_rotation.
"""
if point is None:
return None
# Get original coordinates and convert to float for precise calculations
orig_x, orig_y = float(point[0]), float(point[1])
print(f"transform_point: original point ({orig_x}, {orig_y})")
@@ -1209,12 +1222,8 @@ class VideoEditor:
frame_height, frame_width = self.current_display_frame.shape[:2]
# Step 1: Check if point is within frame bounds
if not (0 <= orig_x < frame_width and 0 <= orig_y < frame_height):
print(f"transform_point: point is outside frame bounds ({frame_width}x{frame_height})")
# We'll still transform it, but it might not be visible
# Step 1: Apply crop (adjust point relative to crop origin)
# STEP 1: Apply crop
# If we're cropped, check if the point is within the crop area
x, y = orig_x, orig_y
if self.crop_rect:
crop_x, crop_y, crop_w, crop_h = self.crop_rect
@@ -1227,37 +1236,37 @@ class VideoEditor:
# Adjust coordinates relative to crop origin
x -= crop_x
y -= crop_y
# If point is outside crop area, it will have negative coordinates or coordinates > crop dimensions
# We'll still transform it for consistent behavior
print(f"transform_point: after crop adjustment ({x}, {y})")
# Step 2: Apply rotation
if self.rotation_angle != 0:
# Get dimensions after crop
if self.crop_rect:
crop_w, crop_h = float(self.crop_rect[2]), float(self.crop_rect[3])
else:
crop_h, crop_w = float(frame_height), float(frame_width)
# Update dimensions for rotation calculations
frame_width, frame_height = crop_w, crop_h
print(f"transform_point: rotation_angle = {self.rotation_angle}, dimensions = ({crop_w}, {crop_h})")
# STEP 2: Apply rotation
if self.rotation_angle != 0:
print(f"transform_point: rotation_angle = {self.rotation_angle}, dimensions = ({frame_width}, {frame_height})")
# Apply rotation to coordinates
if self.rotation_angle == 90:
# 90° clockwise: (x,y) -> (y, width-x)
new_x = y
new_y = crop_w - x
new_y = frame_width - x
x, y = new_x, new_y
elif self.rotation_angle == 180:
# 180° rotation: (x,y) -> (width-x, height-y)
x = crop_w - x
y = crop_h - y
x = frame_width - x
y = frame_height - y
elif self.rotation_angle == 270:
# 270° clockwise: (x,y) -> (height-y, x)
new_x = crop_h - y
new_x = frame_height - y
new_y = x
x, y = new_x, new_y
print(f"transform_point: after rotation ({x}, {y})")
# Step 3: Apply zoom
# STEP 3: Apply zoom
if self.zoom_factor != 1.0:
x *= self.zoom_factor
y *= self.zoom_factor
@@ -1273,62 +1282,65 @@ class VideoEditor:
1. Reverse zoom
2. Reverse rotation
3. Reverse crop
The key insight is that we need to handle coordinates in the exact reverse
order as they are processed in apply_crop_zoom_and_rotation.
"""
if point is None or self.current_display_frame is None:
return None
# Get display coordinates and convert to float for precise calculations
display_x, display_y = float(point[0]), float(point[1])
print(f"untransform_point: original display point ({display_x}, {display_y})")
# Get frame dimensions
frame_height, frame_width = self.current_display_frame.shape[:2]
orig_frame_height, orig_frame_width = self.current_display_frame.shape[:2]
# Step 1: Reverse zoom
# Get dimensions of the frame after crop (if any)
if self.crop_rect:
frame_width, frame_height = float(self.crop_rect[2]), float(self.crop_rect[3])
else:
frame_width, frame_height = float(orig_frame_width), float(orig_frame_height)
# STEP 1: Reverse zoom
x, y = display_x, display_y
if self.zoom_factor != 1.0:
x /= self.zoom_factor
y /= self.zoom_factor
print(f"untransform_point: after reverse zoom ({x}, {y}), zoom_factor = {self.zoom_factor}")
# Step 2: Reverse rotation
# STEP 2: Reverse rotation
if self.rotation_angle != 0:
# Get dimensions after crop but before rotation
if self.crop_rect:
crop_w, crop_h = float(self.crop_rect[2]), float(self.crop_rect[3])
else:
crop_h, crop_w = float(frame_height), float(frame_width)
print(f"untransform_point: rotation_angle = {self.rotation_angle}, dimensions = ({crop_w}, {crop_h})")
print(f"untransform_point: rotation_angle = {self.rotation_angle}, dimensions = ({frame_width}, {frame_height})")
# Apply inverse rotation to coordinates
if self.rotation_angle == 90:
# Reverse 90° clockwise: (x,y) -> (width-y, x)
new_x = crop_w - y
new_x = frame_width - y
new_y = x
x, y = new_x, new_y
elif self.rotation_angle == 180:
# Reverse 180° rotation: (x,y) -> (width-x, height-y)
x = crop_w - x
y = crop_h - y
x = frame_width - x
y = frame_height - y
elif self.rotation_angle == 270:
# Reverse 270° clockwise: (x,y) -> (y, height-x)
new_x = y
new_y = crop_h - x
new_y = frame_height - x
x, y = new_x, new_y
print(f"untransform_point: after reverse rotation ({x}, {y})")
# Step 3: Reverse crop (add crop offset)
# STEP 3: Reverse crop (add crop offset)
if self.crop_rect:
crop_x, crop_y = float(self.crop_rect[0]), float(self.crop_rect[1])
x += crop_x
y += crop_y
print(f"untransform_point: after reverse crop ({x}, {y}), crop_rect = {self.crop_rect}")
# Check if the resulting point is within frame bounds
if not (0 <= x < frame_width and 0 <= y < frame_height):
print(f"untransform_point: result is outside frame bounds ({frame_width}x{frame_height})")
# We'll still return it, but it might not be visible
# Ensure the point is within the original frame bounds
x = max(0, min(x, orig_frame_width - 1))
y = max(0, min(y, orig_frame_height - 1))
print(f"untransform_point: final result = ({x}, {y})")
return (x, y)
@@ -2191,17 +2203,6 @@ class VideoEditor:
print(f"mouse_callback: untransformed to original coords {original_point}")
if original_point:
# Get original frame dimensions for validation
frame_height, frame_width = self.current_display_frame.shape[:2]
# Ensure point is within the original frame bounds
x_clamped = max(0, min(frame_width - 1, original_point[0]))
y_clamped = max(0, min(frame_height - 1, original_point[1]))
if x_clamped != original_point[0] or y_clamped != original_point[1]:
print(f"mouse_callback: clamped point from {original_point} to ({x_clamped}, {y_clamped})")
original_point = (x_clamped, y_clamped)
# Check if clicking on an existing tracking point to remove it
removed = self.motion_tracker.remove_tracking_point(
self.current_frame,
@@ -2221,13 +2222,22 @@ class VideoEditor:
)
print(f"mouse_callback: added tracking point at {original_point}")
# Draw a debug marker at the exact point for visualization
if display_frame is not None:
# Transform the point back to display coordinates for verification
# Perform a round-trip verification to ensure our coordinate system is consistent
# This will help debug any issues with the transformation
verification_point = self.transform_point(original_point)
if verification_point:
print(f"mouse_callback: verification - point transforms back to {verification_point}")
# Calculate expected canvas position for verification
expected_x = int(offset_x + verification_point[0] * scale)
expected_y = int(offset_y + verification_point[1] * scale)
print(f"mouse_callback: verification - expected canvas position: ({expected_x}, {expected_y}), actual: ({x}, {y})")
# Calculate the error between click and expected position
error_x = abs(expected_x - x)
error_y = abs(expected_y - y)
print(f"mouse_callback: verification - position error: ({error_x}, {error_y}) pixels")
# Save state when tracking points change
self.save_state()
self.display_needs_update = True
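
As a companion to the forward sketch above, the inverse mapping reverses the steps in the opposite order (zoom, then rotation, then crop) and clamps the result to the original frame, matching what the diff shows for untransform_point. Again a minimal, standalone sketch with hypothetical names, not the actual method:

def untransform_point_sketch(point, orig_frame_size, crop_rect=None,
                             rotation_angle=0, zoom_factor=1.0):
    """Map a display point back to original-frame coords (sketch only)."""
    if point is None:
        return None
    x, y = float(point[0]), float(point[1])
    orig_w, orig_h = float(orig_frame_size[0]), float(orig_frame_size[1])
    # Dimensions of the frame after crop (if any) are used for the rotation step.
    if crop_rect:
        frame_w, frame_h = float(crop_rect[2]), float(crop_rect[3])
    else:
        frame_w, frame_h = orig_w, orig_h

    # Step 1: reverse zoom.
    if zoom_factor != 1.0:
        x /= zoom_factor
        y /= zoom_factor

    # Step 2: reverse rotation (inverse of the forward mappings).
    if rotation_angle == 90:
        x, y = frame_w - y, x
    elif rotation_angle == 180:
        x, y = frame_w - x, frame_h - y
    elif rotation_angle == 270:
        x, y = y, frame_h - x

    # Step 3: reverse crop (add the crop origin back).
    if crop_rect:
        x += float(crop_rect[0])
        y += float(crop_rect[1])

    # Clamp to the original frame bounds, as the commit now does.
    x = max(0.0, min(x, orig_w - 1))
    y = max(0.0, min(y, orig_h - 1))
    return (x, y)

A round trip through both sketches (transform, then untransform) should return approximately the original point when it lies inside the crop, which is the consistency that the new verification logging in mouse_callback is meant to surface.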