Refactor crop and motion tracking logic in VideoEditor

This commit simplifies crop application by no longer applying the motion tracking offset to the crop rectangle; the offset is applied to the display offset instead, so tracking pans the zoomed view. It also makes tracking point removal more reliable by computing the removal distance in video coordinates rather than screen coordinates, and it corrects the HJKL key mapping so the keys follow visual directions under the current video rotation.
2025-09-16 14:38:48 +02:00
parent 4960812cba
commit 04d914834e

@@ -202,7 +202,7 @@ class MotionTracker:
             return (0.0, 0.0)
         # Calculate offset to center the crop on the tracked point
-        # The offset should move the crop so the tracked point stays centered
+        # The offset should move the display so the tracked point stays centered
         offset_x = current_pos[0] - self.base_zoom_center[0]
         offset_y = current_pos[1] - self.base_zoom_center[1]
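
For reference, the offset math in this hunk reduces to a small standalone function: the offset is just the delta between the tracked point's current position and the zoom center captured when tracking started. A minimal sketch, assuming positions is a dict of frame index to (x, y) in video coordinates (only the (0.0, 0.0) fallback and the subtraction appear in the diff):

def get_tracking_offset(positions, frame, base_zoom_center):
    # Fallback shown in the diff: no tracked position means no offset
    current_pos = positions.get(frame)
    if current_pos is None or base_zoom_center is None:
        return (0.0, 0.0)
    # Move the display so the tracked point stays centered
    offset_x = current_pos[0] - base_zoom_center[0]
    offset_y = current_pos[1] - base_zoom_center[1]
    return (offset_x, offset_y)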
@@ -1241,17 +1241,10 @@ class VideoEditor:
         # Apply brightness/contrast first (to original frame for best quality)
         processed_frame = self.apply_brightness_contrast(processed_frame)
-        # Apply crop with motion tracking offset
+        # Apply crop
         if self.crop_rect:
             x, y, w, h = self.crop_rect
             x, y, w, h = int(x), int(y), int(w), int(h)
-            # Apply motion tracking offset to center crop on tracked point
-            if self.motion_tracker.tracking_enabled:
-                tracking_offset_x, tracking_offset_y = self.motion_tracker.get_tracking_offset(self.current_frame)
-                x += int(tracking_offset_x)
-                y += int(tracking_offset_y)
             # Ensure crop is within frame bounds
             x = max(0, min(x, processed_frame.shape[1] - 1))
             y = max(0, min(y, processed_frame.shape[0] - 1))
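
The bounds check retained at the end of this hunk clamps the crop origin into the frame. A self-contained sketch of that clamping, assuming the width and height are also shrunk afterwards (the w/h step is an assumption; only the x/y clamps are visible in the diff):

import numpy as np

def clamp_crop(frame, x, y, w, h):
    fh, fw = frame.shape[:2]
    # Keep the crop origin inside the frame (as in the diff)
    x = max(0, min(int(x), fw - 1))
    y = max(0, min(int(y), fh - 1))
    # Shrink width/height so the slice stays in bounds (assumed step)
    w = min(int(w), fw - x)
    h = min(int(h), fh - y)
    return frame[y:y + h, x:x + w]

cropped = clamp_crop(np.zeros((1080, 1920, 3), np.uint8), -50, 40, 640, 360)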
@@ -1273,11 +1266,14 @@ class VideoEditor:
                 processed_frame, (new_width, new_height), interpolation=cv2.INTER_LINEAR
             )
-            # Handle zoom center and display offset
+            # Handle zoom center and display offset with motion tracking
             if new_width > self.window_width or new_height > self.window_height:
+                # Apply motion tracking offset to display offset
+                tracking_offset_x, tracking_offset_y = self.motion_tracker.get_tracking_offset(self.current_frame)
                 # Calculate crop from zoomed image to fit window
-                start_x = max(0, self.display_offset[0])
-                start_y = max(0, self.display_offset[1])
+                start_x = max(0, self.display_offset[0] + int(tracking_offset_x))
+                start_y = max(0, self.display_offset[1] + int(tracking_offset_y))
                 end_x = min(new_width, start_x + self.window_width)
                 end_y = min(new_height, start_y + self.window_height)
                 processed_frame = processed_frame[start_y:end_y, start_x:end_x]
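
Together with the previous hunk, this moves the tracking offset from the crop rectangle to the display offset, so tracking pans the zoomed view rather than shifting the crop itself. A condensed sketch of the new display path under that reading (names follow the diff; the clamping mirrors the lines above):

def visible_region(display_offset, tracking_offset, new_size, window_size):
    # Pan by display offset plus tracking offset, clamped so the
    # window never reads outside the zoomed image
    new_w, new_h = new_size
    win_w, win_h = window_size
    start_x = max(0, display_offset[0] + int(tracking_offset[0]))
    start_y = max(0, display_offset[1] + int(tracking_offset[1]))
    end_x = min(new_w, start_x + win_w)
    end_y = min(new_h, start_y + win_h)
    return start_x, start_y, end_x, end_y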
@@ -1993,26 +1989,9 @@ class VideoEditor:
                 point_removed = False
                 for i, (px, py) in enumerate(current_points):
-                    # Convert point to screen coordinates to check distance
-                    # We need to calculate the same parameters as in display_current_frame
-                    if self.current_display_frame is not None:
-                        # Apply transformations to get display frame
-                        display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)
-                        if display_frame is not None:
-                            height, width = display_frame.shape[:2]
-                            available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
-                            scale = min(self.window_width / width, available_height / height)
-                            # Calculate display position
-                            frame_height, frame_width = display_frame.shape[:2]
-                            start_y = (available_height - frame_height) // 2
-                            start_x = (self.window_width - frame_width) // 2
-                            screen_px, screen_py = self.video_to_screen_coords(px, py, start_x, start_y, scale)
-                            if screen_px is not None and screen_py is not None:
-                                # Calculate distance in screen coordinates
-                                distance = ((x - screen_px) ** 2 + (y - screen_py) ** 2) ** 0.5
-                                if distance < 20:  # Within 20 pixels
+                    # Calculate distance in video coordinates (simpler and more reliable)
+                    distance = ((video_x - px) ** 2 + (video_y - py) ** 2) ** 0.5
+                    if distance < 50:  # Within 50 pixels in video coordinates
                         self.motion_tracker.remove_tracking_point(self.current_frame, i)
                         self.set_feedback_message(f"Tracking point removed at frame {self.current_frame}")
                         point_removed = True
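
The replacement check is a plain Euclidean distance in video coordinates, which avoids recomputing the whole display transform for every point. A small sketch of the loop, assuming video_x and video_y are the click position already converted to video coordinates (as the new code expects):

def remove_nearby_point(points, video_x, video_y, radius=50):
    # points: list of (px, py) in video coordinates for the current frame.
    # Removes and returns the index of the first point within radius
    # pixels of the click, or None if nothing is close enough.
    for i, (px, py) in enumerate(points):
        distance = ((video_x - px) ** 2 + (video_y - py) ** 2) ** 0.5
        if distance < radius:
            del points[i]
            return i
    return None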
@@ -2388,42 +2367,44 @@ class VideoEditor:
         # Normalize rotation to 0-270 degrees
         rotation = self.rotation_angle % 360
-        if hjkl_key == 'h':  # Left
+        # The mapping should be: when video is rotated, the visual directions change
+        # but HJKL should still correspond to the same visual directions
+        if hjkl_key == 'h':  # Visual Left
             if rotation == 0:
                 return 'left'
             elif rotation == 90:
-                return 'down'
+                return 'up'  # Visual left becomes up in rotated video
             elif rotation == 180:
                 return 'right'
             elif rotation == 270:
-                return 'up'
-        elif hjkl_key == 'j':  # Down
+                return 'down'  # Visual left becomes down in rotated video
+        elif hjkl_key == 'j':  # Visual Down
             if rotation == 0:
                 return 'down'
             elif rotation == 90:
-                return 'left'
+                return 'left'  # Visual down becomes left in rotated video
             elif rotation == 180:
                 return 'up'
             elif rotation == 270:
-                return 'right'
-        elif hjkl_key == 'k':  # Up
+                return 'right'  # Visual down becomes right in rotated video
+        elif hjkl_key == 'k':  # Visual Up
             if rotation == 0:
                 return 'up'
             elif rotation == 90:
-                return 'right'
+                return 'right'  # Visual up becomes right in rotated video
             elif rotation == 180:
                 return 'down'
             elif rotation == 270:
-                return 'left'
-        elif hjkl_key == 'l':  # Right
+                return 'left'  # Visual up becomes left in rotated video
+        elif hjkl_key == 'l':  # Visual Right
             if rotation == 0:
                 return 'right'
             elif rotation == 90:
-                return 'up'
+                return 'down'  # Visual right becomes down in rotated video
             elif rotation == 180:
                 return 'left'
             elif rotation == 270:
-                return 'down'
+                return 'up'  # Visual right becomes up in rotated video
         return hjkl_key  # Fallback to original if not recognized
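
The corrected branches all follow one pattern: every 90 degrees of rotation advances the returned direction one step along the cycle left -> up -> right -> down. An equivalent table-driven restatement of the diff's mapping (a sketch, not code from the commit):

_CYCLE = ['left', 'up', 'right', 'down']
_KEY_TO_DIRECTION = {'h': 'left', 'j': 'down', 'k': 'up', 'l': 'right'}

def map_hjkl(hjkl_key, rotation_angle):
    direction = _KEY_TO_DIRECTION.get(hjkl_key)
    if direction is None:
        return hjkl_key  # Fallback to original if not recognized
    # Each 90 degrees of rotation shifts one step along the cycle
    steps = (rotation_angle % 360) // 90
    return _CYCLE[(_CYCLE.index(direction) + steps) % 4]

# Spot checks against the diff:
assert map_hjkl('h', 90) == 'up'
assert map_hjkl('j', 180) == 'up'
assert map_hjkl('l', 270) == 'up'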