Enhance VideoEditor with tracking-adjusted cropping and improved transformation logic

This commit introduces a new method, _get_tracking_adjusted_crop, to calculate crop rectangles that center on tracked points when motion tracking is enabled. The transform_video_to_screen and transform_screen_to_video methods are refactored to utilize this new method, streamlining the handling of crop, rotation, and zoom transformations. These changes improve the accuracy and maintainability of the coordinate transformations used during video rendering and editing.
Date: 2025-09-16 17:49:50 +02:00
Parent commit: cacaa5f2ac
Commit: e97ce026da

View File

@@ -1764,6 +1764,28 @@ class VideoEditor:
self.feedback_message = message
self.feedback_message_time = time.time()
def _get_tracking_adjusted_crop(self) -> Tuple[int, int, int, int]:
    """Return the effective crop rect (x, y, w, h), adjusted for motion tracking.

    Starts from ``self.crop_rect`` (or the full frame when no crop is set).
    When motion tracking is enabled and an interpolated position exists for
    the current frame, the crop is re-centered on the tracked point. The
    result is clamped to the frame bounds: the origin is kept inside the
    frame and the width/height are shrunk so the rect never exceeds it.

    Returns:
        Tuple[int, int, int, int]: (x, y, width, height) in frame pixels.
    """
    if self.crop_rect:
        base_x, base_y, w, h = map(int, self.crop_rect)
    else:
        # No crop configured: use the whole frame.
        base_x, base_y, w, h = 0, 0, int(self.frame_width), int(self.frame_height)
    x, y = base_x, base_y
    if self.motion_tracker.tracking_enabled:
        pos = self.motion_tracker.get_interpolated_position(self.current_frame)
        if pos:
            # Center the crop on the tracked point.
            tx, ty = pos
            x = int(tx - w // 2)
            y = int(ty - h // 2)
    # Clamp origin into the frame, then shrink the rect to fit.
    x = max(0, min(x, int(self.frame_width) - 1))
    y = max(0, min(y, int(self.frame_height) - 1))
    w = min(int(w), int(self.frame_width) - x)
    h = min(int(h), int(self.frame_height) - y)
    return (x, y, w, h)
def transform_video_to_screen(
    self, video_x: int, video_y: int
) -> Optional[Tuple[int, int]]:
    """Map a point in original video coordinates to window (screen) coordinates.

    Mirrors the render pipeline in order: tracking-adjusted crop ->
    rotation -> zoom -> pan (display offset) -> fit-to-window centering.

    Args:
        video_x: X coordinate in the source video frame.
        video_y: Y coordinate in the source video frame.

    Returns:
        (screen_x, screen_y) rounded to ints, or None when no frame is loaded.
    """
    if self.current_display_frame is None:
        return None
    angle = int(self.rotation_angle) % 360
    zoom = float(self.zoom_factor)
    crop_x, crop_y, crop_w, crop_h = self._get_tracking_adjusted_crop()
    # Step 1: translate into crop-local coordinates (crop already includes
    # the motion-tracking offset).
    display_x = float(video_x - crop_x)
    display_y = float(video_y - crop_y)
    # Step 2: rotate within the crop space (clockwise angles).
    if angle == 90:
        # 90 deg clockwise: (x, y) -> (y, crop_w - x)
        new_x = display_y
        new_y = float(crop_w) - display_x
    elif angle == 180:
        # 180 deg: (x, y) -> (crop_w - x, crop_h - y)
        new_x = float(crop_w) - display_x
        new_y = float(crop_h) - display_y
    elif angle == 270:
        # 270 deg clockwise: (x, y) -> (crop_h - y, x)
        new_x = float(crop_h) - display_y
        new_y = display_x
    else:
        new_x, new_y = display_x, display_y
    display_x, display_y = new_x, new_y
    # Step 3: apply zoom, then pan by the display offset.
    display_x *= zoom
    display_y *= zoom
    display_x += float(self.display_offset[0])
    display_y += float(self.display_offset[1])
    # Step 4: scale the rotated, zoomed content to fit the window and
    # center it. The timeline strip is excluded in video mode.
    available_height = self.window_height - (
        0 if self.is_image_mode else self.TIMELINE_HEIGHT
    )
    # Rotation by 90/270 swaps the content's width and height.
    rot_w = float(crop_h) if angle in (90, 270) else float(crop_w)
    rot_h = float(crop_w) if angle in (90, 270) else float(crop_h)
    rot_w *= zoom
    rot_h *= zoom
    # max(1.0, ...) guards against division by zero on degenerate crops.
    scale = min(
        self.window_width / max(1.0, rot_w),
        available_height / max(1.0, rot_h),
    )
    start_x = (self.window_width - rot_w * scale) / 2.0
    start_y = (available_height - rot_h * scale) / 2.0
    screen_x = start_x + display_x * scale
    screen_y = start_y + display_y * scale
    return (int(round(screen_x)), int(round(screen_y)))
def transform_screen_to_video(
    self, screen_x: int, screen_y: int
) -> Optional[Tuple[int, int]]:
    """Map a point in window (screen) coordinates back to video coordinates.

    Exact inverse of transform_video_to_screen: reverse fit-to-window ->
    reverse pan -> reverse zoom -> reverse rotation -> add crop origin
    (the tracking-adjusted crop already includes the tracking offset).

    Args:
        screen_x: X coordinate in the window.
        screen_y: Y coordinate in the window.

    Returns:
        (video_x, video_y) rounded to ints, or None when no frame is loaded.
    """
    if self.current_display_frame is None:
        return None
    angle = int(self.rotation_angle) % 360
    zoom = float(self.zoom_factor)
    crop_x, crop_y, crop_w, crop_h = self._get_tracking_adjusted_crop()
    available_height = self.window_height - (
        0 if self.is_image_mode else self.TIMELINE_HEIGHT
    )
    # Recompute the same fit-to-window geometry as the forward transform;
    # rotation by 90/270 swaps the content's width and height.
    rot_w = float(crop_h) if angle in (90, 270) else float(crop_w)
    rot_h = float(crop_w) if angle in (90, 270) else float(crop_h)
    rot_w *= zoom
    rot_h *= zoom
    # max(1.0, ...) guards against division by zero on degenerate crops.
    scale = min(
        self.window_width / max(1.0, rot_w),
        available_height / max(1.0, rot_h),
    )
    start_x = (self.window_width - rot_w * scale) / 2.0
    start_y = (available_height - rot_h * scale) / 2.0
    # Reverse fit-to-window scaling and centering.
    display_x = (float(screen_x) - start_x) / scale
    display_y = (float(screen_y) - start_y) / scale
    # Reverse pan (display offset).
    display_x -= float(self.display_offset[0])
    display_y -= float(self.display_offset[1])
    # Reverse zoom.
    if zoom != 1.0:
        display_x /= zoom
        display_y /= zoom
    # Reverse rotation (inverse of the clockwise mappings).
    if angle == 90:
        # Forward was (x, y) -> (y, crop_w - x).
        new_x = float(crop_w) - display_y
        new_y = display_x
    elif angle == 180:
        # Forward was (x, y) -> (crop_w - x, crop_h - y); self-inverse.
        new_x = float(crop_w) - display_x
        new_y = float(crop_h) - display_y
    elif angle == 270:
        # Forward was (x, y) -> (crop_h - y, x).
        new_x = display_y
        new_y = float(crop_h) - display_x
    else:
        new_x, new_y = display_x, display_y
    # Reverse crop: translate back to full-frame coordinates.
    video_x = new_x + float(crop_x)
    video_y = new_y + float(crop_y)
    return (int(round(video_x)), int(round(video_y)))
def transform_point_for_display(
self, video_x: int, video_y: int