Refactor motion tracking and display transformation in VideoEditor
This commit enhances the motion tracking logic by refining how the crop center is adjusted based on tracked points. It introduces a new method, transform_point_for_display, which applies cropping, rotation, and zoom transformations to video coordinates for accurate screen positioning. It also removes the redundant motion-tracking offset calculations, streamlining the overall crop and display handling and improving the user experience during video editing.
This commit is contained in:
115
croppa/main.py
115
croppa/main.py
@@ -1246,11 +1246,16 @@ class VideoEditor:
|
|||||||
x, y, w, h = self.crop_rect
|
x, y, w, h = self.crop_rect
|
||||||
x, y, w, h = int(x), int(y), int(w), int(h)
|
x, y, w, h = int(x), int(y), int(w), int(h)
|
||||||
|
|
||||||
# Apply motion tracking offset to move crop center to tracked point
|
# Apply motion tracking to move crop center to tracked point
|
||||||
if self.motion_tracker.tracking_enabled:
|
if self.motion_tracker.tracking_enabled:
|
||||||
tracking_offset_x, tracking_offset_y = self.motion_tracker.get_tracking_offset(self.current_frame)
|
current_pos = self.motion_tracker.get_interpolated_position(self.current_frame)
|
||||||
x += int(tracking_offset_x)
|
if current_pos:
|
||||||
y += int(tracking_offset_y)
|
# Move crop center to tracked point
|
||||||
|
tracked_x, tracked_y = current_pos
|
||||||
|
# Calculate new crop position to center on tracked point
|
||||||
|
new_x = int(tracked_x - w // 2)
|
||||||
|
new_y = int(tracked_y - h // 2)
|
||||||
|
x, y = new_x, new_y
|
||||||
|
|
||||||
# Ensure crop is within frame bounds
|
# Ensure crop is within frame bounds
|
||||||
x = max(0, min(x, processed_frame.shape[1] - 1))
|
x = max(0, min(x, processed_frame.shape[1] - 1))
|
||||||
@@ -2228,13 +2233,6 @@ class VideoEditor:
|
|||||||
# Add the crop offset to get back to original frame coordinates
|
# Add the crop offset to get back to original frame coordinates
|
||||||
if self.crop_rect:
|
if self.crop_rect:
|
||||||
crop_x, crop_y, crop_w, crop_h = self.crop_rect
|
crop_x, crop_y, crop_w, crop_h = self.crop_rect
|
||||||
|
|
||||||
# Account for motion tracking offset that was applied to the crop
|
|
||||||
if self.motion_tracker.tracking_enabled:
|
|
||||||
tracking_offset_x, tracking_offset_y = self.motion_tracker.get_tracking_offset(self.current_frame)
|
|
||||||
crop_x -= int(tracking_offset_x)
|
|
||||||
crop_y -= int(tracking_offset_y)
|
|
||||||
|
|
||||||
original_x += crop_x
|
original_x += crop_x
|
||||||
original_y += crop_y
|
original_y += crop_y
|
||||||
|
|
||||||
@@ -2249,6 +2247,89 @@ class VideoEditor:
|
|||||||
self.feedback_message = message
|
self.feedback_message = message
|
||||||
self.feedback_message_time = time.time()
|
self.feedback_message_time = time.time()
|
||||||
|
|
||||||
|
def transform_point_for_display(self, video_x: int, video_y: int) -> Optional[Tuple[int, int]]:
|
||||||
|
"""Transform a point through the same pipeline as the frame (crop, zoom, rotation, display)"""
|
||||||
|
if self.current_display_frame is None:
|
||||||
|
return None
|
||||||
|
|
||||||
|
# Get the original frame dimensions
|
||||||
|
original_height, original_width = self.current_display_frame.shape[:2]
|
||||||
|
available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
|
||||||
|
|
||||||
|
# Step 1: Apply crop (subtract crop offset)
|
||||||
|
display_x = video_x
|
||||||
|
display_y = video_y
|
||||||
|
if self.crop_rect:
|
||||||
|
crop_x, crop_y, crop_w, crop_h = self.crop_rect
|
||||||
|
display_x -= crop_x
|
||||||
|
display_y -= crop_y
|
||||||
|
|
||||||
|
# Step 2: Apply rotation
|
||||||
|
if self.rotation_angle != 0:
|
||||||
|
if self.crop_rect:
|
||||||
|
crop_w, crop_h = int(self.crop_rect[2]), int(self.crop_rect[3])
|
||||||
|
else:
|
||||||
|
crop_w, crop_h = original_width, original_height
|
||||||
|
|
||||||
|
if self.rotation_angle == 90:
|
||||||
|
# 90° clockwise rotation: (x,y) -> (y, crop_w-x)
|
||||||
|
new_x = display_y
|
||||||
|
new_y = crop_w - display_x
|
||||||
|
elif self.rotation_angle == 180:
|
||||||
|
# 180° rotation: (x,y) -> (crop_w-x, crop_h-y)
|
||||||
|
new_x = crop_w - display_x
|
||||||
|
new_y = crop_h - display_y
|
||||||
|
elif self.rotation_angle == 270:
|
||||||
|
# 270° clockwise rotation: (x,y) -> (crop_h-y, x)
|
||||||
|
new_x = crop_h - display_y
|
||||||
|
new_y = display_x
|
||||||
|
else:
|
||||||
|
new_x, new_y = display_x, display_y
|
||||||
|
|
||||||
|
display_x, display_y = new_x, new_y
|
||||||
|
|
||||||
|
# Step 3: Apply zoom
|
||||||
|
if self.zoom_factor != 1.0:
|
||||||
|
display_x *= self.zoom_factor
|
||||||
|
display_y *= self.zoom_factor
|
||||||
|
|
||||||
|
# Step 4: Calculate final display dimensions and scale
|
||||||
|
if self.rotation_angle in [90, 270]:
|
||||||
|
# Width and height are swapped after rotation
|
||||||
|
display_width = int(original_height * self.zoom_factor)
|
||||||
|
display_height = int(original_width * self.zoom_factor)
|
||||||
|
else:
|
||||||
|
display_width = int(original_width * self.zoom_factor)
|
||||||
|
display_height = int(original_height * self.zoom_factor)
|
||||||
|
|
||||||
|
# Apply crop dimensions if there's a crop
|
||||||
|
if self.crop_rect:
|
||||||
|
if self.rotation_angle in [90, 270]:
|
||||||
|
display_width = int(self.crop_rect[3] * self.zoom_factor) # crop height
|
||||||
|
display_height = int(self.crop_rect[2] * self.zoom_factor) # crop width
|
||||||
|
else:
|
||||||
|
display_width = int(self.crop_rect[2] * self.zoom_factor) # crop width
|
||||||
|
display_height = int(self.crop_rect[3] * self.zoom_factor) # crop height
|
||||||
|
|
||||||
|
# Calculate scale for the display frame
|
||||||
|
scale = min(self.window_width / display_width, available_height / display_height)
|
||||||
|
if scale < 1.0:
|
||||||
|
final_display_width = int(display_width * scale)
|
||||||
|
final_display_height = int(display_height * scale)
|
||||||
|
else:
|
||||||
|
final_display_width = display_width
|
||||||
|
final_display_height = display_height
|
||||||
|
scale = 1.0
|
||||||
|
|
||||||
|
# Calculate final screen position
|
||||||
|
start_x = (self.window_width - final_display_width) // 2
|
||||||
|
start_y = (available_height - final_display_height) // 2
|
||||||
|
|
||||||
|
screen_x = start_x + display_x * scale
|
||||||
|
screen_y = start_y + display_y * scale
|
||||||
|
|
||||||
|
return (int(screen_x), int(screen_y))
|
||||||
|
|
||||||
def draw_tracking_points(self, canvas, start_x, start_y, scale):
|
def draw_tracking_points(self, canvas, start_x, start_y, scale):
|
||||||
"""Draw motion tracking points on the canvas"""
|
"""Draw motion tracking points on the canvas"""
|
||||||
if not self.motion_tracker.has_tracking_points():
|
if not self.motion_tracker.has_tracking_points():
|
||||||
@@ -2260,9 +2341,10 @@ class VideoEditor:
|
|||||||
# Draw current frame points
|
# Draw current frame points
|
||||||
for point in current_points:
|
for point in current_points:
|
||||||
video_x, video_y = point
|
video_x, video_y = point
|
||||||
# Convert video coordinates to screen coordinates
|
# Transform the point through the same pipeline as the frame
|
||||||
screen_x, screen_y = self.video_to_screen_coords(video_x, video_y, start_x, start_y, scale)
|
transformed_point = self.transform_point_for_display(video_x, video_y)
|
||||||
if screen_x is not None and screen_y is not None:
|
if transformed_point:
|
||||||
|
screen_x, screen_y = transformed_point
|
||||||
# Draw a filled circle for current frame points
|
# Draw a filled circle for current frame points
|
||||||
cv2.circle(canvas, (int(screen_x), int(screen_y)), 8, (0, 255, 0), -1)
|
cv2.circle(canvas, (int(screen_x), int(screen_y)), 8, (0, 255, 0), -1)
|
||||||
cv2.circle(canvas, (int(screen_x), int(screen_y)), 10, (255, 255, 255), 2)
|
cv2.circle(canvas, (int(screen_x), int(screen_y)), 10, (255, 255, 255), 2)
|
||||||
@@ -2272,8 +2354,9 @@ class VideoEditor:
|
|||||||
interp_pos = self.motion_tracker.get_interpolated_position(self.current_frame)
|
interp_pos = self.motion_tracker.get_interpolated_position(self.current_frame)
|
||||||
if interp_pos:
|
if interp_pos:
|
||||||
video_x, video_y = interp_pos
|
video_x, video_y = interp_pos
|
||||||
screen_x, screen_y = self.video_to_screen_coords(video_x, video_y, start_x, start_y, scale)
|
transformed_point = self.transform_point_for_display(video_x, video_y)
|
||||||
if screen_x is not None and screen_y is not None:
|
if transformed_point:
|
||||||
|
screen_x, screen_y = transformed_point
|
||||||
# Draw a cross for interpolated position
|
# Draw a cross for interpolated position
|
||||||
size = 12
|
size = 12
|
||||||
cv2.line(canvas,
|
cv2.line(canvas,
|
||||||
|
Reference in New Issue
Block a user