Implement coordinate transformation methods in VideoEditor
This commit introduces two new methods, transform_video_to_screen and transform_screen_to_video, which convert between video and screen coordinates while accounting for crop, rotation, and zoom transformations. The existing transform_point_for_display method is updated to delegate to this new transformation logic, improving the accuracy of point display during video editing.
This commit is contained in:
188
croppa/main.py
188
croppa/main.py
@@ -1991,7 +1991,11 @@ class VideoEditor:
|
||||
if event == cv2.EVENT_RBUTTONDOWN:
|
||||
if not self.is_image_mode: # Only for videos
|
||||
# Convert screen coordinates to video coordinates
|
||||
video_x, video_y = self.screen_to_video_coords(x, y)
|
||||
video_coords = self.transform_screen_to_video(x, y)
|
||||
if video_coords:
|
||||
video_x, video_y = video_coords
|
||||
else:
|
||||
return
|
||||
|
||||
# Check if there's a nearby point to remove
|
||||
current_points = self.motion_tracker.get_tracking_points_for_frame(self.current_frame)
|
||||
@@ -2247,6 +2251,184 @@ class VideoEditor:
|
||||
self.feedback_message = message
|
||||
self.feedback_message_time = time.time()
|
||||
|
||||
def transform_video_to_screen(self, video_x: int, video_y: int) -> Optional[Tuple[int, int]]:
    """Project a point from original-video coordinates to on-screen pixels.

    Mirrors the display pipeline step by step: crop (optionally re-centered
    by motion tracking), rotation, zoom, pan offset, then fit-to-window
    scaling and centering.

    Returns:
        (screen_x, screen_y) as ints, or None when no frame is loaded.
    """
    if self.current_display_frame is None:
        return None

    frame_h, frame_w = self.current_display_frame.shape[:2]
    # The timeline strip consumes vertical space in video mode only.
    avail_h = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)

    # -- Crop: shift into crop-local coordinates ----------------------
    px, py = video_x, video_y
    if self.crop_rect:
        cx, cy, cw, ch = self.crop_rect
        if self.motion_tracker.tracking_enabled:
            pos = self.motion_tracker.get_interpolated_position(self.current_frame)
            if pos:
                # Crop is re-centered on the tracked point, matching the
                # logic in apply_crop_zoom_and_rotation.
                tx, ty = pos
                cx = int(tx - cw // 2)
                cy = int(ty - ch // 2)
        px -= cx
        py -= cy

    # -- Rotation (multiples of 90 degrees) ---------------------------
    if self.rotation_angle != 0:
        if self.crop_rect:
            rw, rh = int(self.crop_rect[2]), int(self.crop_rect[3])
        else:
            rw, rh = frame_w, frame_h
        # Forward point maps for each supported angle; any other
        # non-zero angle falls through unchanged.
        rotate = {
            90: lambda x, y: (y, rw - x),
            180: lambda x, y: (rw - x, rh - y),
            270: lambda x, y: (rh - y, x),
        }.get(self.rotation_angle, lambda x, y: (x, y))
        px, py = rotate(px, py)

    # -- Zoom ---------------------------------------------------------
    if self.zoom_factor != 1.0:
        px *= self.zoom_factor
        py *= self.zoom_factor

    # -- Pan offset (active when zoomed) ------------------------------
    px += self.display_offset[0]
    py += self.display_offset[1]

    # -- Fit to window and center -------------------------------------
    if self.crop_rect:
        cw, ch = int(self.crop_rect[2]), int(self.crop_rect[3])
    else:
        cw, ch = frame_w, frame_h
    if self.zoom_factor != 1.0:
        cw = int(cw * self.zoom_factor)
        ch = int(ch * self.zoom_factor)
    # NOTE(review): dimensions are not swapped for 90/270 rotation here;
    # this matches transform_screen_to_video, so round-trips stay
    # consistent — confirm against the actual renderer.
    scale = min(self.window_width / cw, avail_h / ch)
    start_x = (self.window_width - cw * scale) // 2
    start_y = (avail_h - ch * scale) // 2

    return (int(start_x + px * scale), int(start_y + py * scale))
|
||||
|
||||
def transform_screen_to_video(self, screen_x: int, screen_y: int) -> Optional[Tuple[int, int]]:
    """Map an on-screen pixel back to original-video coordinates.

    Exact inverse of transform_video_to_screen: undoes, in reverse order,
    the fit-to-window scaling/centering, pan offset, zoom, rotation, and
    crop (including any motion-tracking re-centering of the crop).

    Returns:
        (video_x, video_y) as ints, or None when no frame is loaded.
    """
    if self.current_display_frame is None:
        return None

    frame_h, frame_w = self.current_display_frame.shape[:2]
    # The timeline strip consumes vertical space in video mode only.
    avail_h = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)

    # -- Undo fit-to-window scaling and centering ---------------------
    if self.crop_rect:
        cw, ch = int(self.crop_rect[2]), int(self.crop_rect[3])
    else:
        cw, ch = frame_w, frame_h
    if self.zoom_factor != 1.0:
        cw = int(cw * self.zoom_factor)
        ch = int(ch * self.zoom_factor)
    scale = min(self.window_width / cw, avail_h / ch)
    start_x = (self.window_width - cw * scale) // 2
    start_y = (avail_h - ch * scale) // 2
    px = (screen_x - start_x) / scale
    py = (screen_y - start_y) / scale

    # -- Undo pan offset ----------------------------------------------
    px -= self.display_offset[0]
    py -= self.display_offset[1]

    # -- Undo zoom ----------------------------------------------------
    if self.zoom_factor != 1.0:
        px /= self.zoom_factor
        py /= self.zoom_factor

    # -- Undo rotation (multiples of 90 degrees) ----------------------
    if self.rotation_angle != 0:
        if self.crop_rect:
            rw, rh = int(self.crop_rect[2]), int(self.crop_rect[3])
        else:
            rw, rh = frame_w, frame_h
        # Inverse point maps of transform_video_to_screen's rotation
        # step; any other non-zero angle falls through unchanged.
        unrotate = {
            90: lambda x, y: (rw - y, x),
            180: lambda x, y: (rw - x, rh - y),
            270: lambda x, y: (y, rh - x),
        }.get(self.rotation_angle, lambda x, y: (x, y))
        px, py = unrotate(px, py)

    # -- Undo crop: re-add the crop origin ----------------------------
    vx, vy = px, py
    if self.crop_rect:
        cx, cy, cw, ch = self.crop_rect
        if self.motion_tracker.tracking_enabled:
            pos = self.motion_tracker.get_interpolated_position(self.current_frame)
            if pos:
                # Crop is re-centered on the tracked point, matching the
                # logic in apply_crop_zoom_and_rotation.
                tx, ty = pos
                cx = int(tx - cw // 2)
                cy = int(ty - ch // 2)
        vx += cx
        vy += cy

    return (int(vx), int(vy))
|
||||
|
||||
def transform_point_for_display(self, video_x: int, video_y: int) -> Optional[Tuple[int, int]]:
|
||||
"""Transform a point through the same pipeline as the frame (crop, zoom, rotation, display)"""
|
||||
if self.current_display_frame is None:
|
||||
@@ -2330,6 +2512,10 @@ class VideoEditor:
|
||||
|
||||
return (int(screen_x), int(screen_y))
|
||||
|
||||
def transform_point_for_display(self, video_x: int, video_y: int) -> Optional[Tuple[int, int]]:
    """Backwards-compatible wrapper: project a video-space point to screen
    coordinates via transform_video_to_screen (crop, zoom, rotation, and
    display transforms); returns None when no frame is loaded."""
    return self.transform_video_to_screen(video_x, video_y)
|
||||
|
||||
def draw_tracking_points(self, canvas, start_x, start_y, scale):
|
||||
"""Draw motion tracking points on the canvas"""
|
||||
if not self.motion_tracker.has_tracking_points():
|
||||
|
Reference in New Issue
Block a user