Compare commits
3 Commits
c1b6567e42
...
cf44597268
SHA1 | Author | Date
--- | --- | ---
cf44597268 | |
fd35c6ac13 | |
d181644b50 |
217
croppa/main.py
217
croppa/main.py
@@ -1991,7 +1991,11 @@ class VideoEditor:
|
||||
if event == cv2.EVENT_RBUTTONDOWN:
|
||||
if not self.is_image_mode: # Only for videos
|
||||
# Convert screen coordinates to video coordinates
|
||||
video_x, video_y = self.screen_to_video_coords(x, y)
|
||||
video_coords = self.transform_screen_to_video(x, y)
|
||||
if video_coords:
|
||||
video_x, video_y = video_coords
|
||||
else:
|
||||
return
|
||||
|
||||
# Check if there's a nearby point to remove
|
||||
current_points = self.motion_tracker.get_tracking_points_for_frame(self.current_frame)
|
||||
@@ -2247,6 +2251,184 @@ class VideoEditor:
|
||||
self.feedback_message = message
|
||||
self.feedback_message_time = time.time()
|
||||
|
||||
def transform_video_to_screen(self, video_x: int, video_y: int) -> Optional[Tuple[int, int]]:
    """Transform video coordinates to screen coordinates through all transformations.

    Applies, in order: crop offset (including the motion-tracking re-centering of
    the crop), rotation (90/180/270 degrees), zoom, display panning offset, and the
    final scale-to-fit-window with centering. This is the exact forward counterpart
    of transform_screen_to_video, which reverses these steps in the opposite order.

    Args:
        video_x: X coordinate in the original (un-transformed) video frame.
        video_y: Y coordinate in the original (un-transformed) video frame.

    Returns:
        (screen_x, screen_y) as ints, or None when no frame is loaded.
    """
    if self.current_display_frame is None:
        return None

    # Get the original frame dimensions; the timeline strip is only reserved
    # for videos, so images get the full window height.
    original_height, original_width = self.current_display_frame.shape[:2]
    available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)

    # Step 1: Apply crop (subtract crop offset, including motion tracking offset)
    display_x = video_x
    display_y = video_y
    if self.crop_rect:
        crop_x, crop_y, crop_w, crop_h = self.crop_rect

        # Apply motion tracking offset if enabled
        if self.motion_tracker.tracking_enabled:
            current_pos = self.motion_tracker.get_interpolated_position(self.current_frame)
            if current_pos:
                # Move crop center to tracked point (same logic as in apply_crop_zoom_and_rotation)
                tracked_x, tracked_y = current_pos
                new_x = int(tracked_x - crop_w // 2)
                new_y = int(tracked_y - crop_h // 2)
                crop_x, crop_y = new_x, new_y

        display_x -= crop_x
        display_y -= crop_y

    # Step 2: Apply rotation (about the cropped frame's coordinate system)
    if self.rotation_angle != 0:
        if self.crop_rect:
            crop_w, crop_h = int(self.crop_rect[2]), int(self.crop_rect[3])
        else:
            crop_w, crop_h = original_width, original_height

        if self.rotation_angle == 90:
            # 90° clockwise rotation: (x,y) -> (y, crop_w-x)
            new_x = display_y
            new_y = crop_w - display_x
        elif self.rotation_angle == 180:
            # 180° rotation: (x,y) -> (crop_w-x, crop_h-y)
            new_x = crop_w - display_x
            new_y = crop_h - display_y
        elif self.rotation_angle == 270:
            # 270° clockwise rotation: (x,y) -> (crop_h-y, x)
            new_x = crop_h - display_y
            new_y = display_x
        else:
            # Any other angle is treated as no rotation.
            new_x, new_y = display_x, display_y

        display_x, display_y = new_x, new_y

    # Step 3: Apply zoom
    if self.zoom_factor != 1.0:
        display_x *= self.zoom_factor
        display_y *= self.zoom_factor

    # Step 4: Apply display offset (panning when zoomed)
    display_x += self.display_offset[0]
    display_y += self.display_offset[1]

    # Step 5: Scale to fit window
    # NOTE(review): for 90°/270° rotations the displayed frame's width/height are
    # swapped, but this step sizes with the UNswapped crop_w/crop_h — confirm this
    # matches what apply_crop_zoom_and_rotation actually renders.
    if self.crop_rect:
        crop_w, crop_h = int(self.crop_rect[2]), int(self.crop_rect[3])
    else:
        crop_w, crop_h = original_width, original_height

    # Apply zoom factor to dimensions
    if self.zoom_factor != 1.0:
        crop_w = int(crop_w * self.zoom_factor)
        crop_h = int(crop_h * self.zoom_factor)

    # Calculate scale to fit window (letterbox: the smaller axis scale wins)
    scale_x = self.window_width / crop_w
    scale_y = available_height / crop_h
    scale = min(scale_x, scale_y)

    # Center the scaled content
    scaled_w = crop_w * scale
    scaled_h = crop_h * scale
    start_x = (self.window_width - scaled_w) // 2
    start_y = (available_height - scaled_h) // 2

    # Apply final scaling and centering
    screen_x = start_x + display_x * scale
    screen_y = start_y + display_y * scale

    return (int(screen_x), int(screen_y))
||||
def transform_screen_to_video(self, screen_x: int, screen_y: int) -> Optional[Tuple[int, int]]:
    """Transform screen coordinates to video coordinates through all transformations (reverse of transform_video_to_screen).

    Undoes the display pipeline in strictly reverse order: fit/center scaling,
    display panning offset, zoom, rotation, then the crop offset (including the
    motion-tracking re-centering of the crop). Each rotation branch is the exact
    algebraic inverse of the corresponding branch in transform_video_to_screen.

    Args:
        screen_x: X coordinate in window (screen) space.
        screen_y: Y coordinate in window (screen) space.

    Returns:
        (video_x, video_y) as ints in original-frame space, or None when no
        frame is loaded.
    """
    if self.current_display_frame is None:
        return None

    # Get the original frame dimensions; the timeline strip is only reserved
    # for videos, so images get the full window height.
    original_height, original_width = self.current_display_frame.shape[:2]
    available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)

    # Step 1: Reverse scaling and centering
    # (must recompute the same scale/start values the forward transform used)
    if self.crop_rect:
        crop_w, crop_h = int(self.crop_rect[2]), int(self.crop_rect[3])
    else:
        crop_w, crop_h = original_width, original_height

    # Apply zoom factor to dimensions
    if self.zoom_factor != 1.0:
        crop_w = int(crop_w * self.zoom_factor)
        crop_h = int(crop_h * self.zoom_factor)

    # Calculate scale to fit window
    scale_x = self.window_width / crop_w
    scale_y = available_height / crop_h
    scale = min(scale_x, scale_y)

    # Center the scaled content
    scaled_w = crop_w * scale
    scaled_h = crop_h * scale
    start_x = (self.window_width - scaled_w) // 2
    start_y = (available_height - scaled_h) // 2

    # Reverse scaling and centering
    display_x = (screen_x - start_x) / scale
    display_y = (screen_y - start_y) / scale

    # Step 2: Reverse display offset (panning when zoomed)
    display_x -= self.display_offset[0]
    display_y -= self.display_offset[1]

    # Step 3: Reverse zoom
    if self.zoom_factor != 1.0:
        display_x /= self.zoom_factor
        display_y /= self.zoom_factor

    # Step 4: Reverse rotation
    if self.rotation_angle != 0:
        if self.crop_rect:
            crop_w, crop_h = int(self.crop_rect[2]), int(self.crop_rect[3])
        else:
            crop_w, crop_h = original_width, original_height

        if self.rotation_angle == 90:
            # Reverse 90° clockwise rotation: (y, crop_w-x) -> (x,y)
            new_x = crop_w - display_y
            new_y = display_x
        elif self.rotation_angle == 180:
            # Reverse 180° rotation: (crop_w-x, crop_h-y) -> (x,y)
            new_x = crop_w - display_x
            new_y = crop_h - display_y
        elif self.rotation_angle == 270:
            # Reverse 270° clockwise rotation: (crop_h-y, x) -> (x,y)
            new_x = display_y
            new_y = crop_h - display_x
        else:
            # Any other angle is treated as no rotation.
            new_x, new_y = display_x, display_y

        display_x, display_y = new_x, new_y

    # Step 5: Reverse crop (add crop offset, including motion tracking offset)
    video_x = display_x
    video_y = display_y
    if self.crop_rect:
        crop_x, crop_y, crop_w, crop_h = self.crop_rect

        # Apply motion tracking offset if enabled
        if self.motion_tracker.tracking_enabled:
            current_pos = self.motion_tracker.get_interpolated_position(self.current_frame)
            if current_pos:
                # Move crop center to tracked point (same logic as in apply_crop_zoom_and_rotation)
                tracked_x, tracked_y = current_pos
                new_x = int(tracked_x - crop_w // 2)
                new_y = int(tracked_y - crop_h // 2)
                crop_x, crop_y = new_x, new_y

        video_x += crop_x
        video_y += crop_y

    return (int(video_x), int(video_y))
||||
def transform_point_for_display(self, video_x: int, video_y: int) -> Optional[Tuple[int, int]]:
|
||||
"""Transform a point through the same pipeline as the frame (crop, zoom, rotation, display)"""
|
||||
if self.current_display_frame is None:
|
||||
@@ -2262,11 +2444,22 @@ class VideoEditor:
|
||||
original_height, original_width = self.current_display_frame.shape[:2]
|
||||
available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
|
||||
|
||||
# Step 1: Apply crop (subtract crop offset)
|
||||
# Step 1: Apply crop (subtract crop offset, including motion tracking offset)
|
||||
display_x = video_x
|
||||
display_y = video_y
|
||||
if self.crop_rect:
|
||||
crop_x, crop_y, crop_w, crop_h = self.crop_rect
|
||||
|
||||
# Apply motion tracking offset if enabled
|
||||
if self.motion_tracker.tracking_enabled:
|
||||
current_pos = self.motion_tracker.get_interpolated_position(self.current_frame)
|
||||
if current_pos:
|
||||
# Move crop center to tracked point (same logic as in apply_crop_zoom_and_rotation)
|
||||
tracked_x, tracked_y = current_pos
|
||||
new_x = int(tracked_x - crop_w // 2)
|
||||
new_y = int(tracked_y - crop_h // 2)
|
||||
crop_x, crop_y = new_x, new_y
|
||||
|
||||
display_x -= crop_x
|
||||
display_y -= crop_y
|
||||
|
||||
@@ -2319,6 +2512,10 @@ class VideoEditor:
|
||||
|
||||
return (int(screen_x), int(screen_y))
|
||||
|
||||
def transform_point_for_display(self, video_x: int, video_y: int) -> Optional[Tuple[int, int]]:
    """Transform a point through the same pipeline as the frame (crop, zoom, rotation, display).

    Backward-compatible alias: the full pipeline lives in
    transform_video_to_screen, which this simply delegates to.
    """
    screen_point = self.transform_video_to_screen(video_x, video_y)
    return screen_point
||||
def draw_tracking_points(self, canvas, start_x, start_y, scale):
|
||||
"""Draw motion tracking points on the canvas"""
|
||||
if not self.motion_tracker.has_tracking_points():
|
||||
@@ -2795,13 +2992,24 @@ class VideoEditor:
|
||||
return False
|
||||
|
||||
|
||||
def _process_frame_for_render(self, frame, output_width: int, output_height: int):
|
||||
def _process_frame_for_render(self, frame, output_width: int, output_height: int, frame_number: int = None):
|
||||
"""Process a single frame for rendering (optimized for speed)"""
|
||||
try:
|
||||
# Apply crop (vectorized operation)
|
||||
if self.crop_rect:
|
||||
x, y, w, h = map(int, self.crop_rect)
|
||||
|
||||
# Apply motion tracking to move crop center to tracked point
|
||||
if self.motion_tracker.tracking_enabled and frame_number is not None:
|
||||
current_pos = self.motion_tracker.get_interpolated_position(frame_number)
|
||||
if current_pos:
|
||||
# Move crop center to tracked point
|
||||
tracked_x, tracked_y = current_pos
|
||||
# Calculate new crop position to center on tracked point
|
||||
new_x = int(tracked_x - w // 2)
|
||||
new_y = int(tracked_y - h // 2)
|
||||
x, y = new_x, new_y
|
||||
|
||||
# Clamp coordinates to frame bounds
|
||||
h_frame, w_frame = frame.shape[:2]
|
||||
x = max(0, min(x, w_frame - 1))
|
||||
@@ -2902,6 +3110,7 @@ class VideoEditor:
|
||||
last_progress_update = 0
|
||||
|
||||
self.render_progress_queue.put(("progress", f"Processing {total_frames} frames...", 0.1, 0.0))
|
||||
|
||||
with open(self.temp_file_name, 'wb') as temp_file:
|
||||
for i in range(total_frames):
|
||||
if self.render_cancelled:
|
||||
@@ -2913,7 +3122,7 @@ class VideoEditor:
|
||||
if not ret:
|
||||
break
|
||||
|
||||
processed_frame = self._process_frame_for_render(frame, output_width, output_height)
|
||||
processed_frame = self._process_frame_for_render(frame, output_width, output_height, start_frame + i)
|
||||
if processed_frame is not None:
|
||||
if i == 0:
|
||||
print(f"Processed frame dimensions: {processed_frame.shape[1]}x{processed_frame.shape[0]}")
|
||||
|
Reference in New Issue
Block a user