Enhance VideoEditor and MotionTracker with improved logging and crop handling

This commit adds detailed logging to the VideoEditor and MotionTracker classes, tracing the transformations and adjustments made during point processing and cropping. It refines the crop position adjustment so the tracking offset is only applied when it is non-zero (see the sketch below), changes how tracking points are drawn so that points outside the crop area are rendered in a distinct color instead of being hidden, and improves motion tracking toggling by creating a default crop rect when none exists.
commit c88c2cc354
parent 9085a82bdd
Date: 2025-09-16 20:17:54 +02:00

2 changed files with 116 additions and 14 deletions
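
In condensed form, the crop-offset change in apply_crop_zoom_and_rotation (first hunk below) works like this minimal sketch. It is a standalone restatement, not the editor's actual method: the function name apply_tracking_offset and the free-standing frame, crop_rect, and tracking_offset parameters are illustrative stand-ins for the VideoEditor attributes.

    import numpy as np

    def apply_tracking_offset(frame, crop_rect, tracking_offset):
        """Shift the crop by the tracking offset only when it is non-zero,
        then clamp the crop to the frame bounds before slicing."""
        x, y, w, h = crop_rect
        dx, dy = tracking_offset
        if dx != 0 or dy != 0:                  # zero offset: leave the crop where it is
            x += int(dx)
            y += int(dy)
        x = max(0, min(x, frame.shape[1] - 1))  # keep the crop origin inside the frame
        y = max(0, min(y, frame.shape[0] - 1))
        w = min(w, frame.shape[1] - x)          # shrink the crop if it overflows the frame
        h = min(h, frame.shape[0] - y)
        if w > 0 and h > 0:
            return frame[y:y + h, x:x + w]
        return frame                            # invalid crop dimensions: skip cropping

    # Example: 720x1280 frame, 400x400 crop near the right edge, offset (50, -20)
    frame = np.zeros((720, 1280, 3), dtype=np.uint8)
    print(apply_tracking_offset(frame, (900, 400, 400, 400), (50.0, -20.0)).shape)  # (340, 330, 3)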


@@ -1116,17 +1116,32 @@ class VideoEditor:
         # Apply tracking offset to crop position if motion tracking is enabled
         if self.motion_tracker.tracking_enabled:
             tracking_offset = self.motion_tracker.get_tracking_offset(self.current_frame)
-            x += int(tracking_offset[0])
-            y += int(tracking_offset[1])
+            print(f"apply_crop_zoom_and_rotation: tracking_offset = {tracking_offset}")
+            # Only apply offset if it's not zero
+            if tracking_offset[0] != 0 or tracking_offset[1] != 0:
+                x += int(tracking_offset[0])
+                y += int(tracking_offset[1])
+                print(f"apply_crop_zoom_and_rotation: adjusted crop position to ({x}, {y})")
 
         x, y, w, h = int(x), int(y), int(w), int(h)
+        print(f"apply_crop_zoom_and_rotation: final crop = ({x}, {y}, {w}, {h})")
 
         # Ensure crop is within frame bounds
+        orig_x, orig_y = x, y
         x = max(0, min(x, processed_frame.shape[1] - 1))
         y = max(0, min(y, processed_frame.shape[0] - 1))
         w = min(w, processed_frame.shape[1] - x)
         h = min(h, processed_frame.shape[0] - y)
+        if orig_x != x or orig_y != y:
+            print(f"apply_crop_zoom_and_rotation: crop adjusted from ({orig_x}, {orig_y}) to ({x}, {y}) to stay in bounds")
 
         if w > 0 and h > 0:
             processed_frame = processed_frame[y : y + h, x : x + w]
+            print(f"apply_crop_zoom_and_rotation: crop applied, new shape = {processed_frame.shape}")
+        else:
+            print(f"apply_crop_zoom_and_rotation: invalid crop dimensions, skipping crop")
 
         # Apply rotation
         if self.rotation_angle != 0:
@@ -1177,18 +1192,23 @@ class VideoEditor:
         x, y = float(point[0]), float(point[1])
+        # Log original point
+        print(f"transform_point: original point ({x}, {y})")
 
         # Step 1: Apply crop (adjust point relative to crop origin)
         if self.crop_rect:
             crop_x, crop_y, crop_w, crop_h = self.crop_rect
+            print(f"transform_point: crop_rect = {self.crop_rect}")
 
-            # Check if point is inside the crop area
-            if not (crop_x <= x < crop_x + crop_w and crop_y <= y < crop_y + crop_h):
-                # Point is outside the crop area
-                return None
+            # Check if point is inside the crop area - but don't filter out points
+            # We'll still transform them and let the drawing code decide visibility
+            is_inside = (crop_x <= x < crop_x + crop_w and crop_y <= y < crop_y + crop_h)
+            print(f"transform_point: point ({x}, {y}) is {'inside' if is_inside else 'outside'} crop area")
 
             # Adjust coordinates relative to crop origin
             x -= crop_x
             y -= crop_y
+            print(f"transform_point: after crop adjustment ({x}, {y})")
 
         # Step 2: Apply rotation
         if self.rotation_angle != 0:
@@ -1201,6 +1221,8 @@ class VideoEditor:
             else:
                 return None
 
+            print(f"transform_point: rotation_angle = {self.rotation_angle}, dimensions = ({crop_w}, {crop_h})")
+
             # Apply rotation to coordinates
             if self.rotation_angle == 90:
                 # 90° clockwise: (x,y) -> (y, width-x)
@@ -1217,11 +1239,15 @@ class VideoEditor:
                 new_y = x
             x, y = new_x, new_y
+            print(f"transform_point: after rotation ({x}, {y})")
 
         # Step 3: Apply zoom
         if self.zoom_factor != 1.0:
             x *= self.zoom_factor
             y *= self.zoom_factor
+            print(f"transform_point: after zoom ({x}, {y}), zoom_factor = {self.zoom_factor}")
 
+        print(f"transform_point: final result = ({x}, {y})")
         return (x, y)
 
     def untransform_point(self, point: Tuple[float, float]) -> Tuple[float, float]:
@@ -1236,11 +1262,13 @@ class VideoEditor:
             return None
 
         x, y = float(point[0]), float(point[1])
+        print(f"untransform_point: original display point ({x}, {y})")
 
         # Step 1: Reverse zoom
         if self.zoom_factor != 1.0:
             x /= self.zoom_factor
             y /= self.zoom_factor
+            print(f"untransform_point: after reverse zoom ({x}, {y}), zoom_factor = {self.zoom_factor}")
 
         # Step 2: Reverse rotation
         if self.rotation_angle != 0:
@@ -1251,6 +1279,8 @@ class VideoEditor:
             crop_h, crop_w = self.current_display_frame.shape[:2]
             crop_h, crop_w = float(crop_h), float(crop_w)
 
+            print(f"untransform_point: rotation_angle = {self.rotation_angle}, dimensions = ({crop_w}, {crop_h})")
+
             # Apply inverse rotation to coordinates
             if self.rotation_angle == 90:
                 # Reverse 90° clockwise: (x,y) -> (width-y, x)
@@ -1267,18 +1297,25 @@ class VideoEditor:
                 new_y = crop_h - x
             x, y = new_x, new_y
+            print(f"untransform_point: after reverse rotation ({x}, {y})")
 
         # Step 3: Reverse crop (add crop offset)
         if self.crop_rect:
             crop_x, crop_y = float(self.crop_rect[0]), float(self.crop_rect[1])
             x += crop_x
             y += crop_y
+            print(f"untransform_point: after reverse crop ({x}, {y}), crop_rect = {self.crop_rect}")
 
         # Ensure coordinates are within the frame bounds
        if self.current_display_frame is not None:
             height, width = self.current_display_frame.shape[:2]
+            orig_x, orig_y = x, y
             x = max(0, min(width - 1, x))
             y = max(0, min(height - 1, y))
+            if orig_x != x or orig_y != y:
+                print(f"untransform_point: clamped from ({orig_x}, {orig_y}) to ({x}, {y})")
 
+        print(f"untransform_point: final result = ({x}, {y})")
         return (x, y)
@@ -1452,32 +1489,67 @@ class VideoEditor:
         if self.current_frame is None:
             return
 
+        print(f"draw_tracking_points: offset=({offset_x},{offset_y}), scale={scale}")
+
         # Draw tracking points for the current frame (green circles with white border)
         tracking_points = self.motion_tracker.get_tracking_points_for_frame(self.current_frame)
-        for point in tracking_points:
+        print(f"draw_tracking_points: found {len(tracking_points)} tracking points for frame {self.current_frame}")
+
+        for i, point in enumerate(tracking_points):
+            print(f"draw_tracking_points: processing point {i}: {point}")
             # Transform point from original frame coordinates to display coordinates
             display_point = self.transform_point(point)
+
+            # Check if the point is within the crop area (if cropping is active)
+            is_in_crop = True
+            if self.crop_rect:
+                crop_x, crop_y, crop_w, crop_h = self.crop_rect
+                is_in_crop = (crop_x <= point[0] < crop_x + crop_w and
+                              crop_y <= point[1] < crop_y + crop_h)
+
             if display_point:
+                print(f"draw_tracking_points: point {i} transformed to {display_point}")
                 # Scale and offset the point to match the canvas
                 x = int(offset_x + display_point[0] * scale)
                 y = int(offset_y + display_point[1] * scale)
-                # Draw white border
-                cv2.circle(canvas, (x, y), self.tracking_point_radius + 2, (255, 255, 255), 2)
-                # Draw green circle
-                cv2.circle(canvas, (x, y), self.tracking_point_radius, (0, 255, 0), -1)
+                print(f"draw_tracking_points: point {i} canvas position: ({x},{y})")
+
+                # Draw the point - use different colors based on whether it's in the crop area
+                if is_in_crop:
+                    # Point is in crop area - draw normally
+                    # Draw white border
+                    cv2.circle(canvas, (x, y), self.tracking_point_radius + 2, (255, 255, 255), 2)
+                    # Draw green circle
+                    cv2.circle(canvas, (x, y), self.tracking_point_radius, (0, 255, 0), -1)
+                else:
+                    # Point is outside crop area - draw with different color (semi-transparent)
+                    # Draw gray border
+                    cv2.circle(canvas, (x, y), self.tracking_point_radius + 2, (128, 128, 128), 2)
+                    # Draw yellow circle
+                    cv2.circle(canvas, (x, y), self.tracking_point_radius, (0, 255, 255), -1)
+            else:
+                print(f"draw_tracking_points: point {i} not visible in current view")
 
         # Draw computed tracking position (blue cross) if tracking is enabled
         if self.motion_tracker.tracking_enabled:
             interpolated_pos = self.motion_tracker.get_interpolated_position(self.current_frame)
+            print(f"draw_tracking_points: interpolated position: {interpolated_pos}")
             if interpolated_pos:
                 # Transform point from original frame coordinates to display coordinates
                 display_point = self.transform_point(interpolated_pos)
+                print(f"draw_tracking_points: interpolated display point: {display_point}")
                 if display_point:
                     # Scale and offset the point to match the canvas
                     x = int(offset_x + display_point[0] * scale)
                     y = int(offset_y + display_point[1] * scale)
+                    print(f"draw_tracking_points: interpolated canvas position: ({x},{y})")
                     # Draw blue cross
                     cross_size = 10
                     cv2.line(canvas, (x - cross_size, y), (x + cross_size, y), (255, 0, 0), 2)
@@ -3043,39 +3115,59 @@ class VideoEditor:
            if self.motion_tracker.tracking_enabled:
                self.motion_tracker.stop_tracking()
                print("Motion tracking disabled")
+                print("Motion tracking disabled")
            else:
                # If we have tracking points, start tracking
                if self.motion_tracker.has_tracking_points():
                    # Get the current interpolated position to use as base
                    current_pos = self.motion_tracker.get_interpolated_position(self.current_frame)
+                    print(f"Toggle tracking: interpolated position = {current_pos}")
 
                    # Use crop center if we have a crop rect
                    if self.crop_rect:
                        x, y, w, h = self.crop_rect
                        crop_center = (x + w//2, y + h//2)
+                        print(f"Toggle tracking: crop_rect = {self.crop_rect}, crop_center = {crop_center}")
 
                        # If we have a current position from tracking points, use that as base
                        if current_pos:
                            # The base zoom center is the current position
                            base_zoom_center = current_pos
+                            print(f"Toggle tracking: using interpolated position as base: {base_zoom_center}")
                        else:
                            # Use crop center as fallback
                            base_zoom_center = crop_center
+                            print(f"Toggle tracking: using crop center as base: {base_zoom_center}")
                    else:
                        # No crop rect, use frame center
                        if self.current_display_frame is not None:
                            h, w = self.current_display_frame.shape[:2]
                            base_zoom_center = (w // 2, h // 2)
+                            print(f"Toggle tracking: using frame center as base: {base_zoom_center}")
                        else:
                            base_zoom_center = None
+                            print("Toggle tracking: no base center available")
 
+                    # Create a crop rect if one doesn't exist
+                    base_crop_rect = self.crop_rect
+                    if not base_crop_rect and current_pos and self.current_display_frame is not None:
+                        # Create a default crop rect centered on the current position
+                        h, w = self.current_display_frame.shape[:2]
+                        crop_size = min(w, h) // 2 # Use half of the smaller dimension
+                        x = max(0, int(current_pos[0] - crop_size // 2))
+                        y = max(0, int(current_pos[1] - crop_size // 2))
+                        base_crop_rect = (x, y, crop_size, crop_size)
+                        print(f"Toggle tracking: created default crop rect: {base_crop_rect}")
 
                    self.motion_tracker.start_tracking(
-                        self.crop_rect,
+                        base_crop_rect,
                        base_zoom_center
                    )
                    print("Motion tracking enabled")
+                    print(f"Motion tracking enabled with base_crop_rect={base_crop_rect}, base_zoom_center={base_zoom_center}")
                else:
                    print("No tracking points available. Add tracking points with right-click first.")
+                    print("Motion tracking not enabled - no tracking points available")
 
            self.save_state()
        else: # V - Clear all tracking points
            self.motion_tracker.clear_tracking_points()


@@ -108,11 +108,20 @@ class MotionTracker:
     def get_tracking_offset(self, frame_number: int) -> Tuple[float, float]:
         """Get the offset to center the crop on the tracked point"""
-        if not self.tracking_enabled or not self.base_zoom_center:
+        import logging
+        logger = logging.getLogger('croppa')
+
+        if not self.tracking_enabled:
+            print(f"get_tracking_offset: tracking not enabled, returning (0,0)")
+            return (0.0, 0.0)
+
+        if not self.base_zoom_center:
+            print(f"get_tracking_offset: no base_zoom_center, returning (0,0)")
             return (0.0, 0.0)
 
         current_pos = self.get_interpolated_position(frame_number)
         if not current_pos:
+            print(f"get_tracking_offset: no interpolated position for frame {frame_number}, returning (0,0)")
             return (0.0, 0.0)
 
         # Calculate offset to center the crop on the tracked point
@@ -120,6 +129,7 @@ class MotionTracker:
         offset_x = current_pos[0] - self.base_zoom_center[0]
         offset_y = current_pos[1] - self.base_zoom_center[1]
+        print(f"get_tracking_offset: frame={frame_number}, base={self.base_zoom_center}, current={current_pos}, offset=({offset_x}, {offset_y})")
         return (offset_x, offset_y)
 
     def start_tracking(self, base_crop_rect: Tuple[int, int, int, int], base_zoom_center: Tuple[int, int]):
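
For reference, the default crop rect that the toggle handler now creates when tracking is enabled without an existing crop (see the @@ -3043 hunk above) reduces to the following minimal sketch; frame_shape and tracked_pos are illustrative stand-ins for the editor's current_display_frame shape and the interpolated tracking position.

    def default_crop_rect(frame_shape, tracked_pos):
        """Square crop of half the smaller frame dimension, centered on the
        tracked point, with the origin clamped to stay non-negative."""
        h, w = frame_shape[:2]
        crop_size = min(w, h) // 2
        x = max(0, int(tracked_pos[0] - crop_size // 2))
        y = max(0, int(tracked_pos[1] - crop_size // 2))
        return (x, y, crop_size, crop_size)

    # Example: 1080p frame, tracked point near the left edge
    print(default_crop_rect((1080, 1920, 3), (100.0, 500.0)))  # -> (0, 230, 540, 540)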