diff --git a/croppa/main.py b/croppa/main.py
index 4ad687c..85c00bc 100644
--- a/croppa/main.py
+++ b/croppa/main.py
@@ -1201,141 +1201,116 @@ class VideoEditor:
     def transform_point(self, point: Tuple[float, float]) -> Tuple[float, float]:
         """Transform a point from original frame coordinates to display coordinates
-        This applies the same transformations that are applied to frames:
-        1. Crop
-        2. Rotation
-        3. Zoom
-
-        IMPORTANT: This function must exactly match the transformations applied in
-        apply_crop_zoom_and_rotation to ensure consistent coordinate mapping.
+        COMPLETELY REWRITTEN - NO CROP TRANSFORMATION
         """
         if point is None or self.current_display_frame is None:
+            print("DEBUG: transform_point: point is None or no display frame")
             return None

-        # Get original coordinates and convert to float for precise calculations
+        # Get original coordinates
         orig_x, orig_y = float(point[0]), float(point[1])
-        print(f"transform_point: original point ({orig_x}, {orig_y})")
+        print(f"DEBUG: transform_point: START - original point ({orig_x}, {orig_y})")

         # Get original frame dimensions
         frame_height, frame_width = self.current_display_frame.shape[:2]
+        print(f"DEBUG: transform_point: original frame dimensions = {frame_width}x{frame_height}")

-        # STEP 1: Apply crop - adjust coordinates relative to crop origin
+        # STEP 1: Apply rotation to original frame coordinates
         x, y = orig_x, orig_y
-        if self.crop_rect:
-            crop_x, crop_y, crop_w, crop_h = self.crop_rect
-
-            # Check if point is inside the crop area (for debugging only)
-            is_inside = (crop_x <= x < crop_x + crop_w and crop_y <= y < crop_y + crop_h)
-            print(f"transform_point: point ({x}, {y}) is {'inside' if is_inside else 'outside'} crop area")
-
-            # Adjust coordinates relative to crop origin
-            x -= crop_x
-            y -= crop_y
-
-            # Update dimensions for rotation calculations
-            frame_width, frame_height = crop_w, crop_h
-
-            print(f"transform_point: after crop adjustment ({x}, {y})")
-
-        # STEP 2: Apply rotation
         if self.rotation_angle != 0:
-            print(f"transform_point: rotation_angle = {self.rotation_angle}, dimensions = ({frame_width}, {frame_height})")
+            print(f"DEBUG: transform_point: applying rotation {self.rotation_angle}° to ({x}, {y}) with dimensions {frame_width}x{frame_height}")

-            # Apply rotation to coordinates
             if self.rotation_angle == 90:
                 # 90° clockwise: (x,y) -> (y, width-x)
                 new_x = y
                 new_y = frame_width - x
                 x, y = new_x, new_y
+                print(f"DEBUG: transform_point: 90° rotation: ({orig_x}, {orig_y}) -> ({x}, {y})")
             elif self.rotation_angle == 180:
                 # 180° rotation: (x,y) -> (width-x, height-y)
                 x = frame_width - x
                 y = frame_height - y
+                print(f"DEBUG: transform_point: 180° rotation: ({orig_x}, {orig_y}) -> ({x}, {y})")
             elif self.rotation_angle == 270:
                 # 270° clockwise: (x,y) -> (height-y, x)
                 new_x = frame_height - y
                 new_y = x
                 x, y = new_x, new_y
+                print(f"DEBUG: transform_point: 270° rotation: ({orig_x}, {orig_y}) -> ({x}, {y})")
+        else:
+            print("DEBUG: transform_point: no rotation")

-            print(f"transform_point: after rotation ({x}, {y})")
-
-        # STEP 3: Apply zoom
+        # STEP 2: Apply zoom
         if self.zoom_factor != 1.0:
+            print(f"DEBUG: transform_point: applying zoom {self.zoom_factor} to ({x}, {y})")
             x *= self.zoom_factor
             y *= self.zoom_factor
-            print(f"transform_point: after zoom ({x}, {y}), zoom_factor = {self.zoom_factor}")
+            print(f"DEBUG: transform_point: after zoom ({x}, {y})")
+        else:
+            print("DEBUG: transform_point: no zoom")

-        print(f"transform_point: final result = ({x}, {y})")
+        print(f"DEBUG: transform_point: FINAL RESULT = ({x}, {y})")
         return (x, y)

     def untransform_point(self, point: Tuple[float, float]) -> Tuple[float, float]:
         """Transform a point from display coordinates back to original frame coordinates
-        This reverses the transformations in the opposite order:
-        1. Reverse zoom
-        2. Reverse rotation
-        3. Reverse crop
-
-        IMPORTANT: This function must exactly reverse the transformations applied in
-        transform_point to ensure consistent coordinate mapping.
+        COMPLETELY REWRITTEN - NO CROP TRANSFORMATION
         """
         if point is None or self.current_display_frame is None:
+            print("DEBUG: untransform_point: point is None or no display frame")
             return None

-        # Get display coordinates and convert to float for precise calculations
+        # Get display coordinates
         display_x, display_y = float(point[0]), float(point[1])
-        print(f"untransform_point: original display point ({display_x}, {display_y})")
+        print(f"DEBUG: untransform_point: START - display point ({display_x}, {display_y})")

         # Get original frame dimensions
         orig_frame_height, orig_frame_width = self.current_display_frame.shape[:2]
-
-        # Get dimensions for rotation calculations
-        if self.crop_rect:
-            frame_width, frame_height = float(self.crop_rect[2]), float(self.crop_rect[3])
-        else:
-            frame_width, frame_height = float(orig_frame_width), float(orig_frame_height)
+        print(f"DEBUG: untransform_point: original frame dimensions = {orig_frame_width}x{orig_frame_height}")

         # STEP 1: Reverse zoom
         x, y = display_x, display_y
         if self.zoom_factor != 1.0:
+            print(f"DEBUG: untransform_point: reversing zoom {self.zoom_factor} from ({x}, {y})")
             x /= self.zoom_factor
             y /= self.zoom_factor
-            print(f"untransform_point: after reverse zoom ({x}, {y}), zoom_factor = {self.zoom_factor}")
+            print(f"DEBUG: untransform_point: after reverse zoom ({x}, {y})")
+        else:
+            print("DEBUG: untransform_point: no zoom to reverse")

         # STEP 2: Reverse rotation
         if self.rotation_angle != 0:
-            print(f"untransform_point: rotation_angle = {self.rotation_angle}, dimensions = ({frame_width}, {frame_height})")
+            print(f"DEBUG: untransform_point: reversing rotation {self.rotation_angle}° from ({x}, {y}) with dimensions {orig_frame_width}x{orig_frame_height}")

-            # Apply inverse rotation to coordinates
             if self.rotation_angle == 90:
                 # Reverse 90° clockwise: (x,y) -> (width-y, x)
-                new_x = frame_width - y
+                new_x = orig_frame_width - y
                 new_y = x
                 x, y = new_x, new_y
+                print(f"DEBUG: untransform_point: reverse 90° rotation: ({display_x}, {display_y}) -> ({x}, {y})")
             elif self.rotation_angle == 180:
                 # Reverse 180° rotation: (x,y) -> (width-x, height-y)
-                x = frame_width - x
-                y = frame_height - y
+                x = orig_frame_width - x
+                y = orig_frame_height - y
+                print(f"DEBUG: untransform_point: reverse 180° rotation: ({display_x}, {display_y}) -> ({x}, {y})")
             elif self.rotation_angle == 270:
                 # Reverse 270° clockwise: (x,y) -> (y, height-x)
                 new_x = y
-                new_y = frame_height - x
+                new_y = orig_frame_height - x
                 x, y = new_x, new_y
-
-        print(f"untransform_point: after reverse rotation ({x}, {y})")
-
-        # STEP 3: Reverse crop (add crop offset)
-        if self.crop_rect:
-            crop_x, crop_y = float(self.crop_rect[0]), float(self.crop_rect[1])
-            x += crop_x
-            y += crop_y
-            print(f"untransform_point: after reverse crop ({x}, {y}), crop_rect = {self.crop_rect}")
+                print(f"DEBUG: untransform_point: reverse 270° rotation: ({display_x}, {display_y}) -> ({x}, {y})")
+        else:
+            print("DEBUG: untransform_point: no rotation to reverse")

-        # Clamp coordinates to frame bounds to ensure they're valid
+        # Clamp coordinates to frame bounds
+        orig_x, orig_y = x, y
         x = max(0, min(x, orig_frame_width - 1))
         y = max(0, min(y, orig_frame_height - 1))
+        if orig_x != x or orig_y != y:
+            print(f"DEBUG: untransform_point: clamped coordinates from ({orig_x}, {orig_y}) to ({x}, {y})")

-        print(f"untransform_point: final result = ({x}, {y})")
+        print(f"DEBUG: untransform_point: FINAL RESULT = ({x}, {y})")
         return (x, y)

@@ -1522,37 +1497,42 @@ class VideoEditor:
         debug_mode = True
         if debug_mode and self.crop_rect:
             # Draw crop rectangle outline on the canvas
+            # The crop outline should always be the edges of the display frame
+            # since the crop IS the display frame
             crop_x, crop_y, crop_w, crop_h = self.crop_rect
-            # Transform the crop corners to display coordinates
-            top_left = self.transform_point((crop_x, crop_y))
-            top_right = self.transform_point((crop_x + crop_w, crop_y))
-            bottom_left = self.transform_point((crop_x, crop_y + crop_h))
-            bottom_right = self.transform_point((crop_x + crop_w, crop_y + crop_h))
+            print(f"DEBUG: draw_tracking_points: drawing crop outline for crop_rect = {self.crop_rect}")
+            print(f"DEBUG: draw_tracking_points: canvas offset=({offset_x},{offset_y}), scale={scale}")

-            # Draw crop outline if all corners are visible
-            if all([top_left, top_right, bottom_left, bottom_right]):
-                # Convert to canvas coordinates
-                tl_x, tl_y = int(offset_x + top_left[0] * scale), int(offset_y + top_left[1] * scale)
-                tr_x, tr_y = int(offset_x + top_right[0] * scale), int(offset_y + top_right[1] * scale)
-                bl_x, bl_y = int(offset_x + bottom_left[0] * scale), int(offset_y + bottom_left[1] * scale)
-                br_x, br_y = int(offset_x + bottom_right[0] * scale), int(offset_y + bottom_right[1] * scale)
-
-                # Draw crop outline
-                cv2.line(canvas, (tl_x, tl_y), (tr_x, tr_y), (255, 0, 255), 1)
-                cv2.line(canvas, (tr_x, tr_y), (br_x, br_y), (255, 0, 255), 1)
-                cv2.line(canvas, (br_x, br_y), (bl_x, bl_y), (255, 0, 255), 1)
-                cv2.line(canvas, (bl_x, bl_y), (tl_x, tl_y), (255, 0, 255), 1)
+            # The crop corners in display coordinates are always:
+            # (0,0), (crop_w,0), (0,crop_h), (crop_w,crop_h)
+            # because the crop IS the display frame
+            tl_x = int(offset_x + 0 * scale)
+            tl_y = int(offset_y + 0 * scale)
+            tr_x = int(offset_x + crop_w * scale)
+            tr_y = int(offset_y + 0 * scale)
+            bl_x = int(offset_x + 0 * scale)
+            bl_y = int(offset_y + crop_h * scale)
+            br_x = int(offset_x + crop_w * scale)
+            br_y = int(offset_y + crop_h * scale)
+
+            print(f"DEBUG: draw_tracking_points: crop outline corners: TL({tl_x},{tl_y}) TR({tr_x},{tr_y}) BL({bl_x},{bl_y}) BR({br_x},{br_y})")
+
+            # Draw crop outline
+            cv2.line(canvas, (tl_x, tl_y), (tr_x, tr_y), (255, 0, 255), 1)
+            cv2.line(canvas, (tr_x, tr_y), (br_x, br_y), (255, 0, 255), 1)
+            cv2.line(canvas, (br_x, br_y), (bl_x, bl_y), (255, 0, 255), 1)
+            cv2.line(canvas, (bl_x, bl_y), (tl_x, tl_y), (255, 0, 255), 1)

         # Process each tracking point
         for i, tracking_point in enumerate(tracking_points):
             # Get the original coordinates
             orig_x, orig_y = tracking_point.original
-            print(f"draw_tracking_points: processing point {i}: original={tracking_point.original}")
+            print(f"DEBUG: draw_tracking_points: processing point {i}: original={tracking_point.original}")

             # Check if the point is within the frame bounds
             is_in_frame = (0 <= orig_x < frame_width and 0 <= orig_y < frame_height)
-            print(f"draw_tracking_points: point {i} is {'inside' if is_in_frame else 'outside'} frame bounds")
+            print(f"DEBUG: draw_tracking_points: point {i} is {'inside' if is_in_frame else 'outside'} frame bounds")

             # Check if the point is within the crop area (if cropping is active)
             is_in_crop = True
@@ -1560,22 +1540,43 @@ class VideoEditor:
                 crop_x, crop_y, crop_w, crop_h = self.crop_rect
                 is_in_crop = (crop_x <= orig_x < crop_x + crop_w and crop_y <= orig_y < crop_y + crop_h)
-                print(f"draw_tracking_points: point {i} is {'inside' if is_in_crop else 'outside'} crop area")
+                print(f"DEBUG: draw_tracking_points: point {i} is {'inside' if is_in_crop else 'outside'} crop area")

-            # Always transform point from original frame coordinates to display coordinates
-            # This ensures the point is always drawn correctly regardless of current crop/rotation state
+            # Transform point from original frame coordinates to display coordinates
+            print(f"DEBUG: draw_tracking_points: calling transform_point for point {i}")
             display_point = self.transform_point(tracking_point.original)
-            print(f"draw_tracking_points: transformed to display coordinates {display_point}")
+            print(f"DEBUG: draw_tracking_points: point {i} transformed to display coordinates {display_point}")

             if display_point is not None:
-                # Scale and offset the point to match the canvas
-                x = int(offset_x + display_point[0] * scale)
-                y = int(offset_y + display_point[1] * scale)
+                # If we have a crop, we need to adjust the display coordinates
+                if self.crop_rect:
+                    crop_x, crop_y, crop_w, crop_h = self.crop_rect
+                    # Check if the point is within the crop area
+                    if (crop_x <= orig_x < crop_x + crop_w and crop_y <= orig_y < crop_y + crop_h):
+                        # Point is within crop area, adjust coordinates relative to crop
+                        adjusted_x = display_point[0] - crop_x
+                        adjusted_y = display_point[1] - crop_y
+                        print(f"DEBUG: draw_tracking_points: point {i} adjusted for crop: ({adjusted_x}, {adjusted_y})")
+                    else:
+                        # Point is outside crop area, don't draw it
+                        print(f"DEBUG: draw_tracking_points: point {i} is outside crop area - NOT DRAWN")
+                        continue
+                else:
+                    # No crop, use display coordinates as-is
+                    adjusted_x = display_point[0]
+                    adjusted_y = display_point[1]
+                    print(f"DEBUG: draw_tracking_points: point {i} no crop adjustment: ({adjusted_x}, {adjusted_y})")

-                print(f"draw_tracking_points: point {i} canvas position: ({x},{y})")
+                # Scale and offset the point to match the canvas
+                x = int(offset_x + adjusted_x * scale)
+                y = int(offset_y + adjusted_y * scale)
+
+                print(f"DEBUG: draw_tracking_points: point {i} canvas position: ({x},{y})")
+                print(f"DEBUG: draw_tracking_points: canvas offset=({offset_x},{offset_y}), scale={scale}")

                 # Check if the point is within the canvas bounds
                 is_on_canvas = (0 <= x < self.window_width and 0 <= y < self.window_height)
+                print(f"DEBUG: draw_tracking_points: point {i} is {'on' if is_on_canvas else 'off'} canvas")

                 if is_on_canvas:
                     # Draw the point - use different colors based on whether it's in the crop area
@@ -1587,6 +1588,7 @@ class VideoEditor:
                         cv2.circle(canvas, (x, y), self.tracking_point_radius, (0, 255, 0), -1)
                         # Draw point index for identification
                         cv2.putText(canvas, str(i), (x + 15, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
+                        print(f"DEBUG: draw_tracking_points: drew point {i} in GREEN at ({x},{y})")
                     else:
                         # Point is outside crop area - draw with different color
                         # Draw gray border
@@ -1595,10 +1597,11 @@ class VideoEditor:
                         cv2.circle(canvas, (x, y), self.tracking_point_radius, (0, 255, 255), -1)
                         # Draw point index for identification
                         cv2.putText(canvas, str(i), (x + 15, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
+                        print(f"DEBUG: draw_tracking_points: drew point {i} in YELLOW at ({x},{y})")
                 else:
-                    print(f"draw_tracking_points: point {i} is outside canvas bounds")
+                    print(f"DEBUG: draw_tracking_points: point {i} is outside canvas bounds - NOT DRAWN")
             else:
-                print(f"draw_tracking_points: point {i} not visible in current view")
+                print(f"DEBUG: draw_tracking_points: point {i} not visible in current view - NOT DRAWN")

         # Draw computed tracking position (blue cross) if tracking is enabled
         if self.motion_tracker.tracking_enabled:
@@ -2191,18 +2194,36 @@ class VideoEditor:
             display_x = (x - start_x) / scale
             display_y = (y - start_y) / scale

-            print(f"mouse_callback: converted to display coords ({display_x}, {display_y})")
+            print(f"DEBUG: mouse_callback: screen click at ({x}, {y})")
+            print(f"DEBUG: mouse_callback: canvas offset ({start_x}, {start_y}), scale {scale}")
+            print(f"DEBUG: mouse_callback: converted to display coords ({display_x}, {display_y})")
+
+            # If we have a crop, we need to add the crop offset to get the original frame coordinates
+            if self.crop_rect:
+                crop_x, crop_y, crop_w, crop_h = self.crop_rect
+                # The display coordinates are relative to the crop, so add the crop offset
+                original_display_x = display_x + crop_x
+                original_display_y = display_y + crop_y
+                print(f"DEBUG: mouse_callback: added crop offset ({crop_x}, {crop_y}) -> ({original_display_x}, {original_display_y})")
+            else:
+                original_display_x = display_x
+                original_display_y = display_y
+                print(f"DEBUG: mouse_callback: no crop, using display coords as-is")

             # Now convert display coordinates to original frame coordinates
             # This is where the magic happens - we need to reverse all transformations
-            original_point = self.untransform_point((display_x, display_y))
+            print(f"DEBUG: mouse_callback: calling untransform_point with ({original_display_x}, {original_display_y})")
+            original_point = self.untransform_point((original_display_x, original_display_y))

-            print(f"mouse_callback: untransformed to original coords {original_point}")
+            print(f"DEBUG: mouse_callback: untransformed to original coords {original_point}")

             if original_point:
                 # Store the original frame dimensions for reference
                 frame_height, frame_width = self.current_display_frame.shape[:2]
-                print(f"mouse_callback: frame dimensions: {frame_width}x{frame_height}")
+                print(f"DEBUG: mouse_callback: frame dimensions: {frame_width}x{frame_height}")
+                print(f"DEBUG: mouse_callback: current crop_rect: {self.crop_rect}")
+                print(f"DEBUG: mouse_callback: current rotation_angle: {self.rotation_angle}")
+                print(f"DEBUG: mouse_callback: current zoom_factor: {self.zoom_factor}")

                 # Check if clicking on an existing tracking point to remove it
                 removed = self.motion_tracker.remove_tracking_point(
@@ -2213,7 +2234,7 @@ class VideoEditor:
                 )

                 if removed:
-                    print(f"mouse_callback: removed tracking point at {original_point}")
+                    print(f"DEBUG: mouse_callback: removed tracking point at {original_point}")
                 else:
                     # Add a new tracking point - only store the original coordinates
                     # Display coordinates will be calculated fresh each time to ensure accuracy
@@ -2223,18 +2244,23 @@ class VideoEditor:
                         original_point[1]  # No display coordinates - we'll calculate them fresh each time
                     )
-                    print(f"mouse_callback: added tracking point at {original_point}")
+                    print(f"DEBUG: mouse_callback: added tracking point at {original_point}")

                     # Verify the coordinates are correct by doing a round-trip transformation
+                    print(f"DEBUG: mouse_callback: doing round-trip verification...")
                     verification_display = self.transform_point(original_point)
                     if verification_display:
                         expected_x = int(start_x + verification_display[0] * scale)
                         expected_y = int(start_y + verification_display[1] * scale)
-                        print(f"mouse_callback: verification - expected canvas position: ({expected_x}, {expected_y}), actual: ({x}, {y})")
+                        print(f"DEBUG: mouse_callback: verification - expected canvas position: ({expected_x}, {expected_y}), actual: ({x}, {y})")

                         error_x = abs(expected_x - x)
                         error_y = abs(expected_y - y)
-                        print(f"mouse_callback: verification - position error: ({error_x}, {error_y}) pixels")
+                        print(f"DEBUG: mouse_callback: verification - position error: ({error_x}, {error_y}) pixels")
+
+                        if error_x > 2 or error_y > 2:
+                            print(f"DEBUG: ERROR: Significant coordinate transformation error detected!")
+                            print(f"DEBUG: ERROR: This indicates a problem with the transform/untransform functions!")

                 # Save state when tracking points change
                 self.save_state()
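For reference, below is a standalone sketch of the round-trip property that the verification block in `mouse_callback` checks inline. It is not part of the patch: `transform_point_pure` and `untransform_point_pure` are hypothetical, dependency-free stand-ins that mirror the rotate-then-zoom mapping of the rewritten `transform_point`/`untransform_point` (crop handling omitted, as in the patch), so the forward/inverse pairing can be exercised without a `VideoEditor` instance.

```python
from typing import Tuple


def transform_point_pure(
    point: Tuple[float, float],
    frame_size: Tuple[int, int],   # (width, height) of the display frame
    rotation_angle: int = 0,       # 0, 90, 180, or 270 degrees clockwise
    zoom_factor: float = 1.0,
) -> Tuple[float, float]:
    """Original -> display coordinates: rotation first, then zoom (no crop)."""
    x, y = float(point[0]), float(point[1])
    width, height = frame_size
    if rotation_angle == 90:     # (x, y) -> (y, width - x)
        x, y = y, width - x
    elif rotation_angle == 180:  # (x, y) -> (width - x, height - y)
        x, y = width - x, height - y
    elif rotation_angle == 270:  # (x, y) -> (height - y, x)
        x, y = height - y, x
    return x * zoom_factor, y * zoom_factor


def untransform_point_pure(
    point: Tuple[float, float],
    frame_size: Tuple[int, int],
    rotation_angle: int = 0,
    zoom_factor: float = 1.0,
) -> Tuple[float, float]:
    """Display -> original coordinates: reverse zoom, then reverse rotation."""
    x, y = float(point[0]) / zoom_factor, float(point[1]) / zoom_factor
    width, height = frame_size
    if rotation_angle == 90:     # inverse of (x, y) -> (y, width - x)
        x, y = width - y, x
    elif rotation_angle == 180:  # 180° is its own inverse
        x, y = width - x, height - y
    elif rotation_angle == 270:  # inverse of (x, y) -> (height - y, x)
        x, y = y, height - x
    return x, y


if __name__ == "__main__":
    # Round-trip check analogous to the verification block in mouse_callback:
    # transform then untransform should reproduce the original point.
    frame_size = (1920, 1080)
    for angle in (0, 90, 180, 270):
        for zoom in (1.0, 1.5, 2.0):
            original = (123.0, 456.0)
            display = transform_point_pure(original, frame_size, angle, zoom)
            back = untransform_point_pure(display, frame_size, angle, zoom)
            err = max(abs(back[0] - original[0]), abs(back[1] - original[1]))
            assert err < 1e-6, f"round-trip error {err} at angle={angle}, zoom={zoom}"
    print("round-trip OK for all tested rotations and zoom factors")
```

With this pairing, the reverse-rotation formulas above (for example, 90°: `(x, y) -> (width - y, x)`) reproduce the original point exactly, which is the property the `error_x > 2 or error_y > 2` check in `mouse_callback` is guarding against.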