diff --git a/croppa/main.py b/croppa/main.py
index 762bbfc..1a70998 100644
--- a/croppa/main.py
+++ b/croppa/main.py
@@ -1248,8 +1248,11 @@ class VideoEditor:
         # This handles all transformations (crop, zoom, rotation) correctly
         display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)
         if display_frame is not None:
-            # Extract features without coordinate mapping - store in transformed frame coordinates
-            self.feature_tracker.extract_features(display_frame, self.current_frame)
+            # Store features in rotated frame coordinates (like existing motion tracking)
+            def coord_mapper(x, y):
+                return (int(x), int(y))
+
+            self.feature_tracker.extract_features(display_frame, self.current_frame, coord_mapper)
 
     def jump_to_previous_marker(self):
         """Jump to the previous tracking marker (frame with tracking points)."""
@@ -1481,14 +1484,14 @@ class VideoEditor:
         # Then reverse the rotation
         if self.rotation_angle == 90:
             # 90° clockwise -> 270° counterclockwise
-            orig_x, orig_y = orig_y, self.frame_width - orig_x
+            orig_x, orig_y = self.frame_height - orig_y, orig_x
         elif self.rotation_angle == 180:
             # 180° -> flip both axes
             orig_x = self.frame_width - orig_x
             orig_y = self.frame_height - orig_y
         elif self.rotation_angle == 270:
             # 270° clockwise -> 90° counterclockwise
-            orig_x, orig_y = self.frame_height - orig_y, orig_x
+            orig_x, orig_y = orig_y, self.frame_width - orig_x
 
         return (int(orig_x), int(orig_y))
 
@@ -1515,15 +1518,9 @@ class VideoEditor:
         if self.feature_tracker.tracking_enabled:
             feature_pos = self.feature_tracker.get_tracking_position(frame_number)
             if feature_pos:
-                # Features are stored in transformed frame coordinates
-                # We need to map them to rotated frame coordinates for the tracking system
-                # The transformed frame coordinates are already in the correct space for display
-                # But we need to map them back to the original frame space first
-                orig_x, orig_y = self._map_transformed_to_original_coords(feature_pos[0], feature_pos[1])
-
-                # Transform to rotated frame coordinates
-                rot_x, rot_y = self._map_original_to_rotated_coords(orig_x, orig_y)
-                return (rot_x, rot_y)
+                # Features are stored in rotated frame coordinates (like existing motion tracking)
+                # We can use them directly for the tracking system
+                return (feature_pos[0], feature_pos[1])
 
         # Fall back to manual tracking points
         if not self.tracking_points:
@@ -2307,12 +2304,9 @@ class VideoEditor:
                 self.current_frame in self.feature_tracker.features):
             feature_positions = self.feature_tracker.features[self.current_frame]['positions']
             for (fx, fy) in feature_positions:
-                # Features are stored in transformed frame coordinates
-                # We need to map them to screen coordinates for display
-                # The transformed frame coordinates need to be mapped to rotated frame coordinates first
-                orig_x, orig_y = self._map_transformed_to_original_coords(fx, fy)
-                rot_x, rot_y = self._map_original_to_rotated_coords(orig_x, orig_y)
-                sx, sy = self._map_rotated_to_screen(rot_x, rot_y)
+                # Features are stored in rotated frame coordinates (like existing motion tracking)
+                # Use the existing coordinate transformation system
+                sx, sy = self._map_rotated_to_screen(fx, fy)
                 cv2.circle(canvas, (sx, sy), 4, (0, 255, 0), -1)  # Green circles for features
                 cv2.circle(canvas, (sx, sy), 4, (255, 255, 255), 1)
 
@@ -3567,8 +3561,15 @@ class VideoEditor:
         # This handles all transformations (crop, zoom, rotation) correctly
         display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)
         if display_frame is not None:
-            # Extract features without coordinate mapping - store in transformed frame coordinates
-            success = self.feature_tracker.extract_features(display_frame, self.current_frame)
+            # Store features in rotated frame coordinates (like existing motion tracking)
+            # This way we can reuse the existing display system
+            def coord_mapper(x, y):
+                # The transformed frame coordinates are already in the right space
+                # We just need to map them to rotated frame coordinates
+                # Since the transformed frame is what the user sees, we can use it directly
+                return (int(x), int(y))
+
+            success = self.feature_tracker.extract_features(display_frame, self.current_frame, coord_mapper)
             if success:
                 count = self.feature_tracker.get_feature_count(self.current_frame)
                 self.show_feedback_message(f"Extracted {count} features from visible area")
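
The first and last hunks pass an identity `coord_mapper` into `FeatureTracker.extract_features`, so extracted positions are stored in the same rotated/display-frame space that the rest of the tracking code already expects. The tracker itself is not part of this diff; the sketch below is only a minimal assumption of how `extract_features` might apply the mapper (here using OpenCV's `goodFeaturesToTrack` as a plausible detector), matching the `features[frame]['positions']` layout and the `get_feature_count` call visible in the hunks.

# Hypothetical sketch of FeatureTracker.extract_features applying coord_mapper;
# the real croppa implementation is not shown in this diff.
import cv2

class FeatureTracker:
    def __init__(self):
        self.features = {}  # frame_number -> {'positions': [(x, y), ...]}

    def extract_features(self, frame, frame_number, coord_mapper):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        corners = cv2.goodFeaturesToTrack(gray, maxCorners=100,
                                          qualityLevel=0.01, minDistance=10)
        if corners is None:
            return False
        # coord_mapper decides the storage space; with the identity mapper
        # passed in the diff, positions stay in rotated-frame coordinates.
        positions = [coord_mapper(x, y) for x, y in corners.reshape(-1, 2)]
        self.features[frame_number] = {'positions': positions}
        return True

    def get_feature_count(self, frame_number):
        return len(self.features.get(frame_number, {}).get('positions', []))

With the identity mapper, the hunks at 1518 and 2304 can return and draw stored positions directly through `_map_rotated_to_screen`, removing the transformed-to-original-to-rotated round trip that the old code attempted.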
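The second hunk swaps the inverse-mapping formulas between the 90° and 270° branches, i.e. each branch previously applied the inverse of the opposite rotation. A standalone round-trip check like the one below catches exactly this class of bug, assuming OpenCV's `cv2.rotate` conventions; the names here are illustrative, and croppa's own width/height and off-by-one conventions may differ slightly from the `- 1` terms used in the sketch.

# Sanity check: pair cv2.ROTATE_90_CLOCKWISE with its true inverse mapping.
import cv2
import numpy as np

W, H = 640, 480  # original frame size (width x height)

def forward_90_cw(x, y, h=H):
    # cv2.ROTATE_90_CLOCKWISE moves original (x, y) to (h - 1 - y, x)
    return (h - 1 - y, x)

def inverse_90_cw(xr, yr, h=H):
    # Reversing it: rotated (xr, yr) came from original (yr, h - 1 - xr)
    return (yr, h - 1 - xr)

frame = np.zeros((H, W, 3), dtype=np.uint8)
x, y = 123, 45
frame[y, x] = (0, 255, 0)

rotated = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
xr, yr = forward_90_cw(x, y)
assert tuple(rotated[yr, xr]) == (0, 255, 0)  # forward map lands on the pixel
assert inverse_90_cw(xr, yr) == (x, y)        # inverse map round-trips

If the inverse formula for the wrong rotation is used, the second assertion fails for any point off the frame's diagonal, which is consistent with features drifting only when the video is rotated 90° or 270°.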