diff --git a/croppa/main.py b/croppa/main.py
index 8457d51..762bbfc 100644
--- a/croppa/main.py
+++ b/croppa/main.py
@@ -1244,15 +1244,12 @@ class VideoEditor:
 
         # Only extract if we don't already have features for this frame
         if self.current_frame not in self.feature_tracker.features:
-            # Extract features from the cropped region of the original frame
-            if self.crop_rect:
-                x, y, w, h = self.crop_rect
-                cropped_frame = self.current_display_frame[y:y+h, x:x+w]
-                if cropped_frame.size > 0:
-                    self.feature_tracker.extract_features(cropped_frame, self.current_frame)
-            else:
-                # No crop - extract from full frame
-                self.feature_tracker.extract_features(self.current_display_frame, self.current_frame)
+            # Extract features from the transformed frame (what user sees)
+            # This handles all transformations (crop, zoom, rotation) correctly
+            display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)
+            if display_frame is not None:
+                # Extract features without coordinate mapping - store in transformed frame coordinates
+                self.feature_tracker.extract_features(display_frame, self.current_frame)
 
     def jump_to_previous_marker(self):
         """Jump to the previous tracking marker (frame with tracking points)."""
@@ -1518,15 +1515,11 @@ class VideoEditor:
         if self.feature_tracker.tracking_enabled:
             feature_pos = self.feature_tracker.get_tracking_position(frame_number)
             if feature_pos:
-                # Features are in cropped coordinate space, need to map to original frame coordinates
-                if self.crop_rect:
-                    crop_x, crop_y, crop_w, crop_h = self.crop_rect
-                    # Map from cropped coordinates to original frame coordinates
-                    orig_x = feature_pos[0] + crop_x
-                    orig_y = feature_pos[1] + crop_y
-                else:
-                    # No crop - coordinates are already in original frame space
-                    orig_x, orig_y = feature_pos[0], feature_pos[1]
+                # Features are stored in transformed frame coordinates
+                # We need to map them to rotated frame coordinates for the tracking system
+                # The transformed frame coordinates are already in the correct space for display
+                # But we need to map them back to the original frame space first
+                orig_x, orig_y = self._map_transformed_to_original_coords(feature_pos[0], feature_pos[1])
 
                 # Transform to rotated frame coordinates
                 rot_x, rot_y = self._map_original_to_rotated_coords(orig_x, orig_y)
@@ -2314,17 +2307,10 @@ class VideoEditor:
                 self.current_frame in self.feature_tracker.features):
             feature_positions = self.feature_tracker.features[self.current_frame]['positions']
             for (fx, fy) in feature_positions:
-                # Features are in cropped coordinate space, need to map to original frame coordinates
-                if self.crop_rect:
-                    crop_x, crop_y, crop_w, crop_h = self.crop_rect
-                    # Map from cropped coordinates to original frame coordinates
-                    orig_x = fx + crop_x
-                    orig_y = fy + crop_y
-                else:
-                    # No crop - coordinates are already in original frame space
-                    orig_x, orig_y = fx, fy
-
-                # Convert from original frame coordinates to rotated frame coordinates
+                # Features are stored in transformed frame coordinates
+                # We need to map them to screen coordinates for display
+                # The transformed frame coordinates need to be mapped to rotated frame coordinates first
+                orig_x, orig_y = self._map_transformed_to_original_coords(fx, fy)
                 rot_x, rot_y = self._map_original_to_rotated_coords(orig_x, orig_y)
                 sx, sy = self._map_rotated_to_screen(rot_x, rot_y)
                 cv2.circle(canvas, (sx, sy), 4, (0, 255, 0), -1)  # Green circles for features
@@ -3577,28 +3563,19 @@ class VideoEditor:
             elif key == ord("T"):
                 # Extract features from current frame (Shift+T)
                 if not self.is_image_mode and self.current_display_frame is not None:
-                    # Extract features from the cropped region of the original frame
-                    # This gives us features only from the visible area with correct coordinates
-                    if self.crop_rect:
-                        x, y, w, h = self.crop_rect
-                        cropped_frame = self.current_display_frame[y:y+h, x:x+w]
-                        if cropped_frame.size > 0:
-                            success = self.feature_tracker.extract_features(cropped_frame, self.current_frame)
-                            if success:
-                                count = self.feature_tracker.get_feature_count(self.current_frame)
-                                self.show_feedback_message(f"Extracted {count} features from crop area")
-                            else:
-                                self.show_feedback_message("Failed to extract features")
-                        else:
-                            self.show_feedback_message("Crop area too small")
-                    else:
-                        # No crop - extract from full frame
-                        success = self.feature_tracker.extract_features(self.current_display_frame, self.current_frame)
+                    # Extract features from the transformed frame (what user sees)
+                    # This handles all transformations (crop, zoom, rotation) correctly
+                    display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)
+                    if display_frame is not None:
+                        # Extract features without coordinate mapping - store in transformed frame coordinates
+                        success = self.feature_tracker.extract_features(display_frame, self.current_frame)
                         if success:
                             count = self.feature_tracker.get_feature_count(self.current_frame)
-                            self.show_feedback_message(f"Extracted {count} features from full frame")
+                            self.show_feedback_message(f"Extracted {count} features from visible area")
                         else:
                             self.show_feedback_message("Failed to extract features")
+                    else:
+                        self.show_feedback_message("No display frame available")
                     self.save_state()
                 else:
                     self.show_feedback_message("No frame data available")
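
Note on the coordinate mapping this patch relies on: features are now stored in transformed-frame (displayed) coordinates, and `_map_transformed_to_original_coords` is expected to undo the display transform to get back to original-frame space. That helper is not part of this diff, so the sketch below only illustrates what such an inverse mapping typically looks like, assuming a crop → zoom → rotate-by-90°-steps pipeline; the function name, signature, and transform order here are assumptions for illustration, not croppa's actual implementation.

```python
# Illustrative sketch only -- NOT the croppa implementation.
# Assumes the display pipeline is: crop -> zoom -> rotate (90-degree steps),
# so the inverse mapping is: un-rotate -> un-zoom -> add the crop offset back.

def map_transformed_to_original(tx, ty, crop_rect, zoom, rotation, transformed_size):
    """Map a point from transformed (displayed) frame coordinates back to the
    original frame, under the assumed crop -> zoom -> rotate pipeline.

    crop_rect:        (x, y, w, h) in original-frame pixels, or None
    zoom:             uniform scale factor applied after cropping
    rotation:         0, 90, 180 or 270 (clockwise degrees)
    transformed_size: (width, height) of the frame the point was picked in
    """
    tw, th = transformed_size

    # 1. Undo the rotation (inverse of a clockwise 90-degree-step rotation).
    if rotation == 90:
        ux, uy = ty, (tw - 1) - tx
    elif rotation == 180:
        ux, uy = (tw - 1) - tx, (th - 1) - ty
    elif rotation == 270:
        ux, uy = (th - 1) - ty, tx
    else:
        ux, uy = tx, ty

    # 2. Undo the zoom.
    ux /= zoom
    uy /= zoom

    # 3. Undo the crop by adding the crop origin back in.
    if crop_rect is not None:
        cx, cy, _, _ = crop_rect
        ux += cx
        uy += cy

    return ux, uy


if __name__ == "__main__":
    # A point picked at (40, 25) in a 2x-zoomed, 90-degree-rotated view of a
    # crop starting at (100, 50) maps back into original-frame coordinates.
    print(map_transformed_to_original(40, 25, (100, 50, 320, 240), 2.0, 90, (480, 640)))
```

The same inverse, applied in the opposite order, would take original-frame coordinates back to the displayed frame, which is the direction the on-screen feature drawing in the third hunk needs before `_map_original_to_rotated_coords` and `_map_rotated_to_screen` take over.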