diff --git a/croppa/main.py b/croppa/main.py
index e5272e5..8457d51 100644
--- a/croppa/main.py
+++ b/croppa/main.py
@@ -1244,11 +1244,15 @@ class VideoEditor:
         # Only extract if we don't already have features for this frame
         if self.current_frame not in self.feature_tracker.features:
-            # Extract features from the transformed frame (what user sees after crop/zoom/rotation)
-            display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)
-            if display_frame is not None:
-                # Pass coordinate mapper to map features back to original frame space
-                self.feature_tracker.extract_features(display_frame, self.current_frame, self._map_transformed_to_original_coords)
+            # Extract features from the cropped region of the original frame
+            if self.crop_rect:
+                x, y, w, h = self.crop_rect
+                cropped_frame = self.current_display_frame[y:y+h, x:x+w]
+                if cropped_frame.size > 0:
+                    self.feature_tracker.extract_features(cropped_frame, self.current_frame)
+            else:
+                # No crop - extract from full frame
+                self.feature_tracker.extract_features(self.current_display_frame, self.current_frame)
 
     def jump_to_previous_marker(self):
         """Jump to the previous tracking marker (frame with tracking points)."""
@@ -1493,7 +1497,7 @@ class VideoEditor:
 
     def _map_original_to_rotated_coords(self, x, y):
         """Map coordinates from original frame to rotated frame coordinates."""
-        # First apply rotation
+        # Apply rotation (same as existing system)
         if self.rotation_angle == 90:
             # 90° clockwise
             rot_x, rot_y = self.frame_height - y, x
@@ -1514,8 +1518,18 @@ class VideoEditor:
         if self.feature_tracker.tracking_enabled:
             feature_pos = self.feature_tracker.get_tracking_position(frame_number)
             if feature_pos:
-                # Features are stored in original frame coordinates, transform to rotated frame coordinates
-                rot_x, rot_y = self._map_original_to_rotated_coords(feature_pos[0], feature_pos[1])
+                # Features are in cropped coordinate space, need to map to original frame coordinates
+                if self.crop_rect:
+                    crop_x, crop_y, crop_w, crop_h = self.crop_rect
+                    # Map from cropped coordinates to original frame coordinates
+                    orig_x = feature_pos[0] + crop_x
+                    orig_y = feature_pos[1] + crop_y
+                else:
+                    # No crop - coordinates are already in original frame space
+                    orig_x, orig_y = feature_pos[0], feature_pos[1]
+
+                # Transform to rotated frame coordinates
+                rot_x, rot_y = self._map_original_to_rotated_coords(orig_x, orig_y)
                 return (rot_x, rot_y)
 
         # Fall back to manual tracking points
@@ -2300,7 +2314,19 @@ class VideoEditor:
                 self.current_frame in self.feature_tracker.features):
                 feature_positions = self.feature_tracker.features[self.current_frame]['positions']
                 for (fx, fy) in feature_positions:
-                    sx, sy = self._map_rotated_to_screen(fx, fy)
+                    # Features are in cropped coordinate space, need to map to original frame coordinates
+                    if self.crop_rect:
+                        crop_x, crop_y, crop_w, crop_h = self.crop_rect
+                        # Map from cropped coordinates to original frame coordinates
+                        orig_x = fx + crop_x
+                        orig_y = fy + crop_y
+                    else:
+                        # No crop - coordinates are already in original frame space
+                        orig_x, orig_y = fx, fy
+
+                    # Convert from original frame coordinates to rotated frame coordinates
+                    rot_x, rot_y = self._map_original_to_rotated_coords(orig_x, orig_y)
+                    sx, sy = self._map_rotated_to_screen(rot_x, rot_y)
                     cv2.circle(canvas, (sx, sy), 4, (0, 255, 0), -1)  # Green circles for features
                     cv2.circle(canvas, (sx, sy), 4, (255, 255, 255), 1)
@@ -3551,19 +3577,29 @@ class VideoEditor:
         elif key == ord("T"):
             # Extract features from current frame (Shift+T)
             if not self.is_image_mode and self.current_display_frame is not None:
-                # Extract features from the transformed frame (what user sees after crop/zoom/rotation)
-                display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)
-                if display_frame is not None:
-                    # Pass coordinate mapper to map features back to original frame space
-                    success = self.feature_tracker.extract_features(display_frame, self.current_frame, self._map_transformed_to_original_coords)
+                # Extract features from the cropped region of the original frame
+                # This gives us features only from the visible area with correct coordinates
+                if self.crop_rect:
+                    x, y, w, h = self.crop_rect
+                    cropped_frame = self.current_display_frame[y:y+h, x:x+w]
+                    if cropped_frame.size > 0:
+                        success = self.feature_tracker.extract_features(cropped_frame, self.current_frame)
+                        if success:
+                            count = self.feature_tracker.get_feature_count(self.current_frame)
+                            self.show_feedback_message(f"Extracted {count} features from crop area")
+                        else:
+                            self.show_feedback_message("Failed to extract features")
+                    else:
+                        self.show_feedback_message("Crop area too small")
+                else:
+                    # No crop - extract from full frame
+                    success = self.feature_tracker.extract_features(self.current_display_frame, self.current_frame)
                     if success:
                         count = self.feature_tracker.get_feature_count(self.current_frame)
-                        self.show_feedback_message(f"Extracted {count} features from transformed frame")
+                        self.show_feedback_message(f"Extracted {count} features from full frame")
                     else:
                         self.show_feedback_message("Failed to extract features")
-                    self.save_state()
-                else:
-                    self.show_feedback_message("No display frame available")
+                self.save_state()
             else:
                 self.show_feedback_message("No frame data available")
         elif key == ord("g"):
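For reference, a minimal standalone sketch (not part of the patch) of the coordinate mapping the change relies on: features extracted from the cropped region carry crop-local coordinates, so the crop offset is added back before the rotation mapping is applied. The helper name map_feature_to_rotated is hypothetical, and only the 90° clockwise branch appears in the hunks above; the 180°/270° branches are assumptions following the same convention.

def map_feature_to_rotated(feature_pos, crop_rect, rotation_angle, frame_width, frame_height):
    """Map a feature from crop space to rotated-frame coordinates (illustrative sketch)."""
    fx, fy = feature_pos

    # Step 1: crop space -> original frame space (offset by the crop origin).
    if crop_rect:
        crop_x, crop_y, _crop_w, _crop_h = crop_rect
        orig_x, orig_y = fx + crop_x, fy + crop_y
    else:
        orig_x, orig_y = fx, fy

    # Step 2: original frame -> rotated frame.
    if rotation_angle == 90:      # 90° clockwise, as in the hunk above
        return frame_height - orig_y, orig_x
    elif rotation_angle == 180:   # assumed convention
        return frame_width - orig_x, frame_height - orig_y
    elif rotation_angle == 270:   # assumed convention
        return orig_y, frame_width - orig_x
    return orig_x, orig_y

# Example: a feature at (10, 20) inside crop (100, 50, 640, 480) with no rotation
# maps back to (110, 70) in the original frame.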