diff --git a/croppa/main.py b/croppa/main.py
index 75154e1..7026ccd 100644
--- a/croppa/main.py
+++ b/croppa/main.py
@@ -1613,21 +1613,25 @@ class VideoEditor:
         return
 
     def _get_previous_tracking_point(self):
-        """Get the tracking point from the previous frame that has tracking points."""
+        """Get the tracking point from the previous frame that has tracking points (like jump_to_previous_marker)."""
         if self.is_image_mode or not self.tracking_points:
             return None
-
+
         tracking_frames = sorted(k for k, v in self.tracking_points.items() if v and 0 <= k < self.total_frames)
         if not tracking_frames:
            return None
-
-        # Find the last frame with tracking points that's before current frame
-        prev_frames = [f for f in tracking_frames if f < self.current_frame]
-        if not prev_frames:
-            return None
-
-        prev_frame = max(prev_frames)
-        return prev_frame, self.tracking_points[prev_frame]
+
+        current = self.current_frame
+        candidates = [f for f in tracking_frames if f < current]
+
+        if candidates:
+            # Use the most recent frame before current (like jump_to_previous_marker)
+            prev_frame = candidates[-1]
+            return prev_frame, self.tracking_points[prev_frame]
+        else:
+            # If no previous frames, use the first frame with tracking points
+            prev_frame = tracking_frames[0]
+            return prev_frame, self.tracking_points[prev_frame]
 
     def _get_next_tracking_point(self):
         """Get the tracking point from the next frame that has tracking points."""
@@ -3778,20 +3782,20 @@ class VideoEditor:
                 self.selective_feature_extraction_start = (x, y)
                 self.selective_feature_extraction_rect = None
                 print(f"DEBUG: Started selective feature extraction at ({x}, {y})")
-
+
         # Handle Shift+Right-click+drag for selective feature extraction
         if event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_SHIFTKEY) and self.selective_feature_extraction_start:
             if not self.is_image_mode:
                 start_x, start_y = self.selective_feature_extraction_start
                 self.selective_feature_extraction_rect = (min(start_x, x), min(start_y, y), abs(x - start_x), abs(y - start_y))
-
+
         # Handle Shift+Right-click release for selective feature extraction
         if event == cv2.EVENT_RBUTTONUP and (flags & cv2.EVENT_FLAG_SHIFTKEY) and self.selective_feature_extraction_start:
             if not self.is_image_mode and self.selective_feature_extraction_rect:
                 self._extract_features_from_region(self.selective_feature_extraction_rect)
                 self.selective_feature_extraction_start = None
                 self.selective_feature_extraction_rect = None
-
+
         # Handle Ctrl+Right-click+drag for selective feature deletion
         if event == cv2.EVENT_RBUTTONDOWN and (flags & cv2.EVENT_FLAG_CTRLKEY):
             if not self.is_image_mode and self.feature_tracker.tracking_enabled:
@@ -3846,6 +3850,7 @@ class VideoEditor:
                 self.toggle_interesting_region_selection()
                 self.display_needs_update = True
 
+        # Handle right-click for tracking points (no modifiers)
         if event == cv2.EVENT_RBUTTONDOWN and not (flags & (cv2.EVENT_FLAG_CTRLKEY | cv2.EVENT_FLAG_SHIFTKEY)):
             if not self.is_image_mode:
@@ -4967,6 +4972,14 @@ class VideoEditor:
             self.frame_difference_threshold = min(100.0, self.frame_difference_threshold + 1.0)
             print(f"Frame difference threshold: {self.frame_difference_threshold:.1f}%")
             self.show_feedback_message(f"Threshold: {self.frame_difference_threshold:.1f}%")
+        elif key == ord(")"):  # Shift+9 - Decrease frame difference threshold by 10 percentage points
+            self.frame_difference_threshold = max(1.0, self.frame_difference_threshold - 10.0)
+            print(f"Frame difference threshold: {self.frame_difference_threshold:.1f}% (-10pp)")
+            self.show_feedback_message(f"Threshold: {self.frame_difference_threshold:.1f}% (-10pp)")
+        elif key == ord("="):  # Shift+0 - Increase frame difference threshold by 10 percentage points
+            self.frame_difference_threshold = min(100.0, self.frame_difference_threshold + 10.0)
+            print(f"Frame difference threshold: {self.frame_difference_threshold:.1f}% (+10pp)")
+            self.show_feedback_message(f"Threshold: {self.frame_difference_threshold:.1f}% (+10pp)")
         elif key == ord("7"):  # 7 - Decrease frame difference gap
             self.frame_difference_gap = max(1, self.frame_difference_gap - 1)
             print(f"Frame difference gap: {self.frame_difference_gap} frames")
@@ -4975,6 +4988,14 @@ class VideoEditor:
             self.frame_difference_gap = self.frame_difference_gap + 1
             print(f"Frame difference gap: {self.frame_difference_gap} frames")
             self.show_feedback_message(f"Gap: {self.frame_difference_gap} frames")
+        elif key == ord("/"):  # Shift+7 - Decrease frame difference gap by 60 frames
+            self.frame_difference_gap = max(1, self.frame_difference_gap - 60)
+            print(f"Frame difference gap: {self.frame_difference_gap} frames (-60)")
+            self.show_feedback_message(f"Gap: {self.frame_difference_gap} frames (-60)")
+        elif key == ord("("):  # Shift+8 - Increase frame difference gap by 60 frames
+            self.frame_difference_gap = self.frame_difference_gap + 60
+            print(f"Frame difference gap: {self.frame_difference_gap} frames (+60)")
+            self.show_feedback_message(f"Gap: {self.frame_difference_gap} frames (+60)")
         elif key == ord("N"):
             if len(self.video_files) > 1:
                 self.previous_video()
@@ -5090,7 +5111,19 @@ class VideoEditor:
             self.feature_tracker.clear_features()
             self.show_feedback_message("Feature tracking data cleared")
             self.save_state()
-        elif key == ord("H"):
+        elif key == ord("Z"):
+            # Switch detector type (SIFT -> ORB -> SIFT) - SURF not available
+            current_type = self.feature_tracker.detector_type
+            if current_type == 'SIFT':
+                new_type = 'ORB'
+            elif current_type == 'ORB':
+                new_type = 'SIFT'
+            else:
+                new_type = 'SIFT'
+            self.feature_tracker.set_detector_type(new_type)
+            self.show_feedback_message(f"Detector switched to {new_type}")
+            self.save_state()
+        elif key == ord("z"):
             # Switch detector type (SIFT -> ORB -> SIFT) - SURF not available
             current_type = self.feature_tracker.detector_type
             if current_type == 'SIFT':
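Note on the revised _get_previous_tracking_point lookup: below is a minimal standalone sketch of the selection order the hunk introduces (nearest frame with points before the current frame, otherwise fall back to the first frame that has any). The helper name previous_tracking_frame and the sample data are hypothetical and exist only for illustration; only the frame-index-to-points dict shape mirrors the patched method.

# Hypothetical, self-contained sketch of the lookup order added in the diff.
# tracking_points maps frame index -> list of points; empty lists are ignored.

def previous_tracking_frame(tracking_points, current_frame, total_frames):
    frames = sorted(k for k, v in tracking_points.items()
                    if v and 0 <= k < total_frames)
    if not frames:
        return None
    earlier = [f for f in frames if f < current_frame]
    # frames is sorted ascending, so earlier[-1] is the closest frame before current;
    # with no earlier frame, wrap around to the first frame that has points.
    chosen = earlier[-1] if earlier else frames[0]
    return chosen, tracking_points[chosen]

if __name__ == "__main__":
    points = {10: [(5, 5)], 50: [(7, 9)], 80: []}    # frame 80 has no points, so it is skipped
    print(previous_tracking_frame(points, 60, 100))  # (50, [(7, 9)])  - nearest earlier frame
    print(previous_tracking_frame(points, 5, 100))   # (10, [(5, 5)])  - falls back to first frame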