Add optical flow tracking for feature tracking in VideoEditor

This commit introduces a new method for tracking features using Lucas-Kanade optical flow, enhancing the feature tracking capabilities. It includes logic to toggle optical flow tracking, store previous frames for flow calculations, and update feature positions based on optical flow results. Debug messages have been added to provide insights during the tracking process, improving user experience and functionality.
This commit is contained in:
2025-09-26 14:11:05 +02:00
parent 463228baf5
commit d8b4439382

View File

@@ -165,6 +165,39 @@ class FeatureTracker:
print(f"Error extracting features from frame {frame_number}: {e}") print(f"Error extracting features from frame {frame_number}: {e}")
return False return False
def track_features_optical_flow(self, prev_frame, curr_frame, prev_points):
    """Track feature points between two frames with Lucas-Kanade optical flow.

    Args:
        prev_frame: earlier frame (BGR or already-grayscale ndarray).
        curr_frame: later frame (BGR or already-grayscale ndarray).
        prev_points: float32 ndarray of shape (N, 1, 2) holding point
            positions in prev_frame coordinates.

    Returns:
        Tuple (good_new, good_old, status) where good_new/good_old contain
        only the successfully tracked points, or (None, None, None) if the
        flow computation failed.
    """
    try:
        # LK flow operates on single-channel images; convert only if needed.
        gray_prev = (cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
                     if len(prev_frame.shape) == 3 else prev_frame)
        gray_curr = (cv2.cvtColor(curr_frame, cv2.COLOR_BGR2GRAY)
                     if len(curr_frame.shape) == 3 else curr_frame)

        # Pyramidal LK settings: 15x15 search window, 2 pyramid levels,
        # iterate until 10 steps or epsilon 0.03, whichever comes first.
        flow_params = {
            "winSize": (15, 15),
            "maxLevel": 2,
            "criteria": (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03),
        }

        tracked, status, _ = cv2.calcOpticalFlowPyrLK(
            gray_prev, gray_curr, prev_points, None, **flow_params
        )

        # Keep only the points whose track succeeded (status flag == 1).
        keep = status == 1
        return tracked[keep], prev_points[keep], status
    except Exception as e:
        print(f"Error in optical flow tracking: {e}")
        return None, None, None
def get_tracking_position(self, frame_number: int) -> Optional[Tuple[float, float]]: def get_tracking_position(self, frame_number: int) -> Optional[Tuple[float, float]]:
@@ -834,6 +867,10 @@ class VideoEditor:
self.selective_feature_deletion_start = None self.selective_feature_deletion_start = None
self.selective_feature_deletion_rect = None self.selective_feature_deletion_rect = None
# Optical flow tracking
self.optical_flow_enabled = False
self.previous_frame_for_flow = None
# Project view mode # Project view mode
self.project_view_mode = False self.project_view_mode = False
self.project_view = None self.project_view = None
@@ -1336,6 +1373,19 @@ class VideoEditor:
else: else:
print(f"DEBUG: Frame {self.current_frame} already has features, skipping") print(f"DEBUG: Frame {self.current_frame} already has features, skipping")
# Optical flow tracking - track features from previous frame
if (not self.is_image_mode and
self.optical_flow_enabled and
self.feature_tracker.tracking_enabled and
self.previous_frame_for_flow is not None and
self.current_display_frame is not None):
self._track_with_optical_flow()
# Store current frame for next optical flow iteration
if not self.is_image_mode and self.current_display_frame is not None:
self.previous_frame_for_flow = self.current_display_frame.copy()
def jump_to_previous_marker(self): def jump_to_previous_marker(self):
"""Jump to the previous tracking marker (frame with tracking points).""" """Jump to the previous tracking marker (frame with tracking points)."""
if self.is_image_mode: if self.is_image_mode:
@@ -1786,6 +1836,84 @@ class VideoEditor:
else: else:
self.show_feedback_message("No features found in selected region") self.show_feedback_message("No features found in selected region")
def _track_with_optical_flow(self):
    """Propagate the previous frame's features to the current frame via optical flow.

    Reads the feature positions stored for frame ``current_frame - 1``,
    maps them from rotated-frame coordinates into display-frame
    coordinates, runs Lucas-Kanade tracking between the previous and
    current display frames, maps the surviving points back into
    rotated-frame coordinates, and stores them as this frame's feature
    set. Silently returns when there is nothing to track; any unexpected
    error is caught and reported rather than propagated.
    """
    try:
        # Features for the previous frame are required as the flow seed.
        prev_frame_number = self.current_frame - 1
        if prev_frame_number not in self.feature_tracker.features:
            return
        prev_features = self.feature_tracker.features[prev_frame_number]
        prev_positions = np.array(
            prev_features['positions'], dtype=np.float32
        ).reshape(-1, 1, 2)
        if len(prev_positions) == 0:
            return

        # Bring both frames into display space (crop/zoom/rotation applied).
        prev_display_frame = self.apply_crop_zoom_and_rotation(self.previous_frame_for_flow)
        curr_display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)
        if prev_display_frame is None or curr_display_frame is None:
            return

        # The display frame is centered in the window; these offsets are
        # loop-invariant, so compute them once instead of per point.
        # Each frame's own dimensions are used in case the two differ.
        available_height = self.window_height - (
            0 if self.is_image_mode else self.TIMELINE_HEIGHT
        )
        prev_h, prev_w = prev_display_frame.shape[:2]
        prev_start_x = (self.window_width - prev_w) // 2
        prev_start_y = (available_height - prev_h) // 2
        curr_h, curr_w = curr_display_frame.shape[:2]
        curr_start_x = (self.window_width - curr_w) // 2
        curr_start_y = (available_height - curr_h) // 2

        # Map stored positions (rotated-frame coords) into display-frame
        # coords, dropping points that fall outside the visible frame.
        display_prev_positions = []
        for px, py in prev_positions.reshape(-1, 2):
            # rotated frame -> screen -> display frame
            sx, sy = self._map_rotated_to_screen(px, py)
            display_x = sx - prev_start_x
            display_y = sy - prev_start_y
            if 0 <= display_x < prev_w and 0 <= display_y < prev_h:
                display_prev_positions.append([display_x, display_y])
        if len(display_prev_positions) == 0:
            return
        display_prev_positions = np.array(
            display_prev_positions, dtype=np.float32
        ).reshape(-1, 1, 2)

        # Run Lucas-Kanade tracking between the two display frames.
        new_points, good_old, status = self.feature_tracker.track_features_optical_flow(
            prev_display_frame, curr_display_frame, display_prev_positions
        )

        if new_points is not None and len(new_points) > 0:
            # Map tracked points back: display frame -> screen -> rotated frame.
            mapped_positions = []
            for point in new_points.reshape(-1, 2):
                screen_x = point[0] + curr_start_x
                screen_y = point[1] + curr_start_y
                rx, ry = self._map_screen_to_rotated(screen_x, screen_y)
                mapped_positions.append((int(rx), int(ry)))
            # Optical flow produces no keypoints/descriptors, only positions.
            self.feature_tracker.features[self.current_frame] = {
                'keypoints': [],
                'descriptors': np.array([]),
                'positions': mapped_positions,
            }
            print(f"Optical flow tracked {len(mapped_positions)} features to frame {self.current_frame}")
    except Exception as e:
        print(f"Error in optical flow tracking: {e}")
def apply_rotation(self, frame): def apply_rotation(self, frame):
"""Apply rotation to frame""" """Apply rotation to frame"""
@@ -2341,6 +2469,8 @@ class VideoEditor:
if self.feature_tracker.tracking_enabled and self.current_frame in self.feature_tracker.features: if self.feature_tracker.tracking_enabled and self.current_frame in self.feature_tracker.features:
feature_count = self.feature_tracker.get_feature_count(self.current_frame) feature_count = self.feature_tracker.get_feature_count(self.current_frame)
feature_text = f" | Features: {feature_count} pts" feature_text = f" | Features: {feature_count} pts"
if self.optical_flow_enabled:
feature_text += " (OPTICAL FLOW)"
autorepeat_text = ( autorepeat_text = (
f" | Loop: ON" if self.looping_between_markers else "" f" | Loop: ON" if self.looping_between_markers else ""
) )
@@ -3490,6 +3620,7 @@ class VideoEditor:
print(" g: Toggle auto feature extraction") print(" g: Toggle auto feature extraction")
print(" G: Clear all feature data") print(" G: Clear all feature data")
print(" H: Switch detector (SIFT/ORB)") print(" H: Switch detector (SIFT/ORB)")
print(" o: Toggle optical flow tracking")
print(" Shift+Right-click+drag: Extract features from selected region") print(" Shift+Right-click+drag: Extract features from selected region")
print(" Ctrl+Right-click+drag: Delete features from selected region") print(" Ctrl+Right-click+drag: Delete features from selected region")
if len(self.video_files) > 1: if len(self.video_files) > 1:
@@ -3816,6 +3947,12 @@ class VideoEditor:
self.feature_tracker.set_detector_type(new_type) self.feature_tracker.set_detector_type(new_type)
self.show_feedback_message(f"Detector switched to {new_type}") self.show_feedback_message(f"Detector switched to {new_type}")
self.save_state() self.save_state()
elif key == ord("o"):
# Toggle optical flow tracking
self.optical_flow_enabled = not self.optical_flow_enabled
print(f"DEBUG: Optical flow toggled to {self.optical_flow_enabled}")
self.show_feedback_message(f"Optical flow {'ON' if self.optical_flow_enabled else 'OFF'}")
self.save_state()
elif key == ord("t"): elif key == ord("t"):
# Marker looping only for videos # Marker looping only for videos
if not self.is_image_mode: if not self.is_image_mode: