Refactor feature extraction to utilize transformed frames in VideoEditor
Features are now extracted from the transformed frame the user actually sees, so crop, zoom, and rotation are all accounted for at extraction time. Stored feature positions live in transformed-frame coordinates and are mapped back through a single helper instead of per-case manual adjustments, which simplifies the coordinate mapping logic while keeping tracking accurate. Feedback messages now refer to the visible area.
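The diff below relies on a transformed-to-original mapping (_map_transformed_to_original_coords) whose implementation is not part of this change set. The following is a minimal standalone sketch of what such an inverse could look like, assuming the display frame is built by crop, then uniform zoom, then rotation in 90-degree steps; the function name, its parameters, and those assumptions are illustrative only, not the editor's actual API.

# Illustrative sketch only, not VideoEditor's helper. Assumes the display
# frame is produced by crop -> uniform zoom -> rotation in 90-degree steps.
def map_transformed_to_original(tx, ty, crop_rect=None, zoom=1.0,
                                rotation=0, pre_rotation_size=None):
    """Map a point from transformed (display) coordinates back to the
    original frame. pre_rotation_size is the (width, height) of the frame
    after crop and zoom but before rotation."""
    x, y = float(tx), float(ty)

    # Undo the rotation (pixel-index convention, clockwise angles).
    if rotation and pre_rotation_size is not None:
        w, h = pre_rotation_size
        if rotation == 90:
            x, y = y, h - 1 - x
        elif rotation == 180:
            x, y = w - 1 - x, h - 1 - y
        elif rotation == 270:
            x, y = w - 1 - y, x

    # Undo the zoom (assumed to be a uniform scale from the top-left corner).
    if zoom and zoom != 1.0:
        x, y = x / zoom, y / zoom

    # Undo the crop by adding its offset back in.
    if crop_rect is not None:
        crop_x, crop_y, _, _ = crop_rect
        x, y = x + crop_x, y + crop_y

    return x, y

# Example: a feature at (10, 20) in a display frame made from a crop at
# (100, 50) with 2x zoom and no rotation maps back to (105.0, 60.0).
print(map_transformed_to_original(10, 20, crop_rect=(100, 50, 640, 360), zoom=2.0))

Under that scheme, features are stored exactly as extracted from the display frame, and the existing helpers (_map_original_to_rotated_coords, _map_rotated_to_screen) are chained on top of the mapped result only where original-frame or screen coordinates are needed, as the hunks below show.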
@@ -1244,15 +1244,12 @@ class VideoEditor:
         # Only extract if we don't already have features for this frame
         if self.current_frame not in self.feature_tracker.features:
-            # Extract features from the cropped region of the original frame
-            if self.crop_rect:
-                x, y, w, h = self.crop_rect
-                cropped_frame = self.current_display_frame[y:y+h, x:x+w]
-                if cropped_frame.size > 0:
-                    self.feature_tracker.extract_features(cropped_frame, self.current_frame)
-            else:
-                # No crop - extract from full frame
-                self.feature_tracker.extract_features(self.current_display_frame, self.current_frame)
+            # Extract features from the transformed frame (what user sees)
+            # This handles all transformations (crop, zoom, rotation) correctly
+            display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)
+            if display_frame is not None:
+                # Extract features without coordinate mapping - store in transformed frame coordinates
+                self.feature_tracker.extract_features(display_frame, self.current_frame)

     def jump_to_previous_marker(self):
         """Jump to the previous tracking marker (frame with tracking points)."""
@@ -1518,15 +1515,11 @@ class VideoEditor:
         if self.feature_tracker.tracking_enabled:
             feature_pos = self.feature_tracker.get_tracking_position(frame_number)
             if feature_pos:
-                # Features are in cropped coordinate space, need to map to original frame coordinates
-                if self.crop_rect:
-                    crop_x, crop_y, crop_w, crop_h = self.crop_rect
-                    # Map from cropped coordinates to original frame coordinates
-                    orig_x = feature_pos[0] + crop_x
-                    orig_y = feature_pos[1] + crop_y
-                else:
-                    # No crop - coordinates are already in original frame space
-                    orig_x, orig_y = feature_pos[0], feature_pos[1]
+                # Features are stored in transformed frame coordinates
+                # We need to map them to rotated frame coordinates for the tracking system
+                # The transformed frame coordinates are already in the correct space for display
+                # But we need to map them back to the original frame space first
+                orig_x, orig_y = self._map_transformed_to_original_coords(feature_pos[0], feature_pos[1])

                 # Transform to rotated frame coordinates
                 rot_x, rot_y = self._map_original_to_rotated_coords(orig_x, orig_y)
@@ -2314,17 +2307,10 @@ class VideoEditor:
                 self.current_frame in self.feature_tracker.features):
             feature_positions = self.feature_tracker.features[self.current_frame]['positions']
             for (fx, fy) in feature_positions:
-                # Features are in cropped coordinate space, need to map to original frame coordinates
-                if self.crop_rect:
-                    crop_x, crop_y, crop_w, crop_h = self.crop_rect
-                    # Map from cropped coordinates to original frame coordinates
-                    orig_x = fx + crop_x
-                    orig_y = fy + crop_y
-                else:
-                    # No crop - coordinates are already in original frame space
-                    orig_x, orig_y = fx, fy
-
-                # Convert from original frame coordinates to rotated frame coordinates
+                # Features are stored in transformed frame coordinates
+                # We need to map them to screen coordinates for display
+                # The transformed frame coordinates need to be mapped to rotated frame coordinates first
+                orig_x, orig_y = self._map_transformed_to_original_coords(fx, fy)
                 rot_x, rot_y = self._map_original_to_rotated_coords(orig_x, orig_y)
                 sx, sy = self._map_rotated_to_screen(rot_x, rot_y)
                 cv2.circle(canvas, (sx, sy), 4, (0, 255, 0), -1)  # Green circles for features
@@ -3577,28 +3563,19 @@ class VideoEditor:
             elif key == ord("T"):
                 # Extract features from current frame (Shift+T)
                 if not self.is_image_mode and self.current_display_frame is not None:
-                    # Extract features from the cropped region of the original frame
-                    # This gives us features only from the visible area with correct coordinates
-                    if self.crop_rect:
-                        x, y, w, h = self.crop_rect
-                        cropped_frame = self.current_display_frame[y:y+h, x:x+w]
-                        if cropped_frame.size > 0:
-                            success = self.feature_tracker.extract_features(cropped_frame, self.current_frame)
-                            if success:
-                                count = self.feature_tracker.get_feature_count(self.current_frame)
-                                self.show_feedback_message(f"Extracted {count} features from crop area")
-                            else:
-                                self.show_feedback_message("Failed to extract features")
-                        else:
-                            self.show_feedback_message("Crop area too small")
-                    else:
-                        # No crop - extract from full frame
-                        success = self.feature_tracker.extract_features(self.current_display_frame, self.current_frame)
-                        if success:
-                            count = self.feature_tracker.get_feature_count(self.current_frame)
-                            self.show_feedback_message(f"Extracted {count} features from full frame")
-                        else:
-                            self.show_feedback_message("Failed to extract features")
+                    # Extract features from the transformed frame (what user sees)
+                    # This handles all transformations (crop, zoom, rotation) correctly
+                    display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)
+                    if display_frame is not None:
+                        # Extract features without coordinate mapping - store in transformed frame coordinates
+                        success = self.feature_tracker.extract_features(display_frame, self.current_frame)
+                        if success:
+                            count = self.feature_tracker.get_feature_count(self.current_frame)
+                            self.show_feedback_message(f"Extracted {count} features from visible area")
+                        else:
+                            self.show_feedback_message("Failed to extract features")
+                    else:
+                        self.show_feedback_message("No display frame available")
                     self.save_state()
                 else:
                     self.show_feedback_message("No frame data available")