Refactor feature extraction to support cropping in VideoEditor
This commit updates the feature extraction process to handle cropped regions of the original frame, allowing more accurate feature tracking based on the visible area. It adds logic to extract features from either the cropped region or the full frame, and maps the resulting coordinates back to original-frame space wherever the features are used for tracking and display. Feedback messages now report whether extraction succeeded, which area was used, and why it failed.
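The key idea in this change is a single coordinate mapping: when features are extracted from a cropped frame, their positions are relative to the crop rectangle, so consumers must add the crop origin back before treating them as original-frame coordinates. A minimal standalone sketch of that mapping, using the same (x, y, w, h) crop_rect convention as the diff below (the helper name itself is illustrative, not part of the commit):

# Illustrative sketch, not part of the commit. Assumes crop_rect = (x, y, w, h)
# in original-frame pixels, matching the convention used in the diff below.
def map_cropped_to_original(point, crop_rect):
    """Map a feature position from cropped-frame space to original-frame space."""
    fx, fy = point
    if crop_rect:
        crop_x, crop_y, _crop_w, _crop_h = crop_rect
        # Features were detected inside the crop, so offset by the crop origin.
        return (fx + crop_x, fy + crop_y)
    # No crop: coordinates are already in original-frame space.
    return (fx, fy)

# A feature found at (10, 20) inside a crop whose top-left corner is (100, 50)
# sits at (110, 70) in the original frame.
assert map_cropped_to_original((10, 20), (100, 50, 640, 360)) == (110, 70)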
@@ -1244,11 +1244,15 @@ class VideoEditor:
                 # Only extract if we don't already have features for this frame
                 if self.current_frame not in self.feature_tracker.features:
-                    # Extract features from the transformed frame (what user sees after crop/zoom/rotation)
-                    display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)
-                    if display_frame is not None:
-                        # Pass coordinate mapper to map features back to original frame space
-                        self.feature_tracker.extract_features(display_frame, self.current_frame, self._map_transformed_to_original_coords)
+                    # Extract features from the cropped region of the original frame
+                    if self.crop_rect:
+                        x, y, w, h = self.crop_rect
+                        cropped_frame = self.current_display_frame[y:y+h, x:x+w]
+                        if cropped_frame.size > 0:
+                            self.feature_tracker.extract_features(cropped_frame, self.current_frame)
+                    else:
+                        # No crop - extract from full frame
+                        self.feature_tracker.extract_features(self.current_display_frame, self.current_frame)

     def jump_to_previous_marker(self):
         """Jump to the previous tracking marker (frame with tracking points)."""
@@ -1493,7 +1497,7 @@ class VideoEditor:

     def _map_original_to_rotated_coords(self, x, y):
         """Map coordinates from original frame to rotated frame coordinates."""
-        # First apply rotation
+        # Apply rotation (same as existing system)
         if self.rotation_angle == 90:
             # 90° clockwise
             rot_x, rot_y = self.frame_height - y, x
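As a quick sanity check on the 90° clockwise branch above, here is a standalone version of the same formula (illustrative only; the editor keeps frame_height as an attribute, and its convention omits the usual -1 pixel-index adjustment):

# Illustrative check, not part of the commit.
def rotate90_cw(x, y, frame_height):
    # 90° clockwise: (x, y) in the original frame maps to
    # (frame_height - y, x) in the rotated frame (editor's convention;
    # a strict pixel-index mapping would use frame_height - 1 - y).
    return (frame_height - y, x)

# For a 1080-pixel-tall frame, a point near the top-left, (10, 20),
# lands near the top-right of the rotated frame.
print(rotate90_cw(10, 20, 1080))  # (1060, 10)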
@@ -1514,8 +1518,18 @@ class VideoEditor:
         if self.feature_tracker.tracking_enabled:
             feature_pos = self.feature_tracker.get_tracking_position(frame_number)
             if feature_pos:
-                # Features are stored in original frame coordinates, transform to rotated frame coordinates
-                rot_x, rot_y = self._map_original_to_rotated_coords(feature_pos[0], feature_pos[1])
+                # Features are in cropped coordinate space, need to map to original frame coordinates
+                if self.crop_rect:
+                    crop_x, crop_y, crop_w, crop_h = self.crop_rect
+                    # Map from cropped coordinates to original frame coordinates
+                    orig_x = feature_pos[0] + crop_x
+                    orig_y = feature_pos[1] + crop_y
+                else:
+                    # No crop - coordinates are already in original frame space
+                    orig_x, orig_y = feature_pos[0], feature_pos[1]
+
+                # Transform to rotated frame coordinates
+                rot_x, rot_y = self._map_original_to_rotated_coords(orig_x, orig_y)
                 return (rot_x, rot_y)

         # Fall back to manual tracking points
@@ -2300,7 +2314,19 @@ class VideoEditor:
                 self.current_frame in self.feature_tracker.features):
             feature_positions = self.feature_tracker.features[self.current_frame]['positions']
             for (fx, fy) in feature_positions:
-                sx, sy = self._map_rotated_to_screen(fx, fy)
+                # Features are in cropped coordinate space, need to map to original frame coordinates
+                if self.crop_rect:
+                    crop_x, crop_y, crop_w, crop_h = self.crop_rect
+                    # Map from cropped coordinates to original frame coordinates
+                    orig_x = fx + crop_x
+                    orig_y = fy + crop_y
+                else:
+                    # No crop - coordinates are already in original frame space
+                    orig_x, orig_y = fx, fy
+
+                # Convert from original frame coordinates to rotated frame coordinates
+                rot_x, rot_y = self._map_original_to_rotated_coords(orig_x, orig_y)
+                sx, sy = self._map_rotated_to_screen(rot_x, rot_y)
                 cv2.circle(canvas, (sx, sy), 4, (0, 255, 0), -1)  # Green circles for features
                 cv2.circle(canvas, (sx, sy), 4, (255, 255, 255), 1)

@@ -3551,19 +3577,29 @@ class VideoEditor:
         elif key == ord("T"):
             # Extract features from current frame (Shift+T)
             if not self.is_image_mode and self.current_display_frame is not None:
-                # Extract features from the transformed frame (what user sees after crop/zoom/rotation)
-                display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)
-                if display_frame is not None:
-                    # Pass coordinate mapper to map features back to original frame space
-                    success = self.feature_tracker.extract_features(display_frame, self.current_frame, self._map_transformed_to_original_coords)
+                # Extract features from the cropped region of the original frame
+                # This gives us features only from the visible area with correct coordinates
+                if self.crop_rect:
+                    x, y, w, h = self.crop_rect
+                    cropped_frame = self.current_display_frame[y:y+h, x:x+w]
+                    if cropped_frame.size > 0:
+                        success = self.feature_tracker.extract_features(cropped_frame, self.current_frame)
                         if success:
                             count = self.feature_tracker.get_feature_count(self.current_frame)
-                            self.show_feedback_message(f"Extracted {count} features from transformed frame")
+                            self.show_feedback_message(f"Extracted {count} features from crop area")
+                        else:
+                            self.show_feedback_message("Failed to extract features")
+                    else:
+                        self.show_feedback_message("Crop area too small")
+                else:
+                    # No crop - extract from full frame
+                    success = self.feature_tracker.extract_features(self.current_display_frame, self.current_frame)
+                    if success:
+                        count = self.feature_tracker.get_feature_count(self.current_frame)
+                        self.show_feedback_message(f"Extracted {count} features from full frame")
                     else:
                         self.show_feedback_message("Failed to extract features")
                 self.save_state()
-                else:
-                    self.show_feedback_message("No display frame available")
             else:
                 self.show_feedback_message("No frame data available")
         elif key == ord("g"):
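The FeatureTracker class itself is not touched by this commit. For context on the calls above, here is a minimal sketch of what extract_features(frame, frame_number) and get_feature_count could look like using OpenCV's goodFeaturesToTrack; only the method names, the boolean return value, and the features[frame]['positions'] layout are taken from the diff, while everything else (detector choice, parameters) is an assumption:

import cv2

class FeatureTracker:
    """Minimal sketch of an assumed implementation, not code from the commit."""

    def __init__(self, max_features=200):
        self.max_features = max_features
        self.tracking_enabled = True
        self.features = {}  # frame_number -> {'positions': [(x, y), ...]}

    def extract_features(self, frame, frame_number):
        """Detect corners in the given frame (cropped or full) and store them."""
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        corners = cv2.goodFeaturesToTrack(
            gray, maxCorners=self.max_features, qualityLevel=0.01, minDistance=7
        )
        if corners is None:
            return False
        # Positions are stored in the coordinate space of whatever frame was
        # passed in, which is why the editor adds the crop offset back when
        # drawing or tracking these features.
        self.features[frame_number] = {
            'positions': [(float(px), float(py)) for [[px, py]] in corners]
        }
        return True

    def get_feature_count(self, frame_number):
        return len(self.features.get(frame_number, {}).get('positions', []))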