Compare commits

...

7 Commits


@@ -871,6 +871,22 @@ class VideoEditor:
         # Template matching modes
         self.template_matching_full_frame = False  # Toggle for full frame vs cropped template matching
+        # Frame difference for interesting point detection
+        self.frame_difference_threshold = 10.0  # Percentage threshold for frame difference (10% default)
+        self.frame_difference_gap = 10  # Number of frames between comparisons (default 10)
+        # Region selection for interesting point detection
+        self.interesting_region = None  # (x, y, width, height) or None for full frame
+        self.selecting_interesting_region = False
+        self.region_selection_start = None
+        self.region_selection_current = None
+        # Search state for interesting point detection
+        self.searching_interesting_point = False
+        self.search_progress_text = ""
+        self.search_progress_percent = 0.0
+        self.search_state = None  # For non-blocking search state
         # Project view mode
         self.project_view_mode = False
         self.project_view = None
@@ -884,10 +900,8 @@ class VideoEditor:
     def _get_state_file_path(self) -> Path:
         """Get the state file path for the current media file"""
         if not hasattr(self, 'video_path') or not self.video_path:
-            print("DEBUG: No video_path available for state file")
             return None
         state_path = self.video_path.with_suffix('.json')
-        print(f"DEBUG: State file path would be: {state_path}")
         return state_path

     def save_state(self):
@@ -918,6 +932,9 @@ class VideoEditor:
             'tracking_points': {str(k): v for k, v in self.tracking_points.items()},
             'feature_tracker': self.feature_tracker.get_state_dict(),
             'template_matching_full_frame': self.template_matching_full_frame,
+            'frame_difference_threshold': self.frame_difference_threshold,
+            'frame_difference_gap': self.frame_difference_gap,
+            'interesting_region': self.interesting_region,
             'templates': [{
                 'start_frame': start_frame,
                 'region': region
@@ -1013,6 +1030,24 @@ class VideoEditor:
         if 'template_matching_full_frame' in state:
             self.template_matching_full_frame = state['template_matching_full_frame']
+        # Load frame difference threshold
+        if 'frame_difference_threshold' in state:
+            self.frame_difference_threshold = state['frame_difference_threshold']
+            print(f"Loaded frame difference threshold: {self.frame_difference_threshold:.1f}%")
+        # Load frame difference gap
+        if 'frame_difference_gap' in state:
+            self.frame_difference_gap = state['frame_difference_gap']
+            print(f"Loaded frame difference gap: {self.frame_difference_gap} frames")
+        # Load interesting region
+        if 'interesting_region' in state and state['interesting_region'] is not None:
+            self.interesting_region = tuple(state['interesting_region'])
+            x, y, w, h = self.interesting_region
+            print(f"Loaded interesting region: ({x}, {y}, {w}, {h})")
+        else:
+            self.interesting_region = None
         # Load simple templates state
         if 'templates' in state:
             self.templates = []
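Note on the load path: JSON has no tuple type, so the region that `save_state` writes as a tuple comes back from `json.load` as a list, which is why the loader wraps it in `tuple(...)`. A minimal sketch of that round-trip (standalone, names are illustrative):

```python
import json

region = (10, 20, 300, 200)  # (x, y, width, height), as stored by save_state
state = json.loads(json.dumps({'interesting_region': region}))
assert state['interesting_region'] == [10, 20, 300, 200]  # tuple came back as a list
x, y, w, h = tuple(state['interesting_region'])           # hence the tuple(...) on load
```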
@@ -1213,7 +1248,7 @@ class VideoEditor:
         # Get backend information
         backend_name = "FFmpeg" if hasattr(cv2, 'CAP_FFMPEG') and backend == cv2.CAP_FFMPEG else "Other"

-        print(f"Loaded video: {self.video_path.name} ({self.current_video_index + 1}/{len(self.video_files)})")
+        print(f"Loaded video: {self.current_video_index + 1}/{len(self.video_files)}")
         print(f" Codec: {codec} | Backend: {backend_name} | Resolution: {self.frame_width}x{self.frame_height}")
         print(f" FPS: {self.fps:.2f} | Frames: {self.total_frames} | Duration: {self.total_frames/self.fps:.1f}s")
@@ -1274,7 +1309,7 @@ class VideoEditor:
"""Calculate frame delay in milliseconds based on playback speed""" """Calculate frame delay in milliseconds based on playback speed"""
# Round to 2 decimals to handle floating point precision issues # Round to 2 decimals to handle floating point precision issues
speed = round(self.playback_speed, 2) speed = round(self.playback_speed, 2)
print(f"Playback speed: {speed}") # print(f"Playback speed: {speed}")
if speed >= 1.0: if speed >= 1.0:
# Speed >= 1: maximum FPS (no delay) # Speed >= 1: maximum FPS (no delay)
return 1 return 1
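The `speed < 1.0` branch falls outside this hunk; presumably it stretches the nominal frame interval. A sketch of that arithmetic, stated as an assumption rather than the code's actual slow path:

```python
def frame_delay_ms(fps: float, speed: float) -> int:
    """Assumed shape of the full helper: 1 ms floor at full speed, scaled interval below it."""
    speed = round(speed, 2)
    if speed >= 1.0:
        return 1  # matches the hunk above: no artificial delay
    # Hypothetical slow path: nominal interval (1000/fps) stretched by the slowdown.
    return max(1, round(1000.0 / (fps * speed)))

assert frame_delay_ms(30.0, 0.5) == 67  # 30 fps at 0.5x -> ~67 ms per frame
```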
@@ -1465,8 +1500,120 @@ class VideoEditor:
print(f"DEBUG: Jump next tracking to last marker from {current} -> {target}; tracking_frames={tracking_frames}") print(f"DEBUG: Jump next tracking to last marker from {current} -> {target}; tracking_frames={tracking_frames}")
self.seek_to_frame(target) self.seek_to_frame(target)
def continue_interesting_point_search(self):
"""Continue non-blocking search for interesting point - called from main loop"""
if not self.search_state or self.search_state.get('completed', False):
return
try:
# Process a small number of steps per call
steps_per_call = 3
update_interval = 5
for _ in range(steps_per_call):
if self.search_state['target_frame'] >= self.total_frames:
# End of video reached
self.search_state['completed'] = True
print("Reached end of video")
break
# Read comparison frame
comparison_frame_num = min(self.search_state['target_frame'] + self.frame_difference_gap, self.total_frames - 1)
self.cap.cap.set(cv2.CAP_PROP_POS_FRAMES, comparison_frame_num)
ret, comparison_frame = self.cap.cap.read()
if not ret:
break
self.search_state['frames_checked'] += 1
# Calculate difference using full resolution
diff_percentage = self.calculate_frame_difference(self.search_state['base_frame'], comparison_frame)
# Update OSD
if self.search_state['frames_checked'] % update_interval == 0 or diff_percentage >= self.frame_difference_threshold:
progress_percent = (self.search_state['frames_checked'] / max(1, (self.total_frames - self.search_state['current_frame_backup']) // self.frame_difference_gap)) * 100
self.search_progress_percent = progress_percent
self.search_progress_text = f"Gap search: {self.search_state['target_frame']}{comparison_frame_num} ({diff_percentage:.1f}% change, gap: {self.frame_difference_gap}) - Press ; to cancel"
# Update display frame
self.current_frame = comparison_frame_num
self.current_display_frame = comparison_frame
self.display_needs_update = True
# Check if found interesting point
if diff_percentage >= self.frame_difference_threshold:
full_diff = self.calculate_frame_difference(self.search_state['base_frame'], comparison_frame)
print(f"Found interesting point between frames {self.search_state['target_frame']} and {comparison_frame_num} ({full_diff:.1f}% change)")
self.show_feedback_message(f"Interesting: {full_diff:.1f}% change over {self.frame_difference_gap} frames")
self.current_frame = comparison_frame_num
self.current_display_frame = comparison_frame
# Clean up search state
self.search_state = None
self.searching_interesting_point = False
self.search_progress_text = ""
self.display_needs_update = True
return
# Move to next comparison
self.search_state['target_frame'] += self.frame_difference_gap
self.search_state['base_frame'] = comparison_frame.copy() if comparison_frame is not None else None
except Exception as e:
print(f"Error during search: {e}")
self.search_state['completed'] = True
def go_to_next_interesting_point(self):
"""Go to the next frame where the difference from the previous frame exceeds the threshold"""
if self.is_image_mode:
return
self.stop_auto_repeat_seek()
if self.current_frame >= self.total_frames - 1:
print("Already at last frame")
return
# Store current frame for comparison
current_frame_backup = self.current_frame
current_display_frame = self.current_display_frame.copy() if self.current_display_frame is not None else None
print(f"Searching for next interesting point from frame {current_frame_backup + 1} with threshold {self.frame_difference_threshold:.1f}% (gap: {self.frame_difference_gap} frames)")
# Initialize search state for main loop processing instead of blocking
self.search_state = {
'current_frame_backup': current_frame_backup,
'target_frame': current_frame_backup + 1,
'frames_checked': 0,
'base_frame': None,
'base_frame_num': None,
'search_cancelled': False,
'update_interval': 10
}
# Enable search mode for OSD display
self.searching_interesting_point = True
self.search_progress_text = f"Starting search from frame {current_frame_backup + 1} (threshold: {self.frame_difference_threshold:.1f}%, gap: {self.frame_difference_gap} frames) - Press ; to cancel"
self.search_progress_percent = 0
self.display_needs_update = True
# Read the first frame to start comparisons
self.cap.cap.set(cv2.CAP_PROP_POS_FRAMES, current_frame_backup)
ret, base_frame = self.cap.cap.read()
if not ret:
self.search_state['search_cancelled'] = True
print("Could not read base frame")
return
self.search_state['base_frame'] = base_frame
self.search_state['base_frame_num'] = current_frame_backup
# Let main loop handle the search - don't block here
return
def _get_previous_tracking_point(self): def _get_previous_tracking_point(self):
"""Get the tracking point from the previous frame that has tracking points.""" """Get the tracking point from the previous frame that has tracking points (like jump_to_previous_marker)."""
if self.is_image_mode or not self.tracking_points: if self.is_image_mode or not self.tracking_points:
return None return None
@@ -1474,13 +1621,17 @@ class VideoEditor:
         if not tracking_frames:
             return None

-        # Find the last frame with tracking points that's before current frame
-        prev_frames = [f for f in tracking_frames if f < self.current_frame]
-        if not prev_frames:
-            return None
-        prev_frame = max(prev_frames)
-        return prev_frame, self.tracking_points[prev_frame]
+        current = self.current_frame
+        candidates = [f for f in tracking_frames if f < current]
+        if candidates:
+            # Use the most recent frame before current (like jump_to_previous_marker)
+            prev_frame = candidates[-1]
+            return prev_frame, self.tracking_points[prev_frame]
+        else:
+            # If no previous frames, use the first frame with tracking points
+            prev_frame = tracking_frames[0]
+            return prev_frame, self.tracking_points[prev_frame]

     def _get_next_tracking_point(self):
         """Get the tracking point from the next frame that has tracking points."""
@@ -1613,6 +1764,71 @@ class VideoEditor:
         return processed_frame

+    def calculate_frame_difference(self, frame1, frame2) -> float:
+        """Calculate percentage difference between two frames, optionally within a region"""
+        if frame1 is None or frame2 is None:
+            return 0.0
+        try:
+            # Ensure frames are the same size
+            if frame1.shape != frame2.shape:
+                # Resize frame2 to match frame1
+                frame2 = cv2.resize(frame2, (frame1.shape[1], frame1.shape[0]))
+            # Apply region selection if set
+            if self.interesting_region is not None:
+                x, y, w, h = self.interesting_region
+                # Ensure region is within frame bounds
+                x = max(0, min(x, frame1.shape[1] - 1))
+                y = max(0, min(y, frame1.shape[0] - 1))
+                w = min(w, frame1.shape[1] - x)
+                h = min(h, frame1.shape[0] - y)
+                if w <= 0 or h <= 0:
+                    return 0.0
+                frame1_region = frame1[y:y+h, x:x+w]
+                frame2_region = frame2[y:y+h, x:x+w]
+            else:
+                # Use full frames
+                frame1_region = frame1
+                frame2_region = frame2
+            # Convert to grayscale for difference calculation
+            if len(frame1_region.shape) == 3:
+                gray1 = cv2.cvtColor(frame1_region, cv2.COLOR_BGR2GRAY)
+            else:
+                gray1 = frame1_region
+            if len(frame2_region.shape) == 3:
+                gray2 = cv2.cvtColor(frame2_region, cv2.COLOR_BGR2GRAY)
+            else:
+                gray2 = frame2_region
+            # Calculate absolute difference
+            diff = cv2.absdiff(gray1, gray2)
+            # Threshold the diff to ignore minor noise, then count changed pixels
+            _, thresh_diff = cv2.threshold(diff, 30, 255, cv2.THRESH_BINARY)
+            changed_pixels = cv2.countNonZero(thresh_diff)
+            total_pixels = gray1.size
+            if total_pixels == 0:
+                return 0.0
+            # Percentage of pixels that changed significantly
+            return (changed_pixels / total_pixels) * 100.0
+        except Exception as e:
+            print(f"Error calculating frame difference: {e}")
+            return 0.0

     # --- Motion tracking helpers ---
     def _get_effective_crop_rect_for_frame(self, frame_number):
         """Return EFFECTIVE crop_rect in ROTATED frame coords for this frame (applies tracking follow)."""
@@ -1640,6 +1856,93 @@ class VideoEditor:
         return (x, y, w, h)

+    def toggle_interesting_region_selection(self):
+        """Toggle region selection mode for interesting point detection"""
+        # If a region is already defined and we're not currently selecting, clear the region
+        if self.interesting_region is not None and not self.selecting_interesting_region:
+            self.interesting_region = None
+            print("Interesting point region cleared")
+            self.show_feedback_message("Region cleared")
+            self.display_needs_update = True
+            return
+        if self.selecting_interesting_region:
+            # Finish region selection
+            self.selecting_interesting_region = False
+            if (self.region_selection_start is not None and
+                    self.region_selection_current is not None):
+                # Calculate region rectangle
+                x1, y1 = self.region_selection_start
+                x2, y2 = self.region_selection_current
+                x = min(x1, x2)
+                y = min(y1, y2)
+                w = abs(x2 - x1)
+                h = abs(y2 - y1)
+                if w > 5 and h > 5:  # Minimum size threshold
+                    # Get raw frame dimensions for direct coordinate mapping
+                    frame_height, frame_width = self.current_display_frame.shape[:2]
+                    # Map screen coords to frame coords, assuming the frame is
+                    # centered and scaled to fit (never upscaled)
+                    available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
+                    display_scale = min(self.window_width / frame_width, available_height / frame_height)
+                    if display_scale > 1:
+                        display_scale = 1  # Frame is smaller than window, no scaling
+                    # Calculate displayed dimensions
+                    display_width = int(frame_width * display_scale)
+                    display_height = int(frame_height * display_scale)
+                    # Calculate offset (frame is centered on screen)
+                    offset_x = (self.window_width - display_width) // 2
+                    offset_y = (available_height - display_height) // 2
+                    # Map screen coordinates to frame coordinates
+                    frame_x = int((x - offset_x) / display_scale)
+                    frame_y = int((y - offset_y) / display_scale)
+                    frame_x2 = int((x + w - offset_x) / display_scale)
+                    frame_y2 = int((y + h - offset_y) / display_scale)
+                    # Clamp coordinates to frame bounds
+                    frame_x = max(0, min(frame_x, frame_width - 1))
+                    frame_y = max(0, min(frame_y, frame_height - 1))
+                    frame_x2 = max(0, min(frame_x2, frame_width - 1))
+                    frame_y2 = max(0, min(frame_y2, frame_height - 1))
+                    frame_w = max(1, frame_x2 - frame_x)
+                    frame_h = max(1, frame_y2 - frame_y)
+                    self.interesting_region = (frame_x, frame_y, frame_w, frame_h)
+                    print(f"Interesting point region set: ({frame_x}, {frame_y}, {frame_w}, {frame_h})")
+                    self.show_feedback_message(f"Region set: {frame_w}x{frame_h}")
+                else:
+                    # Region too small, clear it
+                    self.interesting_region = None
+                    print("Region too small, cleared")
+                    self.show_feedback_message("Region cleared")
+            # Reset selection state
+            self.region_selection_start = None
+            self.region_selection_current = None
+            self.display_needs_update = True
+        else:
+            # Start region selection
+            self.selecting_interesting_region = True
+            self.region_selection_start = None
+            self.region_selection_current = None
+            print("Select region for interesting point detection (click and drag)")
+            self.show_feedback_message("Select region (click and drag)")

     def _get_interpolated_tracking_position(self, frame_number):
         """Linear interpolation in ROTATED frame coords. Returns (rx, ry) or None."""
         # Get base position from manual tracking points
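The screen-to-frame mapping above is the standard letterbox inverse: scale-to-fit capped at 1:1, center, then undo offset and scale. Extracted as a sketch (hypothetical helper, same math):

```python
def screen_to_frame(sx, sy, win_w, win_h, frame_w, frame_h):
    scale = min(win_w / frame_w, win_h / frame_h, 1.0)  # fit, but never upscale
    disp_w, disp_h = int(frame_w * scale), int(frame_h * scale)
    off_x, off_y = (win_w - disp_w) // 2, (win_h - disp_h) // 2  # centered
    fx = (sx - off_x) / scale
    fy = (sy - off_y) / scale
    return (min(max(fx, 0), frame_w - 1), min(max(fy, 0), frame_h - 1))

# 1920x1080 frame letterboxed into a 1280x800 window: window center maps to frame center
print(screen_to_frame(640, 400, 1280, 800, 1920, 1080))  # (960.0, 540.0)
```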
@@ -1654,7 +1957,7 @@ class VideoEditor:
             result = self.track_template(self.current_display_frame)
             if result:
                 center_x, center_y, confidence = result
-                print(f"DEBUG: Template match found at ({center_x}, {center_y}) with confidence {confidence:.2f}")
+                # print(f"DEBUG: Template match found at ({center_x}, {center_y}) with confidence {confidence:.2f}")
                 template_offset = (center_x, center_y)
         else:
             # Cropped mode - use only the cropped region for faster template matching
@@ -1719,7 +2022,7 @@ class VideoEditor:
         # Add template matching position
         if template_offset:
             positions.append(template_offset)
-            print(f"DEBUG: Template matching: ({template_offset[0]:.1f}, {template_offset[1]:.1f})")
+            # print(f"DEBUG: Template matching: ({template_offset[0]:.1f}, {template_offset[1]:.1f})")

         # Add feature tracking position
         if feature_offset:
@@ -1730,7 +2033,7 @@ class VideoEditor:
         if positions:
             avg_x = sum(pos[0] for pos in positions) / len(positions)
             avg_y = sum(pos[1] for pos in positions) / len(positions)
-            print(f"DEBUG: Average of {len(positions)} positions: ({avg_x:.1f}, {avg_y:.1f})")
+            # print(f"DEBUG: Average of {len(positions)} positions: ({avg_x:.1f}, {avg_y:.1f})")
             return (avg_x, avg_y)

         # Fall back to individual tracking methods if no base position
@@ -2998,6 +3301,19 @@ class VideoEditor:
             1,
         )

+        # Draw frame difference threshold info
+        region_status = "region" if self.interesting_region else "full frame"
+        threshold_text = f"Interesting: {self.frame_difference_threshold:.0f}% (gap: {self.frame_difference_gap}, {region_status})"
+        cv2.putText(
+            frame,
+            threshold_text,
+            (bar_x_start, bar_y - 15),
+            cv2.FONT_HERSHEY_SIMPLEX,
+            0.4,
+            (200, 200, 200),
+            1,
+        )

     def display_current_frame(self):
         """Display the current frame with all overlays"""
         if self.current_display_frame is None:
@@ -3013,12 +3329,15 @@ class VideoEditor:
             self.contrast,
             self.display_offset,
             self.progress_bar_visible,
-            self.feedback_message
+            self.feedback_message,
+            self.searching_interesting_point,
+            self.search_progress_text,
+            self.search_progress_percent
         )

-        # Always update display when paused to ensure UI elements are visible
-        if not self.display_needs_update and current_state == self.last_display_state and self.is_playing:
-            return  # Skip redraw if nothing changed and playing
+        # Always update display when paused or when searching to ensure UI elements are visible
+        if not self.display_needs_update and current_state == self.last_display_state and self.is_playing and not self.searching_interesting_point:
+            return  # Skip redraw if nothing changed and playing (but not when searching)

         self.last_display_state = current_state
         self.display_needs_update = False
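The guard works because `current_state` is a value tuple of everything that affects the rendered canvas; once the three search fields are in it, any progress update invalidates the cached frame. The pattern, reduced to its core (illustrative):

```python
class RedrawCache:
    """Skip an expensive render when no display-relevant value changed."""
    def __init__(self):
        self._last = None

    def should_redraw(self, *display_values) -> bool:
        state = tuple(display_values)
        if state == self._last:
            return False
        self._last = state
        return True

cache = RedrawCache()
assert cache.should_redraw(10, "osd off") is True   # first frame: draw
assert cache.should_redraw(10, "osd off") is False  # unchanged: skip
assert cache.should_redraw(11, "osd off") is True   # frame advanced: draw
```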
@@ -3324,6 +3643,84 @@ class VideoEditor:
         # Draw feedback message (if visible)
         self.draw_feedback_message(canvas)

+        # Draw search progress (if searching for interesting point)
+        if self.searching_interesting_point and self.search_progress_text:
+            # Draw search progress overlay
+            height, width = canvas.shape[:2]
+            # Background for search progress
+            text_size = cv2.getTextSize(self.search_progress_text, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)[0]
+            padding = 10
+            bg_x = (width - text_size[0]) // 2 - padding
+            bg_y = height // 2 - 50
+            bg_w = text_size[0] + 2 * padding
+            bg_h = 30
+            # Semi-transparent background
+            overlay = canvas.copy()
+            cv2.rectangle(overlay, (bg_x, bg_y), (bg_x + bg_w, bg_y + bg_h), (0, 0, 0), -1)
+            cv2.addWeighted(overlay, 0.7, canvas, 0.3, 0, canvas)
+            # Border
+            cv2.rectangle(canvas, (bg_x, bg_y), (bg_x + bg_w, bg_y + bg_h), (255, 255, 0), 2)
+            # Text
+            cv2.putText(canvas, self.search_progress_text, (bg_x + padding, bg_y + 20),
+                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
+            # Progress bar
+            bar_width = 200
+            bar_height = 6
+            bar_x = (width - bar_width) // 2
+            bar_y = bg_y + bg_h + 5
+            # Background
+            cv2.rectangle(canvas, (bar_x, bar_y), (bar_x + bar_width, bar_y + bar_height), (100, 100, 100), -1)
+            # Progress fill
+            fill_width = int(bar_width * (self.search_progress_percent / 100.0))
+            if fill_width > 0:
+                cv2.rectangle(canvas, (bar_x, bar_y), (bar_x + fill_width, bar_y + bar_height), (0, 255, 0), -1)
+
+        # Draw interesting point region (if set)
+        if self.interesting_region is not None:
+            # Draw the selected region on screen
+            x, y, w, h = self.interesting_region
+            # Convert frame coordinates to screen coordinates
+            sx1, sy1 = self._map_rotated_to_screen(x, y)
+            sx2, sy2 = self._map_rotated_to_screen(x + w, y + h)
+            # Draw region outline ((0, 255, 255) is yellow in BGR)
+            cv2.rectangle(canvas, (sx1, sy1), (sx2, sy2), (0, 255, 255), 2)
+            # Draw corner indicators
+            corner_size = 8
+            corners = [(sx1, sy1), (sx2, sy1), (sx1, sy2), (sx2, sy2)]
+            for cx, cy in corners:
+                cv2.line(canvas, (cx - corner_size // 2, cy), (cx + corner_size // 2, cy), (0, 255, 255), 2)
+                cv2.line(canvas, (cx, cy - corner_size // 2), (cx, cy + corner_size // 2), (0, 255, 255), 2)
+
+        # Draw region selection in progress
+        if self.selecting_interesting_region and self.region_selection_start and self.region_selection_current:
+            x1, y1 = self.region_selection_start
+            x2, y2 = self.region_selection_current
+            # Calculate selection rectangle
+            sel_x = min(x1, x2)
+            sel_y = min(y1, y2)
+            sel_w = abs(x2 - x1)
+            sel_h = abs(y2 - y1)
+            # Draw selection rectangle (solid yellow)
+            cv2.rectangle(canvas, (sel_x, sel_y), (sel_x + sel_w, sel_y + sel_h), (0, 255, 255), 2)
+            # Draw selection info
+            info_text = f"Region: {sel_w}x{sel_h}"
+            cv2.putText(canvas, info_text, (sel_x, sel_y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)

         window_title = "Image Editor" if self.is_image_mode else "Video Editor"
         cv2.imshow(window_title, canvas)
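The translucent panel behind the progress text comes from painting onto a copy and blending it back with `cv2.addWeighted`. Minimal standalone version of that trick:

```python
import cv2
import numpy as np

canvas = np.full((240, 320, 3), 60, np.uint8)   # stand-in for the rendered frame
overlay = canvas.copy()
cv2.rectangle(overlay, (40, 100), (280, 140), (0, 0, 0), -1)
# 70% painted copy + 30% original = dimmed box that doesn't fully hide the frame
cv2.addWeighted(overlay, 0.7, canvas, 0.3, 0, dst=canvas)
cv2.putText(canvas, "Searching...", (50, 127),
            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
```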
@@ -3375,29 +3772,29 @@ class VideoEditor:
if flags & cv2.EVENT_FLAG_CTRLKEY and event == cv2.EVENT_LBUTTONDOWN: if flags & cv2.EVENT_FLAG_CTRLKEY and event == cv2.EVENT_LBUTTONDOWN:
self.zoom_center = (x, y) self.zoom_center = (x, y)
# Handle Shift+Right-click+drag for selective feature extraction # Handle shift+right-click for placing tracking point at previous tracking point position
if event == cv2.EVENT_RBUTTONDOWN and (flags & cv2.EVENT_FLAG_SHIFTKEY): if event == cv2.EVENT_RBUTTONDOWN and (flags & cv2.EVENT_FLAG_SHIFTKEY) and not (flags & cv2.EVENT_FLAG_CTRLKEY):
if not self.is_image_mode: if not self.is_image_mode:
# Enable feature tracking if not already enabled # Get previous tracking point position
if not self.feature_tracker.tracking_enabled: prev_result = self._get_previous_tracking_point()
self.feature_tracker.tracking_enabled = True if prev_result:
self.show_feedback_message("Feature tracking enabled") prev_frame, prev_points = prev_result
self.selective_feature_extraction_start = (x, y) if prev_points:
self.selective_feature_extraction_rect = None # Use the first tracking point from the previous frame
print(f"DEBUG: Started selective feature extraction at ({x}, {y})") prev_x, prev_y = prev_points[0]
# Handle Shift+Right-click+drag for selective feature extraction # Add tracking point at same position on current frame
if event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_SHIFTKEY) and self.selective_feature_extraction_start: self.tracking_points.setdefault(self.current_frame, []).append((int(prev_x), int(prev_y)))
if not self.is_image_mode: print(f"DEBUG: Added tracking point at previous position ({prev_x}, {prev_y}) on frame {self.current_frame}")
start_x, start_y = self.selective_feature_extraction_start self.show_feedback_message("Tracking point added at previous position")
self.selective_feature_extraction_rect = (min(start_x, x), min(start_y, y), abs(x - start_x), abs(y - start_y))
# Handle Shift+Right-click release for selective feature extraction self.clear_transformation_cache()
if event == cv2.EVENT_RBUTTONUP and (flags & cv2.EVENT_FLAG_SHIFTKEY) and self.selective_feature_extraction_start: self.save_state()
if not self.is_image_mode and self.selective_feature_extraction_rect: self.display_current_frame()
self._extract_features_from_region(self.selective_feature_extraction_rect) else:
self.selective_feature_extraction_start = None self.show_feedback_message("No previous tracking points found")
self.selective_feature_extraction_rect = None else:
self.show_feedback_message("No previous tracking points found")
# Handle Ctrl+Right-click+drag for selective feature deletion # Handle Ctrl+Right-click+drag for selective feature deletion
if event == cv2.EVENT_RBUTTONDOWN and (flags & cv2.EVENT_FLAG_CTRLKEY): if event == cv2.EVENT_RBUTTONDOWN and (flags & cv2.EVENT_FLAG_CTRLKEY):
@@ -3439,6 +3836,45 @@ class VideoEditor:
                 self.template_selection_start = None
                 self.template_selection_rect = None

+        # Handle interesting point region selection
+        if self.selecting_interesting_region:
+            if event == cv2.EVENT_LBUTTONDOWN:
+                self.region_selection_start = (x, y)
+                self.region_selection_current = (x, y)
+                self.display_needs_update = True
+            elif event == cv2.EVENT_MOUSEMOVE and self.region_selection_start:
+                self.region_selection_current = (x, y)
+                self.display_needs_update = True
+            elif event == cv2.EVENT_LBUTTONUP and self.region_selection_start:
+                self.region_selection_current = (x, y)
+                self.toggle_interesting_region_selection()
+                self.display_needs_update = True
+
+        # Handle right-click for selective feature extraction when mode is active
+        if event == cv2.EVENT_RBUTTONDOWN and not (flags & (cv2.EVENT_FLAG_CTRLKEY | cv2.EVENT_FLAG_SHIFTKEY)):
+            if not self.is_image_mode and hasattr(self, 'selective_feature_extraction_mode') and self.selective_feature_extraction_mode:
+                # Start selective feature extraction
+                self.selective_feature_extraction_start = (x, y)
+                self.selective_feature_extraction_rect = None
+                print(f"DEBUG: Started selective feature extraction at ({x}, {y})")
+                return  # Don't process regular right-click functionality
+
+        # Handle mouse move for selective feature extraction
+        if event == cv2.EVENT_MOUSEMOVE and hasattr(self, 'selective_feature_extraction_start') and self.selective_feature_extraction_start:
+            if not self.is_image_mode:
+                start_x, start_y = self.selective_feature_extraction_start
+                self.selective_feature_extraction_rect = (min(start_x, x), min(start_y, y), abs(x - start_x), abs(y - start_y))
+                self.display_needs_update = True
+
+        # Handle mouse release for selective feature extraction
+        if event == cv2.EVENT_RBUTTONUP and hasattr(self, 'selective_feature_extraction_start') and self.selective_feature_extraction_start:
+            if not self.is_image_mode and self.selective_feature_extraction_rect:
+                self._extract_features_from_region(self.selective_feature_extraction_rect)
+            self.selective_feature_extraction_start = None
+            self.selective_feature_extraction_rect = None
+            self.display_needs_update = True

         # Handle right-click for tracking points (no modifiers)
         if event == cv2.EVENT_RBUTTONDOWN and not (flags & (cv2.EVENT_FLAG_CTRLKEY | cv2.EVENT_FLAG_SHIFTKEY)):
             if not self.is_image_mode:
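All of the branches above live in one OpenCV mouse callback: a single function receives `(event, x, y, flags, param)` and dispatches on event constants plus modifier bits in `flags`. The skeleton, for reference:

```python
import cv2

def on_mouse(event, x, y, flags, param):
    shift = bool(flags & cv2.EVENT_FLAG_SHIFTKEY)
    ctrl = bool(flags & cv2.EVENT_FLAG_CTRLKEY)
    if event == cv2.EVENT_RBUTTONDOWN and shift and not ctrl:
        print(f"Shift+right-click at ({x}, {y})")      # one branch per gesture
    elif event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_LBUTTON):
        print(f"Left-drag at ({x}, {y})")

# Wiring (window must exist first):
# cv2.namedWindow("Video Editor")
# cv2.setMouseCallback("Video Editor", on_mouse)
```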
@@ -4358,6 +4794,19 @@ class VideoEditor:
             if self.is_playing and not self.is_image_mode:
                 self.advance_frame()

+            # Continue non-blocking search if active
+            if self.searching_interesting_point and self.search_state:
+                self.continue_interesting_point_search()
+                # Check if search completed or was cancelled
+                if self.search_state and self.search_state.get('completed', False):
+                    # Clean up completed search
+                    self.search_state = None
+                    self.searching_interesting_point = False
+                    self.search_progress_text = ""
+                    self.show_feedback_message("Search completed - no interesting point found")
+                    self.display_needs_update = True

             # Key capture with appropriate delay
             key = cv2.waitKey(delay_ms) & 0xFF
@@ -4392,6 +4841,12 @@ class VideoEditor:
if key == ord("q") or key == 27: # ESC if key == ord("q") or key == 27: # ESC
self.stop_auto_repeat_seek() self.stop_auto_repeat_seek()
# If search is active, cancel it first
if self.searching_interesting_point:
self.searching_interesting_point = False
self.search_progress_text = ""
print("Search cancelled")
self.show_feedback_message("Search cancelled")
self.save_state() self.save_state()
break break
elif key == ord("p"): # P - Toggle project view elif key == ord("p"): # P - Toggle project view
@@ -4511,6 +4966,60 @@ class VideoEditor:
                     self.cut_end_frame = self.current_frame
                     print(f"Set cut end at frame {self.current_frame}")
                     self.save_state()  # Save state when cut end is set
+            elif key == ord("!"):  # Shift+1 - Jump to cut start marker
+                if not self.is_image_mode and self.cut_start_frame is not None:
+                    self.seek_to_frame(self.cut_start_frame)
+                    print(f"Jumped to cut start marker at frame {self.cut_start_frame}")
+            elif key == ord("\""):  # Shift+2 - Jump to cut end marker
+                if not self.is_image_mode and self.cut_end_frame is not None:
+                    self.seek_to_frame(self.cut_end_frame)
+                    print(f"Jumped to cut end marker at frame {self.cut_end_frame}")
+            elif key == ord(";"):  # ; - Go to next interesting point or cancel search
+                if not self.is_image_mode:
+                    if self.searching_interesting_point and self.search_state:
+                        # Cancel ongoing search
+                        self.search_state = None
+                        self.searching_interesting_point = False
+                        self.search_progress_text = ""
+                        print("Search cancelled")
+                        self.show_feedback_message("Search cancelled")
+                        self.display_needs_update = True
+                    else:
+                        self.go_to_next_interesting_point()
+            elif key == ord("'"):  # ' (apostrophe) - Toggle region selection mode
+                self.toggle_interesting_region_selection()
+            elif key == ord("9"):  # 9 - Decrease frame difference threshold
+                self.frame_difference_threshold = max(1.0, self.frame_difference_threshold - 1.0)
+                print(f"Frame difference threshold: {self.frame_difference_threshold:.1f}%")
+                self.show_feedback_message(f"Threshold: {self.frame_difference_threshold:.1f}%")
+            elif key == ord("0"):  # 0 - Increase frame difference threshold
+                self.frame_difference_threshold = min(100.0, self.frame_difference_threshold + 1.0)
+                print(f"Frame difference threshold: {self.frame_difference_threshold:.1f}%")
+                self.show_feedback_message(f"Threshold: {self.frame_difference_threshold:.1f}%")
+            elif key == ord(")"):  # Shift+9 - Decrease frame difference threshold by 10 percentage points
+                self.frame_difference_threshold = max(1.0, self.frame_difference_threshold - 10.0)
+                print(f"Frame difference threshold: {self.frame_difference_threshold:.1f}% (-10pp)")
+                self.show_feedback_message(f"Threshold: {self.frame_difference_threshold:.1f}% (-10pp)")
+            elif key == ord("="):  # Shift+0 - Increase frame difference threshold by 10 percentage points
+                self.frame_difference_threshold = min(100.0, self.frame_difference_threshold + 10.0)
+                print(f"Frame difference threshold: {self.frame_difference_threshold:.1f}% (+10pp)")
+                self.show_feedback_message(f"Threshold: {self.frame_difference_threshold:.1f}% (+10pp)")
+            elif key == ord("7"):  # 7 - Decrease frame difference gap
+                self.frame_difference_gap = max(1, self.frame_difference_gap - 1)
+                print(f"Frame difference gap: {self.frame_difference_gap} frames")
+                self.show_feedback_message(f"Gap: {self.frame_difference_gap} frames")
+            elif key == ord("8"):  # 8 - Increase frame difference gap
+                self.frame_difference_gap = self.frame_difference_gap + 1
+                print(f"Frame difference gap: {self.frame_difference_gap} frames")
+                self.show_feedback_message(f"Gap: {self.frame_difference_gap} frames")
+            elif key == ord("/"):  # Shift+7 - Decrease frame difference gap by 60 frames
+                self.frame_difference_gap = max(1, self.frame_difference_gap - 60)
+                print(f"Frame difference gap: {self.frame_difference_gap} frames (-60)")
+                self.show_feedback_message(f"Gap: {self.frame_difference_gap} frames (-60)")
+            elif key == ord("("):  # Shift+8 - Increase frame difference gap by 60 frames
+                self.frame_difference_gap = self.frame_difference_gap + 60
+                print(f"Frame difference gap: {self.frame_difference_gap} frames (+60)")
+                self.show_feedback_message(f"Gap: {self.frame_difference_gap} frames (+60)")
             elif key == ord("N"):
                 if len(self.video_files) > 1:
                     self.previous_video()
@@ -4626,7 +5135,7 @@ class VideoEditor:
                 self.feature_tracker.clear_features()
                 self.show_feedback_message("Feature tracking data cleared")
                 self.save_state()
-            elif key == ord("H"):
+            elif key == ord("Z"):
                 # Switch detector type (SIFT -> ORB -> SIFT) - SURF not available
                 current_type = self.feature_tracker.detector_type
                 if current_type == 'SIFT':
@@ -4638,6 +5147,22 @@ class VideoEditor:
                 self.feature_tracker.set_detector_type(new_type)
                 self.show_feedback_message(f"Detector switched to {new_type}")
                 self.save_state()
+            elif key == ord("z"):
+                # Toggle selective feature extraction mode
+                if not self.is_image_mode:
+                    if not hasattr(self, 'selective_feature_extraction_mode'):
+                        self.selective_feature_extraction_mode = False
+                    self.selective_feature_extraction_mode = not self.selective_feature_extraction_mode
+                    if self.selective_feature_extraction_mode:
+                        self.show_feedback_message("Selective feature extraction mode ON - Right-click and drag to select region")
+                        # Enable feature tracking if not already enabled
+                        if not self.feature_tracker.tracking_enabled:
+                            self.feature_tracker.tracking_enabled = True
+                            self.show_feedback_message("Feature tracking enabled")
+                    else:
+                        self.show_feedback_message("Selective feature extraction mode OFF")
+                    self.save_state()
             elif key == ord("o"):
                 # Toggle optical flow tracking
                 self.optical_flow_enabled = not self.optical_flow_enabled