Make region interesting frame (or interesting region I guess?)
croppa/main.py
@@ -875,10 +875,17 @@ class VideoEditor:
         self.frame_difference_threshold = 10.0  # Percentage threshold for frame difference (10% default)
         self.frame_difference_gap = 10  # Number of frames between comparisons (default 10)

+        # Region selection for interesting point detection
+        self.interesting_region = None  # (x, y, width, height) or None for full frame
+        self.selecting_interesting_region = False
+        self.region_selection_start = None
+        self.region_selection_current = None
+
         # Search state for interesting point detection
         self.searching_interesting_point = False
         self.search_progress_text = ""
         self.search_progress_percent = 0.0
+        self.search_state = None  # For non-blocking search state

         # Project view mode
         self.project_view_mode = False
@@ -893,10 +900,8 @@ class VideoEditor:
     def _get_state_file_path(self) -> Path:
         """Get the state file path for the current media file"""
         if not hasattr(self, 'video_path') or not self.video_path:
-            print("DEBUG: No video_path available for state file")
             return None
         state_path = self.video_path.with_suffix('.json')
-        print(f"DEBUG: State file path would be: {state_path}")
         return state_path

     def save_state(self):
@@ -929,6 +934,7 @@ class VideoEditor:
             'template_matching_full_frame': self.template_matching_full_frame,
             'frame_difference_threshold': self.frame_difference_threshold,
             'frame_difference_gap': self.frame_difference_gap,
+            'interesting_region': self.interesting_region,
             'templates': [{
                 'start_frame': start_frame,
                 'region': region
@@ -1034,6 +1040,14 @@ class VideoEditor:
             self.frame_difference_gap = state['frame_difference_gap']
             print(f"Loaded frame difference gap: {self.frame_difference_gap} frames")

+        # Load interesting region
+        if 'interesting_region' in state and state['interesting_region'] is not None:
+            self.interesting_region = tuple(state['interesting_region'])
+            x, y, w, h = self.interesting_region
+            print(f"Loaded interesting region: ({x}, {y}, {w}, {h})")
+        else:
+            self.interesting_region = None
+
         # Load simple templates state
         if 'templates' in state:
             self.templates = []
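
Note: the region round-trips through the per-video JSON state file as a list, so it has to be converted back to a tuple on load, as the hunk above does. A minimal standalone sketch of that persistence (hypothetical save_region/load_region helpers, not functions from croppa/main.py):

    import json
    from pathlib import Path

    def save_region(state_path: Path, region):
        """Store an (x, y, w, h) region (or None) in the JSON state file."""
        state = json.loads(state_path.read_text()) if state_path.exists() else {}
        state['interesting_region'] = list(region) if region is not None else None
        state_path.write_text(json.dumps(state, indent=2))

    def load_region(state_path: Path):
        """Read the region back; JSON stores tuples as lists, so convert."""
        if not state_path.exists():
            return None
        value = json.loads(state_path.read_text()).get('interesting_region')
        return tuple(value) if value is not None else None
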
@@ -1234,7 +1248,7 @@ class VideoEditor:
         # Get backend information
         backend_name = "FFmpeg" if hasattr(cv2, 'CAP_FFMPEG') and backend == cv2.CAP_FFMPEG else "Other"

-        print(f"Loaded video: {self.video_path.name} ({self.current_video_index + 1}/{len(self.video_files)})")
+        print(f"Loaded video: {self.current_video_index + 1}/{len(self.video_files)}")
         print(f" Codec: {codec} | Backend: {backend_name} | Resolution: {self.frame_width}x{self.frame_height}")
         print(f" FPS: {self.fps:.2f} | Frames: {self.total_frames} | Duration: {self.total_frames/self.fps:.1f}s")

@@ -1486,6 +1500,70 @@ class VideoEditor:
             print(f"DEBUG: Jump next tracking to last marker from {current} -> {target}; tracking_frames={tracking_frames}")
             self.seek_to_frame(target)

+    def continue_interesting_point_search(self):
+        """Continue non-blocking search for interesting point - called from main loop"""
+        if not self.search_state or self.search_state.get('completed', False):
+            return
+
+        try:
+            # Process a small number of steps per call
+            steps_per_call = 3
+            update_interval = 5
+
+            for _ in range(steps_per_call):
+                if self.search_state['target_frame'] >= self.total_frames:
+                    # End of video reached
+                    self.search_state['completed'] = True
+                    print("Reached end of video")
+                    break
+
+                # Read comparison frame
+                comparison_frame_num = min(self.search_state['target_frame'] + self.frame_difference_gap, self.total_frames - 1)
+                self.cap.cap.set(cv2.CAP_PROP_POS_FRAMES, comparison_frame_num)
+                ret, comparison_frame = self.cap.cap.read()
+                if not ret:
+                    break
+
+                self.search_state['frames_checked'] += 1
+
+                # Calculate difference using full resolution
+                diff_percentage = self.calculate_frame_difference(self.search_state['base_frame'], comparison_frame)
+
+                # Update OSD
+                if self.search_state['frames_checked'] % update_interval == 0 or diff_percentage >= self.frame_difference_threshold:
+                    progress_percent = (self.search_state['frames_checked'] / max(1, (self.total_frames - self.search_state['current_frame_backup']) // self.frame_difference_gap)) * 100
+                    self.search_progress_percent = progress_percent
+                    self.search_progress_text = f"Gap search: {self.search_state['target_frame']}↔{comparison_frame_num} ({diff_percentage:.1f}% change, gap: {self.frame_difference_gap}) - Press ; to cancel"
+
+                    # Update display frame
+                    self.current_frame = comparison_frame_num
+                    self.current_display_frame = comparison_frame
+                    self.display_needs_update = True
+
+                # Check if found interesting point
+                if diff_percentage >= self.frame_difference_threshold:
+                    full_diff = self.calculate_frame_difference(self.search_state['base_frame'], comparison_frame)
+                    print(f"Found interesting point between frames {self.search_state['target_frame']} and {comparison_frame_num} ({full_diff:.1f}% change)")
+                    self.show_feedback_message(f"Interesting: {full_diff:.1f}% change over {self.frame_difference_gap} frames")
+
+                    self.current_frame = comparison_frame_num
+                    self.current_display_frame = comparison_frame
+
+                    # Clean up search state
+                    self.search_state = None
+                    self.searching_interesting_point = False
+                    self.search_progress_text = ""
+                    self.display_needs_update = True
+                    return
+
+                # Move to next comparison
+                self.search_state['target_frame'] += self.frame_difference_gap
+                self.search_state['base_frame'] = comparison_frame.copy() if comparison_frame is not None else None
+
+        except Exception as e:
+            print(f"Error during search: {e}")
+            self.search_state['completed'] = True
+
     def go_to_next_interesting_point(self):
         """Go to the next frame where the difference from the previous frame exceeds the threshold"""
         if self.is_image_mode:
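
Note: continue_interesting_point_search is driven by the editor's main loop and performs only a few comparisons per call, so the UI keeps redrawing and handling keys while the scan runs. A generic sketch of that budgeted, resumable pattern (hypothetical names, not the editor's API):

    def make_search(total_steps, is_interesting):
        """Build a resumable search; each call to tick() does a small budget of work."""
        state = {"step": 0}

        def tick(steps_per_call=3):
            """Return (done, result); result is the matching step or None."""
            for _ in range(steps_per_call):
                if state["step"] >= total_steps:
                    return True, None
                if is_interesting(state["step"]):
                    return True, state["step"]
                state["step"] += 1
            return False, None

        return tick

    # The main loop interleaves search ticks with drawing and input handling:
    tick = make_search(100, lambda i: i == 42)
    done, result = False, None
    while not done:
        done, result = tick()   # a few steps of work per frame of the UI
        # ...redraw UI, poll keys, allow cancellation here...
    print(result)  # -> 42
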
@@ -1503,98 +1581,36 @@ class VideoEditor:

         print(f"Searching for next interesting point from frame {current_frame_backup + 1} with threshold {self.frame_difference_threshold:.1f}% (gap: {self.frame_difference_gap} frames)")

-        # Start searching from the next frame
-        target_frame = current_frame_backup + 1
-        search_cancelled = False
-        frames_checked = 0
-        total_frames_to_check = self.total_frames - target_frame
+        # Initialize search state for main loop processing instead of blocking
+        self.search_state = {
+            'current_frame_backup': current_frame_backup,
+            'target_frame': current_frame_backup + 1,
+            'frames_checked': 0,
+            'base_frame': None,
+            'base_frame_num': None,
+            'search_cancelled': False,
+            'update_interval': 10
+        }

         # Enable search mode for OSD display
         self.searching_interesting_point = True
+        self.search_progress_text = f"Starting search from frame {current_frame_backup + 1} (threshold: {self.frame_difference_threshold:.1f}%, gap: {self.frame_difference_gap} frames) - Press ; to cancel"
+        self.search_progress_percent = 0
+        self.display_needs_update = True

-        # Fast search using N-frame gap comparisons
-        try:
-            # Performance optimization: sample frames for faster processing
-            sample_size = (320, 240)  # Small sample size for fast difference calculation
-            update_interval = 10  # Update OSD every 10 comparisons
-
-            # Read the first frame to start comparisons
-            self.cap.cap.set(cv2.CAP_PROP_POS_FRAMES, current_frame_backup)
-            ret, base_frame = self.cap.cap.read()
-            if not ret:
-                search_cancelled = True
-                raise Exception("Could not read base frame")
-
-            base_frame_num = current_frame_backup
-            while target_frame < self.total_frames:
-                # Check for cancellation key (less frequent checks for speed)
-                if target_frame % 10 == 0:
-                    key = cv2.waitKey(1) & 0xFF
-                    if key == ord(";"):
-                        search_cancelled = True
-                        print("Search cancelled")
-                        break
-
-                # Read comparison frame that's N frames ahead
-                comparison_frame_num = min(target_frame + self.frame_difference_gap, self.total_frames - 1)
-                self.cap.cap.set(cv2.CAP_PROP_POS_FRAMES, comparison_frame_num)
-                ret, comparison_frame = self.cap.cap.read()
-                if not ret:
-                    break
-
-                frames_checked += 1
-
-                # Fast difference calculation using downsampled frames
-                base_small = cv2.resize(base_frame, sample_size)
-                comparison_small = cv2.resize(comparison_frame, sample_size)
-
-                # Calculate frame difference between frames N apart
-                diff_percentage = self.calculate_frame_difference(base_small, comparison_small)
-
-                # Update OSD less frequently for speed
-                if frames_checked % update_interval == 0 or diff_percentage >= self.frame_difference_threshold:
-                    progress_percent = (frames_checked / max(1, (self.total_frames - current_frame_backup) // self.frame_difference_gap)) * 100
-                    self.search_progress_percent = progress_percent
-                    self.search_progress_text = f"Gap search: {base_frame_num}↔{comparison_frame_num} ({diff_percentage:.1f}% change, gap: {self.frame_difference_gap}) - Press ; to cancel"
-
-                    # Force display update to show search progress
-                    self.display_needs_update = True
-                    self.display_current_frame()
-
-                # Check if difference exceeds threshold
-                if diff_percentage >= self.frame_difference_threshold:
-                    # Re-calculate with full resolution for accuracy
-                    full_diff = self.calculate_frame_difference(base_frame, comparison_frame)
-                    print(f"Found interesting point between frames {base_frame_num} and {comparison_frame_num} ({full_diff:.1f}% change)")
-                    self.show_feedback_message(f"Interesting: {full_diff:.1f}% change over {self.frame_difference_gap} frames")
-
-                    # Go to the later frame in the comparison
-                    self.current_frame = comparison_frame_num
-                    self.current_display_frame = comparison_frame
-                    break
-
-                # Move base frame forward for next comparison
-                target_frame += self.frame_difference_gap
-                base_frame_num = target_frame
-                base_frame = comparison_frame.copy() if comparison_frame is not None else None
-
-        except Exception as e:
-            print(f"Error during search: {e}")
-            search_cancelled = True
-
-        # Disable search mode
-        self.searching_interesting_point = False
-        self.search_progress_text = ""
-
-        # If no interesting point found or search was cancelled, go back to original frame
-        if search_cancelled:
-            self.seek_to_frame(current_frame_backup)
-            self.show_feedback_message("Search cancelled")
-        elif target_frame >= self.total_frames:
-            print(f"No interesting point found within threshold in remaining frames")
-            self.seek_to_frame(current_frame_backup)
-            self.show_feedback_message("No interesting point found")
+        # Read the first frame to start comparisons
+        self.cap.cap.set(cv2.CAP_PROP_POS_FRAMES, current_frame_backup)
+        ret, base_frame = self.cap.cap.read()
+        if not ret:
+            self.search_state['search_cancelled'] = True
+            print("Could not read base frame")
+            return
+
+        self.search_state['base_frame'] = base_frame
+        self.search_state['base_frame_num'] = current_frame_backup
+
+        # Let main loop handle the search - don't block here
+        return

     def _get_previous_tracking_point(self):
         """Get the tracking point from the previous frame that has tracking points."""
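
Note: stripped of the editor state, the search is just repeated gap comparisons: compare the current sample with the one `gap` frames ahead, advance by `gap`, and stop when the changed-pixel percentage crosses the threshold. A blocking standalone sketch of that idea in plain OpenCV (not croppa's non-blocking version above; the per-pixel threshold of 30 is an arbitrary choice for the sketch):

    import cv2

    def percent_changed(frame_a, frame_b, pixel_thresh=30):
        """Percentage of pixels whose grayscale difference exceeds pixel_thresh."""
        gray_a = cv2.cvtColor(frame_a, cv2.COLOR_BGR2GRAY)
        gray_b = cv2.cvtColor(frame_b, cv2.COLOR_BGR2GRAY)
        diff = cv2.absdiff(gray_a, gray_b)
        _, mask = cv2.threshold(diff, pixel_thresh, 255, cv2.THRESH_BINARY)
        return 100.0 * cv2.countNonZero(mask) / mask.size

    def find_next_interesting_frame(video_path, start_frame, threshold=10.0, gap=10):
        """Scan forward in gap-sized steps; return the first frame number whose
        difference from the previous sample exceeds threshold percent, or None."""
        cap = cv2.VideoCapture(str(video_path))
        total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
        ok, base = cap.read()
        frame_num = start_frame
        while ok and frame_num + gap < total:
            frame_num = min(frame_num + gap, total - 1)
            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_num)
            ok, comparison = cap.read()
            if not ok:
                break
            if percent_changed(base, comparison) >= threshold:
                cap.release()
                return frame_num
            base = comparison
        cap.release()
        return None
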
@@ -1745,7 +1761,7 @@ class VideoEditor:
         return processed_frame

     def calculate_frame_difference(self, frame1, frame2) -> float:
-        """Calculate percentage difference between two frames"""
+        """Calculate percentage difference between two frames, optionally within a region"""
        if frame1 is None or frame2 is None:
             return 0.0

@@ -1755,16 +1771,36 @@ class VideoEditor:
             # Resize frame2 to match frame1
             frame2 = cv2.resize(frame2, (frame1.shape[1], frame1.shape[0]))

-        # Convert to grayscale for difference calculation
-        if len(frame1.shape) == 3:
-            gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
-        else:
-            gray1 = frame1
-
-        if len(frame2.shape) == 3:
-            gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
-        else:
-            gray2 = frame2
+        # Apply region selection if set
+        if self.interesting_region is not None:
+            x, y, w, h = self.interesting_region
+
+            # Ensure region is within frame bounds
+            x = max(0, min(x, frame1.shape[1] - 1))
+            y = max(0, min(y, frame1.shape[0] - 1))
+            w = min(w, frame1.shape[1] - x)
+            h = min(h, frame1.shape[0] - y)
+
+            if w <= 0 or h <= 0:
+                return 0.0
+
+            frame1_region = frame1[y:y+h, x:x+w]
+            frame2_region = frame2[y:y+h, x:x+w]
+        else:
+            # Use full frames
+            frame1_region = frame1
+            frame2_region = frame2
+
+        # Convert to grayscale for difference calculation
+        if len(frame1_region.shape) == 3:
+            gray1 = cv2.cvtColor(frame1_region, cv2.COLOR_BGR2GRAY)
+        else:
+            gray1 = frame1_region
+
+        if len(frame2_region.shape) == 3:
+            gray2 = cv2.cvtColor(frame2_region, cv2.COLOR_BGR2GRAY)
+        else:
+            gray2 = frame2_region

         # Calculate absolute difference
         diff = cv2.absdiff(gray1, gray2)
@@ -1777,6 +1813,9 @@ class VideoEditor:
         changed_pixels = cv2.countNonZero(thresh_diff)
         total_pixels = gray1.size

+        if total_pixels == 0:
+            return 0.0
+
         # Calculate percentage
         difference_percentage = (changed_pixels / total_pixels) * 100.0

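
Note: the region handling added to calculate_frame_difference boils down to clamping the stored (x, y, w, h) tuple to the frame and cropping both frames before the grayscale diff (the editor itself returns 0.0 when the clamped region is empty). A standalone sketch of just that clamping/cropping step, with illustrative helper names that are not in croppa/main.py:

    def clamp_region(region, width, height):
        """Clamp an (x, y, w, h) region to a width x height frame; None if empty."""
        x, y, w, h = region
        x = max(0, min(x, width - 1))
        y = max(0, min(y, height - 1))
        w = min(w, width - x)
        h = min(h, height - y)
        if w <= 0 or h <= 0:
            return None
        return (x, y, w, h)

    def crop_to_region(frame, region):
        """Crop a frame to a clamped region; fall back to the full frame."""
        if region is None:
            return frame
        clamped = clamp_region(region, frame.shape[1], frame.shape[0])
        if clamped is None:
            return frame
        x, y, w, h = clamped
        return frame[y:y + h, x:x + w]
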
@@ -1813,6 +1852,93 @@ class VideoEditor:
         return (x, y, w, h)


+    def toggle_interesting_region_selection(self):
+        """Toggle region selection mode for interesting point detection"""
+        # If a region is already defined and we're not currently selecting, clear the region
+        if self.interesting_region is not None and not self.selecting_interesting_region:
+            self.interesting_region = None
+            print("Interesting point region cleared")
+            self.show_feedback_message("Region cleared")
+            self.display_needs_update = True
+            return
+
+        if self.selecting_interesting_region:
+            # Finish region selection
+            self.selecting_interesting_region = False
+            if (self.region_selection_start is not None and
+                    self.region_selection_current is not None):
+
+                # Calculate region rectangle
+                x1, y1 = self.region_selection_start
+                x2, y2 = self.region_selection_current
+
+                x = min(x1, x2)
+                y = min(y1, y2)
+                w = abs(x2 - x1)
+                h = abs(y2 - y1)
+
+                if w > 5 and h > 5:  # Minimum size threshold
+                    # Get raw frame dimensions for direct coordinate mapping
+                    frame_height, frame_width = self.current_display_frame.shape[:2]
+
+                    # Calculate display scaling (how much the frame is scaled to fit on screen)
+                    available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
+                    scale_x = frame_width / self.window_width  # This is wrong - need to calculate actual display scale
+
+                    # Let's use a simpler approach - just proportionally map screen coords to frame coords
+                    # This assumes the frame is centered and scaled to fit
+                    display_scale = min(self.window_width / frame_width, available_height / frame_height)
+                    if display_scale > 1:
+                        display_scale = 1  # Frame is smaller than window, no scaling
+
+                    # Calculate displayed dimensions
+                    display_width = int(frame_width * display_scale)
+                    display_height = int(frame_height * display_scale)
+
+                    # Calculate offset (frame is centered on screen)
+                    offset_x = (self.window_width - display_width) // 2
+                    offset_y = (available_height - display_height) // 2
+
+                    # Map screen coordinates to frame coordinates
+                    # Adjust for the offset and scale
+                    frame_x = int((x - offset_x) / display_scale)
+                    frame_y = int((y - offset_y) / display_scale)
+                    frame_x2 = int((x + w - offset_x) / display_scale)
+                    frame_y2 = int((y + h - offset_y) / display_scale)
+
+                    frame_w = frame_x2 - frame_x
+                    frame_h = frame_y2 - frame_y
+
+                    # Ensure coordinates are within frame bounds
+                    frame_x = max(0, min(frame_x, frame_width - 1))
+                    frame_y = max(0, min(frame_y, frame_height - 1))
+                    frame_x2 = max(0, min(frame_x2, frame_width - 1))
+                    frame_y2 = max(0, min(frame_y2, frame_height - 1))
+                    frame_w = max(1, frame_x2 - frame_x)
+                    frame_h = max(1, frame_y2 - frame_y)
+
+                    self.interesting_region = (frame_x, frame_y, frame_w, frame_h)
+                    print(f"Interesting point region set: ({frame_x}, {frame_y}, {frame_w}, {frame_h})")
+                    self.show_feedback_message(f"Region set: {frame_w}x{frame_h}")
+                else:
+                    # Region too small, clear it
+                    self.interesting_region = None
+                    print("Region too small, cleared")
+                    self.show_feedback_message("Region cleared")
+
+            # Reset selection state
+            self.region_selection_start = None
+            self.region_selection_current = None
+            self.display_needs_update = True
+
+        else:
+            # Start region selection
+            self.selecting_interesting_region = True
+            self.region_selection_start = None
+            self.region_selection_current = None
+            print("Select region for interesting point detection (click and drag)")
+            self.show_feedback_message("Select region (click and drag)")
+
     def _get_interpolated_tracking_position(self, frame_number):
         """Linear interpolation in ROTATED frame coords. Returns (rx, ry) or None."""
         # Get base position from manual tracking points
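
Note: the coordinate mapping in toggle_interesting_region_selection reduces to computing the fit-to-window scale (capped at 1), the centering offsets, and then inverting that transform for the selection corners. A standalone sketch of the same arithmetic, under the assumption the comments above state (frame centered and scaled down to fit, never scaled up):

    def screen_to_frame(sx, sy, frame_w, frame_h, win_w, win_h):
        """Map a window-space point to frame-space for a centered, fit-to-window frame."""
        scale = min(win_w / frame_w, win_h / frame_h, 1.0)
        disp_w, disp_h = int(frame_w * scale), int(frame_h * scale)
        offset_x = (win_w - disp_w) // 2
        offset_y = (win_h - disp_h) // 2
        fx = int((sx - offset_x) / scale)
        fy = int((sy - offset_y) / scale)
        # Clamp to valid pixel coordinates
        return max(0, min(fx, frame_w - 1)), max(0, min(fy, frame_h - 1))

    # Example: a 1920x1080 frame letterboxed into a 1280x720 window; the window
    # centre maps back to the frame centre.
    print(screen_to_frame(640, 360, 1920, 1080, 1280, 720))  # -> (960, 540)
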
@@ -3172,7 +3298,8 @@ class VideoEditor:
         )

         # Draw frame difference threshold info
-        threshold_text = f"Interesting: {self.frame_difference_threshold:.0f}% (gap: {self.frame_difference_gap})"
+        region_status = "region" if self.interesting_region else "full frame"
+        threshold_text = f"Interesting: {self.frame_difference_threshold:.0f}% (gap: {self.frame_difference_gap}, {region_status})"
         cv2.putText(
             frame,
             threshold_text,
@@ -3198,12 +3325,15 @@ class VideoEditor:
             self.contrast,
             self.display_offset,
             self.progress_bar_visible,
-            self.feedback_message
+            self.feedback_message,
+            self.searching_interesting_point,
+            self.search_progress_text,
+            self.search_progress_percent
         )

-        # Always update display when paused to ensure UI elements are visible
-        if not self.display_needs_update and current_state == self.last_display_state and self.is_playing:
-            return  # Skip redraw if nothing changed and playing
+        # Always update display when paused or when searching to ensure UI elements are visible
+        if not self.display_needs_update and current_state == self.last_display_state and self.is_playing and not self.searching_interesting_point:
+            return  # Skip redraw if nothing changed and playing (but not when searching)

         self.last_display_state = current_state
         self.display_needs_update = False
@@ -3548,6 +3678,45 @@ class VideoEditor:
         if fill_width > 0:
             cv2.rectangle(canvas, (bar_x, bar_y), (bar_x + fill_width, bar_y + bar_height), (0, 255, 0), -1)

+        # Draw interesting point region selection
+        if self.interesting_region is not None:
+            # Draw the selected region on screen
+            x, y, w, h = self.interesting_region
+
+            # Convert frame coordinates to screen coordinates
+            sx1, sy1 = self._map_rotated_to_screen(x, y)
+            sx2, sy2 = self._map_rotated_to_screen(x + w, y + h)
+
+            # Draw region outline (cyan color for interesting point region)
+            cv2.rectangle(canvas, (sx1, sy1), (sx2, sy2), (0, 255, 255), 2)
+
+            # Draw corner indicators
+            corner_size = 8
+            corners = [
+                (sx1, sy1), (sx2, sy1), (sx1, sy2), (sx2, sy2)
+            ]
+            for cx, cy in corners:
+                cv2.line(canvas, (cx - corner_size//2, cy), (cx + corner_size//2, cy), (0, 255, 255), 2)
+                cv2.line(canvas, (cx, cy - corner_size//2), (cx, cy + corner_size//2), (0, 255, 255), 2)
+
+        # Draw region selection in progress
+        if self.selecting_interesting_region and self.region_selection_start and self.region_selection_current:
+            x1, y1 = self.region_selection_start
+            x2, y2 = self.region_selection_current
+
+            # Calculate selection rectangle
+            sel_x = min(x1, x2)
+            sel_y = min(y1, y2)
+            sel_w = abs(x2 - x1)
+            sel_h = abs(y2 - y1)
+
+            # Draw selection rectangle (yellow dashed line)
+            cv2.rectangle(canvas, (sel_x, sel_y), (sel_x + sel_w, sel_y + sel_h), (0, 255, 255), 2)
+
+            # Draw selection info
+            info_text = f"Region: {sel_w}x{sel_h}"
+            cv2.putText(canvas, info_text, (sel_x, sel_y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
+
         window_title = "Image Editor" if self.is_image_mode else "Video Editor"
         cv2.imshow(window_title, canvas)

@@ -3663,6 +3832,20 @@ class VideoEditor:
             self.template_selection_start = None
             self.template_selection_rect = None

+        # Handle interesting point region selection
+        if self.selecting_interesting_region:
+            if event == cv2.EVENT_LBUTTONDOWN:
+                self.region_selection_start = (x, y)
+                self.region_selection_current = (x, y)
+                self.display_needs_update = True
+            elif event == cv2.EVENT_MOUSEMOVE and self.region_selection_start:
+                self.region_selection_current = (x, y)
+                self.display_needs_update = True
+            elif event == cv2.EVENT_LBUTTONUP and self.region_selection_start:
+                self.region_selection_current = (x, y)
+                self.toggle_interesting_region_selection()
+                self.display_needs_update = True
+
         # Handle right-click for tracking points (no modifiers)
         if event == cv2.EVENT_RBUTTONDOWN and not (flags & (cv2.EVENT_FLAG_CTRLKEY | cv2.EVENT_FLAG_SHIFTKEY)):
             if not self.is_image_mode:
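
Note: the mouse handling added above follows the usual OpenCV click-drag pattern: anchor on LBUTTONDOWN, update on MOUSEMOVE, commit on LBUTTONUP. A minimal self-contained sketch of that pattern in an isolated window (not croppa's handler; quit with ESC):

    import cv2
    import numpy as np

    selection = {"start": None, "current": None, "rect": None}

    def on_mouse(event, x, y, flags, param):
        # Anchor the drag on press, track while moving, commit on release.
        if event == cv2.EVENT_LBUTTONDOWN:
            selection["start"] = selection["current"] = (x, y)
        elif event == cv2.EVENT_MOUSEMOVE and selection["start"]:
            selection["current"] = (x, y)
        elif event == cv2.EVENT_LBUTTONUP and selection["start"]:
            (x1, y1), (x2, y2) = selection["start"], (x, y)
            selection["rect"] = (min(x1, x2), min(y1, y2), abs(x2 - x1), abs(y2 - y1))
            selection["start"] = selection["current"] = None

    canvas = np.zeros((480, 640, 3), dtype=np.uint8)
    cv2.namedWindow("select")
    cv2.setMouseCallback("select", on_mouse)
    while True:
        frame = canvas.copy()
        if selection["start"] and selection["current"]:
            cv2.rectangle(frame, selection["start"], selection["current"], (0, 255, 255), 2)
        elif selection["rect"]:
            rx, ry, rw, rh = selection["rect"]
            cv2.rectangle(frame, (rx, ry), (rx + rw, ry + rh), (0, 255, 255), 2)
        cv2.imshow("select", frame)
        if cv2.waitKey(16) & 0xFF == 27:  # ESC quits
            break
    cv2.destroyAllWindows()
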
@@ -4582,6 +4765,19 @@ class VideoEditor:
             if self.is_playing and not self.is_image_mode:
                 self.advance_frame()

+            # Continue non-blocking search if active
+            if self.searching_interesting_point and self.search_state:
+                self.continue_interesting_point_search()
+
+                # Check if search completed or was cancelled
+                if self.search_state and self.search_state.get('completed', False):
+                    # Clean up completed search
+                    self.search_state = None
+                    self.searching_interesting_point = False
+                    self.search_progress_text = ""
+                    self.show_feedback_message("Search completed - no interesting point found")
+                    self.display_needs_update = True
+
             # Key capture with appropriate delay
             key = cv2.waitKey(delay_ms) & 0xFF

@@ -4616,6 +4812,12 @@ class VideoEditor:

             if key == ord("q") or key == 27:  # ESC
                 self.stop_auto_repeat_seek()
+                # If search is active, cancel it first
+                if self.searching_interesting_point:
+                    self.searching_interesting_point = False
+                    self.search_progress_text = ""
+                    print("Search cancelled")
+                    self.show_feedback_message("Search cancelled")
                 self.save_state()
                 break
             elif key == ord("p"):  # P - Toggle project view
@@ -4743,9 +4945,20 @@ class VideoEditor:
                 if not self.is_image_mode and self.cut_end_frame is not None:
                     self.seek_to_frame(self.cut_end_frame)
                     print(f"Jumped to cut end marker at frame {self.cut_end_frame}")
-            elif key == ord(";"):  # ; - Go to next interesting point
+            elif key == ord(";"):  # ; - Go to next interesting point or cancel search
                 if not self.is_image_mode:
-                    self.go_to_next_interesting_point()
+                    if self.searching_interesting_point and self.search_state:
+                        # Cancel ongoing search
+                        self.search_state = None
+                        self.searching_interesting_point = False
+                        self.search_progress_text = ""
+                        print("Search cancelled")
+                        self.show_feedback_message("Search cancelled")
+                        self.display_needs_update = True
+                    else:
+                        self.go_to_next_interesting_point()
+            elif key == ord("'"):  # ' (apostrophe) - Toggle region selection mode
+                self.toggle_interesting_region_selection()
             elif key == ord("0"):  # 0 - Decrease frame difference threshold
                 self.frame_difference_threshold = max(1.0, self.frame_difference_threshold - 1.0)
                 print(f"Frame difference threshold: {self.frame_difference_threshold:.1f}%")
@@ -4759,7 +4972,7 @@ class VideoEditor:
                 print(f"Frame difference gap: {self.frame_difference_gap} frames")
                 self.show_feedback_message(f"Gap: {self.frame_difference_gap} frames")
             elif key == ord("8"):  # 8 - Increase frame difference gap
-                self.frame_difference_gap = min(100, self.frame_difference_gap + 1)
+                self.frame_difference_gap = self.frame_difference_gap + 1
                 print(f"Frame difference gap: {self.frame_difference_gap} frames")
                 self.show_feedback_message(f"Gap: {self.frame_difference_gap} frames")
             elif key == ord("N"):