Remove a bunch of cringe from grader
main.py: 196 changed lines

@@ -295,47 +295,6 @@ class MediaGrader:
            merged.append(tuple(new_region))
        self.watched_regions[file_path] = merged

    def find_largest_unwatched_region(self):
        """Find the largest unwatched region in the current video"""
        if not self.is_video(self.media_files[self.current_index]):
            return None

        current_file = str(self.media_files[self.current_index])
        watched = self.watched_regions.get(current_file, [])

        if not watched:
            # No regions watched yet, return the beginning
            return (0, self.total_frames // 4)

        # Sort watched regions by start frame
        watched.sort(key=lambda x: x[0])

        # Find gaps between watched regions
        gaps = []

        # Gap before first watched region
        if watched[0][0] > 0:
            gaps.append((0, watched[0][0]))

        # Gaps between watched regions
        for i in range(len(watched) - 1):
            gap_start = watched[i][1]
            gap_end = watched[i + 1][0]
            if gap_end > gap_start:
                gaps.append((gap_start, gap_end))

        # Gap after last watched region
        if watched[-1][1] < self.total_frames:
            gaps.append((watched[-1][1], self.total_frames))

        if not gaps:
            # Everything watched, return None
            return None

        # Return the largest gap
        largest_gap = max(gaps, key=lambda x: x[1] - x[0])
        return largest_gap

    def get_sample_points(self):
        """Get standardized sample points for video navigation"""
        segments = 8  # Divide video into 8 segments for sampling
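
For reference, a minimal standalone sketch of the gap-finding logic the removed find_largest_unwatched_region implemented; the frame numbers are invented for illustration and the snippet is not part of the commit:

total_frames = 1000
watched = [(100, 250), (600, 700)]             # (start, end) frame ranges already seen
watched.sort(key=lambda r: r[0])

gaps = []
if watched[0][0] > 0:                          # gap before the first watched region
    gaps.append((0, watched[0][0]))
for (_, prev_end), (next_start, _) in zip(watched, watched[1:]):
    if next_start > prev_end:                  # gap between consecutive regions
        gaps.append((prev_end, next_start))
if watched[-1][1] < total_frames:              # gap after the last watched region
    gaps.append((watched[-1][1], total_frames))

print(max(gaps, key=lambda g: g[1] - g[0]))    # -> (250, 600), the largest unwatched span
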
@@ -489,36 +448,6 @@ class MediaGrader:
        print(f"Bisected forwards to frame {midpoint} ({percentage:.1f}% through video)")
        return True

    def undo_jump(self):
        """Undo the last L jump by returning to previous position"""
        if not self.is_video(self.media_files[self.current_index]):
            return False

        current_file = str(self.media_files[self.current_index])

        if current_file not in self.jump_history or not self.jump_history[current_file]:
            print("No jump history to undo. Use L first to establish jump points.")
            return False

        # Get the last position before the most recent jump
        if len(self.jump_history[current_file]) < 1:
            print("No previous position to return to.")
            return False

        # Remove the current position from history and get the previous one
        previous_position = self.jump_history[current_file].pop()

        # Jump back to previous position
        self.current_cap.set(cv2.CAP_PROP_POS_FRAMES, previous_position)
        self.load_current_frame()

        # Update last jump position for bisection reference
        self.last_jump_position[current_file] = previous_position

        percentage = (previous_position / self.total_frames) * 100
        print(f"Undid jump: returned to frame {previous_position} ({percentage:.1f}% through video)")
        return True

    def toggle_multi_segment_mode(self):
        """Toggle between single and multi-segment video mode"""
        if not self.is_video(self.media_files[self.current_index]):
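
As a side note, the jump history that the removed undo_jump consumed is a per-file stack of frame positions popped in LIFO order. A tiny sketch of that bookkeeping (the file name and helper names are hypothetical, not taken from the commit):

jump_history = {"clip.mp4": []}

def record_jump(path, frame):
    jump_history[path].append(frame)       # push the position we are leaving

def undo(path):
    if not jump_history.get(path):
        return None                        # nothing to undo
    return jump_history[path].pop()        # most recent saved position

record_jump("clip.mp4", 0)
record_jump("clip.mp4", 1200)
print(undo("clip.mp4"))                    # -> 1200
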
@@ -544,125 +473,12 @@ class MediaGrader:
        print(f"Timeline {'visible' if self.timeline_visible else 'hidden'}")
        return True

    def load_segment_frame_fast(self, segment_index, start_frame, shared_cap):
        """Load a single segment frame using a shared capture (much faster)"""
        segment_start_time = time.time()
        try:
            # Time the seek operation
            seek_start = time.time()
            shared_cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
            seek_time = (time.time() - seek_start) * 1000

            # Time the frame read
            read_start = time.time()
            ret, frame = shared_cap.read()
            read_time = (time.time() - read_start) * 1000

            total_time = (time.time() - segment_start_time) * 1000
            print(f"Segment {segment_index}: Total={total_time:.1f}ms (Seek={seek_time:.1f}ms, Read={read_time:.1f}ms)")

            if ret:
                return segment_index, frame.copy(), start_frame  # Copy frame since we'll reuse the capture
            else:
                return segment_index, None, start_frame
        except Exception as e:
            error_time = (time.time() - segment_start_time) * 1000
            print(f"Segment {segment_index}: ERROR in {error_time:.1f}ms: {e}")
            return segment_index, None, start_frame

    def setup_segment_captures_blazing_fast(self):
        """BLAZING FAST: Sample frames at intervals without any seeking (10-50ms total)"""
    def setup_segment_captures(self):
        if not self.is_video(self.media_files[self.current_index]):
            return

        start_time = time.time()
        print(f"Setting up {self.segment_count} segments with BLAZING FAST method...")

        # Clean up existing segment captures
        self.cleanup_segment_captures()

        current_file = self.media_files[self.current_index]

        # Initialize arrays
        self.segment_caps = [None] * self.segment_count
        self.segment_frames = [None] * self.segment_count
        self.segment_positions = [0] * self.segment_count  # We'll update these as we sample

        # BLAZING FAST METHOD: Sample frames at even intervals without seeking
        load_start = time.time()
        print("Sampling frames at regular intervals (NO SEEKING)...")

        shared_cap_start = time.time()
        shared_cap = cv2.VideoCapture(str(current_file))
        shared_cap_create_time = (time.time() - shared_cap_start) * 1000
        print(f"Capture creation: {shared_cap_create_time:.1f}ms")

        if shared_cap.isOpened():
            frames_start = time.time()

            # Calculate sampling interval
            sample_interval = max(1, self.total_frames // (self.segment_count * 2))  # Sample more frequently than needed
            print(f"Sampling every {sample_interval} frames from {self.total_frames} total frames")

            current_frame = 0
            segment_index = 0
            segments_filled = 0

            sample_start = time.time()

            while segments_filled < self.segment_count:
                ret, frame = shared_cap.read()
                if not ret:
                    break

                # Check if this frame should be used for a segment
                if segment_index < self.segment_count:
                    target_frame_for_segment = int((segment_index / max(1, self.segment_count - 1)) * (self.total_frames - 1))

                    # If we're close enough to the target frame, use this frame
                    if abs(current_frame - target_frame_for_segment) <= sample_interval:
                        self.segment_frames[segment_index] = frame.copy()
                        self.segment_positions[segment_index] = current_frame

                        print(f"Segment {segment_index}: Frame {current_frame} (target was {target_frame_for_segment})")
                        segment_index += 1
                        segments_filled += 1

                current_frame += 1

                # Skip frames to speed up sampling if we have many frames
                if sample_interval > 1:
                    for _ in range(sample_interval - 1):
                        ret, _ = shared_cap.read()
                        if not ret:
                            break
                        current_frame += 1
                    if not ret:
                        break

            sample_time = (time.time() - sample_start) * 1000
            frames_time = (time.time() - frames_start) * 1000
            print(f"Frame sampling: {sample_time:.1f}ms for {segments_filled} segments")
            print(f"Total frame loading: {frames_time:.1f}ms")

            shared_cap.release()
        else:
            print("Failed to create shared capture!")

        total_time = time.time() - start_time
        print(f"BLAZING FAST Total setup time: {total_time * 1000:.1f}ms")

        # Report success
        successful_segments = sum(1 for frame in self.segment_frames if frame is not None)
        print(f"Successfully sampled {successful_segments}/{self.segment_count} segments")

    def setup_segment_captures_lightning_fast(self):
        """LIGHTNING FAST: Use intelligent skipping to get segments in minimal time"""
        if not self.is_video(self.media_files[self.current_index]):
            return

        start_time = time.time()
        print(f"Setting up {self.segment_count} segments with LIGHTNING FAST method...")
        print(f"Setting up {self.segment_count} segments...")

        # Clean up existing segment captures
        self.cleanup_segment_captures()
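
The "no seeking" trick in the method above boils down to reading the stream sequentially and keeping whichever frames land closest to evenly spaced targets, rather than calling CAP_PROP_POS_FRAMES for each one. A minimal sketch of that idea follows; it is a simplified variant (using grab() to skip frames) rather than the code removed here, and "video.mp4" is a placeholder path:

import cv2

cap = cv2.VideoCapture("video.mp4")
total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
segment_count = 8
targets = [int(i / (segment_count - 1) * (total - 1)) for i in range(segment_count)]

frames, pos = {}, 0
for target in targets:
    while pos < target:          # advance without decoding the skipped frames
        cap.grab()
        pos += 1
    ret, frame = cap.read()      # decode only the frame we keep
    pos += 1
    if not ret:
        break
    frames[target] = frame

cap.release()
print(f"sampled {len(frames)}/{segment_count} frames")
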
@@ -680,9 +496,7 @@ class MediaGrader:
            start_frame = int(position_ratio * (self.total_frames - 1))
            self.segment_positions.append(start_frame)

        # LIGHTNING FAST: Smart skipping strategy
        load_start = time.time()
        print("Using SMART SKIPPING strategy...")

        shared_cap_start = time.time()
        shared_cap = cv2.VideoCapture(str(current_file))
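
To make the spacing concrete: assuming position_ratio is i / (segment_count - 1), as in the target computation earlier in this diff, eight segments over a 1000-frame clip start at the frames printed below (example numbers only, not output from the commit):

total_frames, segment_count = 1000, 8     # example values
for i in range(segment_count):
    position_ratio = i / (segment_count - 1)
    print(i, int(position_ratio * (total_frames - 1)))
# 0 0, 1 142, 2 285, 3 428, 4 570, 5 713, 6 856, 7 999
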
@@ -742,16 +556,12 @@ class MediaGrader:
            print("Failed to create shared capture!")

        total_time = time.time() - start_time
        print(f"LIGHTNING FAST Total setup time: {total_time * 1000:.1f}ms")
        print(f"Total setup time: {total_time * 1000:.1f}ms")

        # Report success
        successful_segments = sum(1 for frame in self.segment_frames if frame is not None)
        print(f"Successfully approximated {successful_segments}/{self.segment_count} segments")

    def setup_segment_captures(self):
        """Use the lightning fast approximation method for maximum speed"""
        self.setup_segment_captures_lightning_fast()

    def cleanup_segment_captures(self):
        """Clean up all segment video captures"""
        for cap in self.segment_caps: