feat(main.py): implement video sampling and watch tracking features
This commit is contained in:
169
main.py
169
main.py
@@ -79,6 +79,11 @@ class MediaGrader:
|
|||||||
# Undo functionality
|
# Undo functionality
|
||||||
self.undo_history = [] # List of (source_path, destination_path, original_index) tuples
|
self.undo_history = [] # List of (source_path, destination_path, original_index) tuples
|
||||||
|
|
||||||
|
# Watch tracking for "good look" feature
|
||||||
|
self.watched_regions = {} # Dict[file_path: List[Tuple[start_frame, end_frame]]]
|
||||||
|
self.current_watch_start = None # Frame where current viewing session started
|
||||||
|
self.last_frame_position = 0 # Track last known frame position
|
||||||
|
|
||||||
def find_media_files(self) -> List[Path]:
|
def find_media_files(self) -> List[Path]:
|
||||||
"""Find all media files recursively in the directory"""
|
"""Find all media files recursively in the directory"""
|
||||||
media_files = []
|
media_files = []
|
||||||
@@ -138,6 +143,10 @@ class MediaGrader:
|
|||||||
|
|
||||||
# Load initial frame
|
# Load initial frame
|
||||||
self.load_current_frame()
|
self.load_current_frame()
|
||||||
|
|
||||||
|
# Start watch tracking session for videos
|
||||||
|
self.start_watch_session()
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def load_current_frame(self):
|
def load_current_frame(self):
|
||||||
@@ -159,6 +168,158 @@ class MediaGrader:
|
|||||||
return True
|
return True
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
def start_watch_session(self):
    """Begin tracking a viewing session for the current media file.

    Does nothing for non-video files.  For videos, the current frame is
    remembered as both the session start and the last known playhead
    position, ready for update_watch_tracking() to build regions from.
    """
    if self.is_video(self.media_files[self.current_index]):
        self.current_watch_start = self.current_frame
        self.last_frame_position = self.current_frame
|
||||||
|
|
||||||
|
def update_watch_tracking(self):
    """Fold the current playback position into the watched-region map.

    When the playhead has jumped more than 5 frames since the last update
    (a seek), or drifted more than 30 frames from the session start
    (sustained viewing), the span between the session start and the last
    known position is recorded as watched and a new session begins at the
    current frame.

    NOTE(review): the trailing ``last_frame_position`` update is placed
    outside the ``if`` (run on every call) so the >5 check compares
    consecutive updates — confirm against the original file's indentation.
    """
    # Only meaningful for videos with an active viewing session.
    if not self.is_video(self.media_files[self.current_index]) or self.current_watch_start is None:
        return

    current_file = str(self.media_files[self.current_index])

    # Seek detected (>5 frames from last position) or enough continuous
    # viewing accumulated (>30 frames from session start)?
    if abs(self.current_frame - self.last_frame_position) > 5 or \
       abs(self.current_frame - self.current_watch_start) > 30:

        # Record the span we just watched.
        start_frame = min(self.current_watch_start, self.last_frame_position)
        end_frame = max(self.current_watch_start, self.last_frame_position)

        if current_file not in self.watched_regions:
            self.watched_regions[current_file] = []

        # Merge with existing regions if they overlap.
        self.add_watched_region(current_file, start_frame, end_frame)

        # Start a new session from the current position.
        self.current_watch_start = self.current_frame

    # Remember where the playhead was for the next update.
    self.last_frame_position = self.current_frame
|
||||||
|
|
||||||
|
def add_watched_region(self, file_path, start_frame, end_frame):
    """Add a watched region, merging with existing overlapping regions.

    The stored list for ``file_path`` is kept sorted by start frame, with
    no overlapping (or touching) regions, each stored as a tuple.

    The original single-pass merge could leave overlapping entries: once
    the new region grew by absorbing one region, it could come to overlap
    a region that had already been emitted as "no overlap".  Sorting first
    and merging linearly closes that hole.
    """
    # setdefault both initializes the list on first use and fetches it.
    regions = self.watched_regions.setdefault(file_path, [])
    regions.append((start_frame, end_frame))
    regions.sort(key=lambda region: region[0])

    # Linear merge over the sorted regions: touching counts as overlap,
    # matching the original's non-strict comparison.
    merged = [regions[0]]
    for start, end in regions[1:]:
        last_start, last_end = merged[-1]
        if start <= last_end:
            merged[-1] = (last_start, max(last_end, end))
        else:
            merged.append((start, end))

    self.watched_regions[file_path] = merged
|
||||||
|
|
||||||
|
def find_largest_unwatched_region(self):
    """Return (start_frame, end_frame) of the largest unwatched gap.

    Returns None for non-video files and for fully watched videos.  When
    nothing has been watched yet, suggests the first quarter of the video.

    Fix: the original sorted ``self.watched_regions``'s list in place as a
    hidden side effect; this version sorts a copy.
    """
    if not self.is_video(self.media_files[self.current_index]):
        return None

    current_file = str(self.media_files[self.current_index])
    watched = self.watched_regions.get(current_file, [])

    if not watched:
        # No regions watched yet: suggest the beginning of the video.
        return (0, self.total_frames // 4)

    # Sorted copy so the stored list is never reordered behind callers.
    watched = sorted(watched, key=lambda region: region[0])

    gaps = []

    # Gap before the first watched region.
    if watched[0][0] > 0:
        gaps.append((0, watched[0][0]))

    # Gaps between consecutive watched regions.
    for earlier, later in zip(watched, watched[1:]):
        if later[0] > earlier[1]:
            gaps.append((earlier[1], later[0]))

    # Gap after the last watched region.
    if watched[-1][1] < self.total_frames:
        gaps.append((watched[-1][1], self.total_frames))

    if not gaps:
        # Every frame is covered by a watched region.
        return None

    # Widest gap wins; max() keeps the earliest on ties.
    return max(gaps, key=lambda gap: gap[1] - gap[0])
|
||||||
|
|
||||||
|
def jump_to_unwatched_region(self):
    """Jump to the next pre-planned sample point in the current video.

    NOTE: despite the name, this does not consult ``watched_regions`` (see
    find_largest_unwatched_region, which is unused here).  It walks a fixed
    schedule of sample points — 1/4, 1/2, 3/4, then the remaining eighths,
    then frame 0 — one point per call, so repeated presses survey the
    whole video.

    Returns:
        True when a jump was made; False when the file is not a video,
        the video is too short to segment, or all sample points have
        already been visited.
    """
    if not self.is_video(self.media_files[self.current_index]):
        return False

    current_file = str(self.media_files[self.current_index])

    # Lazily created per-file counter of visited sample points.
    if not hasattr(self, 'jump_counters'):
        self.jump_counters = {}
    if current_file not in self.jump_counters:
        self.jump_counters[current_file] = 0

    # Divide the video into 8 segments and sample their boundaries.
    segments = 8
    segment_size = self.total_frames // segments

    if segment_size == 0:
        print("Video too short for sampling")
        return False

    # Coarse landmarks first (1/4, 1/2, 3/4), then the remaining eighths,
    # finishing at the very beginning.
    sample_points = [
        segment_size * 2,  # 1/4 through
        segment_size * 4,  # 1/2 through
        segment_size * 6,  # 3/4 through
        segment_size * 1,  # 1/8 through
        segment_size * 3,  # 3/8 through
        segment_size * 5,  # 5/8 through
        segment_size * 7,  # 7/8 through
        0,                 # Beginning
    ]

    current_jump = self.jump_counters[current_file]

    if current_jump >= len(sample_points):
        print("All sample points visited! Video fully sampled.")
        return False

    # Clamp in case total_frames is not an exact multiple of the segments.
    target_frame = min(sample_points[current_jump], self.total_frames - 1)

    # Seek and refresh the displayed frame.
    self.current_cap.set(cv2.CAP_PROP_POS_FRAMES, target_frame)
    self.load_current_frame()

    self.jump_counters[current_file] += 1

    # Report progress as a percentage through the video.
    percentage = (target_frame / self.total_frames) * 100
    print(f"Sample {current_jump + 1}/{len(sample_points)}: jumped to frame {target_frame} ({percentage:.1f}% through video)")
    return True
|
||||||
|
|
||||||
def display_current_frame(self):
|
def display_current_frame(self):
|
||||||
"""Display the current cached frame with overlays"""
|
"""Display the current cached frame with overlays"""
|
||||||
if self.current_display_frame is None:
|
if self.current_display_frame is None:
|
||||||
@@ -282,6 +443,10 @@ class MediaGrader:
|
|||||||
|
|
||||||
self.current_display_frame = frame
|
self.current_display_frame = frame
|
||||||
self.current_frame = int(self.current_cap.get(cv2.CAP_PROP_POS_FRAMES))
|
self.current_frame = int(self.current_cap.get(cv2.CAP_PROP_POS_FRAMES))
|
||||||
|
|
||||||
|
# Update watch tracking
|
||||||
|
self.update_watch_tracking()
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def seek_video(self, frames_delta: int):
|
def seek_video(self, frames_delta: int):
|
||||||
@@ -458,6 +623,7 @@ class MediaGrader:
|
|||||||
print(" N: Next file")
|
print(" N: Next file")
|
||||||
print(" P: Previous file")
|
print(" P: Previous file")
|
||||||
print(" U: Undo last grading action")
|
print(" U: Undo last grading action")
|
||||||
|
print(" L: Sample video at key points (videos only)")
|
||||||
print(" Q/ESC: Quit")
|
print(" Q/ESC: Quit")
|
||||||
|
|
||||||
cv2.namedWindow("Media Grader", cv2.WINDOW_NORMAL)
|
cv2.namedWindow("Media Grader", cv2.WINDOW_NORMAL)
|
||||||
@@ -512,6 +678,9 @@ class MediaGrader:
|
|||||||
if self.undo_last_action():
|
if self.undo_last_action():
|
||||||
# File was restored, reload it
|
# File was restored, reload it
|
||||||
break
|
break
|
||||||
|
elif key == ord("l"):
|
||||||
|
# Jump to largest unwatched region
|
||||||
|
self.jump_to_unwatched_region()
|
||||||
elif key in [ord("1"), ord("2"), ord("3"), ord("4"), ord("5")]:
|
elif key in [ord("1"), ord("2"), ord("3"), ord("4"), ord("5")]:
|
||||||
grade = int(chr(key))
|
grade = int(chr(key))
|
||||||
if not self.grade_media(grade):
|
if not self.grade_media(grade):
|
||||||
|
Reference in New Issue
Block a user