import os
import sys
import cv2
import argparse
import numpy as np
from pathlib import Path
from typing import List, Optional
import time
import re
import json
import threading
import queue

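# Usage sketch (illustrative; the actual CLI wiring with argparse appears later
# in this file and may differ):
#
#   editor = VideoEditor("clips/")        # a directory of media files, or
#   editor = VideoEditor("clips/a.mp4")   # a single video or image file
#
# VideoEditor raises ValueError when the path does not exist or the directory
# contains no supported media files.
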
class VideoEditor:
    # Configuration constants
    BASE_FRAME_DELAY_MS = 16  # ~60 FPS
    SPEED_INCREMENT = 0.2
    MIN_PLAYBACK_SPEED = 0.1
    MAX_PLAYBACK_SPEED = 10.0

    # Seek multiplier configuration
    SEEK_MULTIPLIER_INCREMENT = 2.0
    MIN_SEEK_MULTIPLIER = 1.0
    MAX_SEEK_MULTIPLIER = 100.0

    # Auto-repeat seeking configuration
    AUTO_REPEAT_DISPLAY_RATE = 1.0

    # Timeline configuration
    TIMELINE_HEIGHT = 60
    TIMELINE_MARGIN = 20
    TIMELINE_BAR_HEIGHT = 12
    TIMELINE_HANDLE_SIZE = 12
    TIMELINE_COLOR_BG = (80, 80, 80)
    TIMELINE_COLOR_PROGRESS = (0, 120, 255)
    TIMELINE_COLOR_HANDLE = (255, 255, 255)
    TIMELINE_COLOR_BORDER = (200, 200, 200)
    TIMELINE_COLOR_CUT_POINT = (255, 0, 0)

    # Progress bar configuration
    PROGRESS_BAR_HEIGHT = 30
    PROGRESS_BAR_MARGIN_PERCENT = 5  # 5% margin on each side
    PROGRESS_BAR_TOP_MARGIN = 20  # Fixed top margin
    PROGRESS_BAR_FADE_DURATION = 3.0  # seconds to fade out after completion
    PROGRESS_BAR_COLOR_BG = (50, 50, 50)
    PROGRESS_BAR_COLOR_FILL = (0, 255, 0)  # Green when complete
    PROGRESS_BAR_COLOR_PROGRESS = (0, 120, 255)  # Blue during progress
    PROGRESS_BAR_COLOR_BORDER = (200, 200, 200)

    # Zoom and crop settings
    MIN_ZOOM = 0.1
    MAX_ZOOM = 10.0
    ZOOM_INCREMENT = 0.1

    # Supported video extensions
    VIDEO_EXTENSIONS = {".mp4", ".avi", ".mov", ".mkv", ".wmv", ".flv", ".webm", ".m4v"}

    # Supported image extensions
    IMAGE_EXTENSIONS = {".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".tif", ".webp", ".jp2", ".pbm", ".pgm", ".ppm", ".sr", ".ras"}

    # Crop adjustment settings
    CROP_SIZE_STEP = 15  # pixels to expand/contract crop

    def __init__(self, path: str):
        self.path = Path(path)

        # Video file management
        self.video_files = []
        self.current_video_index = 0

        # Media type tracking
        self.is_image_mode = False  # True if current file is an image

        # Determine if path is file or directory
        if self.path.is_file():
            self.video_files = [self.path]
        elif self.path.is_dir():
            # Load all media files from directory
            self.video_files = self._get_media_files_from_directory(self.path)
            if not self.video_files:
                raise ValueError(f"No media files found in directory: {path}")
        else:
            raise ValueError(f"Path does not exist: {path}")

        # Initialize with first video
        self._load_video(self.video_files[0])

        # Mouse and keyboard interaction
        self.mouse_dragging = False
        self.timeline_rect = None
        self.window_width = 1200
        self.window_height = 800

        # Auto-repeat seeking state
        self.auto_repeat_active = False
        self.auto_repeat_direction = 0
        self.auto_repeat_shift = False
        self.auto_repeat_ctrl = False
        self.last_display_update = 0

        # Crop settings
        self.crop_rect = None  # (x, y, width, height)
        self.crop_selecting = False
        self.crop_start_point = None
        self.crop_preview_rect = None
        self.crop_history = []  # For undo

        # Zoom settings
        self.zoom_factor = 1.0
        self.zoom_center = None  # (x, y) center point for zoom

        # Rotation settings
        self.rotation_angle = 0  # 0, 90, 180, 270 degrees

        # Brightness and contrast settings
        self.brightness = 0  # -100 to 100
        self.contrast = 1.0  # 0.1 to 3.0

        # Marker looping state
        self.looping_between_markers = False

        # Display offset for panning when zoomed
        self.display_offset = [0, 0]

        # Progress bar state
        self.progress_bar_visible = False
        self.progress_bar_progress = 0.0  # 0.0 to 1.0
        self.progress_bar_complete = False
        self.progress_bar_complete_time = None
        self.progress_bar_text = ""
        self.progress_bar_fps = 0.0  # Current rendering FPS

        # Feedback message state
        self.feedback_message = ""
        self.feedback_message_time = None
        self.feedback_message_duration = 0.5  # seconds to show message

        # Crop adjustment settings
        self.crop_size_step = self.CROP_SIZE_STEP

        # Render thread management
        self.render_thread = None
        self.render_cancelled = False
        self.render_progress_queue = queue.Queue()

    def _get_state_file_path(self) -> Optional[Path]:
        """Get the state file path for the current media file"""
        if not hasattr(self, 'video_path') or not self.video_path:
            print("DEBUG: No video_path available for state file")
            return None
        state_path = self.video_path.with_suffix('.json')
        print(f"DEBUG: State file path would be: {state_path}")
        return state_path

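    # Illustrative example of the sidecar naming used by _get_state_file_path():
    # for video_path = Path("clips/holiday.mp4"), with_suffix('.json') yields
    # clips/holiday.json, so editor state is stored next to the media file.
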
    def save_state(self):
        """Save current editor state to JSON file"""
        state_file = self._get_state_file_path()
        if not state_file:
            print("No state file path available")
            return False

        try:
            state = {
                'timestamp': time.time(),
                'current_frame': getattr(self, 'current_frame', 0),
                'crop_rect': self.crop_rect,
                'zoom_factor': self.zoom_factor,
                'zoom_center': self.zoom_center,
                'rotation_angle': self.rotation_angle,
                'brightness': self.brightness,
                'contrast': self.contrast,
                'cut_start_frame': self.cut_start_frame,
                'cut_end_frame': self.cut_end_frame,
                'looping_between_markers': self.looping_between_markers,
                'display_offset': self.display_offset,
                'playback_speed': getattr(self, 'playback_speed', 1.0),
                'seek_multiplier': getattr(self, 'seek_multiplier', 1.0),
                'is_playing': getattr(self, 'is_playing', False)
            }

            with open(state_file, 'w') as f:
                json.dump(state, f, indent=2)
            print(f"State saved to {state_file}")
            return True
        except Exception as e:
            print(f"Error saving state: {e}")
            return False

    def load_state(self) -> bool:
        """Load editor state from JSON file"""
        state_file = self._get_state_file_path()
        if not state_file:
            print("No state file path available")
            return False
        if not state_file.exists():
            print(f"State file does not exist: {state_file}")
            return False

        print(f"Loading state from: {state_file}")
        try:
            with open(state_file, 'r') as f:
                state = json.load(f)

            print(f"State file contents: {state}")

            # Restore state values
            if 'current_frame' in state:
                self.current_frame = state['current_frame']
            if 'crop_rect' in state and state['crop_rect']:
                self.crop_rect = tuple(state['crop_rect'])
            if 'zoom_factor' in state:
                self.zoom_factor = state['zoom_factor']
            if 'zoom_center' in state and state['zoom_center']:
                self.zoom_center = tuple(state['zoom_center'])
            if 'rotation_angle' in state:
                self.rotation_angle = state['rotation_angle']
            if 'brightness' in state:
                self.brightness = state['brightness']
            if 'contrast' in state:
                self.contrast = state['contrast']
            if 'cut_start_frame' in state:
                self.cut_start_frame = state['cut_start_frame']
                print(f"Restored cut_start_frame: {self.cut_start_frame}")
            if 'cut_end_frame' in state:
                self.cut_end_frame = state['cut_end_frame']
                print(f"Restored cut_end_frame: {self.cut_end_frame}")

            # Calculate and show marker positions on timeline
            if self.cut_start_frame is not None and self.cut_end_frame is not None:
                start_progress = self.cut_start_frame / max(1, self.total_frames - 1)
                end_progress = self.cut_end_frame / max(1, self.total_frames - 1)
                print(f"Markers will be drawn at: Start {start_progress:.4f} ({self.cut_start_frame}/{self.total_frames}), End {end_progress:.4f} ({self.cut_end_frame}/{self.total_frames})")
            if 'looping_between_markers' in state:
                self.looping_between_markers = state['looping_between_markers']
            if 'display_offset' in state:
                self.display_offset = state['display_offset']
            if 'playback_speed' in state:
                self.playback_speed = state['playback_speed']
            if 'seek_multiplier' in state:
                self.seek_multiplier = state['seek_multiplier']
            if 'is_playing' in state:
                self.is_playing = state['is_playing']

            # Validate and clamp values
            self.current_frame = max(0, min(self.current_frame, getattr(self, 'total_frames', 1) - 1))
            self.zoom_factor = max(self.MIN_ZOOM, min(self.MAX_ZOOM, self.zoom_factor))
            self.brightness = max(-100, min(100, self.brightness))
            self.contrast = max(0.1, min(3.0, self.contrast))
            self.playback_speed = max(self.MIN_PLAYBACK_SPEED, min(self.MAX_PLAYBACK_SPEED, self.playback_speed))
            self.seek_multiplier = max(self.MIN_SEEK_MULTIPLIER, min(self.MAX_SEEK_MULTIPLIER, self.seek_multiplier))

            return True
        except Exception as e:
            print(f"Error loading state: {e}")
            return False

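    # Rough shape of the sidecar JSON written by save_state() and read back by
    # load_state(); the keys mirror the state dict above, values are illustrative:
    #
    #   {
    #     "timestamp": 1700000000.0,
    #     "current_frame": 1234,
    #     "crop_rect": [100, 50, 640, 360],
    #     "zoom_factor": 1.0,
    #     "zoom_center": null,
    #     "rotation_angle": 90,
    #     "brightness": 10,
    #     "contrast": 1.2,
    #     "cut_start_frame": 300,
    #     "cut_end_frame": 900,
    #     "looping_between_markers": false,
    #     "display_offset": [0, 0],
    #     "playback_speed": 1.0,
    #     "seek_multiplier": 1.0,
    #     "is_playing": false
    #   }
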
    def _is_video_file(self, file_path: Path) -> bool:
        """Check if file is a supported video format"""
        return file_path.suffix.lower() in self.VIDEO_EXTENSIONS

    def _is_image_file(self, file_path: Path) -> bool:
        """Check if file is a supported image format"""
        return file_path.suffix.lower() in self.IMAGE_EXTENSIONS

    def _is_media_file(self, file_path: Path) -> bool:
        """Check if file is a supported media format (video or image)"""
        return self._is_video_file(file_path) or self._is_image_file(file_path)

    def _get_next_screenshot_filename(self, video_path: Path) -> str:
        """Generate the next available screenshot filename: video_frame_00001.jpg, video_frame_00002.jpg, etc."""
        directory = video_path.parent
        base_name = video_path.stem

        # Pattern to match existing screenshot files: video_frame_00001.jpg, video_frame_00002.jpg, etc.
        pattern = re.compile(rf"^{re.escape(base_name)}_frame_(\d{{5}})\.(jpg|jpeg|png)$")

        existing_numbers = set()
        for file_path in directory.iterdir():
            if file_path.is_file():
                match = pattern.match(file_path.name)
                if match:
                    existing_numbers.add(int(match.group(1)))

        # Find the next available number starting from 1
        next_number = 1
        while next_number in existing_numbers:
            next_number += 1

        return f"{base_name}_frame_{next_number:05d}.jpg"

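    # Illustrative example: for video_path = Path("clips/holiday.mp4") with
    # holiday_frame_00001.jpg and holiday_frame_00002.jpg already on disk, the
    # pattern above matches both, so the next free number is 3 and the method
    # returns "holiday_frame_00003.jpg".
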
    def save_current_frame(self):
        """Save the current frame as a screenshot"""
        if self.current_display_frame is None:
            print("No frame to save")
            return False

        # Generate the next available screenshot filename
        screenshot_name = self._get_next_screenshot_filename(self.video_path)
        screenshot_path = self.video_path.parent / screenshot_name

        # Apply current transformations (crop, zoom, rotation, brightness/contrast) to the frame
        processed_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame.copy())

        if processed_frame is not None:
            # Save the processed frame
            success = cv2.imwrite(str(screenshot_path), processed_frame)
            if success:
                print(f"Screenshot saved: {screenshot_name}")
                self.show_feedback_message(f"Screenshot saved: {screenshot_name}")
                return True
            else:
                print(f"Error: Could not save screenshot to {screenshot_path}")
                self.show_feedback_message("Error: Could not save screenshot")
                return False
        else:
            print("Error: Could not process frame for screenshot")
            self.show_feedback_message("Error: Could not process frame")
            return False

    def _get_media_files_from_directory(self, directory: Path) -> List[Path]:
        """Get all media files (video and image) from a directory, sorted by name"""
        media_files = set()
        for file_path in directory.iterdir():
            if (
                file_path.is_file()
                and self._is_media_file(file_path)
            ):
                media_files.add(file_path)

        # Pattern to match edited files: basename_edited_001.ext, basename_edited_002.ext, etc.
        edited_pattern = re.compile(r"^(.+)_edited_\d{3}$")

        edited_base_names = set()
        for file_path in media_files:
            match = edited_pattern.match(file_path.stem)
            if match:
                edited_base_names.add(match.group(1))

        non_edited_media = set()
        for file_path in media_files:
            # Skip if this is an edited file
            if edited_pattern.match(file_path.stem):
                continue

            # Skip if there's already an edited version of this file
            if file_path.stem in edited_base_names:
                continue

            non_edited_media.add(file_path)

        return sorted(non_edited_media)

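    # Illustrative example of the filtering above: for a directory containing
    # a.mp4, a_edited_001.mp4, b.png, and notes.txt, the result is [b.png]:
    # a_edited_001.mp4 is skipped because it is an edited copy, a.mp4 is skipped
    # because an edited version of it already exists, and notes.txt is not a
    # supported media extension.
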
    def _load_video(self, media_path: Path):
        """Load a media file (video or image) and initialize properties"""
        if hasattr(self, "cap") and self.cap:
            self.cap.release()

        self.video_path = media_path
        self.is_image_mode = self._is_image_file(media_path)

        if self.is_image_mode:
            # Load static image
            self.static_image = cv2.imread(str(media_path))
            if self.static_image is None:
                raise ValueError(f"Could not load image file: {media_path}")

            # Set up image properties to mimic video interface
            self.frame_height, self.frame_width = self.static_image.shape[:2]
            self.total_frames = 1
            self.fps = 30  # Dummy FPS for image mode
            self.cap = None

            print(f"Loaded image: {self.video_path.name}")
            print(f"  Resolution: {self.frame_width}x{self.frame_height}")
        else:
            # Try different backends for better performance
            # Order of preference: FFmpeg (best for video files), DirectShow (cameras), any available
            backends_to_try = []
            if hasattr(cv2, 'CAP_FFMPEG'):  # FFmpeg - best for video files
                backends_to_try.append(cv2.CAP_FFMPEG)
            if hasattr(cv2, 'CAP_DSHOW'):  # DirectShow - usually for cameras
                backends_to_try.append(cv2.CAP_DSHOW)
            backends_to_try.append(cv2.CAP_ANY)  # Fallback

            self.cap = None
            for backend in backends_to_try:
                try:
                    self.cap = cv2.VideoCapture(str(self.video_path), backend)
                    if self.cap.isOpened():
                        # Optimize buffer settings for better performance
                        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # Minimize buffer to reduce latency
                        # Try to set hardware acceleration if available
                        if hasattr(cv2, 'CAP_PROP_HW_ACCELERATION'):
                            self.cap.set(cv2.CAP_PROP_HW_ACCELERATION, cv2.VIDEO_ACCELERATION_ANY)
                        break
                    self.cap.release()
                except Exception:
                    continue

            if not self.cap or not self.cap.isOpened():
                raise ValueError(f"Could not open video file: {media_path}")

            # Video properties
            self.total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
            self.fps = self.cap.get(cv2.CAP_PROP_FPS)
            self.frame_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            self.frame_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            # Get codec information for debugging
            fourcc = int(self.cap.get(cv2.CAP_PROP_FOURCC))
            codec = "".join([chr((fourcc >> 8 * i) & 0xFF) for i in range(4)])

            # Get backend information
            backend = self.cap.getBackendName()

            print(f"Loaded video: {self.video_path.name} ({self.current_video_index + 1}/{len(self.video_files)})")
            print(f"  Codec: {codec} | Backend: {backend} | Resolution: {self.frame_width}x{self.frame_height}")
            print(f"  FPS: {self.fps:.2f} | Frames: {self.total_frames} | Duration: {self.total_frames/self.fps:.1f}s")

            # Performance warning for known problematic cases
            if codec in ['H264', 'H.264', 'AVC1', 'avc1'] and self.total_frames > 10000:
                print("  Warning: Large H.264 video detected - seeking may be slow")
            if self.frame_width * self.frame_height > 1920 * 1080:
                print("  Warning: High resolution video - decoding may be slow")
            if self.fps > 60:
                print("  Warning: High framerate video - may impact playback smoothness")

        # Reset playback state for new media
        self.current_frame = 0
        self.is_playing = False  # Both videos and images start paused
        self.playback_speed = 1.0
        self.seek_multiplier = 1.0
        self.current_display_frame = None

        # Reset crop, zoom, rotation, brightness/contrast, and cut settings for new media
        self.crop_rect = None
        self.crop_history = []
        self.zoom_factor = 1.0
        self.zoom_center = None
        self.rotation_angle = 0
        self.brightness = 0
        self.contrast = 1.0
        self.cut_start_frame = None
        self.cut_end_frame = None
        self.display_offset = [0, 0]

        # Try to load saved state for this media file
        if self.load_state():
            print("Loaded saved state for this media file")
            if self.cut_start_frame is not None:
                print(f"  Cut start frame: {self.cut_start_frame}")
            if self.cut_end_frame is not None:
                print(f"  Cut end frame: {self.cut_end_frame}")
        else:
            print("No saved state found for this media file")

    def switch_to_video(self, index: int):
        """Switch to a specific video by index"""
        if 0 <= index < len(self.video_files):
            self.current_video_index = index
            self._load_video(self.video_files[index])
            self.load_current_frame()

    def next_video(self):
        """Switch to the next video"""
        next_index = (self.current_video_index + 1) % len(self.video_files)
        self.switch_to_video(next_index)

    def previous_video(self):
        """Switch to the previous video"""
        prev_index = (self.current_video_index - 1) % len(self.video_files)
        self.switch_to_video(prev_index)

    def load_current_frame(self) -> bool:
        """Load the current frame into display cache"""
        if self.is_image_mode:
            # For images, just copy the static image
            self.current_display_frame = self.static_image.copy()
            return True
        else:
            # For videos, seek and read frame
            self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.current_frame)
            ret, frame = self.cap.read()
            if ret:
                self.current_display_frame = frame
                return True
            return False

    def calculate_frame_delay(self) -> int:
        """Calculate frame delay in milliseconds based on playback speed"""
        delay_ms = int(self.BASE_FRAME_DELAY_MS / self.playback_speed)
        return max(1, delay_ms)

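    # Worked example for calculate_frame_delay(): with BASE_FRAME_DELAY_MS = 16
    # and playback_speed = 2.0 the delay is int(16 / 2.0) = 8 ms between frames;
    # at playback_speed = 0.1 it is int(16 / 0.1) = 160 ms. The max(1, ...) clamp
    # keeps very high speeds from requesting a 0 ms wait.
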
    def seek_video(self, frames_delta: int):
        """Seek video by specified number of frames"""
        target_frame = max(
            0, min(self.current_frame + frames_delta, self.total_frames - 1)
        )
        self.current_frame = target_frame
        self.load_current_frame()

    def seek_video_with_modifier(
        self, direction: int, shift_pressed: bool, ctrl_pressed: bool
    ):
        """Seek video with different frame counts based on modifiers and seek multiplier"""
        if ctrl_pressed:
            base_frames = 60  # Ctrl: 60 frames
        elif shift_pressed:
            base_frames = 10  # Shift: 10 frames
        else:
            base_frames = 1  # Default: 1 frame

        # Apply seek multiplier to the base frame count
        frames = direction * int(base_frames * self.seek_multiplier)
        self.seek_video(frames)

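    # Worked example for seek_video_with_modifier(): with Shift held
    # (base_frames = 10) and seek_multiplier = 4.0, one press seeks
    # direction * int(10 * 4.0) = 40 frames; with Ctrl held and the default
    # multiplier of 1.0 it seeks 60 frames.
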
    def start_auto_repeat_seek(self, direction: int, shift_pressed: bool, ctrl_pressed: bool):
        """Start auto-repeat seeking"""
        if self.is_image_mode:
            return

        self.auto_repeat_active = True
        self.auto_repeat_direction = direction
        self.auto_repeat_shift = shift_pressed
        self.auto_repeat_ctrl = ctrl_pressed

        # Initialize last_display_update to prevent immediate auto-repeat
        self.last_display_update = time.time()

        self.seek_video_with_modifier(direction, shift_pressed, ctrl_pressed)

    def stop_auto_repeat_seek(self):
        """Stop auto-repeat seeking"""
        self.auto_repeat_active = False
        self.auto_repeat_direction = 0
        self.auto_repeat_shift = False
        self.auto_repeat_ctrl = False

    def update_auto_repeat_seek(self):
        """Update auto-repeat seeking"""
        if not self.auto_repeat_active or self.is_image_mode:
            return

        current_time = time.time()

        if current_time - self.last_display_update >= self.AUTO_REPEAT_DISPLAY_RATE:
            self.seek_video_with_modifier(
                self.auto_repeat_direction,
                self.auto_repeat_shift,
                self.auto_repeat_ctrl
            )
            self.last_display_update = current_time

    def should_update_display(self) -> bool:
        """Check if display should be updated"""
        return True

    def seek_to_frame(self, frame_number: int):
        """Seek to specific frame"""
        self.current_frame = max(0, min(frame_number, self.total_frames - 1))
        self.load_current_frame()

    def advance_frame(self) -> bool:
        """Advance to next frame - optimized to avoid seeking, handles playback speed"""
        if not self.is_playing:
            return True

        # Calculate how many frames to advance based on speed
        # For speeds > 1.0, we skip frames. For speeds < 1.0, we delay in main loop
        frames_to_advance = max(1, int(self.playback_speed))

        new_frame = self.current_frame + frames_to_advance

        # Handle marker looping bounds
        if self.looping_between_markers and self.cut_start_frame is not None and self.cut_end_frame is not None:
            if new_frame >= self.cut_end_frame:
                # Loop back to start marker
                new_frame = self.cut_start_frame
                self.current_frame = new_frame
                self.load_current_frame()
                return True
        elif new_frame >= self.total_frames:
            new_frame = 0  # Loop - this will require a seek
            self.current_frame = new_frame
            self.load_current_frame()
            return True

        # For sequential playback at normal speed, just read the next frame without seeking
        if frames_to_advance == 1:
            ret, frame = self.cap.read()
            if ret:
                self.current_frame = new_frame
                self.current_display_frame = frame
                return True
            else:
                # If sequential read failed, we've hit the actual end of video
                # Update total_frames to the actual count and loop
                print(f"Reached actual end of video at frame {self.current_frame} (reported: {self.total_frames})")
                self.total_frames = self.current_frame
                self.current_frame = 0  # Loop back to start
                self.load_current_frame()
                return True
        else:
            # For speed > 1.0, we need to seek to skip frames
            self.current_frame = new_frame
            success = self.load_current_frame()
            if not success:
                # Hit actual end of video
                print(f"Reached actual end of video at frame {self.current_frame} (reported: {self.total_frames})")
                self.total_frames = self.current_frame
                if self.looping_between_markers and self.cut_start_frame is not None:
                    self.current_frame = self.cut_start_frame  # Loop back to start marker
                else:
                    self.current_frame = 0  # Loop back to start
                self.load_current_frame()
                return True

            # Handle marker looping after successful frame load
            if self.looping_between_markers and self.cut_start_frame is not None and self.cut_end_frame is not None:
                if self.current_frame >= self.cut_end_frame:
                    self.current_frame = self.cut_start_frame
                    self.load_current_frame()
                    return True

            return success

    def apply_crop_zoom_and_rotation(self, frame):
        """Apply current crop, zoom, rotation, and brightness/contrast settings to frame"""
        if frame is None:
            return None

        processed_frame = frame.copy()

        # Apply brightness/contrast first (to original frame for best quality)
        processed_frame = self.apply_brightness_contrast(processed_frame)

        # Apply crop
        if self.crop_rect:
            x, y, w, h = self.crop_rect
            x, y, w, h = int(x), int(y), int(w), int(h)
            # Ensure crop is within frame bounds
            x = max(0, min(x, processed_frame.shape[1] - 1))
            y = max(0, min(y, processed_frame.shape[0] - 1))
            w = min(w, processed_frame.shape[1] - x)
            h = min(h, processed_frame.shape[0] - y)
            if w > 0 and h > 0:
                processed_frame = processed_frame[y : y + h, x : x + w]

        # Apply rotation
        if self.rotation_angle != 0:
            processed_frame = self.apply_rotation(processed_frame)

        # Apply zoom
        if self.zoom_factor != 1.0:
            height, width = processed_frame.shape[:2]
            new_width = int(width * self.zoom_factor)
            new_height = int(height * self.zoom_factor)
            processed_frame = cv2.resize(
                processed_frame, (new_width, new_height), interpolation=cv2.INTER_LINEAR
            )

            # Handle zoom center and display offset
            if new_width > self.window_width or new_height > self.window_height:
                # Calculate crop from zoomed image to fit window
                start_x = max(0, self.display_offset[0])
                start_y = max(0, self.display_offset[1])
                end_x = min(new_width, start_x + self.window_width)
                end_y = min(new_height, start_y + self.window_height)
                processed_frame = processed_frame[start_y:end_y, start_x:end_x]

        return processed_frame

    def apply_rotation(self, frame):
        """Apply rotation to frame"""
        if self.rotation_angle == 0:
            return frame
        elif self.rotation_angle == 90:
            return cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
        elif self.rotation_angle == 180:
            return cv2.rotate(frame, cv2.ROTATE_180)
        elif self.rotation_angle == 270:
            return cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
        return frame

    def rotate_clockwise(self):
        """Rotate video 90 degrees clockwise"""
        self.rotation_angle = (self.rotation_angle + 90) % 360

    def apply_brightness_contrast(self, frame):
        """Apply brightness and contrast adjustments to frame"""
        if self.brightness == 0 and self.contrast == 1.0:
            return frame

        # Convert brightness from -100/100 range to -255/255 range
        brightness_value = self.brightness * 2.55

        # Apply brightness and contrast: new_pixel = contrast * old_pixel + brightness
        adjusted = cv2.convertScaleAbs(
            frame, alpha=self.contrast, beta=brightness_value
        )
        return adjusted

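    # Worked example for apply_brightness_contrast(): brightness = 20 maps to
    # beta = 20 * 2.55 = 51, so with contrast (alpha) = 1.5 a pixel value of 100
    # becomes 1.5 * 100 + 51 = 201; cv2.convertScaleAbs also saturates the result
    # to the 0-255 range of an 8-bit image.
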
    def adjust_brightness(self, delta: int):
        """Adjust brightness by delta (-100 to 100)"""
        self.brightness = max(-100, min(100, self.brightness + delta))

    def adjust_contrast(self, delta: float):
        """Adjust contrast by delta (0.1 to 3.0)"""
        self.contrast = max(0.1, min(3.0, self.contrast + delta))

    def show_progress_bar(self, text: str = "Processing..."):
        """Show progress bar with given text"""
        self.progress_bar_visible = True
        self.progress_bar_progress = 0.0
        self.progress_bar_complete = False
        self.progress_bar_complete_time = None
        self.progress_bar_text = text

    def update_progress_bar(self, progress: float, text: str = None, fps: float = None):
        """Update progress bar progress (0.0 to 1.0) and optionally text and FPS"""
        if self.progress_bar_visible:
            self.progress_bar_progress = max(0.0, min(1.0, progress))
            if text is not None:
                self.progress_bar_text = text
            if fps is not None:
                self.progress_bar_fps = fps

            # Mark as complete when reaching 100%
            if self.progress_bar_progress >= 1.0 and not self.progress_bar_complete:
                self.progress_bar_complete = True
                self.progress_bar_complete_time = time.time()

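    # Illustrative render-loop usage of the progress-bar API above (the actual
    # render code appears later in this file and may differ):
    #
    #   self.show_progress_bar("Rendering video...")
    #   for i in range(total):
    #       ...  # process frame i
    #       self.update_progress_bar((i + 1) / total, fps=measured_fps)
    #
    # The bar marks itself complete at 1.0 and fades out after
    # PROGRESS_BAR_FADE_DURATION seconds; hide_progress_bar() hides it early.
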
    def hide_progress_bar(self):
        """Hide progress bar"""
        self.progress_bar_visible = False
        self.progress_bar_complete = False
        self.progress_bar_complete_time = None
        self.progress_bar_fps = 0.0

    def show_feedback_message(self, message: str):
        """Show a feedback message on screen for a few seconds"""
        self.feedback_message = message
        self.feedback_message_time = time.time()

    def draw_feedback_message(self, frame):
        """Draw feedback message on frame if visible"""
        if not self.feedback_message or not self.feedback_message_time:
            return

        # Check if message should still be shown
        elapsed = time.time() - self.feedback_message_time
        if elapsed > self.feedback_message_duration:
            self.feedback_message = ""
            self.feedback_message_time = None
            return

        height, width = frame.shape[:2]

        # Calculate message position (center of frame)
        font = cv2.FONT_HERSHEY_SIMPLEX
        font_scale = 1.0
        thickness = 2

        # Get text size
        text_size = cv2.getTextSize(self.feedback_message, font, font_scale, thickness)[0]
        text_x = (width - text_size[0]) // 2
        text_y = (height + text_size[1]) // 2

        # Draw background rectangle
        padding = 10
        rect_x1 = text_x - padding
        rect_y1 = text_y - text_size[1] - padding
        rect_x2 = text_x + text_size[0] + padding
        rect_y2 = text_y + padding

        # Semi-transparent background
        overlay = frame.copy()
        cv2.rectangle(overlay, (rect_x1, rect_y1), (rect_x2, rect_y2), (0, 0, 0), -1)
        alpha = 0.7
        cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0, frame)

        # Draw text with shadow
        cv2.putText(frame, self.feedback_message, (text_x + 2, text_y + 2), font, font_scale, (0, 0, 0), thickness + 1)
        cv2.putText(frame, self.feedback_message, (text_x, text_y), font, font_scale, (255, 255, 255), thickness)

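    # Worked example of the overlay blend in draw_feedback_message(): with
    # alpha = 0.7, cv2.addWeighted computes 0.7 * overlay + 0.3 * frame, so
    # inside the black rectangle a background pixel of 200 becomes
    # 0.7 * 0 + 0.3 * 200 = 60, dimming the area behind the text.
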
    def draw_progress_bar(self, frame):
        """Draw progress bar on frame if visible - positioned at top with full width"""
        if not self.progress_bar_visible:
            return

        # Check if we should fade out
        if self.progress_bar_complete and self.progress_bar_complete_time:
            elapsed = time.time() - self.progress_bar_complete_time
            if elapsed > self.PROGRESS_BAR_FADE_DURATION:
                self.hide_progress_bar()
                return

            # Calculate fade alpha (1.0 at start, 0.0 at end)
            fade_alpha = max(0.0, 1.0 - (elapsed / self.PROGRESS_BAR_FADE_DURATION))
        else:
            fade_alpha = 1.0

        height, width = frame.shape[:2]

        # Calculate progress bar position (top of frame with 5% margins)
        margin_width = int(width * self.PROGRESS_BAR_MARGIN_PERCENT / 100)
        bar_width = width - (2 * margin_width)
        bar_x = margin_width
        bar_y = self.PROGRESS_BAR_TOP_MARGIN

        # Apply fade alpha to colors
        bg_color = tuple(int(c * fade_alpha) for c in self.PROGRESS_BAR_COLOR_BG)
        border_color = tuple(
            int(c * fade_alpha) for c in self.PROGRESS_BAR_COLOR_BORDER
        )

        if self.progress_bar_complete:
            fill_color = tuple(
                int(c * fade_alpha) for c in self.PROGRESS_BAR_COLOR_FILL
            )
        else:
            fill_color = tuple(
                int(c * fade_alpha) for c in self.PROGRESS_BAR_COLOR_PROGRESS
            )

        # Draw background
        cv2.rectangle(
            frame,
            (bar_x, bar_y),
            (bar_x + bar_width, bar_y + self.PROGRESS_BAR_HEIGHT),
            bg_color,
            -1,
        )

        # Draw progress fill
        fill_width = int(bar_width * self.progress_bar_progress)
        if fill_width > 0:
            cv2.rectangle(
                frame,
                (bar_x, bar_y),
                (bar_x + fill_width, bar_y + self.PROGRESS_BAR_HEIGHT),
                fill_color,
                -1,
            )

        # Draw border
        cv2.rectangle(
            frame,
            (bar_x, bar_y),
            (bar_x + bar_width, bar_y + self.PROGRESS_BAR_HEIGHT),
            border_color,
            2,
        )

        # Draw progress percentage on the left
        percentage_text = f"{self.progress_bar_progress * 100:.1f}%"
        text_color = tuple(int(255 * fade_alpha) for _ in range(3))
        cv2.putText(
            frame,
            percentage_text,
            (bar_x + 12, bar_y + 22),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (0, 0, 0),
            4,
        )
        cv2.putText(
            frame,
            percentage_text,
            (bar_x + 10, bar_y + 20),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            text_color,
            2,
        )

        # Draw FPS on the right if available
        if self.progress_bar_fps > 0:
            fps_text = f"{self.progress_bar_fps:.1f} FPS"
            fps_text_size = cv2.getTextSize(fps_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[
                0
            ]
            fps_x = bar_x + bar_width - fps_text_size[0] - 10
            cv2.putText(
                frame,
                fps_text,
                (fps_x + 2, bar_y + 22),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (0, 0, 0),
                4,
            )
            cv2.putText(
                frame,
                fps_text,
                (fps_x, bar_y + 20),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                text_color,
                2,
            )

        # Draw main text in center
        if self.progress_bar_text:
            text_size = cv2.getTextSize(
                self.progress_bar_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1
            )[0]
            text_x = bar_x + (bar_width - text_size[0]) // 2
            text_y = bar_y + 20

            # Draw text shadow for better visibility
            cv2.putText(
                frame,
                self.progress_bar_text,
                (text_x + 2, text_y + 2),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (0, 0, 0),
                4,
            )
            cv2.putText(
                frame,
                self.progress_bar_text,
                (text_x, text_y),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                text_color,
                2,
            )

    def draw_timeline(self, frame):
        """Draw timeline at the bottom of the frame"""
        # Don't draw timeline for images
        if self.is_image_mode:
            return

        height, width = frame.shape[:2]

        # Timeline background area
        timeline_y = height - self.TIMELINE_HEIGHT
        cv2.rectangle(frame, (0, timeline_y), (width, height), (40, 40, 40), -1)

        # Calculate timeline bar position
        bar_y = timeline_y + (self.TIMELINE_HEIGHT - self.TIMELINE_BAR_HEIGHT) // 2
        bar_x_start = self.TIMELINE_MARGIN
        bar_x_end = width - self.TIMELINE_MARGIN
        bar_width = bar_x_end - bar_x_start

        self.timeline_rect = (bar_x_start, bar_y, bar_width, self.TIMELINE_BAR_HEIGHT)

        # Draw timeline background
        cv2.rectangle(
            frame,
            (bar_x_start, bar_y),
            (bar_x_end, bar_y + self.TIMELINE_BAR_HEIGHT),
            self.TIMELINE_COLOR_BG,
            -1,
        )
        cv2.rectangle(
            frame,
            (bar_x_start, bar_y),
            (bar_x_end, bar_y + self.TIMELINE_BAR_HEIGHT),
            self.TIMELINE_COLOR_BORDER,
            1,
        )

        # Draw progress
        if self.total_frames > 0:
            progress = self.current_frame / max(1, self.total_frames - 1)
            progress_width = int(bar_width * progress)
            if progress_width > 0:
                cv2.rectangle(
                    frame,
                    (bar_x_start, bar_y),
                    (bar_x_start + progress_width, bar_y + self.TIMELINE_BAR_HEIGHT),
                    self.TIMELINE_COLOR_PROGRESS,
                    -1,
                )

            # Draw current position handle
            handle_x = bar_x_start + progress_width
            handle_y = bar_y + self.TIMELINE_BAR_HEIGHT // 2
            cv2.circle(
                frame,
                (handle_x, handle_y),
                self.TIMELINE_HANDLE_SIZE // 2,
                self.TIMELINE_COLOR_HANDLE,
                -1,
            )
            cv2.circle(
                frame,
                (handle_x, handle_y),
                self.TIMELINE_HANDLE_SIZE // 2,
                self.TIMELINE_COLOR_BORDER,
                2,
            )

            # Draw cut points
            if self.cut_start_frame is not None:
                cut_start_progress = self.cut_start_frame / max(
                    1, self.total_frames - 1
                )
                cut_start_x = bar_x_start + int(bar_width * cut_start_progress)
                cv2.line(
                    frame,
                    (cut_start_x, bar_y),
                    (cut_start_x, bar_y + self.TIMELINE_BAR_HEIGHT),
                    self.TIMELINE_COLOR_CUT_POINT,
                    3,
                )
                cv2.putText(
                    frame,
                    "1",
                    (cut_start_x - 5, bar_y - 5),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.4,
                    self.TIMELINE_COLOR_CUT_POINT,
                    1,
                )

            if self.cut_end_frame is not None:
                cut_end_progress = self.cut_end_frame / max(1, self.total_frames - 1)
                cut_end_x = bar_x_start + int(bar_width * cut_end_progress)
                cv2.line(
                    frame,
                    (cut_end_x, bar_y),
                    (cut_end_x, bar_y + self.TIMELINE_BAR_HEIGHT),
                    self.TIMELINE_COLOR_CUT_POINT,
                    3,
                )
                cv2.putText(
                    frame,
                    "2",
                    (cut_end_x - 5, bar_y - 5),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.4,
                    self.TIMELINE_COLOR_CUT_POINT,
                    1,
                )

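    # Worked example of the timeline math above: with total_frames = 1000,
    # current_frame = 450, and a 960 px wide bar, progress = 450 / 999 is about
    # 0.4505, so the filled portion is int(960 * 0.4505) = 432 px and the handle
    # is drawn at bar_x_start + 432.
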
    def display_current_frame(self):
        """Display the current frame with all overlays"""
        if self.current_display_frame is None:
            return

        # Apply crop, zoom, and rotation transformations for preview
        display_frame = self.apply_crop_zoom_and_rotation(
            self.current_display_frame.copy()
        )

        if display_frame is None:
            return

        # Resize to fit window while maintaining aspect ratio
        height, width = display_frame.shape[:2]
        available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)

        scale = min(self.window_width / width, available_height / height)
        if scale < 1.0:
            new_width = int(width * scale)
            new_height = int(height * scale)
            display_frame = cv2.resize(display_frame, (new_width, new_height))

        # Create canvas with timeline space
        canvas = np.zeros((self.window_height, self.window_width, 3), dtype=np.uint8)

        # Center the frame on canvas
        frame_height, frame_width = display_frame.shape[:2]
        start_y = (available_height - frame_height) // 2
        start_x = (self.window_width - frame_width) // 2

        canvas[start_y : start_y + frame_height, start_x : start_x + frame_width] = (
            display_frame
        )

        # Draw crop selection preview during Shift+Click+Drag
        if self.crop_preview_rect:
            x, y, w, h = self.crop_preview_rect
            cv2.rectangle(
                canvas, (int(x), int(y)), (int(x + w), int(y + h)), (0, 255, 0), 2
            )

        # Add info overlay
        rotation_text = (
            f" | Rotation: {self.rotation_angle}°" if self.rotation_angle != 0 else ""
        )
        brightness_text = (
            f" | Brightness: {self.brightness}" if self.brightness != 0 else ""
        )
        contrast_text = (
            f" | Contrast: {self.contrast:.1f}" if self.contrast != 1.0 else ""
        )
        seek_multiplier_text = (
            f" | Seek: {self.seek_multiplier:.1f}x" if self.seek_multiplier != 1.0 else ""
        )
        if self.is_image_mode:
            info_text = f"Image | Zoom: {self.zoom_factor:.1f}x{rotation_text}{brightness_text}{contrast_text}"
        else:
            info_text = f"Frame: {self.current_frame}/{self.total_frames} | Speed: {self.playback_speed:.1f}x | Zoom: {self.zoom_factor:.1f}x{seek_multiplier_text}{rotation_text}{brightness_text}{contrast_text} | {'Playing' if self.is_playing else 'Paused'}"
        cv2.putText(
            canvas,
            info_text,
            (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.7,
            (255, 255, 255),
            2,
        )
        cv2.putText(
            canvas, info_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 1
        )

        # Add video navigation info
 | 
						|
        if len(self.video_files) > 1:
 | 
						|
            video_text = f"Video: {self.current_video_index + 1}/{len(self.video_files)} - {self.video_path.name}"
 | 
						|
            cv2.putText(
 | 
						|
                canvas,
 | 
						|
                video_text,
 | 
						|
                (10, 60),
 | 
						|
                cv2.FONT_HERSHEY_SIMPLEX,
 | 
						|
                0.6,
 | 
						|
                (255, 255, 255),
 | 
						|
                2,
 | 
						|
            )
 | 
						|
            cv2.putText(
 | 
						|
                canvas,
 | 
						|
                video_text,
 | 
						|
                (10, 60),
 | 
						|
                cv2.FONT_HERSHEY_SIMPLEX,
 | 
						|
                0.6,
 | 
						|
                (0, 0, 0),
 | 
						|
                1,
 | 
						|
            )
 | 
						|
            y_offset = 90
 | 
						|
        else:
 | 
						|
            y_offset = 60
 | 
						|
 | 
						|
        # Add crop info
 | 
						|
        if self.crop_rect:
 | 
						|
            crop_text = f"Crop: {int(self.crop_rect[0])},{int(self.crop_rect[1])} {int(self.crop_rect[2])}x{int(self.crop_rect[3])}"
 | 
						|
            cv2.putText(
 | 
						|
                canvas,
 | 
						|
                crop_text,
 | 
						|
                (10, y_offset),
 | 
						|
                cv2.FONT_HERSHEY_SIMPLEX,
 | 
						|
                0.6,
 | 
						|
                (255, 255, 255),
 | 
						|
                2,
 | 
						|
            )
 | 
						|
            cv2.putText(
 | 
						|
                canvas,
 | 
						|
                crop_text,
 | 
						|
                (10, y_offset),
 | 
						|
                cv2.FONT_HERSHEY_SIMPLEX,
 | 
						|
                0.6,
 | 
						|
                (0, 0, 0),
 | 
						|
                1,
 | 
						|
            )
 | 
						|
            y_offset += 30
 | 
						|
 | 
						|
        # Add cut info
 | 
						|
        if self.cut_start_frame is not None or self.cut_end_frame is not None:
 | 
						|
            cut_text = (
 | 
						|
                f"Cut: {self.cut_start_frame or '?'} - {self.cut_end_frame or '?'}"
 | 
						|
            )
 | 
						|
            cv2.putText(
 | 
						|
                canvas,
 | 
						|
                cut_text,
 | 
						|
                (10, y_offset),
 | 
						|
                cv2.FONT_HERSHEY_SIMPLEX,
 | 
						|
                0.6,
 | 
						|
                (255, 255, 255),
 | 
						|
                2,
 | 
						|
            )
 | 
						|
            cv2.putText(
 | 
						|
                canvas,
 | 
						|
                cut_text,
 | 
						|
                (10, y_offset),
 | 
						|
                cv2.FONT_HERSHEY_SIMPLEX,
 | 
						|
                0.6,
 | 
						|
                (0, 0, 0),
 | 
						|
                1,
 | 
						|
            )
 | 
						|
 | 
						|
        # Draw timeline
 | 
						|
        self.draw_timeline(canvas)
 | 
						|
 | 
						|
        # Draw progress bar (if visible)
 | 
						|
        self.draw_progress_bar(canvas)
 | 
						|
 | 
						|
        # Draw feedback message (if visible)
 | 
						|
        self.draw_feedback_message(canvas)
 | 
						|
 | 
						|
        window_title = "Image Editor" if self.is_image_mode else "Video Editor"
 | 
						|
        cv2.imshow(window_title, canvas)
 | 
						|
 | 
						|
    def mouse_callback(self, event, x, y, flags, _):
 | 
						|
        """Handle mouse events"""
 | 
						|
        # Handle timeline interaction (not for images)
 | 
						|
        if self.timeline_rect and not self.is_image_mode:
 | 
						|
            bar_x_start, bar_y, bar_width, bar_height = self.timeline_rect
 | 
						|
            bar_x_end = bar_x_start + bar_width
 | 
						|
 | 
						|
            if bar_y <= y <= bar_y + bar_height + 10:
 | 
						|
                if event == cv2.EVENT_LBUTTONDOWN:
 | 
						|
                    if bar_x_start <= x <= bar_x_end:
 | 
						|
                        self.mouse_dragging = True
 | 
						|
                        self.seek_to_timeline_position(x, bar_x_start, bar_width)
 | 
						|
                elif event == cv2.EVENT_MOUSEMOVE and self.mouse_dragging:
 | 
						|
                    if bar_x_start <= x <= bar_x_end:
 | 
						|
                        self.seek_to_timeline_position(x, bar_x_start, bar_width)
 | 
						|
                elif event == cv2.EVENT_LBUTTONUP:
 | 
						|
                    self.mouse_dragging = False
 | 
						|
                return
 | 
						|
 | 
						|
        # Handle crop selection (Shift + click and drag)
 | 
						|
        if flags & cv2.EVENT_FLAG_SHIFTKEY:
 | 
						|
 | 
						|
            if event == cv2.EVENT_LBUTTONDOWN:
 | 
						|
                self.crop_selecting = True
 | 
						|
                self.crop_start_point = (x, y)
 | 
						|
                self.crop_preview_rect = None
 | 
						|
            elif event == cv2.EVENT_MOUSEMOVE and self.crop_selecting:
 | 
						|
                if self.crop_start_point:
 | 
						|
                    start_x, start_y = self.crop_start_point
 | 
						|
                    width = abs(x - start_x)
 | 
						|
                    height = abs(y - start_y)
 | 
						|
                    crop_x = min(start_x, x)
 | 
						|
                    crop_y = min(start_y, y)
 | 
						|
                    self.crop_preview_rect = (crop_x, crop_y, width, height)
 | 
						|
            elif event == cv2.EVENT_LBUTTONUP and self.crop_selecting:
 | 
						|
                if self.crop_start_point and self.crop_preview_rect:
 | 
						|
                    # Convert screen coordinates to video coordinates
 | 
						|
                    self.set_crop_from_screen_coords(self.crop_preview_rect)
 | 
						|
                self.crop_selecting = False
 | 
						|
                self.crop_start_point = None
 | 
						|
                self.crop_preview_rect = None
 | 
						|
 | 
						|
        # Handle zoom center (Ctrl + click)
 | 
						|
        if flags & cv2.EVENT_FLAG_CTRLKEY and event == cv2.EVENT_LBUTTONDOWN:
 | 
						|
            self.zoom_center = (x, y)
 | 
						|
 | 
						|
        # Handle scroll wheel for zoom (Ctrl + scroll)
 | 
						|
        if flags & cv2.EVENT_FLAG_CTRLKEY:
 | 
						|
            if event == cv2.EVENT_MOUSEWHEEL:
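                # The wheel delta is packed into the high bits of `flags`, so its sign
                # indicates scroll direction; cv2.getMouseWheelDelta(flags) can extract
                # the exact value if finer steps are ever needed (availability depends
                # on the OpenCV build/platform).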
 | 
						|
                if flags > 0:  # Scroll up
 | 
						|
                    self.zoom_factor = min(
 | 
						|
                        self.MAX_ZOOM, self.zoom_factor + self.ZOOM_INCREMENT
 | 
						|
                    )
 | 
						|
                else:  # Scroll down
 | 
						|
                    self.zoom_factor = max(
 | 
						|
                        self.MIN_ZOOM, self.zoom_factor - self.ZOOM_INCREMENT
 | 
						|
                    )
 | 
						|
 | 
						|
    def set_crop_from_screen_coords(self, screen_rect):
 | 
						|
        """Convert screen coordinates to video frame coordinates and set crop"""
 | 
						|
        x, y, w, h = screen_rect
 | 
						|
 | 
						|
        if self.current_display_frame is None:
 | 
						|
            return
 | 
						|
 | 
						|
        # Get the original frame dimensions
 | 
						|
        original_height, original_width = self.current_display_frame.shape[:2]
 | 
						|
        available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
 | 
						|
 | 
						|
        # Calculate how the original frame is displayed (after crop/zoom/rotation)
 | 
						|
        display_frame = self.apply_crop_zoom_and_rotation(
 | 
						|
            self.current_display_frame.copy()
 | 
						|
        )
 | 
						|
        if display_frame is None:
 | 
						|
            return
 | 
						|
 | 
						|
        display_height, display_width = display_frame.shape[:2]
 | 
						|
 | 
						|
        # Calculate scale for the display frame
 | 
						|
        scale = min(
 | 
						|
            self.window_width / display_width, available_height / display_height
 | 
						|
        )
 | 
						|
        if scale < 1.0:
 | 
						|
            final_display_width = int(display_width * scale)
 | 
						|
            final_display_height = int(display_height * scale)
 | 
						|
        else:
 | 
						|
            final_display_width = display_width
 | 
						|
            final_display_height = display_height
 | 
						|
            scale = 1.0
 | 
						|
 | 
						|
        start_x = (self.window_width - final_display_width) // 2
 | 
						|
        start_y = (available_height - final_display_height) // 2
 | 
						|
 | 
						|
        # Convert screen coordinates to display frame coordinates
 | 
						|
        display_x = (x - start_x) / scale
 | 
						|
        display_y = (y - start_y) / scale
 | 
						|
        display_w = w / scale
 | 
						|
        display_h = h / scale
 | 
						|
 | 
						|
        # Clamp to display frame bounds
 | 
						|
        display_x = max(0, min(display_x, display_width))
 | 
						|
        display_y = max(0, min(display_y, display_height))
 | 
						|
        display_w = min(display_w, display_width - display_x)
 | 
						|
        display_h = min(display_h, display_height - display_y)
 | 
						|
 | 
						|
        # Now we need to convert from the display frame coordinates back to original frame coordinates
 | 
						|
        # The display frame is the result of: original -> crop -> rotation -> zoom
 | 
						|
        
 | 
						|
        # Step 1: Reverse zoom
 | 
						|
        if self.zoom_factor != 1.0:
 | 
						|
            display_x = display_x / self.zoom_factor
 | 
						|
            display_y = display_y / self.zoom_factor
 | 
						|
            display_w = display_w / self.zoom_factor
 | 
						|
            display_h = display_h / self.zoom_factor
 | 
						|
 | 
						|
        # Step 2: Reverse rotation
 | 
						|
        if self.rotation_angle != 0:
 | 
						|
            # Get the dimensions of the frame after crop but before rotation
 | 
						|
            if self.crop_rect:
 | 
						|
                crop_w, crop_h = int(self.crop_rect[2]), int(self.crop_rect[3])
 | 
						|
            else:
 | 
						|
                crop_w, crop_h = original_width, original_height
 | 
						|
            
 | 
						|
            # Apply inverse rotation to coordinates
 | 
						|
            # The key insight: we need to use the dimensions of the ROTATED frame for the coordinate transformation
 | 
						|
            # because the coordinates we have are in the rotated coordinate system
 | 
						|
            if self.rotation_angle == 90:
 | 
						|
                # 90° clockwise rotation: (x,y) -> (y, rotated_width-x-w)
 | 
						|
                # The rotated frame has dimensions: height x width (swapped)
 | 
						|
                rotated_w, rotated_h = crop_h, crop_w
 | 
						|
                new_x = display_y
 | 
						|
                new_y = rotated_w - display_x - display_w
 | 
						|
                new_w = display_h
 | 
						|
                new_h = display_w
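                # Example (hypothetical numbers): a 400x300 pre-rotation crop becomes a
                # 300x400 rotated frame, so a selection (x=50, y=20, w=100, h=60) drawn
                # in the rotated view maps back to (x=20, y=300-50-100=150, w=60, h=100).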
 | 
						|
            elif self.rotation_angle == 180:
 | 
						|
                # 180° rotation: (x,y) -> (width-x-w, height-y-h)
 | 
						|
                new_x = crop_w - display_x - display_w
 | 
						|
                new_y = crop_h - display_y - display_h
 | 
						|
                new_w = display_w
 | 
						|
                new_h = display_h
 | 
						|
            elif self.rotation_angle == 270:
 | 
						|
                # 270° clockwise rotation: (x,y) -> (rotated_height-y-h, x)
 | 
						|
                # The rotated frame has dimensions: height x width (swapped)
 | 
						|
                rotated_w, rotated_h = crop_h, crop_w
 | 
						|
                new_x = rotated_h - display_y - display_h
 | 
						|
                new_y = display_x
 | 
						|
                new_w = display_h
 | 
						|
                new_h = display_w
 | 
						|
            else:
 | 
						|
                new_x, new_y, new_w, new_h = display_x, display_y, display_w, display_h
 | 
						|
            
 | 
						|
            display_x, display_y, display_w, display_h = new_x, new_y, new_w, new_h
 | 
						|
 | 
						|
        # Step 3: Convert from cropped frame coordinates to original frame coordinates
 | 
						|
        original_x = display_x
 | 
						|
        original_y = display_y
 | 
						|
        original_w = display_w
 | 
						|
        original_h = display_h
 | 
						|
 | 
						|
        # Add the crop offset to get back to original frame coordinates
 | 
						|
        if self.crop_rect:
 | 
						|
            crop_x, crop_y, crop_w, crop_h = self.crop_rect
 | 
						|
            original_x += crop_x
 | 
						|
            original_y += crop_y
 | 
						|
 | 
						|
        # Clamp to original frame bounds
 | 
						|
        original_x = max(0, min(original_x, original_width))
 | 
						|
        original_y = max(0, min(original_y, original_height))
 | 
						|
        original_w = min(original_w, original_width - original_x)
 | 
						|
        original_h = min(original_h, original_height - original_y)
 | 
						|
 | 
						|
        if original_w > 10 and original_h > 10:  # Minimum size check
 | 
						|
            # Save current crop for undo
 | 
						|
            if self.crop_rect:
 | 
						|
                self.crop_history.append(self.crop_rect)
 | 
						|
            self.crop_rect = (original_x, original_y, original_w, original_h)
 | 
						|
 | 
						|
    def seek_to_timeline_position(self, mouse_x, bar_x_start, bar_width):
 | 
						|
        """Seek to position based on mouse click on timeline"""
 | 
						|
        relative_x = mouse_x - bar_x_start
 | 
						|
        position_ratio = max(0, min(1, relative_x / bar_width))
 | 
						|
        target_frame = int(position_ratio * (self.total_frames - 1))
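        # e.g. clicking 25% of the way along a 1000-frame video gives
        # int(0.25 * 999) = 249, so the seek lands just before the quarter mark.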
 | 
						|
        self.seek_to_frame(target_frame)

    def undo_crop(self):
        """Undo the last crop operation"""
        if self.crop_history:
            self.crop_rect = self.crop_history.pop()
        else:
            self.crop_rect = None
 | 
						|
 | 
						|
    def toggle_marker_looping(self):
 | 
						|
        """Toggle looping between cut markers"""
 | 
						|
        # Check if both markers are set
 | 
						|
        if self.cut_start_frame is None or self.cut_end_frame is None:
 | 
						|
            print("Both markers must be set to enable looping. Use '1' and '2' to set markers.")
 | 
						|
            return False
 | 
						|
            
 | 
						|
        if self.cut_start_frame >= self.cut_end_frame:
 | 
						|
            print("Invalid marker range - start frame must be before end frame")
 | 
						|
            return False
 | 
						|
            
 | 
						|
        self.looping_between_markers = not self.looping_between_markers
 | 
						|
        
 | 
						|
        if self.looping_between_markers:
 | 
						|
            print(f"Marker looping ENABLED: frames {self.cut_start_frame} - {self.cut_end_frame}")
 | 
						|
            # Jump to start marker when enabling
 | 
						|
            self.seek_to_frame(self.cut_start_frame)
 | 
						|
        else:
 | 
						|
            print("Marker looping DISABLED")
 | 
						|
            
 | 
						|
        return True
 | 
						|
 | 
						|
 | 
						|
 | 
						|
    def adjust_crop_size(self, direction: str, expand: bool, amount: int = None):
 | 
						|
        """
 | 
						|
        Adjust crop size in given direction
 | 
						|
        direction: 'up', 'down', 'left', 'right'
 | 
						|
        expand: True to expand, False to contract
 | 
						|
        amount: pixels to adjust by (uses self.crop_size_step if None)
 | 
						|
        """
 | 
						|
        if amount is None:
 | 
						|
            amount = self.crop_size_step
 | 
						|
        if not self.crop_rect:
 | 
						|
            # If no crop exists, create a default one in the center
 | 
						|
            center_x = self.frame_width // 2
 | 
						|
            center_y = self.frame_height // 2
 | 
						|
            default_size = min(self.frame_width, self.frame_height) // 4
 | 
						|
            self.crop_rect = (
 | 
						|
                center_x - default_size // 2,
 | 
						|
                center_y - default_size // 2,
 | 
						|
                default_size,
 | 
						|
                default_size
 | 
						|
            )
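            # e.g. on a 1920x1080 frame this creates a 270x270 default crop whose
            # top-left corner is at (825, 405), i.e. centred in the frame.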
 | 
						|
            return
 | 
						|
 | 
						|
        x, y, w, h = self.crop_rect
 | 
						|
        
 | 
						|
        if direction == 'up':
 | 
						|
            if expand:
 | 
						|
                # Expand upward - decrease y, increase height
 | 
						|
                new_y = max(0, y - amount)
 | 
						|
                new_h = h + (y - new_y)
 | 
						|
                self.crop_rect = (x, new_y, w, new_h)
 | 
						|
            else:
 | 
						|
                # Contract from bottom - decrease height
 | 
						|
                new_h = max(10, h - amount)  # Minimum size of 10 pixels
 | 
						|
                self.crop_rect = (x, y, w, new_h)
 | 
						|
                
 | 
						|
        elif direction == 'down':
 | 
						|
            if expand:
 | 
						|
                # Expand downward - increase height
 | 
						|
                new_h = min(self.frame_height - y, h + amount)
 | 
						|
                self.crop_rect = (x, y, w, new_h)
 | 
						|
            else:
 | 
						|
                # Contract from top - increase y, decrease height
 | 
						|
                amount = min(amount, h - 10)  # Don't make it smaller than 10 pixels
 | 
						|
                new_y = y + amount
 | 
						|
                new_h = h - amount
 | 
						|
                self.crop_rect = (x, new_y, w, new_h)
 | 
						|
                
 | 
						|
        elif direction == 'left':
 | 
						|
            if expand:
 | 
						|
                # Expand leftward - decrease x, increase width
 | 
						|
                new_x = max(0, x - amount)
 | 
						|
                new_w = w + (x - new_x)
 | 
						|
                self.crop_rect = (new_x, y, new_w, h)
 | 
						|
            else:
 | 
						|
                # Contract from right - decrease width
 | 
						|
                new_w = max(10, w - amount)  # Minimum size of 10 pixels
 | 
						|
                self.crop_rect = (x, y, new_w, h)
 | 
						|
                
 | 
						|
        elif direction == 'right':
 | 
						|
            if expand:
 | 
						|
                # Expand rightward - increase width
 | 
						|
                new_w = min(self.frame_width - x, w + amount)
 | 
						|
                self.crop_rect = (x, y, new_w, h)
 | 
						|
            else:
 | 
						|
                # Contract from left - increase x, decrease width
 | 
						|
                amount = min(amount, w - 10)  # Don't make it smaller than 10 pixels
 | 
						|
                new_x = x + amount
 | 
						|
                new_w = w - amount
 | 
						|
                self.crop_rect = (new_x, y, new_w, h)
 | 
						|
 | 
						|
    def render_video(self, output_path: str):
 | 
						|
        """Render video or save image with current edits applied"""
 | 
						|
        if self.is_image_mode:
 | 
						|
            return self._render_image(output_path)
 | 
						|
        else:
 | 
						|
            return self._render_video_threaded(output_path)
 | 
						|
    
 | 
						|
    def _render_video_threaded(self, output_path: str):
 | 
						|
        """Start video rendering in a separate thread"""
 | 
						|
        # Check if already rendering
 | 
						|
        if self.render_thread and self.render_thread.is_alive():
 | 
						|
            print("Render already in progress! Use 'X' to cancel first.")
 | 
						|
            return False
 | 
						|
        
 | 
						|
        # Reset render state
 | 
						|
        self.render_cancelled = False
 | 
						|
        
 | 
						|
        # Start render thread
 | 
						|
        self.render_thread = threading.Thread(
 | 
						|
            target=self._render_video_worker,
 | 
						|
            args=(output_path,),
 | 
						|
            daemon=True
 | 
						|
        )
 | 
						|
        self.render_thread.start()
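        # Progress, completion and error messages flow back from the worker through
        # self.render_progress_queue and are drained each UI tick by
        # update_render_progress(), so the editor stays responsive while rendering.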
 | 
						|
        
 | 
						|
        print(f"Started rendering to {output_path} in background thread...")
 | 
						|
        print("You can continue editing while rendering. Press 'X' to cancel.")
 | 
						|
        return True
 | 
						|
    
 | 
						|
    def _render_video_worker(self, output_path: str):
 | 
						|
        """Worker method that runs in the render thread"""
 | 
						|
        render_cap = None
 | 
						|
        try:
 | 
						|
            if not output_path.endswith(".mp4"):
 | 
						|
                output_path += ".mp4"
 | 
						|
 | 
						|
            start_time = time.time()
 | 
						|
 | 
						|
            # Send progress update to main thread
 | 
						|
            self.render_progress_queue.put(("init", "Initializing render...", 0.0, 0.0))
 | 
						|
 | 
						|
            # Create a separate VideoCapture for the render thread to avoid thread safety issues
 | 
						|
            render_cap = cv2.VideoCapture(str(self.video_path))
 | 
						|
            if not render_cap.isOpened():
 | 
						|
                self.render_progress_queue.put(("error", "Could not open video for rendering!", 1.0, 0.0))
 | 
						|
                return False
 | 
						|
 | 
						|
            # Determine frame range
 | 
						|
            start_frame = self.cut_start_frame if self.cut_start_frame is not None else 0
 | 
						|
            end_frame = (
 | 
						|
                self.cut_end_frame
 | 
						|
                if self.cut_end_frame is not None
 | 
						|
                else self.total_frames - 1
 | 
						|
            )
 | 
						|
 | 
						|
            if start_frame >= end_frame:
 | 
						|
                self.render_progress_queue.put(("error", "Invalid cut range!", 1.0, 0.0))
 | 
						|
                return False
 | 
						|
 | 
						|
            # Send progress update
 | 
						|
            self.render_progress_queue.put(("progress", "Calculating output dimensions...", 0.05, 0.0))
 | 
						|
 | 
						|
            # Calculate output dimensions (accounting for rotation)
 | 
						|
            if self.crop_rect:
 | 
						|
                crop_width = int(self.crop_rect[2])
 | 
						|
                crop_height = int(self.crop_rect[3])
 | 
						|
            else:
 | 
						|
                crop_width = self.frame_width
 | 
						|
                crop_height = self.frame_height
 | 
						|
 | 
						|
            # Swap dimensions if rotation is 90 or 270 degrees
 | 
						|
            if self.rotation_angle == 90 or self.rotation_angle == 270:
 | 
						|
                output_width = int(crop_height * self.zoom_factor)
 | 
						|
                output_height = int(crop_width * self.zoom_factor)
 | 
						|
            else:
 | 
						|
                output_width = int(crop_width * self.zoom_factor)
 | 
						|
                output_height = int(crop_height * self.zoom_factor)
 | 
						|
 | 
						|
            # Send progress update
 | 
						|
            self.render_progress_queue.put(("progress", "Setting up video writer...", 0.1, 0.0))
 | 
						|
 | 
						|
            # Use mp4v codec (most compatible with MP4)
 | 
						|
            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
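            # Note: "avc1" (H.264) usually produces much smaller files, but it is only
            # available when OpenCV is built against an H.264-capable backend, so the
            # widely supported mp4v default is kept here.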
 | 
						|
            out = cv2.VideoWriter(
 | 
						|
                output_path, fourcc, self.fps, (output_width, output_height)
 | 
						|
            )
 | 
						|
 | 
						|
            if not out.isOpened():
 | 
						|
                self.render_progress_queue.put(("error", "Could not open video writer!", 1.0, 0.0))
 | 
						|
                return False
 | 
						|
 | 
						|
            # Simple sequential processing - the I/O is the bottleneck anyway
 | 
						|
            total_output_frames = end_frame - start_frame + 1
 | 
						|
            last_progress_update = 0
 | 
						|
 | 
						|
            for frame_idx in range(start_frame, end_frame + 1):
 | 
						|
                # Check for cancellation
 | 
						|
                if self.render_cancelled:
 | 
						|
                    out.release()
 | 
						|
                    self.render_progress_queue.put(("cancelled", "Render cancelled", 0.0, 0.0))
 | 
						|
                    return False
 | 
						|
 | 
						|
                # Read frame using the separate VideoCapture
 | 
						|
                render_cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
 | 
						|
                ret, frame = render_cap.read()
 | 
						|
 | 
						|
                if not ret:
 | 
						|
                    break
 | 
						|
 | 
						|
                # Process and write frame directly (minimize memory copies)
 | 
						|
                processed_frame = self._process_frame_for_render(
 | 
						|
                    frame, output_width, output_height
 | 
						|
                )
 | 
						|
 | 
						|
                if processed_frame is not None:
 | 
						|
                    out.write(processed_frame)
 | 
						|
 | 
						|
                    frames_written = frame_idx - start_frame + 1
 | 
						|
                    current_time = time.time()
 | 
						|
 | 
						|
                    # Update progress bar (10% to 95% of progress reserved for frame processing)
 | 
						|
                    progress = 0.1 + (0.85 * (frames_written / total_output_frames))
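                    # e.g. halfway through the frames this yields 0.1 + 0.85*0.5 = 0.525,
                    # i.e. the bar sits just past the middle of its 10%-95% range.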
 | 
						|
 | 
						|
                    # Throttled progress update
 | 
						|
                    if current_time - last_progress_update > 0.5:
 | 
						|
                        elapsed = current_time - start_time
 | 
						|
                        fps_rate = frames_written / elapsed
 | 
						|
                        eta = (elapsed / frames_written) * (
 | 
						|
                            total_output_frames - frames_written
 | 
						|
                        )
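                        # e.g. 500 frames written in 10 s -> 50 FPS; with 1500 frames
                        # still to go the ETA shown is (10/500) * 1500 = 30 s.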
 | 
						|
 | 
						|
                        progress_text = f"Rendering {frames_written}/{total_output_frames} frames (ETA: {eta:.1f}s)"
 | 
						|
                        self.render_progress_queue.put(("progress", progress_text, progress, fps_rate))
 | 
						|
                        last_progress_update = current_time
 | 
						|
 | 
						|
            out.release()
 | 
						|
            
 | 
						|
            # Ensure the video writer is completely closed and file handles are freed
 | 
						|
            del out
 | 
						|
            time.sleep(0.1)  # Small delay to ensure file is unlocked
 | 
						|
 | 
						|
            total_time = time.time() - start_time
 | 
						|
            total_frames_written = end_frame - start_frame + 1
 | 
						|
            avg_fps = total_frames_written / total_time if total_time > 0 else 0
 | 
						|
 | 
						|
            # Complete the progress bar
 | 
						|
            self.render_progress_queue.put(("complete", f"Complete! Rendered {total_frames_written} frames in {total_time:.1f}s", 1.0, avg_fps))
 | 
						|
 | 
						|
            print(f"\nVideo rendered successfully to {output_path}")
 | 
						|
            print(f"Rendered {total_frames_written} frames in {total_time:.2f}s (avg {avg_fps:.1f} FPS)")
 | 
						|
            return True
 | 
						|
 | 
						|
        except Exception as e:
 | 
						|
            error_msg = str(e)
 | 
						|
            # Handle specific FFmpeg threading errors
 | 
						|
            if "async_lock" in error_msg or "pthread_frame" in error_msg:
 | 
						|
                error_msg = "FFmpeg threading error - try restarting the application"
 | 
						|
            elif "Assertion" in error_msg:
 | 
						|
                error_msg = "Video codec error - the video file may be corrupted or incompatible"
 | 
						|
            
 | 
						|
            self.render_progress_queue.put(("error", f"Render error: {error_msg}", 1.0, 0.0))
 | 
						|
            print(f"Render error: {error_msg}")
 | 
						|
            return False
 | 
						|
        finally:
 | 
						|
            # Always clean up the render VideoCapture
 | 
						|
            if render_cap:
 | 
						|
                render_cap.release()
 | 
						|
    
 | 
						|
    def update_render_progress(self):
 | 
						|
        """Process progress updates from the render thread"""
 | 
						|
        try:
 | 
						|
            while True:
 | 
						|
                # Non-blocking get from queue
 | 
						|
                update_type, text, progress, fps = self.render_progress_queue.get_nowait()
 | 
						|
                
 | 
						|
                if update_type == "init":
 | 
						|
                    self.show_progress_bar(text)
 | 
						|
                elif update_type == "progress":
 | 
						|
                    self.update_progress_bar(progress, text, fps)
 | 
						|
                elif update_type == "complete":
 | 
						|
                    self.update_progress_bar(progress, text, fps)
 | 
						|
                elif update_type == "error":
 | 
						|
                    self.update_progress_bar(progress, text, fps)
 | 
						|
                elif update_type == "cancelled":
 | 
						|
                    self.hide_progress_bar()
 | 
						|
                    self.show_feedback_message("Render cancelled")
 | 
						|
                    
 | 
						|
        except queue.Empty:
 | 
						|
            # No more updates in queue
 | 
						|
            pass
 | 
						|
    
 | 
						|
    def cancel_render(self):
 | 
						|
        """Cancel the current render operation"""
 | 
						|
        if self.render_thread and self.render_thread.is_alive():
 | 
						|
            self.render_cancelled = True
 | 
						|
            print("Render cancellation requested...")
 | 
						|
            return True
 | 
						|
        return False
 | 
						|
    
 | 
						|
    def is_rendering(self):
 | 
						|
        """Check if a render operation is currently active"""
 | 
						|
        return self.render_thread and self.render_thread.is_alive()
 | 
						|
    
 | 
						|
    def cleanup_render_thread(self):
 | 
						|
        """Clean up render thread resources"""
 | 
						|
        if self.render_thread and self.render_thread.is_alive():
 | 
						|
            self.render_cancelled = True
 | 
						|
            # Wait a bit for the thread to finish gracefully
 | 
						|
            self.render_thread.join(timeout=2.0)
 | 
						|
            if self.render_thread.is_alive():
 | 
						|
                print("Warning: Render thread did not finish gracefully")
 | 
						|
        self.render_thread = None
 | 
						|
        self.render_cancelled = False
 | 
						|
    
 | 
						|
    def _render_image(self, output_path: str):
 | 
						|
        """Save image with current edits applied"""
 | 
						|
        # Get the appropriate file extension
 | 
						|
        original_ext = self.video_path.suffix.lower()
 | 
						|
        if not output_path.endswith(original_ext):
 | 
						|
            output_path += original_ext
 | 
						|
 | 
						|
        print(f"Saving image to {output_path}...")
 | 
						|
 | 
						|
        # Apply all transformations to the image
 | 
						|
        processed_image = self.apply_crop_zoom_and_rotation(self.static_image.copy())
 | 
						|
        
 | 
						|
        if processed_image is not None:
 | 
						|
            # Save the image
 | 
						|
            success = cv2.imwrite(output_path, processed_image)
 | 
						|
            if success:
 | 
						|
                print(f"Image saved successfully to {output_path}")
 | 
						|
                return True
 | 
						|
            else:
 | 
						|
                print(f"Error: Could not save image to {output_path}")
 | 
						|
                return False
 | 
						|
        else:
 | 
						|
            print("Error: Could not process image")
 | 
						|
            return False
 | 
						|
 | 
						|
 | 
						|
    def _process_frame_for_render(self, frame, output_width: int, output_height: int):
 | 
						|
        """Process a single frame for rendering (optimized for speed)"""
 | 
						|
        try:
 | 
						|
            # Apply crop (vectorized operation)
 | 
						|
            if self.crop_rect:
 | 
						|
                x, y, w, h = map(int, self.crop_rect)
 | 
						|
 | 
						|
                # Clamp coordinates to frame bounds
 | 
						|
                h_frame, w_frame = frame.shape[:2]
 | 
						|
                x = max(0, min(x, w_frame - 1))
 | 
						|
                y = max(0, min(y, h_frame - 1))
 | 
						|
                w = min(w, w_frame - x)
 | 
						|
                h = min(h, h_frame - y)
 | 
						|
 | 
						|
                if w > 0 and h > 0:
 | 
						|
                    frame = frame[y : y + h, x : x + w]
 | 
						|
                else:
 | 
						|
                    return None
 | 
						|
 | 
						|
            # Apply brightness and contrast
 | 
						|
            frame = self.apply_brightness_contrast(frame)
 | 
						|
 | 
						|
            # Apply rotation
 | 
						|
            if self.rotation_angle != 0:
 | 
						|
                frame = self.apply_rotation(frame)
 | 
						|
 | 
						|
            # Apply zoom and resize in one step for efficiency
 | 
						|
            if self.zoom_factor != 1.0:
 | 
						|
                height, width = frame.shape[:2]
 | 
						|
                intermediate_width = int(width * self.zoom_factor)
 | 
						|
                intermediate_height = int(height * self.zoom_factor)
 | 
						|
 | 
						|
                # If zoom results in different dimensions than output, resize directly to output
 | 
						|
                if (
 | 
						|
                    intermediate_width != output_width
 | 
						|
                    or intermediate_height != output_height
 | 
						|
                ):
 | 
						|
                    frame = cv2.resize(
 | 
						|
                        frame,
 | 
						|
                        (output_width, output_height),
 | 
						|
                        interpolation=cv2.INTER_LINEAR,
 | 
						|
                    )
 | 
						|
                else:
 | 
						|
                    frame = cv2.resize(
 | 
						|
                        frame,
 | 
						|
                        (intermediate_width, intermediate_height),
 | 
						|
                        interpolation=cv2.INTER_LINEAR,
 | 
						|
                    )
 | 
						|
 | 
						|
            # Final size check and resize if needed
 | 
						|
            if frame.shape[1] != output_width or frame.shape[0] != output_height:
 | 
						|
                frame = cv2.resize(
 | 
						|
                    frame, (output_width, output_height), interpolation=cv2.INTER_LINEAR
 | 
						|
                )
 | 
						|
 | 
						|
            return frame
 | 
						|
 | 
						|
        except Exception as e:
 | 
						|
            print(f"Error processing frame: {e}")
 | 
						|
            return None
 | 
						|
 | 
						|
    def run(self):
 | 
						|
        """Main editor loop"""
 | 
						|
        if self.is_image_mode:
 | 
						|
            print("Image Editor Controls:")
 | 
						|
            print("  E/Shift+E: Increase/Decrease brightness")
 | 
						|
            print("  R/Shift+R: Increase/Decrease contrast")
 | 
						|
            print("  -: Rotate clockwise 90°")
 | 
						|
            print()
 | 
						|
            print("Crop Controls:")
 | 
						|
            print("  Shift+Click+Drag: Select crop area")
 | 
						|
            print("  h/j/k/l: Contract crop (left/down/up/right)")
 | 
						|
            print("  H/J/K/L: Expand crop (left/down/up/right)")
 | 
						|
            print("  U: Undo crop")
 | 
						|
            print("  C: Clear crop")
 | 
						|
            print()
 | 
						|
            print("Other Controls:")
 | 
						|
            print("  Ctrl+Scroll: Zoom in/out")
 | 
						|
            print("  Shift+S: Save screenshot")
 | 
						|
            if len(self.video_files) > 1:
 | 
						|
                print("  N: Next file")
 | 
						|
                print("  n: Previous file")
 | 
						|
            print("  Enter: Save image (overwrites if '_edited_' in name)")
 | 
						|
            print("  n: Save image as _edited_edited")
 | 
						|
            print("  Q/ESC: Quit")
 | 
						|
            print()
 | 
						|
        else:
 | 
						|
            print("Video Editor Controls:")
 | 
						|
            print("  Space: Play/Pause")
 | 
						|
            print("  A/D: Seek backward/forward (1 frame)")
 | 
						|
            print("  Shift+A/D: Seek backward/forward (10 frames)")
 | 
						|
            print("  Ctrl+A/D: Seek backward/forward (60 frames)")
 | 
						|
            print("  W/S: Increase/Decrease speed")
 | 
						|
            print("  Q/Y: Increase/Decrease seek multiplier")
 | 
						|
            print("  E/Shift+E: Increase/Decrease brightness")
 | 
						|
            print("  R/Shift+R: Increase/Decrease contrast")
 | 
						|
            print("  -: Rotate clockwise 90°")
 | 
						|
            print()
 | 
						|
            print("Crop Controls:")
 | 
						|
            print("  Shift+Click+Drag: Select crop area")
 | 
						|
            print("  h/j/k/l: Contract crop (left/down/up/right)")
 | 
						|
            print("  H/J/K/L: Expand crop (left/down/up/right)")
 | 
						|
            print("  U: Undo crop")
 | 
						|
            print("  C: Clear crop")
 | 
						|
            print()
 | 
						|
            print("Other Controls:")
 | 
						|
            print("  Ctrl+Scroll: Zoom in/out")
 | 
						|
            print("  Shift+S: Save screenshot")
 | 
						|
            print("  1: Set cut start point")
 | 
						|
            print("  2: Set cut end point")
 | 
						|
            print("  T: Toggle loop between markers")
 | 
						|
            if len(self.video_files) > 1:
 | 
						|
                print("  N: Next video")
 | 
						|
                print("  n: Previous video")
 | 
						|
            print("  Enter: Render video (overwrites if '_edited_' in name)")
 | 
						|
            print("  n: Render video as _edited_edited")
 | 
						|
            print("  X: Cancel render")
 | 
						|
            print("  Q/ESC: Quit")
 | 
						|
            print()
 | 
						|
 | 
						|
        window_title = "Image Editor" if self.is_image_mode else "Video Editor"
 | 
						|
        cv2.namedWindow(window_title, cv2.WINDOW_NORMAL)
 | 
						|
        cv2.resizeWindow(window_title, self.window_width, self.window_height)
 | 
						|
        cv2.setMouseCallback(window_title, self.mouse_callback)
 | 
						|
 | 
						|
        self.load_current_frame()
 | 
						|
 | 
						|
        while True:
 | 
						|
            # Update auto-repeat seeking if active
 | 
						|
            self.update_auto_repeat_seek()
 | 
						|
            
 | 
						|
            # Update render progress from background thread
 | 
						|
            self.update_render_progress()
 | 
						|
            
 | 
						|
            # Only update display if needed and throttled
 | 
						|
            if self.should_update_display():
 | 
						|
                self.display_current_frame()
 | 
						|
 | 
						|
            delay = self.calculate_frame_delay() if self.is_playing else 1  # Very short delay for responsive key detection
 | 
						|
            key = cv2.waitKey(delay) & 0xFF
 | 
						|
            
 | 
						|
            
 | 
						|
            # Handle auto-repeat - stop if no key is pressed
 | 
						|
            if key == 255 and self.auto_repeat_active:  # 255 means no key pressed
 | 
						|
                self.stop_auto_repeat_seek()
 | 
						|
 | 
						|
            # Get modifier key states
 | 
						|
            window_title = "Image Editor" if self.is_image_mode else "Video Editor"
 | 
						|
            # Note: OpenCV doesn't provide direct access to modifier keys in waitKey
 | 
						|
            # We'll handle this through special key combinations
 | 
						|
 | 
						|
            if key == ord("q") or key == 27:  # ESC
 | 
						|
                self.stop_auto_repeat_seek()
 | 
						|
                self.save_state()
 | 
						|
                break
 | 
						|
            elif key == ord(" "):
 | 
						|
                # Don't allow play/pause for images
 | 
						|
                if not self.is_image_mode:
 | 
						|
                    self.stop_auto_repeat_seek()  # Stop seeking when toggling play/pause
 | 
						|
                    self.is_playing = not self.is_playing
 | 
						|
            elif key == ord("a") or key == ord("A"):
 | 
						|
                # Seeking only for videos
 | 
						|
                if not self.is_image_mode:
 | 
						|
                    # Check if it's uppercase A (Shift+A)
 | 
						|
                    if key == ord("A"):
 | 
						|
                        if not self.auto_repeat_active:
 | 
						|
                            self.start_auto_repeat_seek(-1, True, False)  # Shift+A: -10 frames
 | 
						|
                    else:
 | 
						|
                        if not self.auto_repeat_active:
 | 
						|
                            self.start_auto_repeat_seek(-1, False, False)  # A: -1 frame
 | 
						|
            elif key == ord("d") or key == ord("D"):
 | 
						|
                # Seeking only for videos
 | 
						|
                if not self.is_image_mode:
 | 
						|
                    # Check if it's uppercase D (Shift+D)
 | 
						|
                    if key == ord("D"):
 | 
						|
                        if not self.auto_repeat_active:
 | 
						|
                            self.start_auto_repeat_seek(1, True, False)  # Shift+D: +10 frames
 | 
						|
                    else:
 | 
						|
                        if not self.auto_repeat_active:
 | 
						|
                            self.start_auto_repeat_seek(1, False, False)  # D: +1 frame
 | 
						|
            elif key == 1:  # Ctrl+A
 | 
						|
                # Seeking only for videos
 | 
						|
                if not self.is_image_mode:
 | 
						|
                    if not self.auto_repeat_active:
 | 
						|
                        self.start_auto_repeat_seek(-1, False, True)  # Ctrl+A: -60 frames
 | 
						|
            elif key == 4:  # Ctrl+D
 | 
						|
                # Seeking only for videos
 | 
						|
                if not self.is_image_mode:
 | 
						|
                    if not self.auto_repeat_active:
 | 
						|
                        self.start_auto_repeat_seek(1, False, True)  # Ctrl+D: +60 frames
 | 
						|
            elif key == ord("-") or key == ord("_"):
 | 
						|
                self.rotate_clockwise()
 | 
						|
                print(f"Rotated to {self.rotation_angle}°")
 | 
						|
            elif key == ord("s"):  # Shift+S - Save screenshot
 | 
						|
                self.save_current_frame()
 | 
						|
            elif key == ord("W"):
 | 
						|
                # Speed control only for videos
 | 
						|
                if not self.is_image_mode:
 | 
						|
                    self.playback_speed = min(
 | 
						|
                        self.MAX_PLAYBACK_SPEED, self.playback_speed + self.SPEED_INCREMENT
 | 
						|
                    )
 | 
						|
            elif key == ord("S"):
 | 
						|
                # Speed control only for videos
 | 
						|
                if not self.is_image_mode:
 | 
						|
                    self.playback_speed = max(
 | 
						|
                        self.MIN_PLAYBACK_SPEED, self.playback_speed - self.SPEED_INCREMENT
 | 
						|
                    )
 | 
						|
            elif key == ord("Q"):
 | 
						|
                # Seek multiplier control only for videos
 | 
						|
                if not self.is_image_mode:
 | 
						|
                    self.seek_multiplier = min(
 | 
						|
                        self.MAX_SEEK_MULTIPLIER, self.seek_multiplier + self.SEEK_MULTIPLIER_INCREMENT
 | 
						|
                    )
 | 
						|
                    print(f"Seek multiplier: {self.seek_multiplier:.1f}x")
 | 
						|
            elif key == ord("Y"):
 | 
						|
                # Seek multiplier control only for videos
 | 
						|
                if not self.is_image_mode:
 | 
						|
                    self.seek_multiplier = max(
 | 
						|
                        self.MIN_SEEK_MULTIPLIER, self.seek_multiplier - self.SEEK_MULTIPLIER_INCREMENT
 | 
						|
                    )
 | 
						|
                    print(f"Seek multiplier: {self.seek_multiplier:.1f}x")
 | 
						|
            elif key == ord("e") or key == ord("E"):
 | 
						|
                # Brightness adjustment: E (increase), Shift+E (decrease)
 | 
						|
                if key == ord("E"):
 | 
						|
                    self.adjust_brightness(-5)
 | 
						|
                    print(f"Brightness: {self.brightness}")
 | 
						|
                else:
 | 
						|
                    self.adjust_brightness(5)
 | 
						|
                    print(f"Brightness: {self.brightness}")
 | 
						|
            elif key == ord("r") or key == ord("R"):
 | 
						|
                # Contrast adjustment: R (increase), Shift+R (decrease)
 | 
						|
                if key == ord("R"):
 | 
						|
                    self.adjust_contrast(-0.1)
 | 
						|
                    print(f"Contrast: {self.contrast:.1f}")
 | 
						|
                else:
 | 
						|
                    self.adjust_contrast(0.1)
 | 
						|
                    print(f"Contrast: {self.contrast:.1f}")
 | 
						|
            elif key == ord("u"):
 | 
						|
                self.undo_crop()
 | 
						|
            elif key == ord("c"):
 | 
						|
                if self.crop_rect:
 | 
						|
                    self.crop_history.append(self.crop_rect)
 | 
						|
                self.crop_rect = None
 | 
						|
            elif key == ord("1"):
 | 
						|
                # Cut markers only for videos
 | 
						|
                if not self.is_image_mode:
 | 
						|
                    self.cut_start_frame = self.current_frame
 | 
						|
                    print(f"Set cut start at frame {self.current_frame}")
 | 
						|
            elif key == ord("2"):
 | 
						|
                # Cut markers only for videos
 | 
						|
                if not self.is_image_mode:
 | 
						|
                    self.cut_end_frame = self.current_frame
 | 
						|
                    print(f"Set cut end at frame {self.current_frame}")
 | 
						|
            elif key == ord("N"):
 | 
						|
                if len(self.video_files) > 1:
 | 
						|
                    self.previous_video()
 | 
						|
            elif key == ord("n"):
 | 
						|
                if len(self.video_files) > 1:
 | 
						|
                    self.next_video()
 | 
						|
                else:
 | 
						|
                    # n - Create _edited_edited file
 | 
						|
                    directory = self.video_path.parent
 | 
						|
                    base_name = self.video_path.stem
 | 
						|
                    extension = self.video_path.suffix
 | 
						|
                    
 | 
						|
                    # Create _edited_edited filename
 | 
						|
                    if "_edited_" in base_name:
 | 
						|
                        # If already edited, create _edited_edited
 | 
						|
                        new_name = f"{base_name}_edited{extension}"
 | 
						|
                    else:
 | 
						|
                        # If not edited, create _edited_edited
 | 
						|
                        new_name = f"{base_name}_edited_edited{extension}"
 | 
						|
                    
 | 
						|
                    output_path = str(directory / new_name)
 | 
						|
                    success = self.render_video(output_path)
 | 
						|
            elif key == 13:  # Enter
 | 
						|
                # Only overwrite if file already contains "_edited_" in name
 | 
						|
                if "_edited_" in self.video_path.stem:
 | 
						|
                    output_path = str(self.video_path)
 | 
						|
                    
 | 
						|
                    # If we're overwriting the same file, use a temporary file first
 | 
						|
                    import tempfile
 | 
						|
                    temp_dir = self.video_path.parent
 | 
						|
                    temp_fd, temp_path = tempfile.mkstemp(suffix=self.video_path.suffix, dir=temp_dir)
 | 
						|
                    os.close(temp_fd)  # Close the file descriptor, we just need the path
 | 
						|
                    
 | 
						|
                    print("Rendering to temporary file first...")
 | 
						|
                    success = self.render_video(temp_path)

                    # render_video() only starts the background render thread, so wait
                    # for it to finish before touching the original file; otherwise a
                    # partially written temp file could be moved into place.
                    if success and self.render_thread is not None:
                        self.render_thread.join()

                    if success:
 | 
						|
                        print("Replacing original file...")
 | 
						|
                        # Release current video capture before replacing the file
 | 
						|
                        if hasattr(self, 'cap') and self.cap:
 | 
						|
                            self.cap.release()
 | 
						|
                        
 | 
						|
                        # Replace the original file with the temporary file
 | 
						|
                        import shutil
 | 
						|
                        shutil.move(temp_path, str(self.video_path))
 | 
						|
                        
 | 
						|
                        # Small delay to ensure file system operations are complete
 | 
						|
                        time.sleep(0.1)
 | 
						|
                        
 | 
						|
                        try:
 | 
						|
                            self._load_video(self.video_path)
 | 
						|
                            self.load_current_frame()
 | 
						|
                            print("File reloaded successfully")
 | 
						|
                        except Exception as e:
 | 
						|
                            print(f"Warning: Could not reload file after overwrite: {e}")
 | 
						|
                            print("The file was saved successfully, but you may need to restart the editor to continue editing it.")
 | 
						|
                    else:
 | 
						|
                        # Clean up temp file if rendering failed
 | 
						|
                        if os.path.exists(temp_path):
 | 
						|
                            os.remove(temp_path)
 | 
						|
                else:
 | 
						|
                    print("Enter key only overwrites files with '_edited_' in the name. Use 'n' to create new files.")
 | 
						|
            elif key == ord("t"):
 | 
						|
                # Marker looping only for videos
 | 
						|
                if not self.is_image_mode:
 | 
						|
                    self.toggle_marker_looping()
 | 
						|
            elif key == ord("x"):
 | 
						|
                # Cancel render if active
 | 
						|
                if self.is_rendering():
 | 
						|
                    self.cancel_render()
 | 
						|
                    print("Render cancellation requested")
 | 
						|
                else:
 | 
						|
                    print("No render operation to cancel")
 | 
						|
            
 | 
						|
            # Individual crop-resize controls (uppercase expands, lowercase contracts)
            elif key == ord("J"):  # Shift+j - expand upward
                self.adjust_crop_size('up', True)
                print(f"Expanded crop upward by {self.crop_size_step}px")
            elif key == ord("K"):  # Shift+k - expand downward
                self.adjust_crop_size('down', True)
                print(f"Expanded crop downward by {self.crop_size_step}px")
            elif key == ord("L"):  # Shift+l - expand leftward
                self.adjust_crop_size('left', True)
                print(f"Expanded crop leftward by {self.crop_size_step}px")
            elif key == ord("H"):  # Shift+h - expand rightward
                self.adjust_crop_size('right', True)
                print(f"Expanded crop rightward by {self.crop_size_step}px")

            # Contract in specific directions
            elif key == ord("k"):  # k - contract from the bottom edge
                self.adjust_crop_size('up', False)
                print(f"Contracted crop from bottom by {self.crop_size_step}px")
            elif key == ord("j"):  # j - contract from the top edge
                self.adjust_crop_size('down', False)
                print(f"Contracted crop from top by {self.crop_size_step}px")
            elif key == ord("h"):  # h - contract from the right edge
                self.adjust_crop_size('left', False)
                print(f"Contracted crop from right by {self.crop_size_step}px")
            elif key == ord("l"):  # l - contract from the left edge
                self.adjust_crop_size('right', False)
                print(f"Contracted crop from left by {self.crop_size_step}px")
 | 
						|
            
 | 
						|
 | 
						|
            # Auto advance frame when playing (videos only)
 | 
						|
            if self.is_playing and not self.is_image_mode:
 | 
						|
                self.advance_frame()
 | 
						|
 | 
						|
        self.save_state()
 | 
						|
        self.cleanup_render_thread()
 | 
						|
        if hasattr(self, 'cap') and self.cap:
 | 
						|
            self.cap.release()
 | 
						|
        cv2.destroyAllWindows()
 | 
						|
 | 
						|
 | 
						|
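# Example invocation (hypothetical file names):
#   python video_editor.py clip.mp4      # edit a single video
#   python video_editor.py ./media/      # browse all supported videos/images in a folder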
def main():
 | 
						|
    parser = argparse.ArgumentParser(
 | 
						|
        description="Fast Media Editor - Crop, Zoom, and Edit videos and images"
 | 
						|
    )
 | 
						|
    parser.add_argument(
 | 
						|
        "media", help="Path to media file or directory containing videos/images"
 | 
						|
    )
 | 
						|
 | 
						|
    try:
 | 
						|
        args = parser.parse_args()
 | 
						|
    except SystemExit:
 | 
						|
        # If launched from context menu without arguments, this might fail
 | 
						|
        input("Argument parsing failed. Press Enter to exit...")
 | 
						|
        return
 | 
						|
 | 
						|
    if not os.path.exists(args.media):
 | 
						|
        error_msg = f"Error: {args.media} does not exist"
 | 
						|
        print(error_msg)
 | 
						|
        input("Press Enter to exit...")  # Keep window open in context menu
 | 
						|
        sys.exit(1)
 | 
						|
 | 
						|
    try:
 | 
						|
        editor = VideoEditor(args.media)
 | 
						|
        editor.run()
 | 
						|
    except Exception as e:
 | 
						|
        error_msg = f"Error initializing media editor: {e}"
 | 
						|
        print(error_msg)
 | 
						|
        import traceback
 | 
						|
        traceback.print_exc()  # Full error trace for debugging
 | 
						|
        input("Press Enter to exit...")  # Keep window open in context menu
 | 
						|
        sys.exit(1)
 | 
						|
 | 
						|
 | 
						|
if __name__ == "__main__":
 | 
						|
    main()
 |