This commit clarifies the zoom and rotation calculations in the _map_original_to_screen and _map_screen_to_original methods. It introduces intermediate variables for readability, corrects the coordinate mapping (including the display-offset adjustments), and streamlines how frame dimensions are processed.
import os
import sys
import cv2
import argparse
import numpy as np
from pathlib import Path
from typing import List
import time
import re
import json
import threading
import queue
import subprocess
import ctypes


class Cv2BufferedCap:
    """Buffered wrapper around cv2.VideoCapture that handles frame loading, seeking, and caching correctly"""

    def __init__(self, video_path, backend=None):
        self.video_path = video_path
        # cv2.VideoCapture rejects None as an apiPreference, so only pass the
        # backend when one was explicitly requested
        if backend is None:
            self.cap = cv2.VideoCapture(str(video_path))
        else:
            self.cap = cv2.VideoCapture(str(video_path), backend)
        if not self.cap.isOpened():
            raise ValueError(f"Could not open video: {video_path}")

        # Video properties
        self.total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        self.fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.frame_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.frame_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # Frame cache
        self.frame_cache = {}
        self.cache_access_order = []
        self.MAX_CACHE_FRAMES = 3000

        # Current position tracking
        self.current_frame = 0

    def _manage_cache(self):
        """Manage cache size using LRU eviction"""
        while len(self.frame_cache) > self.MAX_CACHE_FRAMES:
            oldest_frame = self.cache_access_order.pop(0)
            if oldest_frame in self.frame_cache:
                del self.frame_cache[oldest_frame]

    def _add_to_cache(self, frame_number, frame):
        """Add frame to cache"""
        self.frame_cache[frame_number] = frame.copy()
        if frame_number in self.cache_access_order:
            self.cache_access_order.remove(frame_number)
        self.cache_access_order.append(frame_number)
        self._manage_cache()

    def _get_from_cache(self, frame_number):
        """Get frame from cache and update LRU order"""
        if frame_number in self.frame_cache:
            if frame_number in self.cache_access_order:
                self.cache_access_order.remove(frame_number)
            self.cache_access_order.append(frame_number)
            return self.frame_cache[frame_number].copy()
        return None

    def get_frame(self, frame_number):
        """Get frame at specific index - always accurate"""
        # Clamp frame number to valid range
        frame_number = max(0, min(frame_number, self.total_frames - 1))

        # Check cache first
        cached_frame = self._get_from_cache(frame_number)
        if cached_frame is not None:
            self.current_frame = frame_number
            return cached_frame

        # Not in cache, seek to frame and read
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
        ret, frame = self.cap.read()

        if ret:
            self._add_to_cache(frame_number, frame)
            self.current_frame = frame_number
            return frame
        else:
            raise ValueError(f"Failed to read frame {frame_number}")

    def advance_frame(self, frames=1):
        """Advance by specified number of frames"""
        new_frame = self.current_frame + frames
        return self.get_frame(new_frame)

    def release(self):
        """Release the video capture"""
        if self.cap:
            self.cap.release()

    def isOpened(self):
        """Check if capture is opened"""
        return self.cap and self.cap.isOpened()

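# Usage sketch for Cv2BufferedCap (illustrative only; "sample.mp4" is a
# hypothetical file, and CAP_FFMPEG availability depends on the OpenCV build):
#
#     cap = Cv2BufferedCap(Path("sample.mp4"), cv2.CAP_FFMPEG)
#     frame = cap.get_frame(100)    # seeks, decodes, and caches frame 100
#     frame = cap.advance_frame(5)  # frame 105; served from cache if present
#     cap.release()
#
# Repeated reads of the same frame hit the LRU cache; once more than
# MAX_CACHE_FRAMES (3000) frames are cached, the least recently used entry
# is evicted.

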
def get_active_window_title():
    """Get the title of the currently active window (Windows only, via user32)"""
    try:
        # Get handle to foreground window
        hwnd = ctypes.windll.user32.GetForegroundWindow()

        # Get window title length
        length = ctypes.windll.user32.GetWindowTextLengthW(hwnd)

        # Create buffer and get window title
        buffer = ctypes.create_unicode_buffer(length + 1)
        ctypes.windll.user32.GetWindowTextW(hwnd, buffer, length + 1)

        return buffer.value
    except Exception:
        return ""


class ProjectView:
    """Project view that displays videos in the current directory with progress bars"""

    # Project view configuration
    THUMBNAIL_SIZE = (200, 150)  # Width, Height
    THUMBNAIL_MARGIN = 20
    PROGRESS_BAR_HEIGHT = 8
    TEXT_HEIGHT = 30

    # Colors (BGR)
    BG_COLOR = (40, 40, 40)
    THUMBNAIL_BG_COLOR = (60, 60, 60)
    PROGRESS_BG_COLOR = (80, 80, 80)
    PROGRESS_FILL_COLOR = (0, 120, 255)
    TEXT_COLOR = (255, 255, 255)
    SELECTED_COLOR = (255, 165, 0)

    def __init__(self, directory: Path, video_editor):
        self.directory = directory
        self.video_editor = video_editor
        self.video_files = []
        self.thumbnails = {}
        self.progress_data = {}
        self.selected_index = 0
        self.scroll_offset = 0
        self.items_per_row = 2  # Default to 2 items per row
        self.window_width = 1200
        self.window_height = 800

        self._load_video_files()
        self._load_progress_data()

    def _calculate_thumbnail_size(self, window_width: int) -> tuple:
        """Calculate thumbnail size based on items per row and window width"""
        available_width = window_width - self.THUMBNAIL_MARGIN
        item_width = (available_width - (self.items_per_row - 1) * self.THUMBNAIL_MARGIN) // self.items_per_row
        thumbnail_width = max(50, item_width)  # Minimum 50px width
        thumbnail_height = int(thumbnail_width * self.THUMBNAIL_SIZE[1] / self.THUMBNAIL_SIZE[0])  # Maintain aspect ratio
        return (thumbnail_width, thumbnail_height)

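    # Worked example (hedged): for a 1200 px wide window and 2 items per row,
    # available_width = 1200 - 20 = 1180 and item_width = (1180 - 20) // 2 = 580,
    # so thumbnails come out 580x435 (the 4:3 aspect of THUMBNAIL_SIZE preserved).
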
    def _load_video_files(self):
        """Load all video files from the directory"""
        self.video_files = []
        for file_path in self.directory.iterdir():
            if (file_path.is_file() and
                    file_path.suffix.lower() in self.video_editor.VIDEO_EXTENSIONS):
                self.video_files.append(file_path)
        self.video_files.sort(key=lambda x: x.name)

    def _load_progress_data(self):
        """Load progress data from JSON state files"""
        self.progress_data = {}
        for video_path in self.video_files:
            state_file = video_path.with_suffix('.json')
            if state_file.exists():
                try:
                    with open(state_file, 'r') as f:
                        state = json.load(f)
                        current_frame = state.get('current_frame', 0)

                    # Get total frames from the video itself
                    cap = cv2.VideoCapture(str(video_path))
                    if cap.isOpened():
                        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                        cap.release()

                        if total_frames > 0:
                            # Guard against division by zero for single-frame videos
                            progress = current_frame / max(1, total_frames - 1)
                            self.progress_data[video_path] = {
                                'current_frame': current_frame,
                                'total_frames': total_frames,
                                'progress': progress
                            }
                except Exception as e:
                    print(f"Error loading progress for {video_path.name}: {e}")

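    # The state file is the sidecar JSON written by VideoEditor.save_state();
    # only 'current_frame' is read here, so a minimal valid file would look
    # like (illustrative value):
    #
    #     {"current_frame": 1234}
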
    def refresh_progress_data(self):
        """Refresh progress data from JSON files (call when editor state changes)"""
        self._load_progress_data()

    def get_progress_for_video(self, video_path: Path) -> float:
        """Get progress (0.0 to 1.0) for a video"""
        if video_path in self.progress_data:
            return self.progress_data[video_path]['progress']
        return 0.0

    def get_thumbnail_for_video(self, video_path: Path, size: tuple = None) -> np.ndarray:
        """Get thumbnail for a video, generating it if needed"""
        if size is None:
            size = self.THUMBNAIL_SIZE

        # Cache the original thumbnail by video path only (not size)
        if video_path in self.thumbnails:
            original_thumbnail = self.thumbnails[video_path]
            # Resize the cached thumbnail to the requested size
            return cv2.resize(original_thumbnail, size)

        # Generate the original thumbnail on demand (only once per video)
        try:
            cap = cv2.VideoCapture(str(video_path))
            if cap.isOpened():
                total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                if total_frames > 0:
                    middle_frame = total_frames // 2
                    cap.set(cv2.CAP_PROP_POS_FRAMES, middle_frame)
                    ret, frame = cap.read()
                    if ret:
                        # Store the original thumbnail at the default size
                        original_thumbnail = cv2.resize(frame, self.THUMBNAIL_SIZE)
                        self.thumbnails[video_path] = original_thumbnail
                        cap.release()
                        # Return the resized version
                        return cv2.resize(original_thumbnail, size)
                cap.release()
        except Exception as e:
            print(f"Error generating thumbnail for {video_path.name}: {e}")

        # Return a placeholder if thumbnail generation failed
        placeholder = np.full((size[1], size[0], 3),
                              self.THUMBNAIL_BG_COLOR, dtype=np.uint8)
        return placeholder

    def draw(self) -> np.ndarray:
        """Draw the project view"""
        # Get the actual window size dynamically
        try:
            # Try to get the actual window size from OpenCV
            window_rect = cv2.getWindowImageRect("Project View")
            if window_rect[2] > 0 and window_rect[3] > 0:  # width and height > 0
                actual_width = window_rect[2]
                actual_height = window_rect[3]
            else:
                # Fall back to the default size
                actual_width = self.window_width
                actual_height = self.window_height
        except Exception:
            # Fall back to the default size
            actual_width = self.window_width
            actual_height = self.window_height

        canvas = np.full((actual_height, actual_width, 3), self.BG_COLOR, dtype=np.uint8)

        if not self.video_files:
            # No videos message
            text = "No videos found in directory"
            font = cv2.FONT_HERSHEY_SIMPLEX
            text_size = cv2.getTextSize(text, font, 1.0, 2)[0]
            text_x = (actual_width - text_size[0]) // 2
            text_y = (actual_height - text_size[1]) // 2
            cv2.putText(canvas, text, (text_x, text_y), font, 1.0, self.TEXT_COLOR, 2)
            return canvas

        # Calculate layout - use fixed items_per_row and calculate thumbnail size to fit
        items_per_row = min(self.items_per_row, len(self.video_files))  # Don't exceed number of videos

        # Calculate thumbnail size to fit the desired number of items per row
        thumbnail_width, thumbnail_height = self._calculate_thumbnail_size(actual_width)

        # Calculate item height dynamically based on thumbnail size
        item_height = thumbnail_height + self.PROGRESS_BAR_HEIGHT + self.TEXT_HEIGHT + self.THUMBNAIL_MARGIN

        item_width = (actual_width - (items_per_row + 1) * self.THUMBNAIL_MARGIN) // items_per_row

        # Draw videos in a grid
        for i, video_path in enumerate(self.video_files):
            row = i // items_per_row
            col = i % items_per_row

            # Skip if scrolled out of view
            if row < self.scroll_offset:
                continue
            if row > self.scroll_offset + (actual_height // item_height):
                break

            # Calculate position
            x = self.THUMBNAIL_MARGIN + col * (item_width + self.THUMBNAIL_MARGIN)
            y = self.THUMBNAIL_MARGIN + (row - self.scroll_offset) * item_height

            # Draw thumbnail background
            cv2.rectangle(canvas,
                          (x, y),
                          (x + thumbnail_width, y + thumbnail_height),
                          self.THUMBNAIL_BG_COLOR, -1)

            # Draw selection highlight
            if i == self.selected_index:
                cv2.rectangle(canvas,
                              (x - 2, y - 2),
                              (x + thumbnail_width + 2, y + thumbnail_height + 2),
                              self.SELECTED_COLOR, 3)

            # Draw thumbnail (already requested at the correct size)
            thumbnail = self.get_thumbnail_for_video(video_path, (thumbnail_width, thumbnail_height))
            resized_thumbnail = thumbnail

            # Ensure the thumbnail doesn't exceed canvas bounds
            end_y = min(y + thumbnail_height, actual_height)
            end_x = min(x + thumbnail_width, actual_width)
            thumb_height = end_y - y
            thumb_width = end_x - x

            if thumb_height > 0 and thumb_width > 0:
                # Resize the thumbnail to fit within bounds if necessary
                if thumb_height != thumbnail_height or thumb_width != thumbnail_width:
                    resized_thumbnail = cv2.resize(thumbnail, (thumb_width, thumb_height))

                canvas[y:end_y, x:end_x] = resized_thumbnail

            # Draw progress bar
            progress_y = y + thumbnail_height + 5
            progress_width = thumbnail_width
            progress = self.get_progress_for_video(video_path)

            # Progress background
            cv2.rectangle(canvas,
                          (x, progress_y),
                          (x + progress_width, progress_y + self.PROGRESS_BAR_HEIGHT),
                          self.PROGRESS_BG_COLOR, -1)

            # Progress fill
            if progress > 0:
                fill_width = int(progress_width * progress)
                cv2.rectangle(canvas,
                              (x, progress_y),
                              (x + fill_width, progress_y + self.PROGRESS_BAR_HEIGHT),
                              self.PROGRESS_FILL_COLOR, -1)

            # Draw filename, truncated if too long
            filename = video_path.name
            if len(filename) > 25:
                filename = filename[:22] + "..."

            text_y = progress_y + self.PROGRESS_BAR_HEIGHT + 20
            cv2.putText(canvas, filename, (x, text_y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, self.TEXT_COLOR, 2)

            # Draw progress percentage
            if video_path in self.progress_data:
                progress_text = f"{progress * 100:.0f}%"
                text_size = cv2.getTextSize(progress_text, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1)[0]
                progress_text_x = x + progress_width - text_size[0]
                cv2.putText(canvas, progress_text, (progress_text_x, text_y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.4, self.TEXT_COLOR, 1)

        # Draw instructions
        instructions = [
            "Project View - Videos in current directory",
            "WASD: Navigate | E: Open video | Q: Fewer items per row | Y: More items per row | q: Quit | ESC: Back to editor",
            f"Showing {len(self.video_files)} videos | {items_per_row} per row | Thumbnail: {thumbnail_width}x{thumbnail_height}"
        ]

        for i, instruction in enumerate(instructions):
            y_pos = actual_height - 60 + i * 20
            cv2.putText(canvas, instruction, (10, y_pos),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, self.TEXT_COLOR, 1)

        return canvas

    def handle_key(self, key: int) -> str:
        """Handle keyboard input; returns the action taken"""
        if key == 27:  # ESC
            return "back_to_editor"
        elif key == ord('q'):  # lowercase q - Quit
            return "quit"
        elif key == ord('e') or key == ord('E'):  # E - Open video
            if self.video_files and 0 <= self.selected_index < len(self.video_files):
                return f"open_video:{self.video_files[self.selected_index]}"
        elif key == ord('w') or key == ord('W'):  # W - Up
            current_items_per_row = min(self.items_per_row, len(self.video_files))
            if self.selected_index >= current_items_per_row:
                self.selected_index -= current_items_per_row
            else:
                self.selected_index = 0
            self._update_scroll()
        elif key == ord('s') or key == ord('S'):  # S - Down
            current_items_per_row = min(self.items_per_row, len(self.video_files))
            if self.selected_index + current_items_per_row < len(self.video_files):
                self.selected_index += current_items_per_row
            else:
                self.selected_index = len(self.video_files) - 1
            self._update_scroll()
        elif key == ord('a') or key == ord('A'):  # A - Left
            if self.selected_index > 0:
                self.selected_index -= 1
            self._update_scroll()
        elif key == ord('d') or key == ord('D'):  # D - Right
            if self.selected_index < len(self.video_files) - 1:
                self.selected_index += 1
            self._update_scroll()
        elif key == ord('Q'):  # uppercase Q - Fewer items per row (larger thumbnails)
            if self.items_per_row > 1:
                self.items_per_row -= 1
                print(f"Items per row: {self.items_per_row}")
        elif key == ord('y') or key == ord('Y'):  # Y - More items per row (smaller thumbnails)
            self.items_per_row += 1
            print(f"Items per row: {self.items_per_row}")

        return "none"

    def _update_scroll(self):
        """Update the scroll offset so the selected item stays visible"""
        if not self.video_files:
            return

        # Use fixed items per row
        items_per_row = min(self.items_per_row, len(self.video_files))

        # Get window dimensions for the calculations
        try:
            window_rect = cv2.getWindowImageRect("Project View")
            if window_rect[2] > 0 and window_rect[3] > 0:
                window_width = window_rect[2]
                window_height = window_rect[3]
            else:
                window_width = self.window_width
                window_height = self.window_height
        except Exception:
            window_width = self.window_width
            window_height = self.window_height

        # Calculate thumbnail size and item height dynamically
        thumbnail_width, thumbnail_height = self._calculate_thumbnail_size(window_width)
        item_height = thumbnail_height + self.PROGRESS_BAR_HEIGHT + self.TEXT_HEIGHT + self.THUMBNAIL_MARGIN

        selected_row = self.selected_index // items_per_row
        visible_rows = max(1, window_height // item_height)

        # Calculate how many rows we can actually show
        total_rows = (len(self.video_files) + items_per_row - 1) // items_per_row

        # If we can show all rows, no scrolling is needed
        if total_rows <= visible_rows:
            self.scroll_offset = 0
            return

        # Update scroll to keep the selected item visible
        if selected_row < self.scroll_offset:
            self.scroll_offset = selected_row
        elif selected_row >= self.scroll_offset + visible_rows:
            self.scroll_offset = selected_row - visible_rows + 1

        # Ensure the scroll offset doesn't go negative or beyond available content
        self.scroll_offset = max(0, min(self.scroll_offset, total_rows - visible_rows))


class VideoEditor:
    # Configuration constants
    BASE_FRAME_DELAY_MS = 16  # ~60 FPS
    SPEED_INCREMENT = 0.2
    MIN_PLAYBACK_SPEED = 0.1
    MAX_PLAYBACK_SPEED = 10.0

    # Seek multiplier configuration
    SEEK_MULTIPLIER_INCREMENT = 2.0
    MIN_SEEK_MULTIPLIER = 1.0
    MAX_SEEK_MULTIPLIER = 100.0

    # Auto-repeat seeking configuration
    AUTO_REPEAT_DISPLAY_RATE = 1.0

    # Timeline configuration
    TIMELINE_HEIGHT = 60
    TIMELINE_MARGIN = 20
    TIMELINE_BAR_HEIGHT = 12
    TIMELINE_HANDLE_SIZE = 12
    TIMELINE_COLOR_BG = (80, 80, 80)
    TIMELINE_COLOR_PROGRESS = (0, 120, 255)
    TIMELINE_COLOR_HANDLE = (255, 255, 255)
    TIMELINE_COLOR_BORDER = (200, 200, 200)
    TIMELINE_COLOR_CUT_POINT = (255, 0, 0)

    # Progress bar configuration
    PROGRESS_BAR_HEIGHT = 30
    PROGRESS_BAR_MARGIN_PERCENT = 5  # 5% margin on each side
    PROGRESS_BAR_TOP_MARGIN = 20  # Fixed top margin
    PROGRESS_BAR_FADE_DURATION = 3.0  # seconds to fade out after completion
    PROGRESS_BAR_COLOR_BG = (50, 50, 50)
    PROGRESS_BAR_COLOR_FILL = (0, 255, 0)  # Green when complete
    PROGRESS_BAR_COLOR_PROGRESS = (0, 120, 255)  # Blue during progress
    PROGRESS_BAR_COLOR_BORDER = (200, 200, 200)

    # Zoom and crop settings
    MIN_ZOOM = 0.1
    MAX_ZOOM = 10.0
    ZOOM_INCREMENT = 0.1

    # Supported video extensions
    VIDEO_EXTENSIONS = {".mp4", ".avi", ".mov", ".mkv", ".wmv", ".flv", ".webm", ".m4v"}

    # Supported image extensions
    IMAGE_EXTENSIONS = {".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".tif", ".webp", ".jp2", ".pbm", ".pgm", ".ppm", ".sr", ".ras"}

    # Crop adjustment settings
    CROP_SIZE_STEP = 15  # pixels to expand/contract crop

    def __init__(self, path: str):
        self.path = Path(path)

        # Video file management
        self.video_files = []
        self.current_video_index = 0

        # Media type tracking
        self.is_image_mode = False  # True if current file is an image

        # Determine if path is file or directory
        if self.path.is_file():
            self.video_files = [self.path]
        elif self.path.is_dir():
            # Load all media files from directory
            self.video_files = self._get_media_files_from_directory(self.path)
            if not self.video_files:
                raise ValueError(f"No media files found in directory: {path}")
        else:
            raise ValueError(f"Path does not exist: {path}")

        # Mouse and keyboard interaction
        self.mouse_dragging = False
        self.timeline_rect = None
        self.window_width = 1200
        self.window_height = 800

        # Auto-repeat seeking state
        self.auto_repeat_active = False
        self.auto_repeat_direction = 0
        self.auto_repeat_shift = False
        self.auto_repeat_ctrl = False
        self.last_display_update = 0

        # Crop settings
        self.crop_rect = None  # (x, y, width, height)
        self.crop_selecting = False
        self.crop_start_point = None
        self.crop_preview_rect = None
        self.crop_history = []  # For undo

        # Zoom settings
        self.zoom_factor = 1.0
        self.zoom_center = None  # (x, y) center point for zoom

        # Rotation settings
        self.rotation_angle = 0  # 0, 90, 180, 270 degrees

        # Brightness and contrast settings
        self.brightness = 0  # -100 to 100
        self.contrast = 1.0  # 0.1 to 3.0

        # Marker looping state
        self.looping_between_markers = False

        # Display offset for panning when zoomed
        self.display_offset = [0, 0]

        # Fullscreen state
        self.is_fullscreen = False

        # Progress bar state
        self.progress_bar_visible = False
        self.progress_bar_progress = 0.0  # 0.0 to 1.0
        self.progress_bar_complete = False
        self.progress_bar_complete_time = None
        self.progress_bar_text = ""
        self.progress_bar_fps = 0.0  # Current rendering FPS

        # Feedback message state
        self.feedback_message = ""
        self.feedback_message_time = None
        self.feedback_message_duration = 0.5  # seconds to show message

        # Crop adjustment settings
        self.crop_size_step = self.CROP_SIZE_STEP

        # Render thread management
        self.render_thread = None
        self.render_cancelled = False
        self.render_progress_queue = queue.Queue()
        self.ffmpeg_process = None  # Track FFmpeg process for cancellation

        # Display optimization - track when redraw is needed
        self.display_needs_update = True
        self.last_display_state = None

        # Cached transformations for performance
        self.cached_transformed_frame = None
        self.cached_frame_number = None
        self.cached_transform_hash = None

        # Motion tracking state
        self.tracking_points = {}  # {frame_number: [(x, y), ...]} in original frame coords
        self.tracking_enabled = False

        # Project view mode
        self.project_view_mode = False
        self.project_view = None

        # Initialize with first video
        self._load_video(self.video_files[0])

        # Load saved state after all attributes are initialized
        self.load_state()

    def _get_state_file_path(self) -> Path:
        """Get the state file path for the current media file"""
        if not hasattr(self, 'video_path') or not self.video_path:
            print("DEBUG: No video_path available for state file")
            return None
        state_path = self.video_path.with_suffix('.json')
        print(f"DEBUG: State file path would be: {state_path}")
        return state_path

    def save_state(self):
        """Save current editor state to JSON file"""
        state_file = self._get_state_file_path()
        if not state_file:
            print("No state file path available")
            return False

        try:
            state = {
                'timestamp': time.time(),
                'current_frame': getattr(self, 'current_frame', 0),
                'crop_rect': self.crop_rect,
                'zoom_factor': self.zoom_factor,
                'zoom_center': self.zoom_center,
                'rotation_angle': self.rotation_angle,
                'brightness': self.brightness,
                'contrast': self.contrast,
                'cut_start_frame': self.cut_start_frame,
                'cut_end_frame': self.cut_end_frame,
                'looping_between_markers': self.looping_between_markers,
                'display_offset': self.display_offset,
                'playback_speed': getattr(self, 'playback_speed', 1.0),
                'seek_multiplier': getattr(self, 'seek_multiplier', 1.0),
                'is_playing': getattr(self, 'is_playing', False),
                'tracking_enabled': self.tracking_enabled,
                'tracking_points': {str(k): v for k, v in self.tracking_points.items()}
            }

            with open(state_file, 'w') as f:
                json.dump(state, f, indent=2)
            print(f"State saved to {state_file}")

            # Refresh project view progress data if project view is active
            if self.project_view_mode and self.project_view:
                self.project_view.refresh_progress_data()

            return True
        except Exception as e:
            print(f"Error saving state: {e}")
            return False

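    # A saved sidecar file (video.json next to video.mp4) looks roughly like
    # this (illustrative values; tracking_points keys are stringified frame
    # numbers because JSON object keys must be strings):
    #
    #     {
    #       "timestamp": 1700000000.0,
    #       "current_frame": 1234,
    #       "crop_rect": [100, 50, 640, 360],
    #       "zoom_factor": 1.0,
    #       "rotation_angle": 90,
    #       "tracking_points": {"120": [[320.0, 240.0]]}
    #     }
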
    def load_state(self) -> bool:
        """Load editor state from JSON file"""
        state_file = self._get_state_file_path()
        if not state_file:
            print("No state file path available")
            return False
        if not state_file.exists():
            print(f"State file does not exist: {state_file}")
            return False

        print(f"Loading state from: {state_file}")
        try:
            with open(state_file, 'r') as f:
                state = json.load(f)

            print(f"State file contents: {state}")

            # Restore state values
            if 'current_frame' in state:
                self.current_frame = state['current_frame']
                print(f"Loaded current_frame: {self.current_frame}")
            if 'crop_rect' in state and state['crop_rect'] is not None:
                self.crop_rect = tuple(state['crop_rect'])
                print(f"Loaded crop_rect: {self.crop_rect}")
            if 'zoom_factor' in state:
                self.zoom_factor = state['zoom_factor']
                print(f"Loaded zoom_factor: {self.zoom_factor}")
            if 'zoom_center' in state and state['zoom_center'] is not None:
                self.zoom_center = tuple(state['zoom_center'])
                print(f"Loaded zoom_center: {self.zoom_center}")
            if 'rotation_angle' in state:
                self.rotation_angle = state['rotation_angle']
                print(f"Loaded rotation_angle: {self.rotation_angle}")
            if 'brightness' in state:
                self.brightness = state['brightness']
                print(f"Loaded brightness: {self.brightness}")
            if 'contrast' in state:
                self.contrast = state['contrast']
                print(f"Loaded contrast: {self.contrast}")
            if 'cut_start_frame' in state:
                self.cut_start_frame = state['cut_start_frame']
                print(f"Loaded cut_start_frame: {self.cut_start_frame}")
            if 'cut_end_frame' in state:
                self.cut_end_frame = state['cut_end_frame']
                print(f"Loaded cut_end_frame: {self.cut_end_frame}")
            if 'looping_between_markers' in state:
                self.looping_between_markers = state['looping_between_markers']
                print(f"Loaded looping_between_markers: {self.looping_between_markers}")
            if 'display_offset' in state:
                self.display_offset = state['display_offset']
                print(f"Loaded display_offset: {self.display_offset}")
            if 'playback_speed' in state:
                self.playback_speed = state['playback_speed']
                print(f"Loaded playback_speed: {self.playback_speed}")
            if 'seek_multiplier' in state:
                self.seek_multiplier = state['seek_multiplier']
                print(f"Loaded seek_multiplier: {self.seek_multiplier}")
            if 'is_playing' in state:
                self.is_playing = state['is_playing']
                print(f"Loaded is_playing: {self.is_playing}")
            if 'tracking_enabled' in state:
                self.tracking_enabled = state['tracking_enabled']
                print(f"Loaded tracking_enabled: {self.tracking_enabled}")
            if 'tracking_points' in state and isinstance(state['tracking_points'], dict):
                self.tracking_points = {int(k): v for k, v in state['tracking_points'].items()}
                print(f"Loaded tracking_points: {sum(len(v) for v in self.tracking_points.values())} points")

            # Validate cut markers against current video length
            if self.cut_start_frame is not None and self.cut_start_frame >= self.total_frames:
                print(f"DEBUG: cut_start_frame {self.cut_start_frame} is beyond video length {self.total_frames}, clearing")
                self.cut_start_frame = None
            if self.cut_end_frame is not None and self.cut_end_frame >= self.total_frames:
                print(f"DEBUG: cut_end_frame {self.cut_end_frame} is beyond video length {self.total_frames}, clearing")
                self.cut_end_frame = None

            # Calculate and show marker positions on timeline
            if self.cut_start_frame is not None and self.cut_end_frame is not None:
                start_progress = self.cut_start_frame / max(1, self.total_frames - 1)
                end_progress = self.cut_end_frame / max(1, self.total_frames - 1)
                print(f"Markers will be drawn at: Start {start_progress:.4f} ({self.cut_start_frame}/{self.total_frames}), End {end_progress:.4f} ({self.cut_end_frame}/{self.total_frames})")

            # Validate and clamp values
            self.current_frame = max(0, min(self.current_frame, getattr(self, 'total_frames', 1) - 1))
            self.zoom_factor = max(self.MIN_ZOOM, min(self.MAX_ZOOM, self.zoom_factor))
            self.brightness = max(-100, min(100, self.brightness))
            self.contrast = max(0.1, min(3.0, self.contrast))
            self.playback_speed = max(self.MIN_PLAYBACK_SPEED, min(self.MAX_PLAYBACK_SPEED, self.playback_speed))
            self.seek_multiplier = max(self.MIN_SEEK_MULTIPLIER, min(self.MAX_SEEK_MULTIPLIER, self.seek_multiplier))

            # Apply loaded settings
            self.clear_transformation_cache()
            self.load_current_frame()

            print("Successfully loaded and applied all settings from state file")
            return True
        except Exception as e:
            print(f"Error loading state: {e}")
            return False

    def _is_video_file(self, file_path: Path) -> bool:
        """Check if file is a supported video format"""
        return file_path.suffix.lower() in self.VIDEO_EXTENSIONS

    def _is_image_file(self, file_path: Path) -> bool:
        """Check if file is a supported image format"""
        return file_path.suffix.lower() in self.IMAGE_EXTENSIONS

    def _is_media_file(self, file_path: Path) -> bool:
        """Check if file is a supported media format (video or image)"""
        return self._is_video_file(file_path) or self._is_image_file(file_path)

    def _get_next_screenshot_filename(self, video_path: Path) -> str:
        """Generate the next available screenshot filename: video_frame_00001.jpg, video_frame_00002.jpg, etc."""
        directory = video_path.parent
        base_name = video_path.stem

        # Pattern to match existing screenshot files: video_frame_00001.jpg, video_frame_00002.jpg, etc.
        pattern = re.compile(rf"^{re.escape(base_name)}_frame_(\d{{5}})\.(jpg|jpeg|png)$")

        existing_numbers = set()
        for file_path in directory.iterdir():
            if file_path.is_file():
                match = pattern.match(file_path.name)
                if match:
                    existing_numbers.add(int(match.group(1)))

        # Find the next available number starting from 1
        next_number = 1
        while next_number in existing_numbers:
            next_number += 1

        return f"{base_name}_frame_{next_number:05d}.jpg"

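    # Example (hedged): with "clip_frame_00001.jpg" and "clip_frame_00003.jpg"
    # already on disk, the scan above fills the gap and returns
    # "clip_frame_00002.jpg" rather than appending at the end.
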
    def save_current_frame(self):
        """Save the current frame as a screenshot"""
        if self.current_display_frame is None:
            print("No frame to save")
            return False

        # Generate the next available screenshot filename
        screenshot_name = self._get_next_screenshot_filename(self.video_path)
        screenshot_path = self.video_path.parent / screenshot_name

        # Apply current transformations (crop, zoom, rotation, brightness/contrast) to the frame
        processed_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame.copy())

        if processed_frame is not None:
            # Save the processed frame
            success = cv2.imwrite(str(screenshot_path), processed_frame)
            if success:
                print(f"Screenshot saved: {screenshot_name}")
                self.show_feedback_message(f"Screenshot saved: {screenshot_name}")
                return True
            else:
                print(f"Error: Could not save screenshot to {screenshot_path}")
                self.show_feedback_message("Error: Could not save screenshot")
                return False
        else:
            print("Error: Could not process frame for screenshot")
            self.show_feedback_message("Error: Could not process frame")
            return False

    def _get_media_files_from_directory(self, directory: Path) -> List[Path]:
        """Get all media files (video and image) from a directory, sorted by name"""
        media_files = set()
        for file_path in directory.iterdir():
            if (
                file_path.is_file()
                and self._is_media_file(file_path)
            ):
                media_files.add(file_path)

        # Pattern to match edited files: basename_edited_001.ext, basename_edited_002.ext, etc.
        edited_pattern = re.compile(r"^(.+)_edited_\d{3}$")

        edited_base_names = set()
        for file_path in media_files:
            match = edited_pattern.match(file_path.stem)
            if match:
                edited_base_names.add(match.group(1))

        non_edited_media = set()
        for file_path in media_files:
            # Skip if this is an edited file
            if edited_pattern.match(file_path.stem):
                continue

            # Skip if there's already an edited version of this file
            if file_path.stem in edited_base_names:
                continue

            non_edited_media.add(file_path)

        return sorted(non_edited_media)

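    # Example (hedged): for a directory containing clip.mp4, clip_edited_001.mp4,
    # and other.mp4, only other.mp4 is returned - the edited render is skipped by
    # the first check, and clip.mp4 is skipped because an edited version exists.
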
    def _load_video(self, media_path: Path):
        """Load a media file (video or image) and initialize properties"""
        if hasattr(self, "cap") and self.cap:
            self.cap.release()

        self.video_path = media_path
        self.is_image_mode = self._is_image_file(media_path)

        if self.is_image_mode:
            # Load static image
            self.static_image = cv2.imread(str(media_path))
            if self.static_image is None:
                raise ValueError(f"Could not load image file: {media_path}")

            # Set up image properties to mimic video interface
            self.frame_height, self.frame_width = self.static_image.shape[:2]
            self.total_frames = 1
            self.fps = 30  # Dummy FPS for image mode
            self.cap = None

            print(f"Loaded image: {self.video_path.name}")
            print(f"  Resolution: {self.frame_width}x{self.frame_height}")
        else:
            # Try different backends for better performance.
            # Order of preference: FFmpeg (best for video files), DirectShow (cameras), any available
            backends_to_try = []
            if hasattr(cv2, 'CAP_FFMPEG'):  # FFmpeg - best for video files
                backends_to_try.append(cv2.CAP_FFMPEG)
            if hasattr(cv2, 'CAP_DSHOW'):  # DirectShow - usually for cameras
                backends_to_try.append(cv2.CAP_DSHOW)
            backends_to_try.append(cv2.CAP_ANY)  # Fallback

            self.cap = None
            for backend in backends_to_try:
                try:
                    self.cap = Cv2BufferedCap(self.video_path, backend)
                    if self.cap.isOpened():
                        break
                except Exception:
                    continue

            if not self.cap or not self.cap.isOpened():
                raise ValueError(f"Could not open video file: {media_path}")

            # Video properties from buffered cap
            self.total_frames = self.cap.total_frames
            self.fps = self.cap.fps
            self.frame_width = self.cap.frame_width
            self.frame_height = self.cap.frame_height

            # Get codec information for debugging
            fourcc = int(self.cap.cap.get(cv2.CAP_PROP_FOURCC))
            codec = "".join([chr((fourcc >> 8 * i) & 0xFF) for i in range(4)])

            # Get backend information (backend holds the last value tried in the loop)
            backend_name = "FFmpeg" if hasattr(cv2, 'CAP_FFMPEG') and backend == cv2.CAP_FFMPEG else "Other"

            print(f"Loaded video: {self.video_path.name} ({self.current_video_index + 1}/{len(self.video_files)})")
            print(f"  Codec: {codec} | Backend: {backend_name} | Resolution: {self.frame_width}x{self.frame_height}")
            print(f"  FPS: {self.fps:.2f} | Frames: {self.total_frames} | Duration: {self.total_frames/self.fps:.1f}s")

            # Performance warnings for known problematic cases
            if codec in ['H264', 'H.264', 'AVC1', 'avc1'] and self.total_frames > 10000:
                print("  Warning: Large H.264 video detected - seeking may be slow")
            if self.frame_width * self.frame_height > 1920 * 1080:
                print("  Warning: High resolution video - decoding may be slow")
            if self.fps > 60:
                print("  Warning: High framerate video - may impact playback smoothness")

        # Set default values for video-specific properties
        self.current_frame = 0
        self.is_playing = False  # Both videos and images start paused
        self.playback_speed = 1.0
        self.seek_multiplier = 1.0
        self.cut_start_frame = None
        self.cut_end_frame = None

        # Always reset these regardless of state
        self.current_display_frame = None

    def switch_to_video(self, index: int):
        """Switch to a specific video by index"""
        if 0 <= index < len(self.video_files):
            self.current_video_index = index
            self._load_video(self.video_files[index])
            self.load_current_frame()

    def next_video(self):
        """Switch to the next video"""
        self.save_state()  # Save current video state before switching
        next_index = (self.current_video_index + 1) % len(self.video_files)
        self.switch_to_video(next_index)

    def previous_video(self):
        """Switch to the previous video"""
        self.save_state()  # Save current video state before switching
        prev_index = (self.current_video_index - 1) % len(self.video_files)
        self.switch_to_video(prev_index)

    def load_current_frame(self) -> bool:
        """Load the current frame into display cache"""
        if self.is_image_mode:
            # For images, just copy the static image
            self.current_display_frame = self.static_image.copy()
            return True
        else:
            # Use buffered cap to get frame
            try:
                self.current_display_frame = self.cap.get_frame(self.current_frame)
                return True
            except Exception as e:
                print(f"Failed to load frame {self.current_frame}: {e}")
                return False

    def calculate_frame_delay(self) -> int:
        """Calculate frame delay in milliseconds based on playback speed"""
        delay_ms = int(self.BASE_FRAME_DELAY_MS / self.playback_speed)
        return max(1, delay_ms)

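    # Worked example (hedged): at playback_speed 2.0 the delay is
    # int(16 / 2.0) = 8 ms per displayed frame; at 0.1 it is 160 ms. The
    # max(1, ...) floor keeps very high speeds from requesting a 0 ms wait,
    # which cv2.waitKey would treat as "block forever".
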
    def seek_video(self, frames_delta: int):
        """Seek video by specified number of frames"""
        target_frame = max(
            0, min(self.current_frame + frames_delta, self.total_frames - 1)
        )
        self.current_frame = target_frame
        self.load_current_frame()
        self.display_needs_update = True

    def seek_video_with_modifier(
        self, direction: int, shift_pressed: bool, ctrl_pressed: bool
    ):
        """Seek video with different frame counts based on modifiers and seek multiplier"""
        if ctrl_pressed:
            base_frames = 60  # Ctrl: 60 frames
        elif shift_pressed:
            base_frames = 10  # Shift: 10 frames
        else:
            base_frames = 1  # Default: 1 frame

        # Apply seek multiplier to the base frame count
        frames = direction * int(base_frames * self.seek_multiplier)
        self.seek_video(frames)

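    # Worked example (hedged): Ctrl held, seek_multiplier 2.0, direction -1
    # gives -1 * int(60 * 2.0) = -120, i.e. a 120-frame jump backwards,
    # clamped to frame 0 by seek_video.
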
    def start_auto_repeat_seek(self, direction: int, shift_pressed: bool, ctrl_pressed: bool):
        """Start auto-repeat seeking"""
        if self.is_image_mode:
            return

        self.auto_repeat_active = True
        self.auto_repeat_direction = direction
        self.auto_repeat_shift = shift_pressed
        self.auto_repeat_ctrl = ctrl_pressed

        # Initialize last_display_update to prevent immediate auto-repeat
        self.last_display_update = time.time()

        self.seek_video_with_modifier(direction, shift_pressed, ctrl_pressed)

    def stop_auto_repeat_seek(self):
        """Stop auto-repeat seeking"""
        self.auto_repeat_active = False
        self.auto_repeat_direction = 0
        self.auto_repeat_shift = False
        self.auto_repeat_ctrl = False

    def update_auto_repeat_seek(self):
        """Repeat the seek once per AUTO_REPEAT_DISPLAY_RATE seconds while active"""
        if not self.auto_repeat_active or self.is_image_mode:
            return

        current_time = time.time()

        if current_time - self.last_display_update >= self.AUTO_REPEAT_DISPLAY_RATE:
            self.seek_video_with_modifier(
                self.auto_repeat_direction,
                self.auto_repeat_shift,
                self.auto_repeat_ctrl
            )
            self.last_display_update = current_time

    def seek_to_frame(self, frame_number: int):
        """Seek to specific frame"""
        self.current_frame = max(0, min(frame_number, self.total_frames - 1))
        self.load_current_frame()

    def advance_frame(self) -> bool:
        """Advance to next frame - handles playback speed and marker looping"""
        if not self.is_playing:
            return True

        # Calculate how many frames to advance based on speed
        frames_to_advance = max(1, int(self.playback_speed))
        new_frame = self.current_frame + frames_to_advance

        # Handle marker looping bounds
        if self.looping_between_markers and self.cut_start_frame is not None and self.cut_end_frame is not None:
            if new_frame >= self.cut_end_frame:
                # Loop back to start marker
                new_frame = self.cut_start_frame
        elif new_frame >= self.total_frames:
            # Loop to beginning
            new_frame = 0

        # Update current frame and load it
        self.current_frame = new_frame
        return self.load_current_frame()

    def apply_crop_zoom_and_rotation(self, frame):
        """Apply current crop, zoom, rotation, and brightness/contrast settings to frame"""
        if frame is None:
            return None

        # Create a hash of the transformation parameters for caching
        transform_hash = hash((
            self.crop_rect,
            self.zoom_factor,
            self.rotation_angle,
            self.brightness,
            self.contrast,
            tuple(self.display_offset)
        ))

        # Check if we can use the cached transformation during auto-repeat seeking
        if (self.auto_repeat_active and
                self.cached_transformed_frame is not None and
                self.cached_frame_number == self.current_frame and
                self.cached_transform_hash == transform_hash):
            return self.cached_transformed_frame.copy()

        # Work in-place when possible to avoid unnecessary copying
        processed_frame = frame

        # Apply brightness/contrast first (to original frame for best quality)
        processed_frame = self.apply_brightness_contrast(processed_frame)

        # Apply crop (with motion tracking follow if enabled)
        if self.crop_rect:
            x, y, w, h = self.crop_rect
            if self.tracking_enabled:
                interp = self._get_interpolated_tracking_position(getattr(self, 'current_frame', 0))
                if interp:
                    # Re-center the crop on the interpolated tracking position
                    cx, cy = interp
                    x = int(round(cx - w / 2))
                    y = int(round(cy - h / 2))
            x, y, w, h = int(x), int(y), int(w), int(h)
            # Ensure crop is within frame bounds
            x = max(0, min(x, processed_frame.shape[1] - 1))
            y = max(0, min(y, processed_frame.shape[0] - 1))
            w = min(w, processed_frame.shape[1] - x)
            h = min(h, processed_frame.shape[0] - y)
            if w > 0 and h > 0:
                processed_frame = processed_frame[y : y + h, x : x + w]

        # Apply rotation
        if self.rotation_angle != 0:
            processed_frame = self.apply_rotation(processed_frame)

        # Apply zoom
        if self.zoom_factor != 1.0:
            height, width = processed_frame.shape[:2]
            new_width = int(width * self.zoom_factor)
            new_height = int(height * self.zoom_factor)
            processed_frame = cv2.resize(
                processed_frame, (new_width, new_height), interpolation=cv2.INTER_LINEAR
            )

            # Handle zoom center and display offset
            if new_width > self.window_width or new_height > self.window_height:
                # Calculate crop from zoomed image to fit window
                start_x = max(0, self.display_offset[0])
                start_y = max(0, self.display_offset[1])
                end_x = min(new_width, start_x + self.window_width)
                end_y = min(new_height, start_y + self.window_height)
                processed_frame = processed_frame[start_y:end_y, start_x:end_x]

        # Cache the result for auto-repeat seeking performance
        if self.auto_repeat_active:
            self.cached_transformed_frame = processed_frame.copy()
            self.cached_frame_number = self.current_frame
            self.cached_transform_hash = transform_hash

        return processed_frame

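    # The display pipeline applied above, in order:
    #
    #   brightness/contrast -> crop (tracking-recentered) -> rotate -> zoom
    #   -> pan crop via display_offset
    #
    # _map_original_to_screen / _map_screen_to_original below must replay these
    # exact steps (and their inverses) so overlay points stay aligned with the
    # rendered frame.
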
    # --- Motion tracking helpers ---
    def _get_effective_crop_rect_for_frame(self, frame_number):
        """Compute crop rect applied to a given frame, considering tracking follow."""
        if not self.crop_rect:
            return (0, 0, self.frame_width, self.frame_height)
        x, y, w, h = map(int, self.crop_rect)
        if self.tracking_enabled:
            pos = self._get_interpolated_tracking_position(frame_number)
            if pos:
                cx, cy = pos
                x = int(round(cx - w / 2))
                y = int(round(cy - h / 2))
        # Clamp to frame bounds
        x = max(0, min(x, self.frame_width - 1))
        y = max(0, min(y, self.frame_height - 1))
        w = min(w, self.frame_width - x)
        h = min(h, self.frame_height - y)
        return (x, y, w, h)

    def _get_interpolated_tracking_position(self, frame_number):
        """Linear interpolation between keyed tracking points.

        Returns (x, y) in original frame coords, or None.
        """
        if not self.tracking_points:
            return None
        frames = sorted(self.tracking_points.keys())
        if not frames:
            return None
        # Exact key: average the points stored on this frame
        if frame_number in self.tracking_points and self.tracking_points[frame_number]:
            pts = self.tracking_points[frame_number]
            return (sum(p[0] for p in pts) / len(pts), sum(p[1] for p in pts) / len(pts))
        # Before the first key / after the last key: hold the nearest key
        if frame_number < frames[0]:
            pts = self.tracking_points[frames[0]]
            return (sum(p[0] for p in pts) / len(pts), sum(p[1] for p in pts) / len(pts)) if pts else None
        if frame_number > frames[-1]:
            pts = self.tracking_points[frames[-1]]
            return (sum(p[0] for p in pts) / len(pts), sum(p[1] for p in pts) / len(pts)) if pts else None
        # Between two keys: interpolate the averaged positions
        for i in range(len(frames) - 1):
            f1, f2 = frames[i], frames[i + 1]
            if f1 <= frame_number <= f2:
                pts1 = self.tracking_points.get(f1) or []
                pts2 = self.tracking_points.get(f2) or []
                if not pts1 or not pts2:
                    continue
                x1 = sum(p[0] for p in pts1) / len(pts1)
                y1 = sum(p[1] for p in pts1) / len(pts1)
                x2 = sum(p[0] for p in pts2) / len(pts2)
                y2 = sum(p[1] for p in pts2) / len(pts2)
                t = (frame_number - f1) / (f2 - f1) if f2 != f1 else 0.0
                return (x1 + t * (x2 - x1), y1 + t * (y2 - y1))
        return None

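    # Worked example (hedged): with keys {10: [(100.0, 50.0)], 20: [(200.0, 150.0)]},
    # querying frame 15 gives t = 0.5 and returns (150.0, 100.0); frame 5 holds
    # the first key and returns (100.0, 50.0).
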
    def _map_original_to_screen(self, ox, oy):
        """Map a point in original frame coords to canvas screen coords."""
        frame_number = getattr(self, 'current_frame', 0)
        cx, cy, cw, ch = self._get_effective_crop_rect_for_frame(frame_number)
        # Relative to effective crop
        px = ox - cx
        py = oy - cy
        angle = self.rotation_angle
        # Dimensions after rotation
        if angle in (90, 270):
            rotated_w, rotated_h = ch, cw
        else:
            rotated_w, rotated_h = cw, ch
        # Forward rotation mapping, matching the cv2.rotate calls in
        # apply_rotation (90 = clockwise, 270 = counterclockwise)
        if angle == 90:
            rx, ry = rotated_w - 1 - py, px
        elif angle == 180:
            rx, ry = rotated_w - 1 - px, rotated_h - 1 - py
        elif angle == 270:
            rx, ry = py, rotated_h - 1 - px
        else:
            rx, ry = px, py
        # Zoom
        zx = rx * self.zoom_factor
        zy = ry * self.zoom_factor
        # Apply display offset cropping in zoomed space
        new_w = int(rotated_w * self.zoom_factor)
        new_h = int(rotated_h * self.zoom_factor)
        offx_max = max(0, new_w - self.window_width)
        offy_max = max(0, new_h - self.window_height)
        offx = max(0, min(int(self.display_offset[0]), offx_max))
        offy = max(0, min(int(self.display_offset[1]), offy_max))
        inframe_x = zx - offx
        inframe_y = zy - offy
        # Size of processed_frame from apply_crop_zoom_and_rotation
        base_w = new_w if new_w <= self.window_width else self.window_width
        base_h = new_h if new_h <= self.window_height else self.window_height
        # Final scale and canvas placement
        available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
        scale = min(self.window_width / max(1, base_w), available_height / max(1, base_h))
        final_w = int(base_w * scale)
        final_h = int(base_h * scale)
        start_x_canvas = (self.window_width - final_w) // 2
        start_y_canvas = (available_height - final_h) // 2
        sx = int(round(start_x_canvas + inframe_x * scale))
        sy = int(round(start_y_canvas + inframe_y * scale))
        return sx, sy

    def _map_screen_to_original(self, sx, sy):
        """Map a point on canvas screen coords back to original frame coords."""
        frame_number = getattr(self, 'current_frame', 0)
        cx, cy, cw, ch = self._get_effective_crop_rect_for_frame(frame_number)
        angle = self.rotation_angle
        # Dimensions after rotation
        if angle in (90, 270):
            rotated_w, rotated_h = ch, cw
        else:
            rotated_w, rotated_h = cw, ch
        # Zoomed dimensions and base processed dimensions (after window cropping)
        new_w = int(rotated_w * self.zoom_factor)
        new_h = int(rotated_h * self.zoom_factor)
        base_w = new_w if new_w <= self.window_width else self.window_width
        base_h = new_h if new_h <= self.window_height else self.window_height
        # Final scaling used in display
        available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
        scale = min(self.window_width / max(1, base_w), available_height / max(1, base_h))
        final_w = int(base_w * scale)
        final_h = int(base_h * scale)
        start_x_canvas = (self.window_width - final_w) // 2
        start_y_canvas = (available_height - final_h) // 2
        # Back to processed (zoomed+cropped) space
        zx = (sx - start_x_canvas) / max(1e-6, scale)
        zy = (sy - start_y_canvas) / max(1e-6, scale)
        # Add display offset in zoomed space
        offx_max = max(0, new_w - self.window_width)
        offy_max = max(0, new_h - self.window_height)
        offx = max(0, min(int(self.display_offset[0]), offx_max))
        offy = max(0, min(int(self.display_offset[1]), offy_max))
        zx += offx
        zy += offy
        # Reverse zoom
        rx = zx / max(1e-6, self.zoom_factor)
        ry = zy / max(1e-6, self.zoom_factor)
        # Reverse rotation (the exact inverse of the forward mapping above)
        if angle == 90:
            px, py = ry, rotated_w - 1 - rx
        elif angle == 180:
            px, py = rotated_w - 1 - rx, rotated_h - 1 - ry
        elif angle == 270:
            px, py = rotated_h - 1 - ry, rx
        else:
            px, py = rx, ry
        # Back to original frame
        ox = px + cx
        oy = py + cy
        ox = max(0, min(int(round(ox)), self.frame_width - 1))
        oy = max(0, min(int(round(oy)), self.frame_height - 1))
        return ox, oy

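    # Sanity check (hedged sketch; assumes an initialized editor instance):
    # away from the clamping at the frame border, the two mappings should
    # round-trip,
    #
    #     sx, sy = editor._map_original_to_screen(ox, oy)
    #     ox2, oy2 = editor._map_screen_to_original(sx, sy)
    #
    # with (ox2, oy2) agreeing with (ox, oy) up to the integer rounding applied
    # at each end.
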
    def clear_transformation_cache(self):
        """Clear the cached transformation to force recalculation"""
        self.cached_transformed_frame = None
        self.cached_frame_number = None
        self.cached_transform_hash = None

    def apply_rotation(self, frame):
        """Apply rotation to frame"""
        if self.rotation_angle == 0:
            return frame
        elif self.rotation_angle == 90:
            return cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
        elif self.rotation_angle == 180:
            return cv2.rotate(frame, cv2.ROTATE_180)
        elif self.rotation_angle == 270:
            return cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
        return frame

    def rotate_clockwise(self):
        """Rotate video 90 degrees clockwise"""
        self.rotation_angle = (self.rotation_angle + 90) % 360
        self.clear_transformation_cache()

    def apply_brightness_contrast(self, frame):
        """Apply brightness and contrast adjustments to frame"""
        if self.brightness == 0 and self.contrast == 1.0:
            return frame

        # Convert brightness from -100/100 range to -255/255 range
        brightness_value = self.brightness * 2.55

        # Apply brightness and contrast: new_pixel = contrast * old_pixel + brightness
        adjusted = cv2.convertScaleAbs(
            frame, alpha=self.contrast, beta=brightness_value
        )
        return adjusted

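    # Worked example (hedged): brightness 50 and contrast 1.2 give
    # beta = 50 * 2.55 = 127.5, so a pixel value of 100 becomes
    # round(1.2 * 100 + 127.5) = 248; cv2.convertScaleAbs saturates the
    # result to the 0..255 uint8 range.
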
    def adjust_brightness(self, delta: int):
        """Adjust brightness by delta, clamping the result to -100..100"""
        self.brightness = max(-100, min(100, self.brightness + delta))
        self.clear_transformation_cache()
        self.display_needs_update = True

    def adjust_contrast(self, delta: float):
        """Adjust contrast by delta, clamping the result to 0.1..3.0"""
        self.contrast = max(0.1, min(3.0, self.contrast + delta))
        self.clear_transformation_cache()
        self.display_needs_update = True

def show_progress_bar(self, text: str = "Processing..."):
|
|
"""Show progress bar with given text"""
|
|
self.progress_bar_visible = True
|
|
self.progress_bar_progress = 0.0
|
|
self.progress_bar_complete = False
|
|
self.progress_bar_complete_time = None
|
|
self.progress_bar_text = text
|
|
self.display_needs_update = True
|
|
|
|
def update_progress_bar(self, progress: float, text: str = None, fps: float = None):
|
|
"""Update progress bar progress (0.0 to 1.0) and optionally text and FPS"""
|
|
if self.progress_bar_visible:
|
|
self.progress_bar_progress = max(0.0, min(1.0, progress))
|
|
if text is not None:
|
|
self.progress_bar_text = text
|
|
if fps is not None:
|
|
self.progress_bar_fps = fps
|
|
|
|
# Mark as complete when reaching 100%
|
|
if self.progress_bar_progress >= 1.0 and not self.progress_bar_complete:
|
|
self.progress_bar_complete = True
|
|
self.progress_bar_complete_time = time.time()
|
|
|
|
def hide_progress_bar(self):
|
|
"""Hide progress bar"""
|
|
self.progress_bar_visible = False
|
|
self.progress_bar_complete = False
|
|
self.progress_bar_complete_time = None
|
|
self.progress_bar_fps = 0.0
|
|
|
|
def show_feedback_message(self, message: str):
|
|
"""Show a feedback message on screen for a few seconds"""
|
|
self.feedback_message = message
|
|
self.feedback_message_time = time.time()
|
|
self.display_needs_update = True
|
|
|
|
    def toggle_fullscreen(self):
        """Toggle between windowed and fullscreen mode"""
        window_title = "Image Editor" if self.is_image_mode else "Video Editor"

        if self.is_fullscreen:
            # Switch to windowed mode
            self.is_fullscreen = False
            cv2.setWindowProperty(window_title, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)
            cv2.resizeWindow(window_title, 1200, 800)
            print("Switched to windowed mode")
        else:
            # Switch to fullscreen mode
            self.is_fullscreen = True
            cv2.setWindowProperty(window_title, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
            print("Switched to fullscreen mode")

        self.display_needs_update = True

    def toggle_project_view(self):
        """Toggle between editor and project view mode"""
        if self.project_view_mode:
            # Switch back to editor mode
            self.project_view_mode = False
            if self.project_view:
                cv2.destroyWindow("Project View")
                self.project_view = None
            print("Switched to editor mode")
        else:
            # Switch to project view mode
            self.project_view_mode = True
            # Create project view for the current directory
            if self.path.is_dir():
                project_dir = self.path
            else:
                project_dir = self.path.parent
            self.project_view = ProjectView(project_dir, self)
            # Create separate window for project view
            cv2.namedWindow("Project View", cv2.WINDOW_AUTOSIZE)
            print("Switched to project view mode")

        self.display_needs_update = True

    def open_video_from_project_view(self, video_path: Path):
        """Open a video from project view in editor mode"""
        print(f"Attempting to open video: {video_path}")
        print(f"Video path exists: {video_path.exists()}")

        # Save current state before switching
        self.save_state()

        # Find the video in our video_files list
        try:
            video_index = self.video_files.index(video_path)
            self.current_video_index = video_index
            self._load_video(video_path)
            self.load_current_frame()
            # Load the saved state for this video (same logic as normal video loading)
            self.load_state()
            print(f"Opened video: {video_path.name}")
        except ValueError:
            print(f"Video not found in current session: {video_path.name}")
            # If video not in current session, reload the directory
            self.path = video_path.parent
            self.video_files = self._get_media_files_from_directory(self.path)
            if video_path in self.video_files:
                video_index = self.video_files.index(video_path)
                self.current_video_index = video_index
                self._load_video(video_path)
                self.load_current_frame()
                # Load the saved state for this video (same logic as normal video loading)
                self.load_state()
                print(f"Opened video: {video_path.name}")
            else:
                print(f"Could not find video: {video_path.name}")
                return

        # Keep project view open but switch focus to the video editor.
        # Don't destroy the project view window - just let the user switch between them.

    def draw_feedback_message(self, frame):
        """Draw feedback message on frame if visible"""
        if not self.feedback_message or not self.feedback_message_time:
            return

        # Check if message should still be shown
        elapsed = time.time() - self.feedback_message_time
        if elapsed > self.feedback_message_duration:
            self.feedback_message = ""
            self.feedback_message_time = None
            return

        height, width = frame.shape[:2]

        # Calculate message position (center of frame)
        font = cv2.FONT_HERSHEY_SIMPLEX
        font_scale = 1.0
        thickness = 2

        # Get text size
        text_size = cv2.getTextSize(self.feedback_message, font, font_scale, thickness)[0]
        text_x = (width - text_size[0]) // 2
        text_y = (height + text_size[1]) // 2

        # Draw background rectangle
        padding = 10
        rect_x1 = text_x - padding
        rect_y1 = text_y - text_size[1] - padding
        rect_x2 = text_x + text_size[0] + padding
        rect_y2 = text_y + padding

        # Semi-transparent background
        overlay = frame.copy()
        cv2.rectangle(overlay, (rect_x1, rect_y1), (rect_x2, rect_y2), (0, 0, 0), -1)
        alpha = 0.7
        cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0, frame)

        # Draw text with shadow
        cv2.putText(frame, self.feedback_message, (text_x + 2, text_y + 2), font, font_scale, (0, 0, 0), thickness + 1)
        cv2.putText(frame, self.feedback_message, (text_x, text_y), font, font_scale, (255, 255, 255), thickness)

    def draw_progress_bar(self, frame):
        """Draw progress bar at the top of the frame, spanning the width minus margins"""
        if not self.progress_bar_visible:
            return

        # Check if we should fade out
        if self.progress_bar_complete and self.progress_bar_complete_time:
            elapsed = time.time() - self.progress_bar_complete_time
            if elapsed > self.PROGRESS_BAR_FADE_DURATION:
                self.hide_progress_bar()
                return

            # Calculate fade alpha (1.0 at start, 0.0 at end)
            fade_alpha = max(0.0, 1.0 - (elapsed / self.PROGRESS_BAR_FADE_DURATION))
        else:
            fade_alpha = 1.0

        height, width = frame.shape[:2]

        # Calculate progress bar position (top of frame with percentage margins)
        margin_width = int(width * self.PROGRESS_BAR_MARGIN_PERCENT / 100)
        bar_width = width - (2 * margin_width)
        bar_x = margin_width
        bar_y = self.PROGRESS_BAR_TOP_MARGIN

        # Apply fade alpha to colors
        bg_color = tuple(int(c * fade_alpha) for c in self.PROGRESS_BAR_COLOR_BG)
        border_color = tuple(
            int(c * fade_alpha) for c in self.PROGRESS_BAR_COLOR_BORDER
        )

        if self.progress_bar_complete:
            fill_color = tuple(
                int(c * fade_alpha) for c in self.PROGRESS_BAR_COLOR_FILL
            )
        else:
            fill_color = tuple(
                int(c * fade_alpha) for c in self.PROGRESS_BAR_COLOR_PROGRESS
            )

        # Draw background
        cv2.rectangle(
            frame,
            (bar_x, bar_y),
            (bar_x + bar_width, bar_y + self.PROGRESS_BAR_HEIGHT),
            bg_color,
            -1,
        )

        # Draw progress fill
        fill_width = int(bar_width * self.progress_bar_progress)
        if fill_width > 0:
            cv2.rectangle(
                frame,
                (bar_x, bar_y),
                (bar_x + fill_width, bar_y + self.PROGRESS_BAR_HEIGHT),
                fill_color,
                -1,
            )

        # Draw border
        cv2.rectangle(
            frame,
            (bar_x, bar_y),
            (bar_x + bar_width, bar_y + self.PROGRESS_BAR_HEIGHT),
            border_color,
            2,
        )

        # Draw progress percentage on the left
        percentage_text = f"{self.progress_bar_progress * 100:.1f}%"
        text_color = tuple(int(255 * fade_alpha) for _ in range(3))
        cv2.putText(
            frame,
            percentage_text,
            (bar_x + 12, bar_y + 22),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (0, 0, 0),
            4,
        )
        cv2.putText(
            frame,
            percentage_text,
            (bar_x + 10, bar_y + 20),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            text_color,
            2,
        )

        # Draw FPS on the right if available
        if self.progress_bar_fps > 0:
            fps_text = f"{self.progress_bar_fps:.1f} FPS"
            fps_text_size = cv2.getTextSize(fps_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]
            fps_x = bar_x + bar_width - fps_text_size[0] - 10
            cv2.putText(
                frame,
                fps_text,
                (fps_x + 2, bar_y + 22),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (0, 0, 0),
                4,
            )
            cv2.putText(
                frame,
                fps_text,
                (fps_x, bar_y + 20),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                text_color,
                2,
            )

        # Draw main text in center
        if self.progress_bar_text:
            text_size = cv2.getTextSize(
                self.progress_bar_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1
            )[0]
            text_x = bar_x + (bar_width - text_size[0]) // 2
            text_y = bar_y + 20

            # Draw text shadow for better visibility
            cv2.putText(
                frame,
                self.progress_bar_text,
                (text_x + 2, text_y + 2),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (0, 0, 0),
                4,
            )
            cv2.putText(
                frame,
                self.progress_bar_text,
                (text_x, text_y),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                text_color,
                2,
            )

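    # Fade-out arithmetic sketch: with PROGRESS_BAR_FADE_DURATION = 2.0 (an
    # assumed value), an elapsed time of 0.5 s after completion gives
    # fade_alpha = 1.0 - 0.5 / 2.0 = 0.75, so every BGR channel of the bar,
    # border, and text is scaled to 75% of its base brightness.
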
    def draw_timeline(self, frame):
        """Draw timeline at the bottom of the frame"""
        # Don't draw a timeline for images
        if self.is_image_mode:
            return

        height, width = frame.shape[:2]

        # Timeline background area
        timeline_y = height - self.TIMELINE_HEIGHT
        cv2.rectangle(frame, (0, timeline_y), (width, height), (40, 40, 40), -1)

        # Calculate timeline bar position
        bar_y = timeline_y + (self.TIMELINE_HEIGHT - self.TIMELINE_BAR_HEIGHT) // 2
        bar_x_start = self.TIMELINE_MARGIN
        bar_x_end = width - self.TIMELINE_MARGIN
        bar_width = bar_x_end - bar_x_start

        self.timeline_rect = (bar_x_start, bar_y, bar_width, self.TIMELINE_BAR_HEIGHT)

        # Draw timeline background
        cv2.rectangle(
            frame,
            (bar_x_start, bar_y),
            (bar_x_end, bar_y + self.TIMELINE_BAR_HEIGHT),
            self.TIMELINE_COLOR_BG,
            -1,
        )
        cv2.rectangle(
            frame,
            (bar_x_start, bar_y),
            (bar_x_end, bar_y + self.TIMELINE_BAR_HEIGHT),
            self.TIMELINE_COLOR_BORDER,
            1,
        )

        # Draw progress
        if self.total_frames > 0:
            progress = self.current_frame / max(1, self.total_frames - 1)
            progress_width = int(bar_width * progress)
            if progress_width > 0:
                cv2.rectangle(
                    frame,
                    (bar_x_start, bar_y),
                    (bar_x_start + progress_width, bar_y + self.TIMELINE_BAR_HEIGHT),
                    self.TIMELINE_COLOR_PROGRESS,
                    -1,
                )

            # Draw current position handle
            handle_x = bar_x_start + progress_width
            handle_y = bar_y + self.TIMELINE_BAR_HEIGHT // 2
            cv2.circle(
                frame,
                (handle_x, handle_y),
                self.TIMELINE_HANDLE_SIZE // 2,
                self.TIMELINE_COLOR_HANDLE,
                -1,
            )
            cv2.circle(
                frame,
                (handle_x, handle_y),
                self.TIMELINE_HANDLE_SIZE // 2,
                self.TIMELINE_COLOR_BORDER,
                2,
            )

        # Draw cut points
        if self.cut_start_frame is not None:
            cut_start_progress = self.cut_start_frame / max(1, self.total_frames - 1)
            cut_start_x = bar_x_start + int(bar_width * cut_start_progress)
            cv2.line(
                frame,
                (cut_start_x, bar_y),
                (cut_start_x, bar_y + self.TIMELINE_BAR_HEIGHT),
                self.TIMELINE_COLOR_CUT_POINT,
                3,
            )
            cv2.putText(
                frame,
                "1",
                (cut_start_x - 5, bar_y - 5),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.4,
                self.TIMELINE_COLOR_CUT_POINT,
                1,
            )

        if self.cut_end_frame is not None:
            cut_end_progress = self.cut_end_frame / max(1, self.total_frames - 1)
            cut_end_x = bar_x_start + int(bar_width * cut_end_progress)
            cv2.line(
                frame,
                (cut_end_x, bar_y),
                (cut_end_x, bar_y + self.TIMELINE_BAR_HEIGHT),
                self.TIMELINE_COLOR_CUT_POINT,
                3,
            )
            cv2.putText(
                frame,
                "2",
                (cut_end_x - 5, bar_y - 5),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.4,
                self.TIMELINE_COLOR_CUT_POINT,
                1,
            )

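    # Placement sketch: with TIMELINE_MARGIN = 20, a 1280 px wide window, and
    # a 1000-frame video (all assumed numbers), bar_width = 1280 - 40 = 1240,
    # so a cut marker at frame 250 lands at
    # x = 20 + int(1240 * 250 / 999) = 20 + 310 = 330.
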
    def display_current_frame(self):
        """Display the current frame with all overlays"""
        if self.current_display_frame is None:
            return

        # Check if the display needs an update (optimization)
        current_state = (
            self.current_frame,
            self.crop_rect,
            self.zoom_factor,
            self.rotation_angle,
            self.brightness,
            self.contrast,
            self.display_offset,
            self.progress_bar_visible,
            self.feedback_message,
        )

        # Always update the display when paused so UI elements stay visible
        if not self.display_needs_update and current_state == self.last_display_state and self.is_playing:
            return  # Skip redraw if nothing changed and playing

        self.last_display_state = current_state
        self.display_needs_update = False

        # Apply crop, zoom, and rotation transformations for the preview
        display_frame = self.apply_crop_zoom_and_rotation(
            self.current_display_frame
        )

        if display_frame is None:
            return

        # Resize to fit the window while maintaining aspect ratio
        height, width = display_frame.shape[:2]
        available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)

        scale = min(self.window_width / width, available_height / height)
        if scale < 1.0:
            new_width = int(width * scale)
            new_height = int(height * scale)
            display_frame = cv2.resize(display_frame, (new_width, new_height))

        # Create canvas with timeline space
        canvas = np.zeros((self.window_height, self.window_width, 3), dtype=np.uint8)

        # Center the frame on the canvas
        frame_height, frame_width = display_frame.shape[:2]
        start_y = (available_height - frame_height) // 2
        start_x = (self.window_width - frame_width) // 2

        canvas[start_y : start_y + frame_height, start_x : start_x + frame_width] = display_frame

        # Draw crop selection preview during Shift+Click+Drag
        if self.crop_preview_rect:
            x, y, w, h = self.crop_preview_rect
            cv2.rectangle(
                canvas, (int(x), int(y)), (int(x + w), int(y + h)), (0, 255, 0), 2
            )

        # Add info overlay
        rotation_text = (
            f" | Rotation: {self.rotation_angle}°" if self.rotation_angle != 0 else ""
        )
        brightness_text = (
            f" | Brightness: {self.brightness}" if self.brightness != 0 else ""
        )
        contrast_text = (
            f" | Contrast: {self.contrast:.1f}" if self.contrast != 1.0 else ""
        )
        seek_multiplier_text = (
            f" | Seek: {self.seek_multiplier:.1f}x" if self.seek_multiplier != 1.0 else ""
        )
        motion_text = (
            f" | Motion: {self.tracking_enabled}" if self.tracking_enabled else ""
        )
        if self.is_image_mode:
            info_text = f"Image | Zoom: {self.zoom_factor:.1f}x{rotation_text}{brightness_text}{contrast_text}{motion_text}"
        else:
            info_text = f"Frame: {self.current_frame}/{self.total_frames} | Speed: {self.playback_speed:.1f}x | Zoom: {self.zoom_factor:.1f}x{seek_multiplier_text}{rotation_text}{brightness_text}{contrast_text}{motion_text} | {'Playing' if self.is_playing else 'Paused'}"
        cv2.putText(
            canvas,
            info_text,
            (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.7,
            (255, 255, 255),
            2,
        )
        cv2.putText(
            canvas, info_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 1
        )

        # Add video navigation info
        if len(self.video_files) > 1:
            video_text = f"Video: {self.current_video_index + 1}/{len(self.video_files)} - {self.video_path.name}"
            cv2.putText(
                canvas,
                video_text,
                (10, 60),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.6,
                (255, 255, 255),
                2,
            )
            cv2.putText(
                canvas,
                video_text,
                (10, 60),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.6,
                (0, 0, 0),
                1,
            )
            y_offset = 90
        else:
            y_offset = 60

        # Add crop info
        if self.crop_rect:
            crop_text = f"Crop: {int(self.crop_rect[0])},{int(self.crop_rect[1])} {int(self.crop_rect[2])}x{int(self.crop_rect[3])}"
            cv2.putText(
                canvas,
                crop_text,
                (10, y_offset),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.6,
                (255, 255, 255),
                2,
            )
            cv2.putText(
                canvas,
                crop_text,
                (10, y_offset),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.6,
                (0, 0, 0),
                1,
            )
            y_offset += 30

        # Add cut info (explicit None checks so frame 0 is not shown as '?')
        if self.cut_start_frame is not None or self.cut_end_frame is not None:
            cut_start_text = self.cut_start_frame if self.cut_start_frame is not None else "?"
            cut_end_text = self.cut_end_frame if self.cut_end_frame is not None else "?"
            cut_text = f"Cut: {cut_start_text} - {cut_end_text}"
            cv2.putText(
                canvas,
                cut_text,
                (10, y_offset),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.6,
                (255, 255, 255),
                2,
            )
            cv2.putText(
                canvas,
                cut_text,
                (10, y_offset),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.6,
                (0, 0, 0),
                1,
            )

        # Draw tracking overlays (points and interpolated cross)
        pts = self.tracking_points.get(self.current_frame, []) if not self.is_image_mode else []
        for (ox, oy) in pts:
            sx, sy = self._map_original_to_screen(ox, oy)
            cv2.circle(canvas, (sx, sy), 6, (0, 255, 0), -1)
            cv2.circle(canvas, (sx, sy), 6, (255, 255, 255), 1)
        if self.tracking_enabled and not self.is_image_mode:
            interp = self._get_interpolated_tracking_position(self.current_frame)
            if interp:
                sx, sy = self._map_original_to_screen(interp[0], interp[1])
                cv2.line(canvas, (sx - 10, sy), (sx + 10, sy), (255, 0, 0), 2)
                cv2.line(canvas, (sx, sy - 10), (sx, sy + 10), (255, 0, 0), 2)

        # Draw timeline
        self.draw_timeline(canvas)

        # Draw progress bar (if visible)
        self.draw_progress_bar(canvas)

        # Draw feedback message (if visible)
        self.draw_feedback_message(canvas)

        window_title = "Image Editor" if self.is_image_mode else "Video Editor"
        cv2.imshow(window_title, canvas)

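    # Letterbox scaling sketch: for a 1920x1080 frame in a 1280x720 window
    # with an 80 px timeline (assumed sizes), available_height = 640, so
    # scale = min(1280/1920, 640/1080) = min(0.667, 0.593) = 0.593 and the
    # frame is downscaled to about 1137x640 before being centered on the canvas.
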
    def mouse_callback(self, event, x, y, flags, _):
        """Handle mouse events"""
        # Handle timeline interaction (not for images)
        if self.timeline_rect and not self.is_image_mode:
            bar_x_start, bar_y, bar_width, bar_height = self.timeline_rect
            bar_x_end = bar_x_start + bar_width

            if bar_y <= y <= bar_y + bar_height + 10:
                if event == cv2.EVENT_LBUTTONDOWN:
                    if bar_x_start <= x <= bar_x_end:
                        self.mouse_dragging = True
                        self.seek_to_timeline_position(x, bar_x_start, bar_width)
                elif event == cv2.EVENT_MOUSEMOVE and self.mouse_dragging:
                    if bar_x_start <= x <= bar_x_end:
                        self.seek_to_timeline_position(x, bar_x_start, bar_width)
                elif event == cv2.EVENT_LBUTTONUP:
                    self.mouse_dragging = False
                return

        # Handle crop selection (Shift + click and drag)
        if flags & cv2.EVENT_FLAG_SHIFTKEY:
            if event == cv2.EVENT_LBUTTONDOWN:
                self.crop_selecting = True
                self.crop_start_point = (x, y)
                self.crop_preview_rect = None
            elif event == cv2.EVENT_MOUSEMOVE and self.crop_selecting:
                if self.crop_start_point:
                    start_x, start_y = self.crop_start_point
                    width = abs(x - start_x)
                    height = abs(y - start_y)
                    crop_x = min(start_x, x)
                    crop_y = min(start_y, y)
                    self.crop_preview_rect = (crop_x, crop_y, width, height)
            elif event == cv2.EVENT_LBUTTONUP and self.crop_selecting:
                if self.crop_start_point and self.crop_preview_rect:
                    # Convert screen coordinates to video coordinates
                    self.set_crop_from_screen_coords(self.crop_preview_rect)
                self.crop_selecting = False
                self.crop_start_point = None
                self.crop_preview_rect = None

        # Handle zoom center (Ctrl + click)
        if flags & cv2.EVENT_FLAG_CTRLKEY and event == cv2.EVENT_LBUTTONDOWN:
            self.zoom_center = (x, y)

        # Handle right-click for tracking points (no modifiers)
        if event == cv2.EVENT_RBUTTONDOWN and not (flags & (cv2.EVENT_FLAG_CTRLKEY | cv2.EVENT_FLAG_SHIFTKEY)):
            if not self.is_image_mode:
                ox, oy = self._map_screen_to_original(x, y)
                threshold = 50
                removed = False
                if self.current_frame in self.tracking_points:
                    # Compare in screen space so the hit radius is independent
                    # of zoom and rotation
                    pts_screen = []
                    for idx, (px, py) in enumerate(self.tracking_points[self.current_frame]):
                        sxp, syp = self._map_original_to_screen(px, py)
                        pts_screen.append((idx, sxp, syp))
                    for idx, sxp, syp in pts_screen:
                        if (sxp - x) ** 2 + (syp - y) ** 2 <= threshold ** 2:
                            del self.tracking_points[self.current_frame][idx]
                            if not self.tracking_points[self.current_frame]:
                                del self.tracking_points[self.current_frame]
                            self.show_feedback_message("Tracking point removed")
                            removed = True
                            break
                if not removed:
                    self.tracking_points.setdefault(self.current_frame, []).append((int(ox), int(oy)))
                    self.show_feedback_message("Tracking point added")
                self.clear_transformation_cache()
                self.save_state()

        # Handle scroll wheel for zoom (Ctrl + scroll)
        if flags & cv2.EVENT_FLAG_CTRLKEY:
            if event == cv2.EVENT_MOUSEWHEEL:
                # The wheel delta lives in the high bits of `flags`, so its
                # sign indicates the scroll direction
                if flags > 0:  # Scroll up
                    self.zoom_factor = min(
                        self.MAX_ZOOM, self.zoom_factor + self.ZOOM_INCREMENT
                    )
                else:  # Scroll down
                    self.zoom_factor = max(
                        self.MIN_ZOOM, self.zoom_factor - self.ZOOM_INCREMENT
                    )
                self.clear_transformation_cache()

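    # Note: OpenCV also exposes the signed wheel delta explicitly via
    # cv2.getMouseWheelDelta(flags); checking `flags > 0` works because the
    # delta occupies the sign-carrying high bits. A more explicit variant
    # (sketch only):
    #
    #   delta = cv2.getMouseWheelDelta(flags)
    #   step = self.ZOOM_INCREMENT if delta > 0 else -self.ZOOM_INCREMENT
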
    def set_crop_from_screen_coords(self, screen_rect):
        """Convert screen coordinates to video frame coordinates and set the crop"""
        x, y, w, h = screen_rect

        if self.current_display_frame is None:
            return

        # Map both corners from screen to original to form an axis-aligned crop.
        # All coordinates are in reference to the ORIGINAL frame: user input
        # arrives in processed display space and is mapped back to the original.
        x2 = x + w
        y2 = y + h
        ox1, oy1 = self._map_screen_to_original(x, y)
        ox2, oy2 = self._map_screen_to_original(x2, y2)
        left = min(ox1, ox2)
        top = min(oy1, oy2)
        right = max(ox1, ox2)
        bottom = max(oy1, oy2)
        original_x = left
        original_y = top
        original_w = max(10, right - left)
        original_h = max(10, bottom - top)

        # Clamp to original frame bounds
        original_x = max(0, min(original_x, self.frame_width - 1))
        original_y = max(0, min(original_y, self.frame_height - 1))
        original_w = min(original_w, self.frame_width - original_x)
        original_h = min(original_h, self.frame_height - original_y)

        if original_w > 10 and original_h > 10:
            if self.crop_rect:
                self.crop_history.append(self.crop_rect)
            self.crop_rect = (original_x, original_y, original_w, original_h)
            self.clear_transformation_cache()
            self.save_state()

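    # Mapping sketch: under rotation the two mapped corners can swap order,
    # which is why the code takes min/max instead of assuming (ox1, oy1) is
    # the top-left. For example, if _map_screen_to_original returned
    # (500, 100) for the drag start and (200, 300) for the drag end
    # (hypothetical values), the crop becomes left=200, top=100,
    # right=500, bottom=300.
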
    def seek_to_timeline_position(self, mouse_x, bar_x_start, bar_width):
        """Seek to the position corresponding to a mouse click on the timeline"""
        relative_x = mouse_x - bar_x_start
        position_ratio = max(0, min(1, relative_x / bar_width))
        target_frame = int(position_ratio * (self.total_frames - 1))
        self.seek_to_frame(target_frame)

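    # Example: with bar_x_start = 20, bar_width = 760, and a 1000-frame video
    # (assumed numbers), a click at mouse_x = 400 gives
    # position_ratio = (400 - 20) / 760 = 0.5 and
    # target_frame = int(0.5 * 999) = 499.
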
    def undo_crop(self):
        """Undo the last crop operation"""
        if self.crop_history:
            self.crop_rect = self.crop_history.pop()
        else:
            self.crop_rect = None
        self.clear_transformation_cache()
        self.save_state()  # Save state when the crop is undone

    def toggle_marker_looping(self):
        """Toggle looping between cut markers"""
        # Check that both markers are set
        if self.cut_start_frame is None or self.cut_end_frame is None:
            print("Both markers must be set to enable looping. Use '1' and '2' to set markers.")
            return False

        if self.cut_start_frame >= self.cut_end_frame:
            print("Invalid marker range - start frame must be before end frame")
            return False

        self.looping_between_markers = not self.looping_between_markers

        if self.looping_between_markers:
            print(f"Marker looping ENABLED: frames {self.cut_start_frame} - {self.cut_end_frame}")
            # Jump to the start marker when enabling
            self.seek_to_frame(self.cut_start_frame)
        else:
            print("Marker looping DISABLED")

        self.save_state()  # Save state when looping is toggled
        return True

    def adjust_crop_size(self, direction: str, expand: bool, amount: int = None):
        """
        Adjust crop size in the given direction.
        direction: 'up', 'down', 'left', 'right'
        expand: True to expand, False to contract
        amount: pixels to adjust by (uses self.crop_size_step if None)
        """
        if amount is None:
            amount = self.crop_size_step
        if not self.crop_rect:
            # If no crop exists, create a default one in the center
            center_x = self.frame_width // 2
            center_y = self.frame_height // 2
            default_size = min(self.frame_width, self.frame_height) // 4
            self.crop_rect = (
                center_x - default_size // 2,
                center_y - default_size // 2,
                default_size,
                default_size,
            )
            return

        x, y, w, h = self.crop_rect

        if direction == 'up':
            if expand:
                # Expand upward - decrease y, increase height
                new_y = max(0, y - amount)
                new_h = h + (y - new_y)
                self.crop_rect = (x, new_y, w, new_h)
            else:
                # Contract from bottom - decrease height
                new_h = max(10, h - amount)  # Minimum size of 10 pixels
                self.crop_rect = (x, y, w, new_h)

        elif direction == 'down':
            if expand:
                # Expand downward - increase height
                new_h = min(self.frame_height - y, h + amount)
                self.crop_rect = (x, y, w, new_h)
            else:
                # Contract from top - increase y, decrease height
                amount = min(amount, h - 10)  # Don't make it smaller than 10 pixels
                new_y = y + amount
                new_h = h - amount
                self.crop_rect = (x, new_y, w, new_h)

        elif direction == 'left':
            if expand:
                # Expand leftward - decrease x, increase width
                new_x = max(0, x - amount)
                new_w = w + (x - new_x)
                self.crop_rect = (new_x, y, new_w, h)
            else:
                # Contract from right - decrease width
                new_w = max(10, w - amount)  # Minimum size of 10 pixels
                self.crop_rect = (x, y, new_w, h)

        elif direction == 'right':
            if expand:
                # Expand rightward - increase width
                new_w = min(self.frame_width - x, w + amount)
                self.crop_rect = (x, y, new_w, h)
            else:
                # Contract from left - increase x, decrease width
                amount = min(amount, w - 10)  # Don't make it smaller than 10 pixels
                new_x = x + amount
                new_w = w - amount
                self.crop_rect = (new_x, y, new_w, h)

        self.clear_transformation_cache()
        self.save_state()  # Save state when the crop is adjusted

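    # Arithmetic sketch: expanding 'up' by 30 px on a crop of
    # (x=100, y=20, w=200, h=150) clamps at the frame edge:
    # new_y = max(0, 20 - 30) = 0 and new_h = 150 + (20 - 0) = 170, so the
    # crop only grows by the 20 px actually available above it.
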
    def render_video(self, output_path: str):
        """Render video or save image with the current edits applied"""
        if self.is_image_mode:
            return self._render_image(output_path)
        else:
            return self._render_video_threaded(output_path)

    def _render_video_threaded(self, output_path: str):
        """Start video rendering in a separate thread"""
        # Check if already rendering
        if self.render_thread and self.render_thread.is_alive():
            print("Render already in progress! Use 'x' to cancel first.")
            return False

        # Reset render state
        self.render_cancelled = False

        # Start the render thread
        self.render_thread = threading.Thread(
            target=self._render_video_worker,
            args=(output_path,),
            daemon=True,
        )
        self.render_thread.start()

        print(f"Started rendering to {output_path} in background thread...")
        print("You can continue editing while rendering. Press 'x' to cancel.")
        return True

    def _render_video_worker(self, output_path: str):
        """Worker method that runs in the render thread"""
        try:
            if not output_path.endswith(".mp4"):
                output_path += ".mp4"

            # Send a progress update to the main thread
            self.render_progress_queue.put(("init", "Initializing render...", 0.0, 0.0))

            # No need to create a VideoCapture here since we use FFmpeg directly

            # Determine the frame range
            start_frame = self.cut_start_frame if self.cut_start_frame is not None else 0
            end_frame = (
                self.cut_end_frame
                if self.cut_end_frame is not None
                else self.total_frames - 1
            )

            if start_frame >= end_frame:
                self.render_progress_queue.put(("error", "Invalid cut range!", 1.0, 0.0))
                return False

            # Send a progress update
            self.render_progress_queue.put(("progress", "Calculating output dimensions...", 0.05, 0.0))

            # Calculate output dimensions (accounting for rotation)
            if self.crop_rect:
                crop_width = int(self.crop_rect[2])
                crop_height = int(self.crop_rect[3])
            else:
                crop_width = self.frame_width
                crop_height = self.frame_height

            # Swap dimensions if the rotation is 90 or 270 degrees
            if self.rotation_angle == 90 or self.rotation_angle == 270:
                output_width = int(crop_height * self.zoom_factor)
                output_height = int(crop_width * self.zoom_factor)
            else:
                output_width = int(crop_width * self.zoom_factor)
                output_height = int(crop_height * self.zoom_factor)

            # Ensure dimensions are divisible by 2 for H.264 encoding
            output_width = output_width - (output_width % 2)
            output_height = output_height - (output_height % 2)

            # Send a progress update
            self.render_progress_queue.put(("progress", "Setting up FFmpeg encoder...", 0.1, 0.0))

            # Debug output dimensions
            print(f"Output dimensions: {output_width}x{output_height}")
            print(f"Zoom factor: {self.zoom_factor}")
            print(f"Crop dimensions: {crop_width}x{crop_height}")

            # Skip the OpenCV codec fallbacks and go straight to FFmpeg
            print("Using FFmpeg for encoding with OpenCV transformations...")
            return self._render_with_ffmpeg_pipe(output_path, start_frame, end_frame, output_width, output_height)

        except Exception as e:
            error_msg = str(e)
            # Handle specific FFmpeg threading errors
            if "async_lock" in error_msg or "pthread_frame" in error_msg:
                error_msg = "FFmpeg threading error - try restarting the application"
            elif "Assertion" in error_msg:
                error_msg = "Video codec error - the video file may be corrupted or incompatible"

            self.render_progress_queue.put(("error", f"Render error: {error_msg}", 1.0, 0.0))
            print(f"Render error: {error_msg}")
            return False
        finally:
            # No cleanup needed since we don't create a VideoCapture here
            pass

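    # Even-dimension sketch: yuv420p subsamples chroma 2x2, so H.264 encoders
    # reject odd frame sizes. For example, a 501x375 crop at zoom 1.0 becomes
    # 500x374 after `w -= w % 2; h -= h % 2`.
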
    def update_render_progress(self):
        """Process progress updates from the render thread"""
        try:
            while True:
                # Non-blocking get from the queue
                update_type, text, progress, fps = self.render_progress_queue.get_nowait()

                if update_type == "init":
                    self.show_progress_bar(text)
                elif update_type == "progress":
                    self.update_progress_bar(progress, text, fps)
                elif update_type == "complete":
                    self.update_progress_bar(progress, text, fps)
                    # Handle file overwrite if this was an overwrite operation
                    if hasattr(self, 'overwrite_temp_path') and self.overwrite_temp_path:
                        self._handle_overwrite_completion()
                elif update_type == "error":
                    self.update_progress_bar(progress, text, fps)
                    # Also show the error as a feedback message for better visibility
                    self.show_feedback_message(f"ERROR: {text}")
                elif update_type == "cancelled":
                    self.hide_progress_bar()
                    self.show_feedback_message("Render cancelled")

        except queue.Empty:
            # No more updates in the queue
            pass

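    # The render worker is the producer and the UI loop is the consumer; every
    # update has the shape (update_type, text, progress, fps). A minimal
    # standalone sketch of the same pattern (illustrative only; depending on
    # timing it drains zero or one update):
    #
    #   import queue, threading
    #   q = queue.Queue()
    #   threading.Thread(target=lambda: q.put(("progress", "step 1", 0.5, 24.0)),
    #                    daemon=True).start()
    #   try:
    #       while True:
    #           kind, text, progress, fps = q.get_nowait()
    #   except queue.Empty:
    #       pass
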
    def _handle_overwrite_completion(self):
        """Handle file replacement after a successful render"""
        try:
            print("Replacing original file...")
            # Release the current video capture before replacing the file
            if hasattr(self, 'cap') and self.cap:
                self.cap.release()

            # Replace the original file with the temporary file
            import shutil
            print(f"DEBUG: Moving {self.overwrite_temp_path} to {self.overwrite_target_path}")
            try:
                shutil.move(self.overwrite_temp_path, self.overwrite_target_path)
                print("DEBUG: File move successful")
            except Exception as e:
                print(f"DEBUG: File move failed: {e}")
                # Try to clean up the temp file
                if os.path.exists(self.overwrite_temp_path):
                    os.remove(self.overwrite_temp_path)
                raise

            # Small delay to ensure file system operations are complete
            time.sleep(0.1)

            try:
                self._load_video(self.video_path)
                self.load_current_frame()
                print("File reloaded successfully")
            except Exception as e:
                print(f"Warning: Could not reload file after overwrite: {e}")
                print("The file was saved successfully, but you may need to restart the editor to continue editing it.")
        except Exception as e:
            print(f"Error during file overwrite: {e}")
        finally:
            # Clean up overwrite state
            self.overwrite_temp_path = None
            self.overwrite_target_path = None

    def cancel_render(self):
        """Cancel the current render operation"""
        if self.render_thread and self.render_thread.is_alive():
            self.render_cancelled = True
            print("Render cancellation requested...")
            return True
        return False

    def is_rendering(self):
        """Check if a render operation is currently active"""
        return self.render_thread and self.render_thread.is_alive()

    def cleanup_render_thread(self):
        """Clean up render thread resources"""
        if self.render_thread and self.render_thread.is_alive():
            self.render_cancelled = True
            # Terminate the FFmpeg process if it is running
            if self.ffmpeg_process:
                try:
                    self.ffmpeg_process.terminate()
                    self.ffmpeg_process.wait(timeout=1.0)
                except Exception:
                    try:
                        self.ffmpeg_process.kill()
                    except Exception:
                        pass
                self.ffmpeg_process = None
            # Wait a bit for the thread to finish gracefully
            self.render_thread.join(timeout=2.0)
            if self.render_thread.is_alive():
                print("Warning: Render thread did not finish gracefully")
        self.render_thread = None
        self.render_cancelled = False

    def _render_image(self, output_path: str):
        """Save image with the current edits applied"""
        # Get the appropriate file extension
        original_ext = self.video_path.suffix.lower()
        if not output_path.endswith(original_ext):
            output_path += original_ext

        print(f"Saving image to {output_path}...")

        # Apply all transformations to the image
        processed_image = self.apply_crop_zoom_and_rotation(self.static_image.copy())

        if processed_image is not None:
            # Save the image
            success = cv2.imwrite(output_path, processed_image)
            if success:
                print(f"Image saved successfully to {output_path}")
                return True
            else:
                print(f"Error: Could not save image to {output_path}")
                return False
        else:
            print("Error: Could not process image")
            return False

    def _process_frame_for_render(self, frame, output_width: int, output_height: int, frame_number: int = None):
        """Process a single frame for rendering (optimized for speed)"""
        try:
            # Apply crop (vectorized operation)
            if self.crop_rect:
                if frame_number is None:
                    x, y, w, h = map(int, self.crop_rect)
                else:
                    x, y, w, h = map(int, self._get_effective_crop_rect_for_frame(frame_number))

                # Clamp coordinates to frame bounds
                h_frame, w_frame = frame.shape[:2]
                x = max(0, min(x, w_frame - 1))
                y = max(0, min(y, h_frame - 1))
                w = min(w, w_frame - x)
                h = min(h, h_frame - y)

                if w > 0 and h > 0:
                    frame = frame[y : y + h, x : x + w]
                else:
                    return None

            # Apply brightness and contrast
            frame = self.apply_brightness_contrast(frame)

            # Apply rotation
            if self.rotation_angle != 0:
                frame = self.apply_rotation(frame)

            # Apply zoom and resize directly to the final output dimensions
            if self.zoom_factor != 1.0:
                height, width = frame.shape[:2]
                # Calculate what the zoomed dimensions would be
                zoomed_width = int(width * self.zoom_factor)
                zoomed_height = int(height * self.zoom_factor)

                # If the zoomed dimensions match the output, use them;
                # otherwise resize directly to the output size
                if zoomed_width == output_width and zoomed_height == output_height:
                    frame = cv2.resize(
                        frame, (zoomed_width, zoomed_height), interpolation=cv2.INTER_LINEAR
                    )
                else:
                    # Resize directly to the final output dimensions
                    frame = cv2.resize(
                        frame, (output_width, output_height), interpolation=cv2.INTER_LINEAR
                    )
            else:
                # No zoom, just resize to the output dimensions if needed
                if frame.shape[1] != output_width or frame.shape[0] != output_height:
                    frame = cv2.resize(
                        frame, (output_width, output_height), interpolation=cv2.INTER_LINEAR
                    )

            return frame

        except Exception as e:
            print(f"Error processing frame: {e}")
            return None

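    # When frame_number is given, the crop rectangle can differ per frame:
    # _get_effective_crop_rect_for_frame (defined elsewhere in this file)
    # lets the crop origin follow the motion-tracking interpolation, so each
    # rendered frame may be cut from a slightly different position.
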
    def _render_with_ffmpeg_pipe(self, output_path: str, start_frame: int, end_frame: int, output_width: int, output_height: int):
        """Render video with transformations applied, encoding via FFmpeg"""
        try:
            # Test FFmpeg with a simple command first
            try:
                test_result = subprocess.run(['ffmpeg', '-version'], capture_output=True, text=True, timeout=10)
                if test_result.returncode != 0:
                    print(f"FFmpeg test failed with return code {test_result.returncode}")
                    print(f"FFmpeg stderr: {test_result.stderr}")
                    error_msg = "FFmpeg is not working properly"
                    self.render_progress_queue.put(("error", error_msg, 1.0, 0.0))
                    return False
            except (subprocess.CalledProcessError, FileNotFoundError, subprocess.TimeoutExpired) as e:
                error_msg = f"FFmpeg not found or not working: {e}"
                print(error_msg)
                self.render_progress_queue.put(("error", error_msg, 1.0, 0.0))
                return False

            self.render_progress_queue.put(("progress", "Starting encoder...", 0.0, 0.0))

            import tempfile

            temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.raw')
            temp_file.close()

            # Use a simple, Windows-compatible FFmpeg command
            ffmpeg_cmd = [
                'ffmpeg', '-y',
                '-f', 'rawvideo',
                '-s', f'{output_width}x{output_height}',
                '-pix_fmt', 'bgr24',
                '-r', str(self.fps),
                '-i', temp_file.name,
                '-c:v', 'libx264',
                '-preset', 'fast',
                '-crf', '18',
                '-pix_fmt', 'yuv420p',
                output_path,
            ]
            self.temp_file_name = temp_file.name

            render_cap = cv2.VideoCapture(str(self.video_path))
            render_cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)

            total_frames = end_frame - start_frame + 1
            frames_written = 0
            start_time = time.time()
            last_progress_update = 0

            self.render_progress_queue.put(("progress", f"Processing {total_frames} frames...", 0.1, 0.0))
            with open(self.temp_file_name, 'wb') as temp_file:
                for i in range(total_frames):
                    if self.render_cancelled:
                        render_cap.release()
                        self.render_progress_queue.put(("cancelled", "Render cancelled", 0.0, 0.0))
                        return False

                    ret, frame = render_cap.read()
                    if not ret:
                        break

                    processed_frame = self._process_frame_for_render(frame, output_width, output_height, start_frame + i)
                    if processed_frame is not None:
                        if i == 0:
                            print(f"Processed frame dimensions: {processed_frame.shape[1]}x{processed_frame.shape[0]}")
                            print(f"Expected dimensions: {output_width}x{output_height}")

                        temp_file.write(processed_frame.tobytes())
                        frames_written += 1

                    current_time = time.time()
                    progress = 0.1 + (0.8 * (i + 1) / total_frames)

                    if current_time - last_progress_update > 0.5:
                        elapsed = current_time - start_time
                        fps_rate = frames_written / elapsed if elapsed > 0 else 0
                        self.render_progress_queue.put(("progress", f"Processed {i+1}/{total_frames} frames", progress, fps_rate))
                        last_progress_update = current_time

            render_cap.release()

            self.render_progress_queue.put(("progress", "Encoding...", 0.9, 0.0))

            # Use subprocess.run() with a timeout for better Windows reliability
            result = subprocess.run(
                ffmpeg_cmd,
                capture_output=True,
                text=True,
                timeout=300,  # 5 minute timeout
                creationflags=subprocess.CREATE_NO_WINDOW if hasattr(subprocess, 'CREATE_NO_WINDOW') else 0,
            )

            return_code = result.returncode
            stdout = result.stdout
            stderr = result.stderr

            # Debug output
            print(f"FFmpeg return code: {return_code}")
            if stdout:
                print(f"FFmpeg stdout: {stdout}")
            if stderr:
                print(f"FFmpeg stderr: {stderr}")

            if os.path.exists(self.temp_file_name):
                try:
                    os.unlink(self.temp_file_name)
                except OSError:
                    pass

            if return_code == 0:
                total_time = time.time() - start_time
                avg_fps = frames_written / total_time if total_time > 0 else 0
                self.render_progress_queue.put(("complete", f"Rendered {frames_written} frames", 1.0, avg_fps))
                print(f"Successfully rendered {frames_written} frames (avg {avg_fps:.1f} FPS)")
                return True
            else:
                error_details = stderr if stderr else "No error details available"
                print(f"Encoding failed with return code {return_code}")
                print(f"Error: {error_details}")
                self.render_progress_queue.put(("error", f"Encoding failed: {error_details}", 1.0, 0.0))
                return False

        except Exception as e:
            error_msg = str(e)
            print(f"Rendering exception: {error_msg}")
            print(f"Exception type: {type(e).__name__}")

            if "Errno 22" in error_msg or "invalid argument" in error_msg.lower():
                error_msg = "File system error - try using a different output path"
            elif "BrokenPipeError" in error_msg:
                error_msg = "Process terminated unexpectedly"
            elif "FileNotFoundError" in error_msg or "ffmpeg" in error_msg.lower():
                error_msg = "FFmpeg not found - please install FFmpeg and ensure it's in your PATH"

            self.render_progress_queue.put(("error", f"Rendering failed: {error_msg}", 1.0, 0.0))
            return False

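    # Pipeline sketch: decode with OpenCV -> transform each frame -> dump raw
    # bgr24 bytes to a temp file -> encode once with FFmpeg. Each raw frame is
    # width * height * 3 bytes (about 2.8 MB at 1280x720), so the intermediate
    # .raw file grows quickly for long cuts.
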
    def run(self):
        """Main editor loop"""
        if self.is_image_mode:
            print("Image Editor Controls:")
            print(" E/Shift+E: Increase/Decrease brightness")
            print(" R/Shift+R: Increase/Decrease contrast")
            print(" -: Rotate clockwise 90°")
            print()
            print("Crop Controls:")
            print(" Shift+Click+Drag: Select crop area")
            print(" h/j/k/l: Contract crop (left/down/up/right)")
            print(" H/J/K/L: Expand crop (left/down/up/right)")
            print(" u: Undo crop")
            print(" c: Clear crop")
            print()
            print("Motion Tracking:")
            print(" v: Toggle motion tracking on/off")
            print(" V: Clear all tracking points")
            print()
            print("Other Controls:")
            print(" Ctrl+Scroll: Zoom in/out")
            print(" s: Save screenshot")
            print(" f: Toggle fullscreen")
            print(" p: Toggle project view")
            if len(self.video_files) > 1:
                print(" n: Next file")
                print(" N: Previous file")
            print(" Enter: Save image (overwrites if '_edited_' in name)")
            print(" b: Save image as a new '_edited_' copy")
            print(" q/ESC: Quit")
            print()
        else:
            print("Video Editor Controls:")
            print(" Space: Play/Pause")
            print(" A/D: Seek backward/forward (1 frame)")
            print(" Shift+A/D: Seek backward/forward (10 frames)")
            print(" Ctrl+A/D: Seek backward/forward (60 frames)")
            print(" W/S: Increase/Decrease speed")
            print(" Q/Y: Increase/Decrease seek multiplier")
            print(" E/Shift+E: Increase/Decrease brightness")
            print(" R/Shift+R: Increase/Decrease contrast")
            print(" -: Rotate clockwise 90°")
            print()
            print("Crop Controls:")
            print(" Shift+Click+Drag: Select crop area")
            print(" h/j/k/l: Contract crop (left/down/up/right)")
            print(" H/J/K/L: Expand crop (left/down/up/right)")
            print(" u: Undo crop")
            print(" c: Clear crop")
            print()
            print("Motion Tracking:")
            print(" Right-click: Add/remove tracking point (at current frame)")
            print(" v: Toggle motion tracking on/off")
            print(" V: Clear all tracking points")
            print()
            print("Other Controls:")
            print(" Ctrl+Scroll: Zoom in/out")
            print(" s: Save screenshot")
            print(" f: Toggle fullscreen")
            print(" p: Toggle project view")
            print(" 1: Set cut start point")
            print(" 2: Set cut end point")
            print(" t: Toggle loop between markers")
            if len(self.video_files) > 1:
                print(" n: Next video")
                print(" N: Previous video")
            print(" Enter: Render video (overwrites if '_edited_' in name)")
            print(" b: Render video to a new '_edited_' file")
            print(" x: Cancel render")
            print(" q/ESC: Quit")
            print()

        window_title = "Image Editor" if self.is_image_mode else "Video Editor"
        cv2.namedWindow(window_title, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(window_title, self.window_width, self.window_height)
        cv2.setMouseCallback(window_title, self.mouse_callback)

        self.load_current_frame()

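        # Pacing note: when playing, cv2.waitKey doubles as the frame timer,
        # so calculate_frame_delay (defined elsewhere in this file) presumably
        # returns roughly 1000 / (fps * playback_speed) milliseconds, e.g.
        # ~16 ms at 30 fps and 2.0x speed; when paused, a 1 ms wait keeps the
        # UI responsive.
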
        while True:
            # Update auto-repeat seeking if active
            self.update_auto_repeat_seek()

            # Update render progress from the background thread
            self.update_render_progress()

            # Update the display
            self.display_current_frame()

            # Handle the project view window if it exists
            if self.project_view_mode and self.project_view:
                # Draw the project view in its own window
                project_canvas = self.project_view.draw()
                cv2.imshow("Project View", project_canvas)

            # Calculate an appropriate delay based on playback state
            if self.is_playing and not self.is_image_mode:
                # Use the calculated frame delay for proper playback speed
                delay_ms = self.calculate_frame_delay()
            else:
                # Use a minimal delay when not playing for a responsive UI
                delay_ms = 1

            # Key capture with the appropriate delay
            key = cv2.waitKey(delay_ms) & 0xFF

            # Route keys based on window focus
            if key != 255:  # A key was pressed
                active_window = get_active_window_title()

                if "Project View" in active_window:
                    # Project view window has focus - handle project view keys
                    if self.project_view_mode and self.project_view:
                        action = self.project_view.handle_key(key)
                        if action == "back_to_editor":
                            self.toggle_project_view()
                        elif action == "quit":
                            return  # Exit the main loop
                        elif action and action.startswith("open_video:"):
                            video_path_str = action.split(":", 1)[1]
                            video_path = Path(video_path_str)
                            self.open_video_from_project_view(video_path)
                    continue  # Skip main window key handling

                elif "Video Editor" in active_window or "Image Editor" in active_window:
                    # Main window has focus - continue to the editor key handling below
                    pass
                else:
                    # Neither window has focus, ignore the key
                    continue

            # Handle auto-repeat - stop if no key is pressed
            if key == 255 and self.auto_repeat_active:  # 255 means no key pressed
                self.stop_auto_repeat_seek()

            if key == ord("q") or key == 27:  # ESC
                self.stop_auto_repeat_seek()
                self.save_state()
                break
elif key == ord("p"): # P - Toggle project view
|
|
self.toggle_project_view()
|
|
elif key == ord(" "):
|
|
# Don't allow play/pause for images
|
|
if not self.is_image_mode:
|
|
self.stop_auto_repeat_seek() # Stop seeking when toggling play/pause
|
|
self.is_playing = not self.is_playing
|
|
elif key == ord("a") or key == ord("A"):
|
|
# Seeking only for videos
|
|
if not self.is_image_mode:
|
|
# Check if it's uppercase A (Shift+A)
|
|
if key == ord("A"):
|
|
if not self.auto_repeat_active:
|
|
self.start_auto_repeat_seek(-1, True, False) # Shift+A: -10 frames
|
|
else:
|
|
if not self.auto_repeat_active:
|
|
self.start_auto_repeat_seek(-1, False, False) # A: -1 frame
|
|
elif key == ord("d") or key == ord("D"):
|
|
# Seeking only for videos
|
|
if not self.is_image_mode:
|
|
# Check if it's uppercase D (Shift+D)
|
|
if key == ord("D"):
|
|
if not self.auto_repeat_active:
|
|
self.start_auto_repeat_seek(1, True, False) # Shift+D: +10 frames
|
|
else:
|
|
if not self.auto_repeat_active:
|
|
self.start_auto_repeat_seek(1, False, False) # D: +1 frame
|
|
elif key == 1: # Ctrl+A
|
|
# Seeking only for videos
|
|
if not self.is_image_mode:
|
|
if not self.auto_repeat_active:
|
|
self.start_auto_repeat_seek(-1, False, True) # Ctrl+A: -60 frames
|
|
elif key == 4: # Ctrl+D
|
|
# Seeking only for videos
|
|
if not self.is_image_mode:
|
|
if not self.auto_repeat_active:
|
|
self.start_auto_repeat_seek(1, False, True) # Ctrl+D: +60 frames
|
|
elif key == ord("-") or key == ord("_"):
|
|
self.rotate_clockwise()
|
|
print(f"Rotated to {self.rotation_angle}°")
|
|
elif key == ord("f"):
|
|
self.toggle_fullscreen()
|
|
elif key == ord("s"): # Shift+S - Save screenshot
|
|
self.save_current_frame()
|
|
elif key == ord("W"):
|
|
# Speed control only for videos
|
|
if not self.is_image_mode:
|
|
self.playback_speed = min(
|
|
self.MAX_PLAYBACK_SPEED, self.playback_speed + self.SPEED_INCREMENT
|
|
)
|
|
elif key == ord("S"):
|
|
# Speed control only for videos
|
|
if not self.is_image_mode:
|
|
self.playback_speed = max(
|
|
self.MIN_PLAYBACK_SPEED, self.playback_speed - self.SPEED_INCREMENT
|
|
)
|
|
elif key == ord("Q"):
|
|
# Seek multiplier control only for videos
|
|
if not self.is_image_mode:
|
|
self.seek_multiplier = min(
|
|
self.MAX_SEEK_MULTIPLIER, self.seek_multiplier + self.SEEK_MULTIPLIER_INCREMENT
|
|
)
|
|
print(f"Seek multiplier: {self.seek_multiplier:.1f}x")
|
|
elif key == ord("Y"):
|
|
# Seek multiplier control only for videos
|
|
if not self.is_image_mode:
|
|
self.seek_multiplier = max(
|
|
self.MIN_SEEK_MULTIPLIER, self.seek_multiplier - self.SEEK_MULTIPLIER_INCREMENT
|
|
)
|
|
print(f"Seek multiplier: {self.seek_multiplier:.1f}x")
|
|
elif key == ord("e") or key == ord("E"):
|
|
# Brightness adjustment: E (increase), Shift+E (decrease)
|
|
if key == ord("E"):
|
|
self.adjust_brightness(-5)
|
|
print(f"Brightness: {self.brightness}")
|
|
else:
|
|
self.adjust_brightness(5)
|
|
print(f"Brightness: {self.brightness}")
|
|
elif key == ord("r") or key == ord("R"):
|
|
# Contrast adjustment: R (increase), Shift+R (decrease)
|
|
if key == ord("R"):
|
|
self.adjust_contrast(-0.1)
|
|
print(f"Contrast: {self.contrast:.1f}")
|
|
else:
|
|
self.adjust_contrast(0.1)
|
|
print(f"Contrast: {self.contrast:.1f}")
|
|
elif key == ord("u"):
|
|
self.undo_crop()
|
|
elif key == ord("c"):
|
|
if self.crop_rect:
|
|
self.crop_history.append(self.crop_rect)
|
|
self.crop_rect = None
|
|
self.zoom_factor = 1.0
|
|
self.clear_transformation_cache()
|
|
self.save_state() # Save state when crop is cleared
|
|
elif key == ord("1"):
|
|
# Cut markers only for videos
|
|
if not self.is_image_mode:
|
|
self.cut_start_frame = self.current_frame
|
|
print(f"Set cut start at frame {self.current_frame}")
|
|
self.save_state() # Save state when cut start is set
|
|
elif key == ord("2"):
|
|
# Cut markers only for videos
|
|
if not self.is_image_mode:
|
|
self.cut_end_frame = self.current_frame
|
|
print(f"Set cut end at frame {self.current_frame}")
|
|
self.save_state() # Save state when cut end is set
|
|
elif key == ord("N"):
|
|
if len(self.video_files) > 1:
|
|
self.previous_video()
|
|
elif key == ord("n"):
|
|
if len(self.video_files) > 1:
|
|
self.next_video()
|
|
elif key == ord("b"):
|
|
directory = self.video_path.parent
|
|
base_name = self.video_path.stem
|
|
extension = self.video_path.suffix
|
|
|
|
# Remove any existing _edited_ suffix to get clean base name
|
|
clean_base = base_name.replace("_edited", "")
|
|
|
|
# Find next available number
|
|
counter = 1
|
|
while True:
|
|
new_name = f"{clean_base}_edited_{counter:05d}{extension}"
|
|
output_path = directory / new_name
|
|
if not output_path.exists():
|
|
break
|
|
counter += 1
|
|
|
|
success = self.render_video(str(output_path))
|
|
elif key == 13: # Enter
|
|
# Only overwrite if file already contains "_edited_" in name
|
|
print(f"DEBUG: Checking if '{self.video_path.stem}' contains '_edited_'")
|
|
if "_edited_" in self.video_path.stem:
|
|
print("DEBUG: File contains '_edited_', proceeding with overwrite")
|
|
print(f"DEBUG: Original file path: {self.video_path}")
|
|
print(f"DEBUG: Original file exists: {self.video_path.exists()}")
|
|
output_path = str(self.video_path)
|
|
|
|
# If we're overwriting the same file, use a temporary file first
|
|
import tempfile
|
|
temp_dir = self.video_path.parent
|
|
temp_fd, temp_path = tempfile.mkstemp(suffix=self.video_path.suffix, dir=temp_dir)
|
|
os.close(temp_fd) # Close the file descriptor, we just need the path
|
|
|
|
print(f"DEBUG: Created temp file: {temp_path}")
|
|
print("Rendering to temporary file first...")
|
|
|
|
success = self.render_video(temp_path)
|
|
|
|
# Store the temp path so we can replace the file when render completes
|
|
self.overwrite_temp_path = temp_path
|
|
self.overwrite_target_path = str(self.video_path)
|
|
else:
|
|
print(f"DEBUG: File '{self.video_path.stem}' does not contain '_edited_'")
|
|
print("Enter key only overwrites files with '_edited_' in the name. Use 'n' to create new files.")
|
|
elif key == ord("v"):
|
|
# Toggle motion tracking on/off
|
|
self.tracking_enabled = not self.tracking_enabled
|
|
self.show_feedback_message(f"Motion tracking {'ON' if self.tracking_enabled else 'OFF'}")
|
|
self.save_state()
|
|
elif key == ord("V"):
|
|
# Clear all tracking points
|
|
self.tracking_points = {}
|
|
self.show_feedback_message("Tracking points cleared")
|
|
self.save_state()
|
|
elif key == ord("t"):
|
|
# Marker looping only for videos
|
|
if not self.is_image_mode:
|
|
self.toggle_marker_looping()
|
|
elif key == ord("x"):
|
|
# Cancel render if active
|
|
if self.is_rendering():
|
|
self.cancel_render()
|
|
print("Render cancellation requested")
|
|
else:
|
|
print("No render operation to cancel")
|
|
|
|
            # Crop size adjustments (vim-style h/j/k/l; uppercase expands,
            # lowercase contracts, matching the printed help)
            elif key == ord("K"):  # Shift+K - expand upward
                self.adjust_crop_size('up', True)
                print(f"Expanded crop upward by {self.crop_size_step}px")
            elif key == ord("J"):  # Shift+J - expand downward
                self.adjust_crop_size('down', True)
                print(f"Expanded crop downward by {self.crop_size_step}px")
            elif key == ord("H"):  # Shift+H - expand leftward
                self.adjust_crop_size('left', True)
                print(f"Expanded crop leftward by {self.crop_size_step}px")
            elif key == ord("L"):  # Shift+L - expand rightward
                self.adjust_crop_size('right', True)
                print(f"Expanded crop rightward by {self.crop_size_step}px")

            # Contract in specific directions
            elif key == ord("k"):  # k - contract from bottom (reduce height)
                self.adjust_crop_size('up', False)
                print(f"Contracted crop from bottom by {self.crop_size_step}px")
            elif key == ord("j"):  # j - contract from top (reduce height)
                self.adjust_crop_size('down', False)
                print(f"Contracted crop from top by {self.crop_size_step}px")
            elif key == ord("h"):  # h - contract from right (reduce width)
                self.adjust_crop_size('left', False)
                print(f"Contracted crop from right by {self.crop_size_step}px")
            elif key == ord("l"):  # l - contract from left (reduce width)
                self.adjust_crop_size('right', False)
                print(f"Contracted crop from left by {self.crop_size_step}px")

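            # Example: with crop_size_step = 20 (an assumed default) and a
            # crop of (x=100, y=100, w=200, h=200), pressing 'K' expands
            # upward to (100, 80, 200, 220); pressing 'k' then contracts from
            # the bottom back to (100, 80, 200, 200).
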
            # Auto advance frame when playing (videos only)
            if self.is_playing and not self.is_image_mode:
                self.advance_frame()

        self.save_state()
        self.cleanup_render_thread()
        if hasattr(self, 'cap') and self.cap:
            self.cap.release()
        cv2.destroyAllWindows()

def main():
    parser = argparse.ArgumentParser(
        description="Fast Media Editor - Crop, Zoom, and Edit videos and images"
    )
    parser.add_argument(
        "media", help="Path to media file or directory containing videos/images"
    )

    try:
        args = parser.parse_args()
    except SystemExit:
        # If launched from a context menu without arguments, this might fail
        input("Argument parsing failed. Press Enter to exit...")
        return

    if not os.path.exists(args.media):
        error_msg = f"Error: {args.media} does not exist"
        print(error_msg)
        input("Press Enter to exit...")  # Keep the window open in context menu
        sys.exit(1)

    try:
        editor = VideoEditor(args.media)
        editor.run()
    except Exception as e:
        error_msg = f"Error initializing media editor: {e}"
        print(error_msg)
        import traceback
        traceback.print_exc()  # Full error trace for debugging
        input("Press Enter to exit...")  # Keep the window open in context menu
        sys.exit(1)


if __name__ == "__main__":
    main()