import os
import sys
import cv2
import argparse
import numpy as np
from pathlib import Path
from typing import List, Optional, Tuple, Dict, Any
import time
import re
import threading
import json
import subprocess
import queue
import ctypes
from PIL import Image


def load_image_utf8(image_path):
    """Load image with UTF-8 path support using PIL, then convert to OpenCV format"""
    try:
        # Use PIL to load image with UTF-8 support
        pil_image = Image.open(image_path)
        # Normalize to RGB so the BGR conversion below also works for
        # grayscale, palette, and RGBA inputs
        if pil_image.mode != "RGB":
            pil_image = pil_image.convert("RGB")
        # Convert PIL image to OpenCV format (BGR)
        cv_image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
        return cv_image
    except Exception as e:
        raise ValueError(f"Could not load image file: {image_path} - {e}")

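# Illustrative usage sketch (not called anywhere in this module): load_image_utf8 is
# the UTF-8-safe replacement for cv2.imread; the file name below is hypothetical.
#
#     frame = load_image_utf8(Path("クリップ.png"))
#     print(frame.shape)  # (height, width, 3), BGR channel order as OpenCV expects
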
class FeatureTracker:
    """Semi-automatic feature tracking with SIFT/SURF/ORB support and full state serialization"""

    def __init__(self):
        # Feature detection parameters
        self.detector_type = 'SIFT'  # 'SIFT', 'SURF', 'ORB'
        self.max_features = 1000
        self.match_threshold = 0.7

        # Tracking state
        self.features = {}  # {frame_number: {'keypoints': [...], 'descriptors': [...], 'positions': [...]}}
        self.tracking_enabled = False
        self.auto_tracking = False

        # Initialize detectors
        self._init_detectors()

    def _init_detectors(self):
        """Initialize feature detectors based on type"""
        try:
            if self.detector_type == 'SIFT':
                self.detector = cv2.SIFT_create(nfeatures=self.max_features)
            elif self.detector_type == 'SURF':
                # SURF requires opencv-contrib-python, fallback to SIFT
                print("Warning: SURF requires opencv-contrib-python package. Using SIFT instead.")
                self.detector = cv2.SIFT_create(nfeatures=self.max_features)
                self.detector_type = 'SIFT'
            elif self.detector_type == 'ORB':
                self.detector = cv2.ORB_create(nfeatures=self.max_features)
            else:
                raise ValueError(f"Unknown detector type: {self.detector_type}")
        except Exception as e:
            print(f"Warning: Could not initialize {self.detector_type} detector: {e}")
            # Fallback to ORB
            self.detector_type = 'ORB'
            self.detector = cv2.ORB_create(nfeatures=self.max_features)

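    # Note (sketch, not used by this class directly): the detector choice determines the
    # descriptor type. SIFT yields 128-dimensional float32 descriptors that are matched
    # with an L2 norm, while ORB yields binary descriptors matched with Hamming distance.
    # A matcher consistent with the detectors above could hypothetically be built as:
    #
    #     norm = cv2.NORM_HAMMING if self.detector_type == 'ORB' else cv2.NORM_L2
    #     matcher = cv2.BFMatcher(norm, crossCheck=True)
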
    def set_detector_type(self, detector_type: str):
        """Change detector type and reinitialize"""
        if detector_type in ['SIFT', 'SURF', 'ORB']:
            self.detector_type = detector_type
            self._init_detectors()
            print(f"Switched to {detector_type} detector")
        else:
            print(f"Invalid detector type: {detector_type}")

    def extract_features(self, frame: np.ndarray, frame_number: int, coord_mapper=None) -> bool:
        """Extract features from a frame and store them"""
        try:
            # Convert to grayscale if needed
            if len(frame.shape) == 3:
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            else:
                gray = frame

            # Extract keypoints and descriptors
            keypoints, descriptors = self.detector.detectAndCompute(gray, None)

            if keypoints is None or descriptors is None:
                return False

            # Map coordinates back to original frame space if mapper provided
            if coord_mapper:
                mapped_positions = []
                for kp in keypoints:
                    orig_x, orig_y = coord_mapper(kp.pt[0], kp.pt[1])
                    mapped_positions.append((int(orig_x), int(orig_y)))
            else:
                mapped_positions = [(int(kp.pt[0]), int(kp.pt[1])) for kp in keypoints]

            # Store features
            self.features[frame_number] = {
                'keypoints': keypoints,
                'descriptors': descriptors,
                'positions': mapped_positions
            }

            print(f"Extracted {len(keypoints)} features from frame {frame_number}")
            return True

        except Exception as e:
            print(f"Error extracting features from frame {frame_number}: {e}")
            return False

    def extract_features_from_region(self, frame: np.ndarray, frame_number: int, coord_mapper=None) -> bool:
        """Extract features from a frame and ADD them to existing features"""
        try:
            # Convert to grayscale if needed
            if len(frame.shape) == 3:
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            else:
                gray = frame

            # Extract keypoints and descriptors
            keypoints, descriptors = self.detector.detectAndCompute(gray, None)

            if keypoints is None or descriptors is None:
                return False

            # Map coordinates back to original frame space if mapper provided
            if coord_mapper:
                mapped_positions = []
                for kp in keypoints:
                    orig_x, orig_y = coord_mapper(kp.pt[0], kp.pt[1])
                    mapped_positions.append((int(orig_x), int(orig_y)))
            else:
                mapped_positions = [(int(kp.pt[0]), int(kp.pt[1])) for kp in keypoints]

            # Add to existing features or create new entry
            if frame_number in self.features:
                # Check if descriptor dimensions match
                existing_features = self.features[frame_number]
                if existing_features['descriptors'].shape[1] != descriptors.shape[1]:
                    print(f"Warning: Descriptor dimension mismatch ({existing_features['descriptors'].shape[1]} vs {descriptors.shape[1]}). Cannot concatenate. Replacing features.")
                    # Replace instead of concatenate when dimensions don't match
                    existing_features['keypoints'] = keypoints
                    existing_features['descriptors'] = descriptors
                    existing_features['positions'] = mapped_positions
                else:
                    # Append to existing features; keypoints are cv2.KeyPoint objects,
                    # so join them as plain Python sequences rather than numpy arrays
                    existing_features['keypoints'] = tuple(existing_features['keypoints']) + tuple(keypoints)
                    existing_features['descriptors'] = np.concatenate([existing_features['descriptors'], descriptors])
                    existing_features['positions'].extend(mapped_positions)
                    print(f"Added {len(keypoints)} features to frame {frame_number} (total: {len(existing_features['positions'])})")
            else:
                # Create new features entry
                self.features[frame_number] = {
                    'keypoints': keypoints,
                    'descriptors': descriptors,
                    'positions': mapped_positions
                }
                print(f"Extracted {len(keypoints)} features from frame {frame_number}")

            return True

        except Exception as e:
            print(f"Error extracting features from frame {frame_number}: {e}")
            return False

    def track_features_optical_flow(self, prev_frame, curr_frame, prev_points):
        """Track features using Lucas-Kanade optical flow"""
        try:
            # Convert to grayscale if needed
            if len(prev_frame.shape) == 3:
                prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
            else:
                prev_gray = prev_frame

            if len(curr_frame.shape) == 3:
                curr_gray = cv2.cvtColor(curr_frame, cv2.COLOR_BGR2GRAY)
            else:
                curr_gray = curr_frame

            # Parameters for Lucas-Kanade optical flow
            lk_params = dict(winSize=(15, 15),
                             maxLevel=2,
                             criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

            # Calculate optical flow
            new_points, status, _ = cv2.calcOpticalFlowPyrLK(prev_gray, curr_gray, prev_points, None, **lk_params)

            # Filter out bad tracks
            good_new = new_points[status == 1]
            good_old = prev_points[status == 1]

            return good_new, good_old, status

        except Exception as e:
            print(f"Error in optical flow tracking: {e}")
            return None, None, None

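    # Sketch of how this method is expected to be fed (an assumption based on the
    # cv2.calcOpticalFlowPyrLK requirements): prev_points must be a float32 array of
    # shape (N, 1, 2), so the stored integer positions would first be converted, e.g.
    #
    #     pts = np.array(self.features[n]['positions'], dtype=np.float32).reshape(-1, 1, 2)
    #     good_new, good_old, status = self.track_features_optical_flow(prev, curr, pts)
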
    def get_tracking_position(self, frame_number: int) -> Optional[Tuple[float, float]]:
        """Get the average tracking position for a frame"""
        if frame_number not in self.features:
            return None

        positions = self.features[frame_number]['positions']
        if not positions:
            return None

        # Calculate average position
        avg_x = sum(pos[0] for pos in positions) / len(positions)
        avg_y = sum(pos[1] for pos in positions) / len(positions)

        return (avg_x, avg_y)

    def clear_features(self):
        """Clear all stored features"""
        self.features.clear()
        print("All features cleared")

    def get_feature_count(self, frame_number: int) -> int:
        """Get number of features for a frame"""
        if frame_number in self.features:
            return len(self.features[frame_number]['positions'])
        return 0

    def serialize_features(self) -> Dict[str, Any]:
        """Serialize features for state saving"""
        serialized = {}

        for frame_num, frame_data in self.features.items():
            frame_key = str(frame_num)
            serialized[frame_key] = {
                'positions': frame_data['positions'],
                'keypoints': None,  # Keypoints are not serialized (too large)
                'descriptors': None  # Descriptors are not serialized (too large)
            }

        return serialized

    def deserialize_features(self, serialized_data: Dict[str, Any]):
        """Deserialize features from state loading"""
        self.features.clear()

        for frame_key, frame_data in serialized_data.items():
            frame_num = int(frame_key)
            self.features[frame_num] = {
                'positions': frame_data['positions'],
                'keypoints': None,
                'descriptors': None
            }

        print(f"Deserialized features for {len(self.features)} frames")

    def get_state_dict(self) -> Dict[str, Any]:
        """Get complete state for serialization"""
        return {
            'detector_type': self.detector_type,
            'max_features': self.max_features,
            'match_threshold': self.match_threshold,
            'tracking_enabled': self.tracking_enabled,
            'auto_tracking': self.auto_tracking,
            'features': self.serialize_features()
        }

    def load_state_dict(self, state_dict: Dict[str, Any]):
        """Load complete state from serialization"""
        if 'detector_type' in state_dict:
            self.detector_type = state_dict['detector_type']
            self._init_detectors()

        if 'max_features' in state_dict:
            self.max_features = state_dict['max_features']

        if 'match_threshold' in state_dict:
            self.match_threshold = state_dict['match_threshold']

        if 'tracking_enabled' in state_dict:
            self.tracking_enabled = state_dict['tracking_enabled']

        if 'auto_tracking' in state_dict:
            self.auto_tracking = state_dict['auto_tracking']

        if 'features' in state_dict:
            self.deserialize_features(state_dict['features'])

        print("Feature tracker state loaded")


class Cv2BufferedCap:
    """Buffered wrapper around cv2.VideoCapture that handles frame loading, seeking, and caching correctly"""

    def __init__(self, video_path, backend=None):
        self.video_path = video_path
        # cv2.VideoCapture expects an integer API preference, so fall back to
        # CAP_ANY when no backend is supplied
        if backend is None:
            backend = cv2.CAP_ANY
        self.cap = cv2.VideoCapture(str(video_path), backend)
        if not self.cap.isOpened():
            raise ValueError(f"Could not open video: {video_path}")

        # Video properties
        self.total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        self.fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.frame_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.frame_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # Current position tracking
        self.current_frame = 0

    def get_frame(self, frame_number):
        """Get frame at specific index - always accurate"""
        # Clamp frame number to valid range
        frame_number = max(0, min(frame_number, self.total_frames - 1))

        # Optimize for sequential reading (next frame)
        if frame_number == self.current_frame + 1:
            ret, frame = self.cap.read()
        else:
            # Seek for non-sequential access
            self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
            ret, frame = self.cap.read()

        if ret:
            self.current_frame = frame_number
            return frame
        else:
            raise ValueError(f"Failed to read frame {frame_number}")

    def advance_frame(self, frames=1):
        """Advance by specified number of frames"""
        new_frame = self.current_frame + frames
        return self.get_frame(new_frame)

    def release(self):
        """Release the video capture"""
        if self.cap:
            self.cap.release()

    def isOpened(self):
        """Check if capture is opened"""
        return self.cap and self.cap.isOpened()


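# Illustrative usage sketch for Cv2BufferedCap (hypothetical file name): sequential reads
# reuse the decoder position, while random access falls back to a seek, so stepping
# forward one frame at a time is the cheap path:
#
#     cap = Cv2BufferedCap("clip.mp4", cv2.CAP_FFMPEG)
#     frame0 = cap.get_frame(0)
#     frame1 = cap.advance_frame()    # fast: sequential read
#     frame500 = cap.get_frame(500)   # slower: requires a seek
#     cap.release()
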
def get_active_window_title():
    """Get the title of the currently active window (Windows only, via user32)"""
    try:
        # Get handle to foreground window
        hwnd = ctypes.windll.user32.GetForegroundWindow()

        # Get window title length
        length = ctypes.windll.user32.GetWindowTextLengthW(hwnd)

        # Create buffer and get window title
        buffer = ctypes.create_unicode_buffer(length + 1)
        ctypes.windll.user32.GetWindowTextW(hwnd, buffer, length + 1)

        return buffer.value
    except Exception:
        return ""


class ProjectView:
    """Project view that displays videos in current directory with progress bars"""

    # Project view configuration
    THUMBNAIL_SIZE = (200, 150)  # Width, Height
    THUMBNAIL_MARGIN = 20
    PROGRESS_BAR_HEIGHT = 8
    TEXT_HEIGHT = 30

    # Colors
    BG_COLOR = (40, 40, 40)
    THUMBNAIL_BG_COLOR = (60, 60, 60)
    PROGRESS_BG_COLOR = (80, 80, 80)
    PROGRESS_FILL_COLOR = (0, 120, 255)
    TEXT_COLOR = (255, 255, 255)
    SELECTED_COLOR = (255, 165, 0)

    def __init__(self, directory: Path, video_editor):
        self.directory = directory
        self.video_editor = video_editor
        self.video_files = []
        self.thumbnails = {}
        self.progress_data = {}
        self.selected_index = 0
        self.scroll_offset = 0
        self.items_per_row = 2  # Default to 2 items per row
        self.window_width = 1920  # Increased to accommodate 1080p videos
        self.window_height = 1200

        self._load_video_files()
        self._load_progress_data()

    def _calculate_thumbnail_size(self, window_width: int) -> tuple:
        """Calculate thumbnail size based on items per row and window width"""
        available_width = window_width - self.THUMBNAIL_MARGIN
        item_width = (available_width - (self.items_per_row - 1) * self.THUMBNAIL_MARGIN) // self.items_per_row
        thumbnail_width = max(50, item_width)  # Minimum 50px width
        thumbnail_height = int(thumbnail_width * self.THUMBNAIL_SIZE[1] / self.THUMBNAIL_SIZE[0])  # Maintain aspect ratio
        return (thumbnail_width, thumbnail_height)

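    # Worked example (using the class defaults above): with window_width=1920,
    # items_per_row=2 and THUMBNAIL_MARGIN=20, available_width = 1900 and
    # item_width = (1900 - 20) // 2 = 940, so thumbnails come out 940x705
    # (the height keeps the 200:150 aspect ratio of THUMBNAIL_SIZE).
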
    def _load_video_files(self):
        """Load all video files from directory"""
        self.video_files = []
        for file_path in self.directory.iterdir():
            if (file_path.is_file() and
                    file_path.suffix.lower() in self.video_editor.VIDEO_EXTENSIONS):
                self.video_files.append(file_path)
        self.video_files.sort(key=lambda x: x.name)

    def _load_progress_data(self):
        """Load progress data from JSON state files"""
        self.progress_data = {}
        for video_path in self.video_files:
            state_file = video_path.with_suffix('.json')
            if state_file.exists():
                try:
                    with open(state_file, 'r') as f:
                        state = json.load(f)
                    current_frame = state.get('current_frame', 0)

                    # Get total frames from video
                    cap = cv2.VideoCapture(str(video_path))
                    if cap.isOpened():
                        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                        cap.release()

                        if total_frames > 0:
                            # Guard against single-frame videos (total_frames - 1 == 0)
                            progress = current_frame / max(1, total_frames - 1)
                            self.progress_data[video_path] = {
                                'current_frame': current_frame,
                                'total_frames': total_frames,
                                'progress': progress
                            }
                except Exception as e:
                    print(f"Error loading progress for {video_path.name}: {e}")

    def refresh_progress_data(self):
        """Refresh progress data from JSON files (call when editor state changes)"""
        self._load_progress_data()

    def get_progress_for_video(self, video_path: Path) -> float:
        """Get progress (0.0 to 1.0) for a video"""
        if video_path in self.progress_data:
            return self.progress_data[video_path]['progress']
        return 0.0

    def get_thumbnail_for_video(self, video_path: Path, size: tuple = None) -> np.ndarray:
        """Get thumbnail for a video, generating it if needed"""
        if size is None:
            size = self.THUMBNAIL_SIZE

        # Cache the original thumbnail by video path only (not size)
        if video_path in self.thumbnails:
            original_thumbnail = self.thumbnails[video_path]
            # Resize the cached thumbnail to the requested size
            return cv2.resize(original_thumbnail, size)

        # Generate original thumbnail on demand (only once per video)
        try:
            cap = cv2.VideoCapture(str(video_path))
            if cap.isOpened():
                total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                if total_frames > 0:
                    middle_frame = total_frames // 2
                    cap.set(cv2.CAP_PROP_POS_FRAMES, middle_frame)
                    ret, frame = cap.read()
                    if ret:
                        # Store original thumbnail at original size
                        original_thumbnail = cv2.resize(frame, self.THUMBNAIL_SIZE)
                        self.thumbnails[video_path] = original_thumbnail
                        cap.release()
                        # Return resized version
                        return cv2.resize(original_thumbnail, size)
                cap.release()
        except Exception as e:
            print(f"Error generating thumbnail for {video_path.name}: {e}")

        # Return a placeholder if thumbnail generation failed
        placeholder = np.full((size[1], size[0], 3),
                              self.THUMBNAIL_BG_COLOR, dtype=np.uint8)
        return placeholder

    def draw(self) -> np.ndarray:
        """Draw the project view"""
        # Get actual window size dynamically
        try:
            # Try to get the actual window size from OpenCV
            window_rect = cv2.getWindowImageRect("Project View")
            if window_rect[2] > 0 and window_rect[3] > 0:  # width and height > 0
                actual_width = window_rect[2]
                actual_height = window_rect[3]
            else:
                # Fallback to default size
                actual_width = self.window_width
                actual_height = self.window_height
        except Exception:
            # Fallback to default size
            actual_width = self.window_width
            actual_height = self.window_height

        canvas = np.full((actual_height, actual_width, 3), self.BG_COLOR, dtype=np.uint8)

        if not self.video_files:
            # No videos message
            text = "No videos found in directory"
            font = cv2.FONT_HERSHEY_SIMPLEX
            text_size = cv2.getTextSize(text, font, 1.0, 2)[0]
            text_x = (actual_width - text_size[0]) // 2
            text_y = (actual_height - text_size[1]) // 2
            cv2.putText(canvas, text, (text_x, text_y), font, 1.0, self.TEXT_COLOR, 2)
            return canvas

        # Calculate layout - use fixed items_per_row and calculate thumbnail size to fit
        items_per_row = min(self.items_per_row, len(self.video_files))  # Don't exceed number of videos

        # Calculate thumbnail size to fit the desired number of items per row
        thumbnail_width, thumbnail_height = self._calculate_thumbnail_size(actual_width)

        # Calculate item height dynamically based on thumbnail size
        item_height = thumbnail_height + self.PROGRESS_BAR_HEIGHT + self.TEXT_HEIGHT + self.THUMBNAIL_MARGIN

        item_width = (actual_width - (items_per_row + 1) * self.THUMBNAIL_MARGIN) // items_per_row

        # Draw videos in grid
        for i, video_path in enumerate(self.video_files):
            row = i // items_per_row
            col = i % items_per_row

            # Skip if scrolled out of view
            if row < self.scroll_offset:
                continue
            if row > self.scroll_offset + (actual_height // item_height):
                break

            # Calculate position
            x = self.THUMBNAIL_MARGIN + col * (item_width + self.THUMBNAIL_MARGIN)
            y = self.THUMBNAIL_MARGIN + (row - self.scroll_offset) * item_height

            # Draw thumbnail background
            cv2.rectangle(canvas,
                          (x, y),
                          (x + thumbnail_width, y + thumbnail_height),
                          self.THUMBNAIL_BG_COLOR, -1)

            # Draw selection highlight
            if i == self.selected_index:
                cv2.rectangle(canvas,
                              (x - 2, y - 2),
                              (x + thumbnail_width + 2, y + thumbnail_height + 2),
                              self.SELECTED_COLOR, 3)

            # Draw thumbnail
            thumbnail = self.get_thumbnail_for_video(video_path, (thumbnail_width, thumbnail_height))
            # Thumbnail is already the correct size, no need to resize
            resized_thumbnail = thumbnail

            # Ensure thumbnail doesn't exceed canvas bounds
            end_y = min(y + thumbnail_height, actual_height)
            end_x = min(x + thumbnail_width, actual_width)
            thumb_height = end_y - y
            thumb_width = end_x - x

            if thumb_height > 0 and thumb_width > 0:
                # Resize thumbnail to fit within bounds if necessary
                if thumb_height != thumbnail_height or thumb_width != thumbnail_width:
                    resized_thumbnail = cv2.resize(thumbnail, (thumb_width, thumb_height))

                canvas[y:end_y, x:end_x] = resized_thumbnail

            # Draw progress bar
            progress_y = y + thumbnail_height + 5
            progress_width = thumbnail_width
            progress = self.get_progress_for_video(video_path)

            # Progress background
            cv2.rectangle(canvas,
                          (x, progress_y),
                          (x + progress_width, progress_y + self.PROGRESS_BAR_HEIGHT),
                          self.PROGRESS_BG_COLOR, -1)

            # Progress fill
            if progress > 0:
                fill_width = int(progress_width * progress)
                cv2.rectangle(canvas,
                              (x, progress_y),
                              (x + fill_width, progress_y + self.PROGRESS_BAR_HEIGHT),
                              self.PROGRESS_FILL_COLOR, -1)

            # Draw filename
            filename = video_path.name
            # Truncate if too long
            if len(filename) > 25:
                filename = filename[:22] + "..."

            text_y = progress_y + self.PROGRESS_BAR_HEIGHT + 20
            cv2.putText(canvas, filename, (x, text_y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, self.TEXT_COLOR, 2)

            # Draw progress percentage
            if video_path in self.progress_data:
                progress_text = f"{progress * 100:.0f}%"
                text_size = cv2.getTextSize(progress_text, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1)[0]
                progress_text_x = x + progress_width - text_size[0]
                cv2.putText(canvas, progress_text, (progress_text_x, text_y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.4, self.TEXT_COLOR, 1)

        # Draw instructions
        instructions = [
            "Project View - Videos in current directory",
            "WASD: Navigate | E: Open video | Q: Fewer items per row | Y: More items per row | q: Quit | ESC: Back to editor",
            f"Showing {len(self.video_files)} videos | {items_per_row} per row | Thumbnail: {thumbnail_width}x{thumbnail_height}"
        ]

        for i, instruction in enumerate(instructions):
            y_pos = actual_height - 60 + i * 20
            cv2.putText(canvas, instruction, (10, y_pos),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, self.TEXT_COLOR, 1)

        return canvas

    def handle_key(self, key: int) -> str:
        """Handle keyboard input, returns action taken"""
        if key == 27:  # ESC
            return "back_to_editor"
        elif key == ord('q'):  # lowercase q - Quit
            return "quit"
        elif key == ord('e') or key == ord('E'):  # E - Open video
            if self.video_files and 0 <= self.selected_index < len(self.video_files):
                return f"open_video:{self.video_files[self.selected_index]}"
        elif key == ord('w') or key == ord('W'):  # W - Up
            current_items_per_row = min(self.items_per_row, len(self.video_files))
            if self.selected_index >= current_items_per_row:
                self.selected_index -= current_items_per_row
            else:
                self.selected_index = 0
            self._update_scroll()
        elif key == ord('s') or key == ord('S'):  # S - Down
            current_items_per_row = min(self.items_per_row, len(self.video_files))
            if self.selected_index + current_items_per_row < len(self.video_files):
                self.selected_index += current_items_per_row
            else:
                self.selected_index = len(self.video_files) - 1
            self._update_scroll()
        elif key == ord('a') or key == ord('A'):  # A - Left
            if self.selected_index > 0:
                self.selected_index -= 1
                self._update_scroll()
        elif key == ord('d') or key == ord('D'):  # D - Right
            if self.selected_index < len(self.video_files) - 1:
                self.selected_index += 1
                self._update_scroll()
        elif key == ord('Q'):  # uppercase Q - Fewer items per row (larger thumbnails)
            if self.items_per_row > 1:
                self.items_per_row -= 1
                print(f"Items per row: {self.items_per_row}")
        elif key == ord('y') or key == ord('Y'):  # Y - More items per row (smaller thumbnails)
            self.items_per_row += 1
            print(f"Items per row: {self.items_per_row}")

        return "none"

    def _update_scroll(self):
        """Update scroll offset based on selected item"""
        if not self.video_files:
            return

        # Use fixed items per row
        items_per_row = min(self.items_per_row, len(self.video_files))

        # Get window dimensions for calculations
        try:
            window_rect = cv2.getWindowImageRect("Project View")
            if window_rect[2] > 0 and window_rect[3] > 0:
                window_width = window_rect[2]
                window_height = window_rect[3]
            else:
                window_width = self.window_width
                window_height = self.window_height
        except Exception:
            window_width = self.window_width
            window_height = self.window_height

        # Calculate thumbnail size and item height dynamically
        thumbnail_width, thumbnail_height = self._calculate_thumbnail_size(window_width)
        item_height = thumbnail_height + self.PROGRESS_BAR_HEIGHT + self.TEXT_HEIGHT + self.THUMBNAIL_MARGIN

        selected_row = self.selected_index // items_per_row
        visible_rows = max(1, window_height // item_height)

        # Calculate how many rows we can actually show
        total_rows = (len(self.video_files) + items_per_row - 1) // items_per_row

        # If we can show all rows, no scrolling needed
        if total_rows <= visible_rows:
            self.scroll_offset = 0
            return

        # Update scroll to keep selected item visible
        if selected_row < self.scroll_offset:
            self.scroll_offset = selected_row
        elif selected_row >= self.scroll_offset + visible_rows:
            self.scroll_offset = selected_row - visible_rows + 1

        # Ensure scroll offset doesn't go negative or beyond available content
        self.scroll_offset = max(0, min(self.scroll_offset, total_rows - visible_rows))


class VideoEditor:
    # Configuration constants
    TARGET_FPS = 80  # Target FPS for speed calculations
    SPEED_INCREMENT = 0.1
    MIN_PLAYBACK_SPEED = 0.05
    MAX_PLAYBACK_SPEED = 1.0

    # Seek multiplier configuration
    SEEK_MULTIPLIER_INCREMENT = 4.0
    MIN_SEEK_MULTIPLIER = 1.0
    MAX_SEEK_MULTIPLIER = 1000.0

    # Auto-repeat seeking configuration
    AUTO_REPEAT_DISPLAY_RATE = 0.1

    # Timeline configuration
    TIMELINE_HEIGHT = 60
    TIMELINE_MARGIN = 20
    TIMELINE_BAR_HEIGHT = 12
    TIMELINE_HANDLE_SIZE = 12
    TIMELINE_COLOR_BG = (80, 80, 80)
    TIMELINE_COLOR_PROGRESS = (0, 120, 255)
    TIMELINE_COLOR_HANDLE = (255, 255, 255)
    TIMELINE_COLOR_BORDER = (200, 200, 200)
    TIMELINE_COLOR_CUT_POINT = (255, 0, 0)

    # Progress bar configuration
    PROGRESS_BAR_HEIGHT = 30
    PROGRESS_BAR_MARGIN_PERCENT = 5  # 5% margin on each side
    PROGRESS_BAR_TOP_MARGIN = 20  # Fixed top margin
    PROGRESS_BAR_FADE_DURATION = 3.0  # seconds to fade out after completion
    PROGRESS_BAR_COLOR_BG = (50, 50, 50)
    PROGRESS_BAR_COLOR_FILL = (0, 255, 0)  # Green when complete
    PROGRESS_BAR_COLOR_PROGRESS = (0, 120, 255)  # Blue during progress
    PROGRESS_BAR_COLOR_BORDER = (200, 200, 200)

    # Zoom and crop settings
    MIN_ZOOM = 0.1
    MAX_ZOOM = 10.0
    ZOOM_INCREMENT = 0.1

    # Supported video extensions
    VIDEO_EXTENSIONS = {".mp4", ".avi", ".mov", ".mkv", ".wmv", ".flv", ".webm", ".m4v"}

    # Supported image extensions
    IMAGE_EXTENSIONS = {".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".tif", ".webp", ".jp2", ".pbm", ".pgm", ".ppm", ".sr", ".ras"}

    # Crop adjustment settings
    CROP_SIZE_STEP = 15  # pixels to expand/contract crop

    # Motion tracking settings
    TRACKING_POINT_THRESHOLD = 10  # pixels for delete/snap radius

    # Seek frame counts
    SEEK_FRAMES_CTRL = 60  # Ctrl modifier: 60 frames
    SEEK_FRAMES_SHIFT = 10  # Shift modifier: 10 frames
    SEEK_FRAMES_DEFAULT = 1  # Default: 1 frame

    def __init__(self, path: str):
        self.path = Path(path)

        # Video file management
        self.video_files = []
        self.current_video_index = 0

        # Media type tracking
        self.is_image_mode = False  # True if current file is an image

        # Determine if path is file or directory
        if self.path.is_file():
            self.video_files = [self.path]
        elif self.path.is_dir():
            # Load all media files from directory
            self.video_files = self._get_media_files_from_directory(self.path)
            if not self.video_files:
                raise ValueError(f"No media files found in directory: {path}")
        else:
            raise ValueError(f"Path does not exist: {path}")

        # Mouse and keyboard interaction
        self.mouse_dragging = False
        self.timeline_rect = None
        self.window_width = 1920  # Increased to accommodate 1080p videos
        self.window_height = 1200

        # Auto-repeat seeking state
        self.auto_repeat_active = False
        self.auto_repeat_direction = 0
        self.auto_repeat_shift = False
        self.auto_repeat_ctrl = False
        self.last_display_update = 0

        # Crop settings
        self.crop_rect = None  # (x, y, width, height)
        self.crop_selecting = False
        self.crop_start_point = None
        self.crop_preview_rect = None
        self.crop_history = []  # For undo

        # Zoom settings
        self.zoom_factor = 1.0
        self.zoom_center = None  # (x, y) center point for zoom

        # Rotation settings
        self.rotation_angle = 0  # 0, 90, 180, 270 degrees

        # Brightness and contrast settings
        self.brightness = 0  # -100 to 100
        self.contrast = 1.0  # 0.1 to 3.0

        # Marker looping state
        self.looping_between_markers = False

        # Display offset for panning when zoomed
        self.display_offset = [0, 0]

        # Fullscreen state
        self.is_fullscreen = False

        # Progress bar state
        self.progress_bar_visible = False
        self.progress_bar_progress = 0.0  # 0.0 to 1.0
        self.progress_bar_complete = False
        self.progress_bar_complete_time = None
        self.progress_bar_text = ""
        self.progress_bar_fps = 0.0  # Current rendering FPS

        # Feedback message state
        self.feedback_message = ""
        self.feedback_message_time = None
        self.feedback_message_duration = 0.2  # seconds to show message

        # Crop adjustment settings
        self.crop_size_step = self.CROP_SIZE_STEP

        # Render thread management
        self.render_thread = None
        self.render_cancelled = False
        self.render_progress_queue = queue.Queue()
        self.ffmpeg_process = None  # Track FFmpeg process for cancellation

        # Display optimization - track when redraw is needed
        self.display_needs_update = True
        self.last_display_state = None

        # Cached transformations for performance
        self.cached_transformed_frame = None
        self.cached_frame_number = None
        self.cached_transform_hash = None

        # Motion tracking state
        self.tracking_points = {}  # {frame_number: [(x, y), ...]} in original frame coords
        self.tracking_enabled = False

        # Feature tracking system
        self.feature_tracker = FeatureTracker()

        # Initialize selective feature extraction/deletion
        self.selective_feature_extraction_start = None
        self.selective_feature_extraction_rect = None
        self.selective_feature_deletion_start = None
        self.selective_feature_deletion_rect = None

        # Optical flow tracking
        self.optical_flow_enabled = False
        self.previous_frame_for_flow = None

        # Template matching tracking
        self.template_matching_enabled = False
        self.tracking_template = None
        self.template_region = None  # (x, y, w, h) in rotated frame coordinates
        self.template_selection_start = None
        self.template_selection_rect = None

        # Project view mode
        self.project_view_mode = False
        self.project_view = None

        # Initialize with first video
        self._load_video(self.video_files[0])

        # Load saved state after all attributes are initialized
        self.load_state()

    def _get_state_file_path(self) -> Path:
        """Get the state file path for the current media file"""
        if not hasattr(self, 'video_path') or not self.video_path:
            print("DEBUG: No video_path available for state file")
            return None
        state_path = self.video_path.with_suffix('.json')
        print(f"DEBUG: State file path would be: {state_path}")
        return state_path

    def save_state(self):
        """Save current editor state to JSON file"""
        state_file = self._get_state_file_path()
        if not state_file:
            print("No state file path available")
            return False

        try:
            state = {
                'timestamp': time.time(),
                'current_frame': getattr(self, 'current_frame', 0),
                'crop_rect': self.crop_rect,
                'zoom_factor': self.zoom_factor,
                'zoom_center': self.zoom_center,
                'rotation_angle': self.rotation_angle,
                'brightness': self.brightness,
                'contrast': self.contrast,
                'cut_start_frame': self.cut_start_frame,
                'cut_end_frame': self.cut_end_frame,
                'looping_between_markers': self.looping_between_markers,
                'display_offset': self.display_offset,
                'playback_speed': getattr(self, 'playback_speed', 1.0),
                'seek_multiplier': getattr(self, 'seek_multiplier', 1.0),
                'is_playing': getattr(self, 'is_playing', False),
                'tracking_enabled': self.tracking_enabled,
                'tracking_points': {str(k): v for k, v in self.tracking_points.items()},
                'feature_tracker': self.feature_tracker.get_state_dict(),
                'template_matching_enabled': self.template_matching_enabled,
                'template_region': self.template_region
            }

            with open(state_file, 'w') as f:
                json.dump(state, f, indent=2)
            print(f"State saved to {state_file}")

            # Refresh project view progress data if project view is active
            if self.project_view_mode and self.project_view:
                self.project_view.refresh_progress_data()

            return True
        except Exception as e:
            print(f"Error saving state: {e}")
            return False

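    # For reference, the sidecar JSON produced above sits next to the media file
    # (clip.mp4 -> clip.json) and looks roughly like this (illustrative values only):
    #
    #     {"timestamp": 1700000000.0, "current_frame": 1234, "crop_rect": [10, 20, 640, 360],
    #      "zoom_factor": 1.0, "rotation_angle": 90, "cut_start_frame": 100,
    #      "cut_end_frame": 900, "tracking_points": {"100": [[320, 240]]}, ...}
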
    def load_state(self) -> bool:
        """Load editor state from JSON file"""
        state_file = self._get_state_file_path()
        if not state_file:
            print("No state file path available")
            return False
        if not state_file.exists():
            print(f"State file does not exist: {state_file}")
            return False

        print(f"Loading state from: {state_file}")
        try:
            with open(state_file, 'r') as f:
                state = json.load(f)

            print(f"State file contents: {state}")

            # Restore state values
            if 'current_frame' in state:
                self.current_frame = state['current_frame']
                print(f"Loaded current_frame: {self.current_frame}")
            if 'crop_rect' in state and state['crop_rect'] is not None:
                self.crop_rect = tuple(state['crop_rect'])
                print(f"Loaded crop_rect: {self.crop_rect}")
            if 'zoom_factor' in state:
                self.zoom_factor = state['zoom_factor']
                print(f"Loaded zoom_factor: {self.zoom_factor}")
            if 'zoom_center' in state and state['zoom_center'] is not None:
                self.zoom_center = tuple(state['zoom_center'])
                print(f"Loaded zoom_center: {self.zoom_center}")
            if 'rotation_angle' in state:
                self.rotation_angle = state['rotation_angle']
                print(f"Loaded rotation_angle: {self.rotation_angle}")
            if 'brightness' in state:
                self.brightness = state['brightness']
                print(f"Loaded brightness: {self.brightness}")
            if 'contrast' in state:
                self.contrast = state['contrast']
                print(f"Loaded contrast: {self.contrast}")
            if 'cut_start_frame' in state:
                self.cut_start_frame = state['cut_start_frame']
                print(f"Loaded cut_start_frame: {self.cut_start_frame}")
            if 'cut_end_frame' in state:
                self.cut_end_frame = state['cut_end_frame']
                print(f"Loaded cut_end_frame: {self.cut_end_frame}")
            if 'looping_between_markers' in state:
                self.looping_between_markers = state['looping_between_markers']
                print(f"Loaded looping_between_markers: {self.looping_between_markers}")
            if 'display_offset' in state:
                self.display_offset = state['display_offset']
                print(f"Loaded display_offset: {self.display_offset}")
            if 'playback_speed' in state:
                self.playback_speed = state['playback_speed']
                print(f"Loaded playback_speed: {self.playback_speed}")
            if 'seek_multiplier' in state:
                self.seek_multiplier = state['seek_multiplier']
                print(f"Loaded seek_multiplier: {self.seek_multiplier}")
            if 'is_playing' in state:
                self.is_playing = state['is_playing']
                print(f"Loaded is_playing: {self.is_playing}")
            if 'tracking_enabled' in state:
                self.tracking_enabled = state['tracking_enabled']
                print(f"Loaded tracking_enabled: {self.tracking_enabled}")
            if 'tracking_points' in state and isinstance(state['tracking_points'], dict):
                self.tracking_points = {int(k): v for k, v in state['tracking_points'].items()}
                print(f"Loaded tracking_points: {sum(len(v) for v in self.tracking_points.values())} points")

            # Load feature tracker state
            if 'feature_tracker' in state:
                self.feature_tracker.load_state_dict(state['feature_tracker'])
                print("Loaded feature tracker state")

            # Load template matching state
            if 'template_matching_enabled' in state:
                self.template_matching_enabled = state['template_matching_enabled']
            if 'template_region' in state and state['template_region'] is not None:
                self.template_region = state['template_region']
                # Recreate template from region when needed
                self.tracking_template = None  # Will be recreated on first use

            # Validate cut markers against current video length
            if self.cut_start_frame is not None and self.cut_start_frame >= self.total_frames:
                print(f"DEBUG: cut_start_frame {self.cut_start_frame} is beyond video length {self.total_frames}, clearing")
                self.cut_start_frame = None
            if self.cut_end_frame is not None and self.cut_end_frame >= self.total_frames:
                print(f"DEBUG: cut_end_frame {self.cut_end_frame} is beyond video length {self.total_frames}, clearing")
                self.cut_end_frame = None

            # Calculate and show marker positions on timeline
            if self.cut_start_frame is not None and self.cut_end_frame is not None:
                start_progress = self.cut_start_frame / max(1, self.total_frames - 1)
                end_progress = self.cut_end_frame / max(1, self.total_frames - 1)
                print(f"Markers will be drawn at: Start {start_progress:.4f} ({self.cut_start_frame}/{self.total_frames}), End {end_progress:.4f} ({self.cut_end_frame}/{self.total_frames})")

            # Validate and clamp values
            self.current_frame = max(0, min(self.current_frame, getattr(self, 'total_frames', 1) - 1))
            self.zoom_factor = max(self.MIN_ZOOM, min(self.MAX_ZOOM, self.zoom_factor))
            self.brightness = max(-100, min(100, self.brightness))
            self.contrast = max(0.1, min(3.0, self.contrast))
            self.playback_speed = max(self.MIN_PLAYBACK_SPEED, min(self.MAX_PLAYBACK_SPEED, self.playback_speed))
            self.seek_multiplier = max(self.MIN_SEEK_MULTIPLIER, min(self.MAX_SEEK_MULTIPLIER, self.seek_multiplier))

            # Apply loaded settings
            self.clear_transformation_cache()
            self.load_current_frame()

            print("Successfully loaded and applied all settings from state file")
            return True
        except Exception as e:
            print(f"Error loading state: {e}")
            return False

    def _is_video_file(self, file_path: Path) -> bool:
        """Check if file is a supported video format"""
        return file_path.suffix.lower() in self.VIDEO_EXTENSIONS

    def _is_image_file(self, file_path: Path) -> bool:
        """Check if file is a supported image format"""
        return file_path.suffix.lower() in self.IMAGE_EXTENSIONS

    def _is_media_file(self, file_path: Path) -> bool:
        """Check if file is a supported media format (video or image)"""
        return self._is_video_file(file_path) or self._is_image_file(file_path)

    def _get_next_screenshot_filename(self, video_path: Path) -> str:
        """Generate the next available screenshot filename: video_frame_00001.jpg, video_frame_00002.jpg, etc."""
        directory = video_path.parent
        base_name = video_path.stem

        # Pattern to match existing screenshot files: video_frame_00001.jpg, video_frame_00002.jpg, etc.
        pattern = re.compile(rf"^{re.escape(base_name)}_frame_(\d{{5}})\.(jpg|jpeg|png)$")

        existing_numbers = set()
        for file_path in directory.iterdir():
            if file_path.is_file():
                match = pattern.match(file_path.name)
                if match:
                    existing_numbers.add(int(match.group(1)))

        # Find the next available number starting from 1
        next_number = 1
        while next_number in existing_numbers:
            next_number += 1

        return f"{base_name}_frame_{next_number:05d}.jpg"

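    # Example of the naming scheme (hypothetical files): for "holiday.mp4" with
    # holiday_frame_00001.jpg and holiday_frame_00003.jpg already on disk, the next
    # name returned is "holiday_frame_00002.jpg" - gaps are filled before counting up.
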
    def save_current_frame(self):
        """Save the current frame as a screenshot"""
        if self.current_display_frame is None:
            print("No frame to save")
            return False

        # Generate the next available screenshot filename
        screenshot_name = self._get_next_screenshot_filename(self.video_path)
        screenshot_path = self.video_path.parent / screenshot_name

        # Apply current transformations (crop, zoom, rotation, brightness/contrast) to the frame
        processed_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame.copy())

        if processed_frame is not None:
            # Save the processed frame with high quality settings
            # Use JPEG quality 95 (0-100, where 100 is highest quality)
            success = cv2.imwrite(str(screenshot_path), processed_frame, [cv2.IMWRITE_JPEG_QUALITY, 95])
            if success:
                print(f"Screenshot saved: {screenshot_name}")
                self.show_feedback_message(f"Screenshot saved: {screenshot_name}")
                return True
            else:
                print(f"Error: Could not save screenshot to {screenshot_path}")
                self.show_feedback_message("Error: Could not save screenshot")
                return False
        else:
            print("Error: Could not process frame for screenshot")
            self.show_feedback_message("Error: Could not process frame")
            return False

    def _get_media_files_from_directory(self, directory: Path) -> List[Path]:
        """Get all media files (video and image) from a directory, sorted by name"""
        media_files = set()
        for file_path in directory.iterdir():
            if (
                file_path.is_file()
                and self._is_media_file(file_path)
            ):
                media_files.add(file_path)

        # Pattern to match edited files: basename_edited_001.ext, basename_edited_002.ext, etc.
        edited_pattern = re.compile(r"^(.+)_edited_\d{3}$")

        edited_base_names = set()
        for file_path in media_files:
            match = edited_pattern.match(file_path.stem)
            if match:
                edited_base_names.add(match.group(1))

        non_edited_media = set()
        for file_path in media_files:
            # Skip if this is an edited file
            if edited_pattern.match(file_path.stem):
                continue

            # Skip if there's already an edited version of this file
            if file_path.stem in edited_base_names:
                continue

            non_edited_media.add(file_path)

        return sorted(non_edited_media)

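    # Example of the filtering above (hypothetical directory): given a.mp4,
    # a_edited_001.mp4 and b.mp4, only [b.mp4] is returned - the edited copy itself
    # is skipped, and a.mp4 is skipped because an edited version of it already exists.
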
    def _load_video(self, media_path: Path):
        """Load a media file (video or image) and initialize properties"""
        if hasattr(self, "cap") and self.cap:
            self.cap.release()

        self.video_path = media_path
        self.is_image_mode = self._is_image_file(media_path)

        if self.is_image_mode:
            # Load static image with UTF-8 support
            self.static_image = load_image_utf8(media_path)

            # Set up image properties to mimic video interface
            self.frame_height, self.frame_width = self.static_image.shape[:2]
            self.total_frames = 1
            self.fps = 30  # Dummy FPS for image mode
            self.cap = None

            print(f"Loaded image: {self.video_path.name}")
            print(f" Resolution: {self.frame_width}x{self.frame_height}")
        else:
            # Try different backends for better performance
            # Order of preference: FFmpeg (best for video files), DirectShow (cameras), any available
            backends_to_try = []
            if hasattr(cv2, 'CAP_FFMPEG'):  # FFmpeg - best for video files
                backends_to_try.append(cv2.CAP_FFMPEG)
            if hasattr(cv2, 'CAP_DSHOW'):  # DirectShow - usually for cameras
                backends_to_try.append(cv2.CAP_DSHOW)
            backends_to_try.append(cv2.CAP_ANY)  # Fallback

            self.cap = None
            for backend in backends_to_try:
                try:
                    self.cap = Cv2BufferedCap(self.video_path, backend)
                    if self.cap.isOpened():
                        break
                except Exception:
                    continue

            if not self.cap or not self.cap.isOpened():
                raise ValueError(f"Could not open video file: {media_path}")

            # Video properties from buffered cap
            self.total_frames = self.cap.total_frames
            self.fps = self.cap.fps
            self.frame_width = self.cap.frame_width
            self.frame_height = self.cap.frame_height

            # Get codec information for debugging
            fourcc = int(self.cap.cap.get(cv2.CAP_PROP_FOURCC))
            codec = "".join([chr((fourcc >> 8 * i) & 0xFF) for i in range(4)])

            # Get backend information
            backend_name = "FFmpeg" if hasattr(cv2, 'CAP_FFMPEG') and backend == cv2.CAP_FFMPEG else "Other"

            print(f"Loaded video: {self.video_path.name} ({self.current_video_index + 1}/{len(self.video_files)})")
            print(f" Codec: {codec} | Backend: {backend_name} | Resolution: {self.frame_width}x{self.frame_height}")
            print(f" FPS: {self.fps:.2f} | Frames: {self.total_frames} | Duration: {self.total_frames/self.fps:.1f}s")

            # Performance warning for known problematic cases
            if codec in ['H264', 'H.264', 'AVC1', 'avc1'] and self.total_frames > 10000:
                print(" Warning: Large H.264 video detected - seeking may be slow")
            if self.frame_width * self.frame_height > 1920 * 1080:
                print(" Warning: High resolution video - decoding may be slow")
            if self.fps > 60:
                print(" Warning: High framerate video - may impact playback smoothness")

        # Set default values for video-specific properties
        self.current_frame = 0
        self.is_playing = False  # Both videos and images start paused
        self.playback_speed = 1.0
        self.seek_multiplier = 1.0
        self.cut_start_frame = None
        self.cut_end_frame = None

        # Always reset these regardless of state
        self.current_display_frame = None

    def switch_to_video(self, index: int):
        """Switch to a specific video by index"""
        if 0 <= index < len(self.video_files):
            self.current_video_index = index
            self._load_video(self.video_files[index])
            self.load_current_frame()

    def next_video(self):
        """Switch to the next video"""
        self.save_state()  # Save current video state before switching
        next_index = (self.current_video_index + 1) % len(self.video_files)
        self.switch_to_video(next_index)

    def previous_video(self):
        """Switch to the previous video"""
        self.save_state()  # Save current video state before switching
        prev_index = (self.current_video_index - 1) % len(self.video_files)
        self.switch_to_video(prev_index)

    def load_current_frame(self) -> bool:
        """Load the current frame into display cache"""
        if self.is_image_mode:
            # For images, just copy the static image
            self.current_display_frame = self.static_image.copy()
            return True
        else:
            # Use buffered cap to get frame
            try:
                self.current_display_frame = self.cap.get_frame(self.current_frame)
                return True
            except Exception as e:
                print(f"Failed to load frame {self.current_frame}: {e}")
                return False

    def calculate_frame_delay(self) -> int:
        """Calculate frame delay in milliseconds based on playback speed"""
        # Round to 2 decimals to handle floating point precision issues
        speed = round(self.playback_speed, 2)
        print(f"Playback speed: {speed}")
        if speed >= 1.0:
            # Speed >= 1: maximum FPS (no delay)
            return 1
        else:
            # Speed < 1: scale FPS based on speed
            # Formula: fps = TARGET_FPS * speed, so delay = 1000 / fps
            target_fps = self.TARGET_FPS * speed
            delay_ms = int(1000 / target_fps)
            return max(1, delay_ms)

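    # Worked example (with TARGET_FPS = 80): playback_speed 0.5 gives a target of
    # 40 fps, i.e. a 25 ms delay per frame; any speed >= 1.0 returns the minimum
    # 1 ms delay, so playback then runs as fast as decoding allows.
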
    def seek_video(self, frames_delta: int):
        """Seek video by specified number of frames"""
        target_frame = max(
            0, min(self.current_frame + frames_delta, self.total_frames - 1)
        )
        self.current_frame = target_frame
        self.load_current_frame()
        self.display_needs_update = True

    def seek_video_with_modifier(
        self, direction: int, shift_pressed: bool, ctrl_pressed: bool
    ):
        """Seek video with different frame counts based on modifiers and seek multiplier"""
        if ctrl_pressed:
            base_frames = self.SEEK_FRAMES_CTRL
        elif shift_pressed:
            base_frames = self.SEEK_FRAMES_SHIFT
        else:
            base_frames = self.SEEK_FRAMES_DEFAULT

        # Apply seek multiplier to the base frame count
        frames = direction * int(base_frames * self.seek_multiplier)
        self.seek_video(frames)

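    # Worked example (with the SEEK_FRAMES_* constants above): Ctrl gives a base of
    # 60 frames, so with seek_multiplier = 4.0 one press jumps 240 frames; an
    # unmodified press at multiplier 1.0 still moves a single frame.
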
    def seek_video_exact_frame(self, direction: int):
        """Seek video by exactly 1 frame, unaffected by seek multiplier"""
        if self.is_image_mode:
            return

        frames = direction  # Always exactly 1 frame
        self.seek_video(frames)

    def start_auto_repeat_seek(self, direction: int, shift_pressed: bool, ctrl_pressed: bool):
        """Start auto-repeat seeking"""
        if self.is_image_mode:
            return

        self.auto_repeat_active = True
        self.auto_repeat_direction = direction
        self.auto_repeat_shift = shift_pressed
        self.auto_repeat_ctrl = ctrl_pressed

        # Initialize last_display_update to prevent immediate auto-repeat
        self.last_display_update = time.time()

        self.seek_video_with_modifier(direction, shift_pressed, ctrl_pressed)

    def stop_auto_repeat_seek(self):
        """Stop auto-repeat seeking"""
        self.auto_repeat_active = False
        self.auto_repeat_direction = 0
        self.auto_repeat_shift = False
        self.auto_repeat_ctrl = False

    def update_auto_repeat_seek(self):
        """Update auto-repeat seeking"""
        if not self.auto_repeat_active or self.is_image_mode:
            return

        current_time = time.time()

        if current_time - self.last_display_update >= self.AUTO_REPEAT_DISPLAY_RATE:
            self.seek_video_with_modifier(
                self.auto_repeat_direction,
                self.auto_repeat_shift,
                self.auto_repeat_ctrl
            )
            self.last_display_update = current_time

    def seek_to_frame(self, frame_number: int):
        """Seek to specific frame"""
        old_frame = self.current_frame
        self.current_frame = max(0, min(frame_number, self.total_frames - 1))
        self.load_current_frame()

        # Only log when we actually change frames
        if old_frame != self.current_frame:
            print(f"DEBUG: === LOADED NEW FRAME {self.current_frame} ===")
            print(f"DEBUG: Features available for frames: {sorted(self.feature_tracker.features.keys())}")
            if self.current_frame in self.feature_tracker.features:
                feature_count = len(self.feature_tracker.features[self.current_frame]['positions'])
                print(f"DEBUG: Frame {self.current_frame} has {feature_count} features")
            else:
                print(f"DEBUG: Frame {self.current_frame} has NO features")

        # Auto-extract features if feature tracking is enabled and auto-tracking is on
        print(f"DEBUG: seek_to_frame {frame_number}: is_image_mode={self.is_image_mode}, tracking_enabled={self.feature_tracker.tracking_enabled}, auto_tracking={self.feature_tracker.auto_tracking}, display_frame={self.current_display_frame is not None}")

        if (not self.is_image_mode and
                self.feature_tracker.tracking_enabled and
                self.feature_tracker.auto_tracking and
                self.current_display_frame is not None):

            print(f"DEBUG: Auto-tracking conditions met for frame {self.current_frame}")
            # Only extract if we don't already have features for this frame
            if self.current_frame not in self.feature_tracker.features:
                print(f"DEBUG: Extracting features for frame {self.current_frame}")
                # Extract features from the transformed frame (what user sees)
                # This handles all transformations (crop, zoom, rotation) correctly
                display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)
                if display_frame is not None:
                    # Map coordinates from transformed frame to rotated frame coordinates
                    # Use the existing coordinate transformation system
                    def coord_mapper(x, y):
                        # Map from transformed frame coordinates to screen coordinates
                        frame_height, frame_width = display_frame.shape[:2]
                        available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
                        start_y = (available_height - frame_height) // 2
                        start_x = (self.window_width - frame_width) // 2

                        # Convert to screen coordinates
                        screen_x = x + start_x
                        screen_y = y + start_y

                        # Use the existing coordinate transformation system
                        return self._map_screen_to_rotated(screen_x, screen_y)

                    self.feature_tracker.extract_features(display_frame, self.current_frame, coord_mapper)
            else:
                print(f"DEBUG: Frame {self.current_frame} already has features, skipping")

        # Optical flow tracking - track features from previous frame
        if (not self.is_image_mode and
                self.optical_flow_enabled and
                self.feature_tracker.tracking_enabled and
                self.previous_frame_for_flow is not None and
                self.current_display_frame is not None):

            self._track_with_optical_flow()

        # Store current frame for next optical flow iteration
        if not self.is_image_mode and self.current_display_frame is not None:
            self.previous_frame_for_flow = self.current_display_frame.copy()

    def jump_to_previous_marker(self):
        """Jump to the previous tracking marker (frame with tracking points)."""
        if self.is_image_mode:
            return
        self.stop_auto_repeat_seek()
        tracking_frames = sorted(k for k, v in self.tracking_points.items() if v)
        if not tracking_frames:
            print("DEBUG: No tracking markers; prev jump ignored")
            return
        current = self.current_frame
        candidates = [f for f in tracking_frames if f < current]
        if candidates:
            target = candidates[-1]
            print(f"DEBUG: Jump prev tracking from {current} -> {target}; tracking_frames={tracking_frames}")
            self.seek_to_frame(target)
        else:
            target = tracking_frames[0]
            print(f"DEBUG: Jump prev tracking to first marker from {current} -> {target}; tracking_frames={tracking_frames}")
            self.seek_to_frame(target)

    def jump_to_next_marker(self):
        """Jump to the next tracking marker (frame with tracking points)."""
        if self.is_image_mode:
            return
        self.stop_auto_repeat_seek()
        tracking_frames = sorted(k for k, v in self.tracking_points.items() if v)
        if not tracking_frames:
            print("DEBUG: No tracking markers; next jump ignored")
            return
        current = self.current_frame
        for f in tracking_frames:
            if f > current:
                print(f"DEBUG: Jump next tracking from {current} -> {f}; tracking_frames={tracking_frames}")
                self.seek_to_frame(f)
                return
        target = tracking_frames[-1]
        print(f"DEBUG: Jump next tracking to last marker from {current} -> {target}; tracking_frames={tracking_frames}")
        self.seek_to_frame(target)

    def _get_previous_tracking_point(self):
        """Get the tracking point from the previous frame that has tracking points."""
        if self.is_image_mode or not self.tracking_points:
            return None

        tracking_frames = sorted(k for k, v in self.tracking_points.items() if v and 0 <= k < self.total_frames)
        if not tracking_frames:
            return None

        # Find the last frame with tracking points that's before current frame
        prev_frames = [f for f in tracking_frames if f < self.current_frame]
        if not prev_frames:
            return None

        prev_frame = max(prev_frames)
        return prev_frame, self.tracking_points[prev_frame]

    def _get_next_tracking_point(self):
        """Get the tracking point from the next frame that has tracking points."""
        if self.is_image_mode or not self.tracking_points:
            return None

        tracking_frames = sorted(k for k, v in self.tracking_points.items() if v and 0 <= k < self.total_frames)
        if not tracking_frames:
            return None

        # Find the first frame with tracking points that's after current frame
        next_frames = [f for f in tracking_frames if f > self.current_frame]
        if not next_frames:
            return None

        next_frame = min(next_frames)
        return next_frame, self.tracking_points[next_frame]

    def _point_to_line_distance_and_foot(self, px, py, x1, y1, x2, y2):
        """Calculate distance from point (px, py) to the infinite line through (x1, y1) and (x2, y2) and return the foot of the perpendicular"""
        # Convert line to general form: Ax + By + C = 0
        # (y2 - y1)(x - x1) - (x2 - x1)(y - y1) = 0
        A = y2 - y1
        B = -(x2 - x1)  # Note the negative sign
        C = -(A * x1 + B * y1)

        # Calculate distance: d = |Ax + By + C| / sqrt(A^2 + B^2)
        denominator = (A * A + B * B) ** 0.5
        if denominator == 0:
            # Line is actually a point
            distance = ((px - x1) ** 2 + (py - y1) ** 2) ** 0.5
            return distance, (x1, y1)

        distance = abs(A * px + B * py + C) / denominator

        # Calculate foot of perpendicular: (xf, yf)
        # xf = xu - A(Axu + Byu + C)/(A^2 + B^2)
        # yf = yu - B(Axu + Byu + C)/(A^2 + B^2)
        numerator = A * px + B * py + C
        xf = px - A * numerator / (A * A + B * B)
        yf = py - B * numerator / (A * A + B * B)

        return distance, (xf, yf)

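    # Worked example: for the horizontal line through (0, 0) and (10, 0) and the
    # point (3, 4), A = 0, B = -10, C = 0, so the distance is |(-10) * 4| / 10 = 4
    # and the foot of the perpendicular is (3, 0).
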
    def advance_frame(self) -> bool:
        """Advance to next frame - handles playback speed and marker looping"""
        if not self.is_playing:
            return True

        # Always advance by 1 frame - speed is controlled by delay timing
        new_frame = self.current_frame + 1

        # Handle marker looping bounds
        if self.looping_between_markers and self.cut_start_frame is not None and self.cut_end_frame is not None:
            if new_frame >= self.cut_end_frame:
                # Loop back to start marker
                new_frame = self.cut_start_frame
        elif new_frame >= self.total_frames:
            # Loop to beginning
            new_frame = 0

        # Update current frame and load it
        self.current_frame = new_frame
        return self.load_current_frame()

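    # Example (illustrative): with cut markers at frames 120 and 300 and marker
    # looping enabled, advancing from frame 299 gives new_frame = 300, which is
    # >= cut_end_frame, so playback wraps back to frame 120; without markers the
    # same check against total_frames wraps playback to frame 0.
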
    def apply_crop_zoom_and_rotation(self, frame):
        """Apply current crop, zoom, rotation, and brightness/contrast settings to frame"""
        if frame is None:
            return None

        # Create a hash of the transformation parameters for caching
        transform_hash = hash((
            self.crop_rect,
            self.zoom_factor,
            self.rotation_angle,
            self.brightness,
            self.contrast,
            tuple(self.display_offset)
        ))

        # Check if we can use cached transformation during auto-repeat seeking
        if (self.auto_repeat_active and
                self.cached_transformed_frame is not None and
                self.cached_frame_number == self.current_frame and
                self.cached_transform_hash == transform_hash):
            return self.cached_transformed_frame.copy()

        # Work in-place when possible to avoid unnecessary copying
        processed_frame = frame

        # Apply brightness/contrast first (to original frame for best quality)
        processed_frame = self.apply_brightness_contrast(processed_frame)

        # Apply rotation first so crop_rect is in ROTATED frame coordinates
        if self.rotation_angle != 0:
            processed_frame = self.apply_rotation(processed_frame)

        # Apply crop (interpreted in rotated frame coordinates) using EFFECTIVE rect
        eff_x, eff_y, eff_w, eff_h = self._get_effective_crop_rect_for_frame(getattr(self, 'current_frame', 0))
        if eff_w > 0 and eff_h > 0:
            eff_x = max(0, min(eff_x, processed_frame.shape[1] - 1))
            eff_y = max(0, min(eff_y, processed_frame.shape[0] - 1))
            eff_w = min(eff_w, processed_frame.shape[1] - eff_x)
            eff_h = min(eff_h, processed_frame.shape[0] - eff_y)
            processed_frame = processed_frame[eff_y : eff_y + eff_h, eff_x : eff_x + eff_w]

        # Apply zoom
        if self.zoom_factor != 1.0:
            height, width = processed_frame.shape[:2]
            new_width = int(width * self.zoom_factor)
            new_height = int(height * self.zoom_factor)
            processed_frame = cv2.resize(
                processed_frame, (new_width, new_height), interpolation=cv2.INTER_LINEAR
            )

            # Handle zoom center and display offset
            if new_width > self.window_width or new_height > self.window_height:
                # Calculate crop from zoomed image to fit window
                start_x = max(0, self.display_offset[0])
                start_y = max(0, self.display_offset[1])
                end_x = min(new_width, start_x + self.window_width)
                end_y = min(new_height, start_y + self.window_height)
                processed_frame = processed_frame[start_y:end_y, start_x:end_x]

        # Cache the result for auto-repeat seeking performance
        if self.auto_repeat_active:
            self.cached_transformed_frame = processed_frame.copy()
            self.cached_frame_number = self.current_frame
            self.cached_transform_hash = transform_hash

        return processed_frame

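    # Ordering note (illustrative): brightness/contrast is applied to the raw frame,
    # then rotation, then the EFFECTIVE crop (whose rect lives in rotated coordinates),
    # then zoom. For a 1920x1080 source rotated 90 degrees the rotated frame is
    # 1080x1920, so a crop_rect of (0, 0, 1080, 500) keeps the top 500 rows of the
    # rotated image, and zoom_factor 2.0 then resizes that region to 2160x1000 before
    # the display-offset window is cut out to fit the window.
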
    # --- Motion tracking helpers ---
    def _get_effective_crop_rect_for_frame(self, frame_number):
        """Return EFFECTIVE crop_rect in ROTATED frame coords for this frame (applies tracking follow)."""
        # Rotated base dims
        if self.rotation_angle in (90, 270):
            rot_w, rot_h = self.frame_height, self.frame_width
        else:
            rot_w, rot_h = self.frame_width, self.frame_height
        # Default full-frame
        if not self.crop_rect:
            return (0, 0, rot_w, rot_h)
        x, y, w, h = map(int, self.crop_rect)
        # Tracking follow: center crop on interpolated rotated position
        if self.tracking_enabled:
            pos = self._get_interpolated_tracking_position(frame_number)
            if pos:
                cx, cy = pos
                x = int(round(cx - w / 2))
                y = int(round(cy - h / 2))
        # Clamp in rotated space
        x = max(0, min(x, rot_w - 1))
        y = max(0, min(y, rot_h - 1))
        w = min(w, rot_w - x)
        h = min(h, rot_h - y)
        return (x, y, w, h)

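    # Example (illustrative): with crop_rect = (100, 100, 400, 300), tracking enabled,
    # and an interpolated track position of (640, 360) in rotated coordinates, the
    # effective rect becomes (440, 210, 400, 300) - the same size, re-centered on the
    # tracked point and clamped to the rotated frame bounds.
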
def _get_interpolated_tracking_position(self, frame_number):
|
|
"""Linear interpolation in ROTATED frame coords. Returns (rx, ry) or None."""
|
|
# First try template matching if enabled (much better than optical flow)
|
|
if self.template_matching_enabled and self.tracking_template is not None:
|
|
if self.current_display_frame is not None:
|
|
# Use only the cropped region for much faster template matching
|
|
if self.crop_rect:
|
|
crop_x, crop_y, crop_w, crop_h = self.crop_rect
|
|
# Extract only the cropped region
|
|
cropped_frame = self.current_display_frame[crop_y:crop_y+crop_h, crop_x:crop_x+crop_w]
|
|
if cropped_frame is not None and cropped_frame.size > 0:
|
|
# Track template in cropped frame (much faster!)
|
|
result = self.track_template(cropped_frame)
|
|
if result:
|
|
center_x, center_y, confidence = result
|
|
print(f"DEBUG: Template match found at ({center_x}, {center_y}) with confidence {confidence:.2f}")
|
|
|
|
# Map from cropped frame coordinates to rotated frame coordinates
|
|
# Add crop offset back
|
|
rot_x = center_x + crop_x
|
|
rot_y = center_y + crop_y
|
|
|
|
return (rot_x, rot_y)
|
|
else:
|
|
# No crop - use full frame
|
|
raw_frame = self.current_display_frame.copy()
|
|
if raw_frame is not None:
|
|
result = self.track_template(raw_frame)
|
|
if result:
|
|
center_x, center_y, confidence = result
|
|
print(f"DEBUG: Template match found at ({center_x}, {center_y}) with confidence {confidence:.2f}")
|
|
|
|
# Map from raw frame coordinates to rotated frame coordinates
|
|
if self.rotation_angle == 90:
|
|
rot_x = self.frame_height - center_y
|
|
rot_y = center_x
|
|
elif self.rotation_angle == 180:
|
|
rot_x = self.frame_width - center_x
|
|
rot_y = self.frame_height - center_y
|
|
elif self.rotation_angle == 270:
|
|
rot_x = center_y
|
|
rot_y = self.frame_width - center_x
|
|
else:
|
|
rot_x, rot_y = center_x, center_y
|
|
|
|
return (rot_x, rot_y)
|
|
|
|
# Fall back to feature tracking if enabled - but use smooth interpolation instead of averaging
|
|
if self.feature_tracker.tracking_enabled:
|
|
# Get the nearest frames with features for smooth interpolation
|
|
feature_frames = sorted(self.feature_tracker.features.keys())
|
|
if feature_frames:
|
|
# Find the two nearest frames for interpolation
|
|
if frame_number <= feature_frames[0]:
|
|
# Before first feature frame - use first frame
|
|
return self._get_feature_center(feature_frames[0])
|
|
elif frame_number >= feature_frames[-1]:
|
|
# After last feature frame - use last frame
|
|
return self._get_feature_center(feature_frames[-1])
|
|
else:
|
|
# Between two feature frames - interpolate smoothly
|
|
for i in range(len(feature_frames) - 1):
|
|
if feature_frames[i] <= frame_number <= feature_frames[i + 1]:
|
|
return self._interpolate_feature_positions(
|
|
feature_frames[i], feature_frames[i + 1], frame_number
|
|
)
|
|
|
|
# Fall back to manual tracking points
|
|
if not self.tracking_points:
|
|
return None
|
|
frames = sorted(self.tracking_points.keys())
|
|
if not frames:
|
|
return None
|
|
if frame_number in self.tracking_points and self.tracking_points[frame_number]:
|
|
pts = self.tracking_points[frame_number]
|
|
return (sum(p[0] for p in pts) / len(pts), sum(p[1] for p in pts) / len(pts))
|
|
if frame_number < frames[0]:
|
|
pts = self.tracking_points[frames[0]]
|
|
return (sum(p[0] for p in pts) / len(pts), sum(p[1] for p in pts) / len(pts)) if pts else None
|
|
if frame_number > frames[-1]:
|
|
pts = self.tracking_points[frames[-1]]
|
|
return (sum(p[0] for p in pts) / len(pts), sum(p[1] for p in pts) / len(pts)) if pts else None
|
|
for i in range(len(frames) - 1):
|
|
f1, f2 = frames[i], frames[i + 1]
|
|
if f1 <= frame_number <= f2:
|
|
pts1 = self.tracking_points.get(f1) or []
|
|
pts2 = self.tracking_points.get(f2) or []
|
|
if not pts1 or not pts2:
|
|
continue
|
|
x1 = sum(p[0] for p in pts1) / len(pts1)
|
|
y1 = sum(p[1] for p in pts1) / len(pts1)
|
|
x2 = sum(p[0] for p in pts2) / len(pts2)
|
|
y2 = sum(p[1] for p in pts2) / len(pts2)
|
|
t = (frame_number - f1) / (f2 - f1) if f2 != f1 else 0.0
|
|
return (x1 + t * (x2 - x1), y1 + t * (y2 - y1))
|
|
return None
|
|
|
|
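    # Summary of the fallback chain above (illustrative): template matching (if a
    # template is set) wins, then the feature tracker's per-frame centroids with
    # linear interpolation between keyframes, then the manually placed tracking
    # points. For manual points, a frame halfway between keyframes at frames 10
    # and 20 with average positions (100, 200) and (200, 300) interpolates to
    # (150.0, 250.0) because t = (15 - 10) / (20 - 10) = 0.5.
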
def _get_display_params(self):
|
|
"""Unified display transform parameters for current frame in rotated space."""
|
|
eff_x, eff_y, eff_w, eff_h = self._get_effective_crop_rect_for_frame(getattr(self, 'current_frame', 0))
|
|
new_w = int(eff_w * self.zoom_factor)
|
|
new_h = int(eff_h * self.zoom_factor)
|
|
cropped_due_to_zoom = (self.zoom_factor != 1.0) and (new_w > self.window_width or new_h > self.window_height)
|
|
if cropped_due_to_zoom:
|
|
offx_max = max(0, new_w - self.window_width)
|
|
offy_max = max(0, new_h - self.window_height)
|
|
offx = max(0, min(int(self.display_offset[0]), offx_max))
|
|
offy = max(0, min(int(self.display_offset[1]), offy_max))
|
|
visible_w = min(new_w, self.window_width)
|
|
visible_h = min(new_h, self.window_height)
|
|
else:
|
|
offx = 0
|
|
offy = 0
|
|
visible_w = new_w
|
|
visible_h = new_h
|
|
available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
|
|
scale_raw = min(self.window_width / max(1, visible_w), available_height / max(1, visible_h))
|
|
scale = scale_raw if scale_raw < 1.0 else 1.0
|
|
final_w = int(visible_w * scale)
|
|
final_h = int(visible_h * scale)
|
|
start_x = (self.window_width - final_w) // 2
|
|
start_y = (available_height - final_h) // 2
|
|
return {
|
|
'eff_x': eff_x, 'eff_y': eff_y, 'eff_w': eff_w, 'eff_h': eff_h,
|
|
'offx': offx, 'offy': offy,
|
|
'scale': scale,
|
|
'start_x': start_x, 'start_y': start_y,
|
|
'visible_w': visible_w, 'visible_h': visible_h,
|
|
'available_h': available_height
|
|
}
|
|
|
|
def _map_rotated_to_screen(self, rx, ry):
|
|
"""Map a point in ROTATED frame coords to canvas screen coords (post-crop)."""
|
|
# Subtract crop offset in rotated space (EFFECTIVE crop at current frame)
|
|
cx, cy, cw, ch = self._get_effective_crop_rect_for_frame(getattr(self, 'current_frame', 0))
|
|
rx2 = rx - cx
|
|
ry2 = ry - cy
|
|
# Zoomed dimensions of cropped-rotated frame
|
|
new_w = int(cw * self.zoom_factor)
|
|
new_h = int(ch * self.zoom_factor)
|
|
cropped_due_to_zoom = (self.zoom_factor != 1.0) and (new_w > self.window_width or new_h > self.window_height)
|
|
if cropped_due_to_zoom:
|
|
offx_max = max(0, new_w - self.window_width)
|
|
offy_max = max(0, new_h - self.window_height)
|
|
offx = max(0, min(int(self.display_offset[0]), offx_max))
|
|
offy = max(0, min(int(self.display_offset[1]), offy_max))
|
|
else:
|
|
offx = 0
|
|
offy = 0
|
|
zx = rx2 * self.zoom_factor - offx
|
|
zy = ry2 * self.zoom_factor - offy
|
|
visible_w = new_w if not cropped_due_to_zoom else min(new_w, self.window_width)
|
|
visible_h = new_h if not cropped_due_to_zoom else min(new_h, self.window_height)
|
|
available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
|
|
scale_raw = min(self.window_width / max(1, visible_w), available_height / max(1, visible_h))
|
|
scale_canvas = scale_raw if scale_raw < 1.0 else 1.0
|
|
final_w = int(visible_w * scale_canvas)
|
|
final_h = int(visible_h * scale_canvas)
|
|
start_x_canvas = (self.window_width - final_w) // 2
|
|
start_y_canvas = (available_height - final_h) // 2
|
|
sx = int(round(start_x_canvas + zx * scale_canvas))
|
|
sy = int(round(start_y_canvas + zy * scale_canvas))
|
|
return sx, sy
|
|
|
|
    def _map_screen_to_rotated(self, sx, sy):
        """Map a point on canvas screen coords back to ROTATED frame coords (pre-crop)."""
        frame_number = getattr(self, 'current_frame', 0)
        angle = self.rotation_angle
        # Use unified display params
        params = self._get_display_params()
        # Back to processed (zoomed+cropped) space
        zx = (sx - params['start_x']) / max(1e-6, params['scale'])
        zy = (sy - params['start_y']) / max(1e-6, params['scale'])
        zx += params['offx']
        zy += params['offy']
        # Reverse zoom
        rx = zx / max(1e-6, self.zoom_factor)
        ry = zy / max(1e-6, self.zoom_factor)
        # Unapply current EFFECTIVE crop to get PRE-crop rotated coords
        rx = rx + params['eff_x']
        ry = ry + params['eff_y']
        return int(round(rx)), int(round(ry))

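    # Round-trip note (illustrative): _map_screen_to_rotated is the inverse of
    # _map_rotated_to_screen, so for a point inside the visible (cropped, zoomed)
    # area, self._map_screen_to_rotated(*self._map_rotated_to_screen(rx, ry))
    # returns approximately (rx, ry) up to integer rounding.
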
    def clear_transformation_cache(self):
        """Clear the cached transformation to force recalculation"""
        self.cached_transformed_frame = None
        self.cached_frame_number = None
        self.cached_transform_hash = None

def _extract_features_from_region(self, screen_rect):
|
|
"""Extract features from a specific screen region"""
|
|
x, y, w, h = screen_rect
|
|
print(f"DEBUG: Extracting features from region ({x}, {y}, {w}, {h})")
|
|
|
|
# Map screen coordinates to rotated frame coordinates
|
|
rx1, ry1 = self._map_screen_to_rotated(x, y)
|
|
rx2, ry2 = self._map_screen_to_rotated(x + w, y + h)
|
|
|
|
# Get the region in rotated frame coordinates
|
|
left_r = min(rx1, rx2)
|
|
top_r = min(ry1, ry2)
|
|
right_r = max(rx1, rx2)
|
|
bottom_r = max(ry1, ry2)
|
|
|
|
# Extract features from this region of the original frame
|
|
if self.rotation_angle in (90, 270):
|
|
# For rotated frames, we need to map back to original frame coordinates
|
|
if self.rotation_angle == 90:
|
|
orig_x = top_r
|
|
orig_y = self.frame_height - right_r
|
|
orig_w = bottom_r - top_r
|
|
orig_h = right_r - left_r
|
|
else: # 270
|
|
orig_x = self.frame_width - bottom_r
|
|
orig_y = left_r
|
|
orig_w = bottom_r - top_r
|
|
orig_h = right_r - left_r
|
|
else:
|
|
orig_x, orig_y = left_r, top_r
|
|
orig_w, orig_h = right_r - left_r, bottom_r - top_r
|
|
|
|
# Extract features from this region
|
|
if (orig_x >= 0 and orig_y >= 0 and
|
|
orig_x + orig_w <= self.frame_width and
|
|
orig_y + orig_h <= self.frame_height):
|
|
|
|
if self.current_display_frame is not None:
|
|
region_frame = self.current_display_frame[orig_y:orig_y+orig_h, orig_x:orig_x+orig_w]
|
|
if region_frame is not None and region_frame.size > 0:
|
|
# Map coordinates from region to rotated frame coordinates
|
|
def coord_mapper(px, py):
|
|
# Map from region coordinates to rotated frame coordinates
|
|
if self.rotation_angle == 90:
|
|
rot_x = orig_x + py
|
|
rot_y = self.frame_height - (orig_y + px)
|
|
elif self.rotation_angle == 270:
|
|
rot_x = self.frame_width - (orig_y + py)
|
|
rot_y = orig_x + px
|
|
else:
|
|
rot_x = orig_x + px
|
|
rot_y = orig_y + py
|
|
return (int(rot_x), int(rot_y))
|
|
|
|
# Extract features and add them to existing features
|
|
success = self.feature_tracker.extract_features_from_region(region_frame, self.current_frame, coord_mapper)
|
|
if success:
|
|
count = self.feature_tracker.get_feature_count(self.current_frame)
|
|
self.show_feedback_message(f"Added features from selected region (total: {count})")
|
|
else:
|
|
self.show_feedback_message("Failed to extract features from region")
|
|
else:
|
|
self.show_feedback_message("Region too small")
|
|
else:
|
|
self.show_feedback_message("Region outside frame bounds")
|
|
|
|
def _delete_features_from_region(self, screen_rect):
|
|
"""Delete features from a specific screen region"""
|
|
x, y, w, h = screen_rect
|
|
print(f"DEBUG: Deleting features from region ({x}, {y}, {w}, {h})")
|
|
|
|
if self.current_frame not in self.feature_tracker.features:
|
|
self.show_feedback_message("No features to delete")
|
|
return
|
|
|
|
# Map screen coordinates to rotated frame coordinates
|
|
rx1, ry1 = self._map_screen_to_rotated(x, y)
|
|
rx2, ry2 = self._map_screen_to_rotated(x + w, y + h)
|
|
|
|
# Get the region in rotated frame coordinates
|
|
left_r = min(rx1, rx2)
|
|
top_r = min(ry1, ry2)
|
|
right_r = max(rx1, rx2)
|
|
bottom_r = max(ry1, ry2)
|
|
|
|
# Remove features within this region
|
|
features = self.feature_tracker.features[self.current_frame]
|
|
original_count = len(features['positions'])
|
|
|
|
# Filter out features within the region
|
|
filtered_positions = []
|
|
for fx, fy in features['positions']:
|
|
if not (left_r <= fx <= right_r and top_r <= fy <= bottom_r):
|
|
filtered_positions.append((fx, fy))
|
|
|
|
# Update the features
|
|
features['positions'] = filtered_positions
|
|
removed_count = original_count - len(filtered_positions)
|
|
|
|
if removed_count > 0:
|
|
self.show_feedback_message(f"Removed {removed_count} features from selected region")
|
|
self.save_state()
|
|
else:
|
|
self.show_feedback_message("No features found in selected region")
|
|
|
|
def _track_with_optical_flow(self):
|
|
"""Track features using optical flow from previous frame"""
|
|
try:
|
|
# Get previous frame features
|
|
prev_frame_number = self.current_frame - 1
|
|
if prev_frame_number not in self.feature_tracker.features:
|
|
print(f"DEBUG: No features on previous frame {prev_frame_number} for optical flow")
|
|
return
|
|
|
|
prev_features = self.feature_tracker.features[prev_frame_number]
|
|
prev_positions = np.array(prev_features['positions'], dtype=np.float32).reshape(-1, 1, 2)
|
|
|
|
if len(prev_positions) == 0:
|
|
print(f"DEBUG: No positions on previous frame {prev_frame_number} for optical flow")
|
|
return
|
|
|
|
print(f"DEBUG: Optical flow tracking from frame {prev_frame_number} to {self.current_frame}")
|
|
|
|
# Apply transformations to get the display frames
|
|
prev_display_frame = self.apply_crop_zoom_and_rotation(self.previous_frame_for_flow)
|
|
curr_display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)
|
|
|
|
if prev_display_frame is None or curr_display_frame is None:
|
|
print("DEBUG: Could not get display frames for optical flow")
|
|
return
|
|
|
|
# Map previous positions to display frame coordinates
|
|
display_prev_positions = []
|
|
for px, py in prev_positions.reshape(-1, 2):
|
|
# Map from rotated frame coordinates to screen coordinates
|
|
sx, sy = self._map_rotated_to_screen(px, py)
|
|
|
|
# Map from screen coordinates to display frame coordinates
|
|
frame_height, frame_width = prev_display_frame.shape[:2]
|
|
available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
|
|
start_y = (available_height - frame_height) // 2
|
|
start_x = (self.window_width - frame_width) // 2
|
|
|
|
display_x = sx - start_x
|
|
display_y = sy - start_y
|
|
|
|
if 0 <= display_x < frame_width and 0 <= display_y < frame_height:
|
|
display_prev_positions.append([display_x, display_y])
|
|
|
|
if len(display_prev_positions) == 0:
|
|
print("DEBUG: No valid display positions for optical flow")
|
|
return
|
|
|
|
display_prev_positions = np.array(display_prev_positions, dtype=np.float32).reshape(-1, 1, 2)
|
|
print(f"DEBUG: Tracking {len(display_prev_positions)} points with optical flow")
|
|
|
|
# Track using optical flow
|
|
new_points, good_old, status = self.feature_tracker.track_features_optical_flow(
|
|
prev_display_frame, curr_display_frame, display_prev_positions
|
|
)
|
|
|
|
if new_points is not None and len(new_points) > 0:
|
|
print(f"DEBUG: Optical flow found {len(new_points)} tracked points")
|
|
|
|
# Map new positions back to rotated frame coordinates
|
|
mapped_positions = []
|
|
for point in new_points.reshape(-1, 2):
|
|
# Map from display frame coordinates to screen coordinates
|
|
frame_height, frame_width = curr_display_frame.shape[:2]
|
|
available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
|
|
start_y = (available_height - frame_height) // 2
|
|
start_x = (self.window_width - frame_width) // 2
|
|
|
|
screen_x = point[0] + start_x
|
|
screen_y = point[1] + start_y
|
|
|
|
# Map from screen coordinates to rotated frame coordinates
|
|
rx, ry = self._map_screen_to_rotated(screen_x, screen_y)
|
|
mapped_positions.append((int(rx), int(ry)))
|
|
|
|
# Store tracked features
|
|
self.feature_tracker.features[self.current_frame] = {
|
|
'keypoints': [], # Optical flow doesn't use keypoints
|
|
'descriptors': np.array([]), # Optical flow doesn't use descriptors
|
|
'positions': mapped_positions
|
|
}
|
|
|
|
print(f"Optical flow tracked {len(mapped_positions)} features to frame {self.current_frame}")
|
|
else:
|
|
print("DEBUG: Optical flow failed to track any points")
|
|
|
|
except Exception as e:
|
|
print(f"Error in optical flow tracking: {e}")
|
|
|
|
|
|
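    # Note on the optical-flow path above (illustrative): stored feature positions
    # live in rotated frame coordinates, so they are first projected into the
    # processed display frame, handed to the FeatureTracker's
    # track_features_optical_flow helper (presumably a pyramidal Lucas-Kanade
    # tracker), and the surviving points are mapped back through
    # _map_screen_to_rotated before being stored for the current frame.
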
def _interpolate_features_between_frames(self, start_frame, end_frame):
|
|
"""Interpolate features between two frames using linear interpolation"""
|
|
try:
|
|
print(f"DEBUG: Starting interpolation between frame {start_frame} and {end_frame}")
|
|
|
|
if start_frame not in self.feature_tracker.features or end_frame not in self.feature_tracker.features:
|
|
print(f"DEBUG: Missing features on start_frame={start_frame} or end_frame={end_frame}")
|
|
return
|
|
|
|
start_features = self.feature_tracker.features[start_frame]['positions']
|
|
end_features = self.feature_tracker.features[end_frame]['positions']
|
|
|
|
print(f"DEBUG: Start frame {start_frame} has {len(start_features)} features")
|
|
print(f"DEBUG: End frame {end_frame} has {len(end_features)} features")
|
|
|
|
if len(start_features) != len(end_features):
|
|
print(f"DEBUG: Feature count mismatch between frames {start_frame} and {end_frame} ({len(start_features)} vs {len(end_features)})")
|
|
print(f"DEBUG: Using minimum count for interpolation")
|
|
# Use the minimum count to avoid index errors
|
|
min_count = min(len(start_features), len(end_features))
|
|
start_features = start_features[:min_count]
|
|
end_features = end_features[:min_count]
|
|
|
|
# Interpolate for all frames between start and end
|
|
frames_to_interpolate = []
|
|
for frame_num in range(start_frame + 1, end_frame):
|
|
if frame_num in self.feature_tracker.features:
|
|
print(f"DEBUG: Frame {frame_num} already has features, skipping")
|
|
continue # Skip if already has features
|
|
frames_to_interpolate.append(frame_num)
|
|
|
|
print(f"DEBUG: Will interpolate {len(frames_to_interpolate)} frames: {frames_to_interpolate}")
|
|
|
|
for frame_num in frames_to_interpolate:
|
|
# Linear interpolation
|
|
alpha = (frame_num - start_frame) / (end_frame - start_frame)
|
|
interpolated_positions = []
|
|
|
|
for i in range(len(start_features)):
|
|
start_x, start_y = start_features[i]
|
|
end_x, end_y = end_features[i]
|
|
|
|
interp_x = start_x + alpha * (end_x - start_x)
|
|
interp_y = start_y + alpha * (end_y - start_y)
|
|
|
|
interpolated_positions.append((int(interp_x), int(interp_y)))
|
|
|
|
# Store interpolated features
|
|
self.feature_tracker.features[frame_num] = {
|
|
'keypoints': [],
|
|
'descriptors': np.array([]),
|
|
'positions': interpolated_positions
|
|
}
|
|
|
|
print(f"DEBUG: Interpolated {len(interpolated_positions)} features for frame {frame_num}")
|
|
|
|
print(f"DEBUG: Finished interpolation between frame {start_frame} and {end_frame}")
|
|
|
|
except Exception as e:
|
|
print(f"Error interpolating features: {e}")
|
|
|
|
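    # Example (illustrative): interpolating between frame 10 and frame 20, a feature
    # that moves from (100, 200) to (110, 240) gets alpha = 0.3 at frame 13, i.e.
    # (100 + 0.3 * 10, 200 + 0.3 * 40) = (103, 212) after the int() conversion.
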
def _fill_all_gaps_with_interpolation(self):
|
|
"""Fill all gaps between existing features with linear interpolation"""
|
|
try:
|
|
print("=== FILLING ALL GAPS WITH INTERPOLATION ===")
|
|
print(f"DEBUG: Total features stored: {len(self.feature_tracker.features)}")
|
|
|
|
if not self.feature_tracker.features:
|
|
print("DEBUG: No features to interpolate between")
|
|
return
|
|
|
|
# Get all frames with features, sorted
|
|
frames_with_features = sorted(self.feature_tracker.features.keys())
|
|
print(f"DEBUG: Frames with features: {frames_with_features}")
|
|
|
|
if len(frames_with_features) < 2:
|
|
print("DEBUG: Need at least 2 frames with features to interpolate")
|
|
return
|
|
|
|
# Fill gaps between each pair of consecutive frames with features
|
|
for i in range(len(frames_with_features) - 1):
|
|
start_frame = frames_with_features[i]
|
|
end_frame = frames_with_features[i + 1]
|
|
|
|
print(f"DEBUG: Interpolating between frame {start_frame} and {end_frame}")
|
|
self._interpolate_features_between_frames(start_frame, end_frame)
|
|
|
|
print(f"DEBUG: After interpolation, total features stored: {len(self.feature_tracker.features)}")
|
|
print("=== FINISHED FILLING ALL GAPS ===")
|
|
|
|
except Exception as e:
|
|
print(f"Error filling all gaps: {e}")
|
|
|
|
    def _get_feature_center(self, frame_number):
        """Get the center of features for a frame (smooth, not jarring)"""
        if frame_number not in self.feature_tracker.features:
            return None

        positions = self.feature_tracker.features[frame_number]['positions']
        if not positions:
            return None

        # Calculate the centroid (mean) of the feature positions
        center_x = sum(pos[0] for pos in positions) / len(positions)
        center_y = sum(pos[1] for pos in positions) / len(positions)

        return (center_x, center_y)

    def _interpolate_feature_positions(self, start_frame, end_frame, target_frame):
        """Smoothly interpolate between feature centers of two frames"""
        start_center = self._get_feature_center(start_frame)
        end_center = self._get_feature_center(end_frame)

        if not start_center or not end_center:
            return None

        # Linear interpolation between centers
        alpha = (target_frame - start_frame) / (end_frame - start_frame)

        interp_x = start_center[0] + alpha * (end_center[0] - start_center[0])
        interp_y = start_center[1] + alpha * (end_center[1] - start_center[1])

        return (interp_x, interp_y)

    def set_tracking_template(self, frame, region):
        """Set a template region for tracking (much better than optical flow)"""
        try:
            x, y, w, h = region
            self.tracking_template = frame[y:y+h, x:x+w].copy()
            self.template_region = region
            print(f"DEBUG: Set tracking template with region {region}")
            return True
        except Exception as e:
            print(f"Error setting tracking template: {e}")
            return False

    def track_template(self, frame):
        """Track the template in the current frame"""
        if self.tracking_template is None:
            # Try to recreate template from saved region
            if self.template_region is not None:
                self._recreate_template_from_region(frame)
            if self.tracking_template is None:
                return None

        try:
            # Convert to grayscale
            if len(frame.shape) == 3:
                gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            else:
                gray_frame = frame

            if len(self.tracking_template.shape) == 3:
                gray_template = cv2.cvtColor(self.tracking_template, cv2.COLOR_BGR2GRAY)
            else:
                gray_template = self.tracking_template

            # Template matching
            result = cv2.matchTemplate(gray_frame, gray_template, cv2.TM_CCOEFF_NORMED)
            _, max_val, _, max_loc = cv2.minMaxLoc(result)

            # Only accept matches above threshold
            if max_val > 0.6:  # Adjust threshold as needed
                # Get template center
                template_h, template_w = gray_template.shape
                center_x = max_loc[0] + template_w // 2
                center_y = max_loc[1] + template_h // 2
                return (center_x, center_y, max_val)
            else:
                return None

        except Exception as e:
            print(f"Error in template tracking: {e}")
            return None

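    # Note (illustrative): cv2.TM_CCOEFF_NORMED returns a normalized correlation
    # score in [-1, 1]; cv2.minMaxLoc gives the top-left corner of the best match,
    # so adding half the template size yields its center, and the 0.6 cutoff simply
    # rejects weak matches (raise it for stricter tracking, lower it for noisy video).
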
    def _recreate_template_from_region(self, frame):
        """Recreate template from saved region coordinates"""
        try:
            if self.template_region is None:
                return False

            x, y, w, h = self.template_region
            print(f"DEBUG: Recreating template from region ({x}, {y}, {w}, {h})")

            # Ensure region is within frame bounds
            if (x >= 0 and y >= 0 and
                    x + w <= frame.shape[1] and
                    y + h <= frame.shape[0]):

                # Extract template from frame
                template = frame[y:y+h, x:x+w]
                if template.size > 0:
                    self.tracking_template = template.copy()
                    print(f"DEBUG: Template recreated with size {template.shape}")
                    return True
                else:
                    print("DEBUG: Template region too small")
                    return False
            else:
                print("DEBUG: Template region outside frame bounds")
                return False

        except Exception as e:
            print(f"Error recreating template: {e}")
            return False

def _set_template_from_region(self, screen_rect):
|
|
"""Set template from selected region"""
|
|
x, y, w, h = screen_rect
|
|
print(f"DEBUG: Setting template from region ({x}, {y}, {w}, {h})")
|
|
|
|
if self.current_display_frame is not None:
|
|
# Apply transformations to get the display frame
|
|
display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)
|
|
if display_frame is not None:
|
|
# Map screen coordinates to display frame coordinates
|
|
frame_height, frame_width = display_frame.shape[:2]
|
|
available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
|
|
start_y = (available_height - frame_height) // 2
|
|
start_x = (self.window_width - frame_width) // 2
|
|
|
|
# Convert screen coordinates to display frame coordinates
|
|
display_x = x - start_x
|
|
display_y = y - start_y
|
|
display_w = w
|
|
display_h = h
|
|
|
|
# Ensure region is within frame bounds
|
|
if (display_x >= 0 and display_y >= 0 and
|
|
display_x + display_w <= frame_width and
|
|
display_y + display_h <= frame_height):
|
|
|
|
# Extract template from display frame
|
|
template = display_frame[display_y:display_y+display_h, display_x:display_x+display_w]
|
|
if template.size > 0:
|
|
self.tracking_template = template.copy()
|
|
self.template_region = (display_x, display_y, display_w, display_h)
|
|
self.show_feedback_message(f"Template set from region ({display_w}x{display_h})")
|
|
print(f"DEBUG: Template set with size {template.shape}")
|
|
else:
|
|
self.show_feedback_message("Template region too small")
|
|
else:
|
|
self.show_feedback_message("Template region outside frame bounds")
|
|
|
|
|
|
    def apply_rotation(self, frame):
        """Apply rotation to frame"""
        if self.rotation_angle == 0:
            return frame
        elif self.rotation_angle == 90:
            return cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
        elif self.rotation_angle == 180:
            return cv2.rotate(frame, cv2.ROTATE_180)
        elif self.rotation_angle == 270:
            return cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
        return frame

    def rotate_clockwise(self):
        """Rotate video 90 degrees clockwise"""
        self.rotation_angle = (self.rotation_angle + 90) % 360
        self.clear_transformation_cache()

    def apply_brightness_contrast(self, frame):
        """Apply brightness and contrast adjustments to frame"""
        if self.brightness == 0 and self.contrast == 1.0:
            return frame

        # Convert brightness from -100/100 range to -255/255 range
        brightness_value = self.brightness * 2.55

        # Apply brightness and contrast: new_pixel = contrast * old_pixel + brightness
        adjusted = cv2.convertScaleAbs(
            frame, alpha=self.contrast, beta=brightness_value
        )
        return adjusted

    def adjust_brightness(self, delta: int):
        """Adjust brightness by delta (-100 to 100)"""
        self.brightness = max(-100, min(100, self.brightness + delta))
        self.clear_transformation_cache()
        self.display_needs_update = True

    def adjust_contrast(self, delta: float):
        """Adjust contrast by delta (0.1 to 3.0)"""
        self.contrast = max(0.1, min(3.0, self.contrast + delta))
        self.clear_transformation_cache()
        self.display_needs_update = True

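    # Example (illustrative): with brightness = 20 and contrast = 1.5, a pixel value
    # of 100 becomes roughly 1.5 * 100 + 20 * 2.55 = 201; cv2.convertScaleAbs rounds
    # and saturates the result to the 0-255 range.
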
def show_progress_bar(self, text: str = "Processing..."):
|
|
"""Show progress bar with given text"""
|
|
self.progress_bar_visible = True
|
|
self.progress_bar_progress = 0.0
|
|
self.progress_bar_complete = False
|
|
self.progress_bar_complete_time = None
|
|
self.progress_bar_text = text
|
|
self.display_needs_update = True
|
|
|
|
def update_progress_bar(self, progress: float, text: str = None, fps: float = None):
|
|
"""Update progress bar progress (0.0 to 1.0) and optionally text and FPS"""
|
|
if self.progress_bar_visible:
|
|
self.progress_bar_progress = max(0.0, min(1.0, progress))
|
|
if text is not None:
|
|
self.progress_bar_text = text
|
|
if fps is not None:
|
|
self.progress_bar_fps = fps
|
|
|
|
# Mark as complete when reaching 100%
|
|
if self.progress_bar_progress >= 1.0 and not self.progress_bar_complete:
|
|
self.progress_bar_complete = True
|
|
self.progress_bar_complete_time = time.time()
|
|
|
|
def hide_progress_bar(self):
|
|
"""Hide progress bar"""
|
|
self.progress_bar_visible = False
|
|
self.progress_bar_complete = False
|
|
self.progress_bar_complete_time = None
|
|
self.progress_bar_fps = 0.0
|
|
|
|
def show_feedback_message(self, message: str):
|
|
"""Show a feedback message on screen for a few seconds"""
|
|
self.feedback_message = message
|
|
self.feedback_message_time = time.time()
|
|
self.display_needs_update = True
|
|
|
|
def toggle_fullscreen(self):
|
|
"""Toggle between windowed and fullscreen mode"""
|
|
window_title = "Image Editor" if self.is_image_mode else "Video Editor"
|
|
|
|
if self.is_fullscreen:
|
|
# Switch to windowed mode
|
|
self.is_fullscreen = False
|
|
cv2.setWindowProperty(window_title, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)
|
|
cv2.resizeWindow(window_title, 1200, 800)
|
|
print("Switched to windowed mode")
|
|
else:
|
|
# Switch to fullscreen mode
|
|
self.is_fullscreen = True
|
|
cv2.setWindowProperty(window_title, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
|
|
print("Switched to fullscreen mode")
|
|
|
|
self.display_needs_update = True
|
|
|
|
def toggle_project_view(self):
|
|
"""Toggle between editor and project view mode"""
|
|
if self.project_view_mode:
|
|
# Switch back to editor mode
|
|
self.project_view_mode = False
|
|
if self.project_view:
|
|
cv2.destroyWindow("Project View")
|
|
self.project_view = None
|
|
print("Switched to editor mode")
|
|
else:
|
|
# Switch to project view mode
|
|
self.project_view_mode = True
|
|
# Create project view for the current directory
|
|
if self.path.is_dir():
|
|
project_dir = self.path
|
|
else:
|
|
project_dir = self.path.parent
|
|
self.project_view = ProjectView(project_dir, self)
|
|
# Create separate window for project view
|
|
cv2.namedWindow("Project View", cv2.WINDOW_AUTOSIZE)
|
|
print("Switched to project view mode")
|
|
|
|
self.display_needs_update = True
|
|
|
|
def open_video_from_project_view(self, video_path: Path):
|
|
"""Open a video from project view in editor mode"""
|
|
print(f"Attempting to open video: {video_path}")
|
|
print(f"Video path exists: {video_path.exists()}")
|
|
|
|
# Save current state before switching
|
|
self.save_state()
|
|
|
|
# Find the video in our video_files list
|
|
try:
|
|
video_index = self.video_files.index(video_path)
|
|
self.current_video_index = video_index
|
|
self._load_video(video_path)
|
|
self.load_current_frame()
|
|
# Load the saved state for this video (same logic as normal video loading)
|
|
self.load_state()
|
|
print(f"Opened video: {video_path.name}")
|
|
except ValueError:
|
|
print(f"Video not found in current session: {video_path.name}")
|
|
# If video not in current session, reload the directory
|
|
self.path = video_path.parent
|
|
self.video_files = self._get_media_files_from_directory(self.path)
|
|
if video_path in self.video_files:
|
|
video_index = self.video_files.index(video_path)
|
|
self.current_video_index = video_index
|
|
self._load_video(video_path)
|
|
self.load_current_frame()
|
|
# Load the saved state for this video (same logic as normal video loading)
|
|
self.load_state()
|
|
print(f"Opened video: {video_path.name}")
|
|
else:
|
|
print(f"Could not find video: {video_path.name}")
|
|
return
|
|
|
|
# Keep project view open but switch focus to video editor
|
|
# Don't destroy the project view window - just let the user switch between them
|
|
|
|
def draw_feedback_message(self, frame):
|
|
"""Draw feedback message on frame if visible"""
|
|
if not self.feedback_message or not self.feedback_message_time:
|
|
return
|
|
|
|
# Check if message should still be shown
|
|
elapsed = time.time() - self.feedback_message_time
|
|
if elapsed > self.feedback_message_duration:
|
|
self.feedback_message = ""
|
|
self.feedback_message_time = None
|
|
return
|
|
|
|
height, width = frame.shape[:2]
|
|
|
|
# Calculate message position (center of frame)
|
|
font = cv2.FONT_HERSHEY_SIMPLEX
|
|
font_scale = 1.0
|
|
thickness = 2
|
|
|
|
# Get text size
|
|
text_size = cv2.getTextSize(self.feedback_message, font, font_scale, thickness)[0]
|
|
text_x = (width - text_size[0]) // 2
|
|
text_y = (height + text_size[1]) // 2
|
|
|
|
# Draw background rectangle
|
|
padding = 10
|
|
rect_x1 = text_x - padding
|
|
rect_y1 = text_y - text_size[1] - padding
|
|
rect_x2 = text_x + text_size[0] + padding
|
|
rect_y2 = text_y + padding
|
|
|
|
# Semi-transparent background
|
|
overlay = frame.copy()
|
|
cv2.rectangle(overlay, (rect_x1, rect_y1), (rect_x2, rect_y2), (0, 0, 0), -1)
|
|
alpha = 0.7
|
|
cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0, frame)
|
|
|
|
# Draw text with shadow
|
|
cv2.putText(frame, self.feedback_message, (text_x + 2, text_y + 2), font, font_scale, (0, 0, 0), thickness + 1)
|
|
cv2.putText(frame, self.feedback_message, (text_x, text_y), font, font_scale, (255, 255, 255), thickness)
|
|
|
|
def draw_progress_bar(self, frame):
|
|
"""Draw progress bar on frame if visible - positioned at top with full width"""
|
|
if not self.progress_bar_visible:
|
|
return
|
|
|
|
# Check if we should fade out
|
|
if self.progress_bar_complete and self.progress_bar_complete_time:
|
|
elapsed = time.time() - self.progress_bar_complete_time
|
|
if elapsed > self.PROGRESS_BAR_FADE_DURATION:
|
|
self.hide_progress_bar()
|
|
return
|
|
|
|
# Calculate fade alpha (1.0 at start, 0.0 at end)
|
|
fade_alpha = max(0.0, 1.0 - (elapsed / self.PROGRESS_BAR_FADE_DURATION))
|
|
else:
|
|
fade_alpha = 1.0
|
|
|
|
height, width = frame.shape[:2]
|
|
|
|
# Calculate progress bar position (top of frame with 5% margins)
|
|
margin_width = int(width * self.PROGRESS_BAR_MARGIN_PERCENT / 100)
|
|
bar_width = width - (2 * margin_width)
|
|
bar_x = margin_width
|
|
bar_y = self.PROGRESS_BAR_TOP_MARGIN
|
|
|
|
# Apply fade alpha to colors
|
|
bg_color = tuple(int(c * fade_alpha) for c in self.PROGRESS_BAR_COLOR_BG)
|
|
border_color = tuple(
|
|
int(c * fade_alpha) for c in self.PROGRESS_BAR_COLOR_BORDER
|
|
)
|
|
|
|
if self.progress_bar_complete:
|
|
fill_color = tuple(
|
|
int(c * fade_alpha) for c in self.PROGRESS_BAR_COLOR_FILL
|
|
)
|
|
else:
|
|
fill_color = tuple(
|
|
int(c * fade_alpha) for c in self.PROGRESS_BAR_COLOR_PROGRESS
|
|
)
|
|
|
|
# Draw background
|
|
cv2.rectangle(
|
|
frame,
|
|
(bar_x, bar_y),
|
|
(bar_x + bar_width, bar_y + self.PROGRESS_BAR_HEIGHT),
|
|
bg_color,
|
|
-1,
|
|
)
|
|
|
|
# Draw progress fill
|
|
fill_width = int(bar_width * self.progress_bar_progress)
|
|
if fill_width > 0:
|
|
cv2.rectangle(
|
|
frame,
|
|
(bar_x, bar_y),
|
|
(bar_x + fill_width, bar_y + self.PROGRESS_BAR_HEIGHT),
|
|
fill_color,
|
|
-1,
|
|
)
|
|
|
|
# Draw border
|
|
cv2.rectangle(
|
|
frame,
|
|
(bar_x, bar_y),
|
|
(bar_x + bar_width, bar_y + self.PROGRESS_BAR_HEIGHT),
|
|
border_color,
|
|
2,
|
|
)
|
|
|
|
# Draw progress percentage on the left
|
|
percentage_text = f"{self.progress_bar_progress * 100:.1f}%"
|
|
text_color = tuple(int(255 * fade_alpha) for _ in range(3))
|
|
cv2.putText(
|
|
frame,
|
|
percentage_text,
|
|
(bar_x + 12, bar_y + 22),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.5,
|
|
(0, 0, 0),
|
|
4,
|
|
)
|
|
cv2.putText(
|
|
frame,
|
|
percentage_text,
|
|
(bar_x + 10, bar_y + 20),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.5,
|
|
text_color,
|
|
2,
|
|
)
|
|
|
|
# Draw FPS on the right if available
|
|
if self.progress_bar_fps > 0:
|
|
fps_text = f"{self.progress_bar_fps:.1f} FPS"
|
|
fps_text_size = cv2.getTextSize(fps_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[
|
|
0
|
|
]
|
|
fps_x = bar_x + bar_width - fps_text_size[0] - 10
|
|
cv2.putText(
|
|
frame,
|
|
fps_text,
|
|
(fps_x + 2, bar_y + 22),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.5,
|
|
(0, 0, 0),
|
|
4,
|
|
)
|
|
cv2.putText(
|
|
frame,
|
|
fps_text,
|
|
(fps_x, bar_y + 20),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.5,
|
|
text_color,
|
|
2,
|
|
)
|
|
|
|
# Draw main text in center
|
|
if self.progress_bar_text:
|
|
text_size = cv2.getTextSize(
|
|
self.progress_bar_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1
|
|
)[0]
|
|
text_x = bar_x + (bar_width - text_size[0]) // 2
|
|
text_y = bar_y + 20
|
|
|
|
# Draw text shadow for better visibility
|
|
cv2.putText(
|
|
frame,
|
|
self.progress_bar_text,
|
|
(text_x + 2, text_y + 2),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.5,
|
|
(0, 0, 0),
|
|
4,
|
|
)
|
|
cv2.putText(
|
|
frame,
|
|
self.progress_bar_text,
|
|
(text_x, text_y),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.5,
|
|
text_color,
|
|
2,
|
|
)
|
|
|
|
def draw_timeline(self, frame):
|
|
"""Draw timeline at the bottom of the frame"""
|
|
# Don't draw timeline for images
|
|
if self.is_image_mode:
|
|
return
|
|
|
|
height, width = frame.shape[:2]
|
|
|
|
# Timeline background area
|
|
timeline_y = height - self.TIMELINE_HEIGHT
|
|
cv2.rectangle(frame, (0, timeline_y), (width, height), (40, 40, 40), -1)
|
|
|
|
# Calculate timeline bar position
|
|
bar_y = timeline_y + (self.TIMELINE_HEIGHT - self.TIMELINE_BAR_HEIGHT) // 2
|
|
bar_x_start = self.TIMELINE_MARGIN
|
|
bar_x_end = width - self.TIMELINE_MARGIN
|
|
bar_width = bar_x_end - bar_x_start
|
|
|
|
self.timeline_rect = (bar_x_start, bar_y, bar_width, self.TIMELINE_BAR_HEIGHT)
|
|
|
|
# Draw timeline background
|
|
cv2.rectangle(
|
|
frame,
|
|
(bar_x_start, bar_y),
|
|
(bar_x_end, bar_y + self.TIMELINE_BAR_HEIGHT),
|
|
self.TIMELINE_COLOR_BG,
|
|
-1,
|
|
)
|
|
cv2.rectangle(
|
|
frame,
|
|
(bar_x_start, bar_y),
|
|
(bar_x_end, bar_y + self.TIMELINE_BAR_HEIGHT),
|
|
self.TIMELINE_COLOR_BORDER,
|
|
1,
|
|
)
|
|
|
|
# Draw progress
|
|
if self.total_frames > 0:
|
|
progress = self.current_frame / max(1, self.total_frames - 1)
|
|
progress_width = int(bar_width * progress)
|
|
if progress_width > 0:
|
|
cv2.rectangle(
|
|
frame,
|
|
(bar_x_start, bar_y),
|
|
(bar_x_start + progress_width, bar_y + self.TIMELINE_BAR_HEIGHT),
|
|
self.TIMELINE_COLOR_PROGRESS,
|
|
-1,
|
|
)
|
|
|
|
# Draw current position handle
|
|
handle_x = bar_x_start + progress_width
|
|
handle_y = bar_y + self.TIMELINE_BAR_HEIGHT // 2
|
|
cv2.circle(
|
|
frame,
|
|
(handle_x, handle_y),
|
|
self.TIMELINE_HANDLE_SIZE // 2,
|
|
self.TIMELINE_COLOR_HANDLE,
|
|
-1,
|
|
)
|
|
cv2.circle(
|
|
frame,
|
|
(handle_x, handle_y),
|
|
self.TIMELINE_HANDLE_SIZE // 2,
|
|
self.TIMELINE_COLOR_BORDER,
|
|
2,
|
|
)
|
|
|
|
# Draw cut points
|
|
if self.cut_start_frame is not None:
|
|
cut_start_progress = self.cut_start_frame / max(
|
|
1, self.total_frames - 1
|
|
)
|
|
cut_start_x = bar_x_start + int(bar_width * cut_start_progress)
|
|
cv2.line(
|
|
frame,
|
|
(cut_start_x, bar_y),
|
|
(cut_start_x, bar_y + self.TIMELINE_BAR_HEIGHT),
|
|
self.TIMELINE_COLOR_CUT_POINT,
|
|
3,
|
|
)
|
|
cv2.putText(
|
|
frame,
|
|
"1",
|
|
(cut_start_x - 5, bar_y - 5),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.4,
|
|
self.TIMELINE_COLOR_CUT_POINT,
|
|
1,
|
|
)
|
|
|
|
if self.cut_end_frame is not None:
|
|
cut_end_progress = self.cut_end_frame / max(1, self.total_frames - 1)
|
|
cut_end_x = bar_x_start + int(bar_width * cut_end_progress)
|
|
cv2.line(
|
|
frame,
|
|
(cut_end_x, bar_y),
|
|
(cut_end_x, bar_y + self.TIMELINE_BAR_HEIGHT),
|
|
self.TIMELINE_COLOR_CUT_POINT,
|
|
3,
|
|
)
|
|
cv2.putText(
|
|
frame,
|
|
"2",
|
|
(cut_end_x - 5, bar_y - 5),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.4,
|
|
self.TIMELINE_COLOR_CUT_POINT,
|
|
1,
|
|
)
|
|
|
|
def display_current_frame(self):
|
|
"""Display the current frame with all overlays"""
|
|
if self.current_display_frame is None:
|
|
return
|
|
|
|
# Check if display needs update (optimization)
|
|
current_state = (
|
|
self.current_frame,
|
|
self.crop_rect,
|
|
self.zoom_factor,
|
|
self.rotation_angle,
|
|
self.brightness,
|
|
self.contrast,
|
|
self.display_offset,
|
|
self.progress_bar_visible,
|
|
self.feedback_message
|
|
)
|
|
|
|
# Always update display when paused to ensure UI elements are visible
|
|
if not self.display_needs_update and current_state == self.last_display_state and self.is_playing:
|
|
return # Skip redraw if nothing changed and playing
|
|
|
|
self.last_display_state = current_state
|
|
self.display_needs_update = False
|
|
|
|
# Apply crop, zoom, and rotation transformations for preview
|
|
display_frame = self.apply_crop_zoom_and_rotation(
|
|
self.current_display_frame
|
|
)
|
|
|
|
if display_frame is None:
|
|
return
|
|
|
|
# Resize to fit window while maintaining aspect ratio
|
|
height, width = display_frame.shape[:2]
|
|
available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
|
|
|
|
# Don't downscale - keep original video quality
|
|
# If video is larger than window, we'll handle it by resizing the window
|
|
scale = min(self.window_width / width, available_height / height)
|
|
if scale < 1.0:
|
|
# Resize window to fit video instead of downscaling video
|
|
new_window_width = int(width * 1.1) # Add 10% padding
|
|
new_window_height = int(height * 1.1) + (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
|
|
|
|
# Update window size
|
|
self.window_width = new_window_width
|
|
self.window_height = new_window_height
|
|
|
|
# Resize the OpenCV window
|
|
window_title = "Image Editor" if self.is_image_mode else "Video Editor"
|
|
cv2.resizeWindow(window_title, self.window_width, self.window_height)
|
|
|
|
# Create canvas with timeline space
|
|
canvas = np.zeros((self.window_height, self.window_width, 3), dtype=np.uint8)
|
|
|
|
# Center the frame on canvas
|
|
frame_height, frame_width = display_frame.shape[:2]
|
|
available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
|
|
start_y = (available_height - frame_height) // 2
|
|
start_x = (self.window_width - frame_width) // 2
|
|
|
|
# Ensure frame fits within canvas bounds
|
|
end_y = min(start_y + frame_height, available_height)
|
|
end_x = min(start_x + frame_width, self.window_width)
|
|
actual_frame_height = end_y - start_y
|
|
actual_frame_width = end_x - start_x
|
|
|
|
if actual_frame_height > 0 and actual_frame_width > 0:
|
|
canvas[start_y:end_y, start_x:end_x] = display_frame[:actual_frame_height, :actual_frame_width]
|
|
|
|
# Draw crop selection preview during Shift+Click+Drag
|
|
if self.crop_preview_rect:
|
|
x, y, w, h = self.crop_preview_rect
|
|
cv2.rectangle(
|
|
canvas, (int(x), int(y)), (int(x + w), int(y + h)), (0, 255, 0), 2
|
|
)
|
|
|
|
# Add info overlay
|
|
rotation_text = (
|
|
f" | Rotation: {self.rotation_angle}°" if self.rotation_angle != 0 else ""
|
|
)
|
|
brightness_text = (
|
|
f" | Brightness: {self.brightness}" if self.brightness != 0 else ""
|
|
)
|
|
contrast_text = (
|
|
f" | Contrast: {self.contrast:.1f}" if self.contrast != 1.0 else ""
|
|
)
|
|
seek_multiplier_text = (
|
|
f" | Seek: {self.seek_multiplier:.1f}x" if self.seek_multiplier != 1.0 else ""
|
|
)
|
|
motion_text = (
|
|
f" | Motion: {self.tracking_enabled}" if self.tracking_enabled else ""
|
|
)
|
|
feature_text = (
|
|
f" | Features: {self.feature_tracker.tracking_enabled}" if self.feature_tracker.tracking_enabled else ""
|
|
)
|
|
if self.feature_tracker.tracking_enabled and self.current_frame in self.feature_tracker.features:
|
|
feature_count = self.feature_tracker.get_feature_count(self.current_frame)
|
|
feature_text = f" | Features: {feature_count} pts"
|
|
if self.optical_flow_enabled:
|
|
feature_text += " (OPTICAL FLOW)"
|
|
autorepeat_text = (
|
|
f" | Loop: ON" if self.looping_between_markers else ""
|
|
)
|
|
if self.is_image_mode:
|
|
info_text = f"Image | Zoom: {self.zoom_factor:.1f}x{rotation_text}{brightness_text}{contrast_text}{motion_text}{feature_text}"
|
|
else:
|
|
info_text = f"Frame: {self.current_frame}/{self.total_frames} | Speed: {self.playback_speed:.1f}x | Zoom: {self.zoom_factor:.1f}x{seek_multiplier_text}{rotation_text}{brightness_text}{contrast_text}{motion_text}{feature_text}{autorepeat_text} | {'Playing' if self.is_playing else 'Paused'}"
|
|
cv2.putText(
|
|
canvas,
|
|
info_text,
|
|
(10, 30),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.7,
|
|
(255, 255, 255),
|
|
2,
|
|
)
|
|
cv2.putText(
|
|
canvas, info_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 1
|
|
)
|
|
|
|
# Add video navigation info
|
|
if len(self.video_files) > 1:
|
|
video_text = f"Video: {self.current_video_index + 1}/{len(self.video_files)} - {self.video_path.name}"
|
|
cv2.putText(
|
|
canvas,
|
|
video_text,
|
|
(10, 60),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.6,
|
|
(255, 255, 255),
|
|
2,
|
|
)
|
|
cv2.putText(
|
|
canvas,
|
|
video_text,
|
|
(10, 60),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.6,
|
|
(0, 0, 0),
|
|
1,
|
|
)
|
|
y_offset = 90
|
|
else:
|
|
y_offset = 60
|
|
|
|
# Add crop info
|
|
if self.crop_rect:
|
|
crop_text = f"Crop: {int(self.crop_rect[0])},{int(self.crop_rect[1])} {int(self.crop_rect[2])}x{int(self.crop_rect[3])}"
|
|
cv2.putText(
|
|
canvas,
|
|
crop_text,
|
|
(10, y_offset),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.6,
|
|
(255, 255, 255),
|
|
2,
|
|
)
|
|
cv2.putText(
|
|
canvas,
|
|
crop_text,
|
|
(10, y_offset),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.6,
|
|
(0, 0, 0),
|
|
1,
|
|
)
|
|
y_offset += 30
|
|
|
|
# Add cut info
|
|
if self.cut_start_frame is not None or self.cut_end_frame is not None:
|
|
cut_text = (
|
|
f"Cut: {self.cut_start_frame or '?'} - {self.cut_end_frame or '?'}"
|
|
)
|
|
cv2.putText(
|
|
canvas,
|
|
cut_text,
|
|
(10, y_offset),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.6,
|
|
(255, 255, 255),
|
|
2,
|
|
)
|
|
cv2.putText(
|
|
canvas,
|
|
cut_text,
|
|
(10, y_offset),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.6,
|
|
(0, 0, 0),
|
|
1,
|
|
)
|
|
|
|
# Draw tracking overlays (points and interpolated cross), points stored in ROTATED space
|
|
pts = self.tracking_points.get(self.current_frame, []) if not self.is_image_mode else []
|
|
for (rx, ry) in pts:
|
|
sx, sy = self._map_rotated_to_screen(rx, ry)
|
|
cv2.circle(canvas, (sx, sy), 6, (255, 0, 0), -1)
|
|
cv2.circle(canvas, (sx, sy), 6, (255, 255, 255), 1)
|
|
|
|
# Draw feature tracking points (green circles)
|
|
if (not self.is_image_mode and
|
|
self.feature_tracker.tracking_enabled and
|
|
self.current_frame in self.feature_tracker.features):
|
|
feature_positions = self.feature_tracker.features[self.current_frame]['positions']
|
|
for (fx, fy) in feature_positions:
|
|
# Features are stored in rotated frame coordinates (like existing motion tracking)
|
|
# Use the existing coordinate transformation system
|
|
sx, sy = self._map_rotated_to_screen(fx, fy)
|
|
cv2.circle(canvas, (sx, sy), 4, (0, 255, 0), -1) # Green circles for features
|
|
cv2.circle(canvas, (sx, sy), 4, (255, 255, 255), 1)
|
|
|
|
# Draw selection rectangles for feature extraction/deletion
|
|
if self.selective_feature_extraction_rect:
|
|
x, y, w, h = self.selective_feature_extraction_rect
|
|
cv2.rectangle(canvas, (x, y), (x + w, y + h), (0, 255, 255), 2) # Yellow for extraction
|
|
|
|
if self.selective_feature_deletion_rect:
|
|
x, y, w, h = self.selective_feature_deletion_rect
|
|
cv2.rectangle(canvas, (x, y), (x + w, y + h), (0, 0, 255), 2) # Red for deletion
|
|
|
|
# Draw template selection rectangle
|
|
if self.template_selection_rect:
|
|
x, y, w, h = self.template_selection_rect
|
|
cv2.rectangle(canvas, (x, y), (x + w, y + h), (255, 0, 255), 2) # Magenta for template selection
|
|
|
|
# Draw previous and next tracking points with motion path visualization
|
|
if not self.is_image_mode and self.tracking_points:
|
|
prev_result = self._get_previous_tracking_point()
|
|
next_result = self._get_next_tracking_point()
|
|
|
|
# Draw motion path - either previous→current OR previous→next
|
|
line_to_draw = None
|
|
if prev_result and self.current_frame in self.tracking_points:
|
|
# Draw previous→current line (we're on a frame with tracking points)
|
|
line_to_draw = ("prev_current", prev_result, (self.current_frame, self.tracking_points[self.current_frame]))
|
|
elif prev_result and next_result:
|
|
# Draw previous→next line (we're between frames)
|
|
line_to_draw = ("prev_next", prev_result, next_result)
|
|
|
|
if line_to_draw:
|
|
line_type, (_, pts1), (_, pts2) = line_to_draw
|
|
|
|
# Draw lines between corresponding tracking points
|
|
for i, (px1, py1) in enumerate(pts1):
|
|
if i < len(pts2):
|
|
px2, py2 = pts2[i]
|
|
sx1, sy1 = self._map_rotated_to_screen(px1, py1)
|
|
sx2, sy2 = self._map_rotated_to_screen(px2, py2)
|
|
|
|
# Draw motion path line with arrow (thin and transparent)
|
|
overlay = canvas.copy()
|
|
cv2.line(overlay, (sx1, sy1), (sx2, sy2), (255, 255, 0), 1) # Thin yellow line
|
|
|
|
# Draw arrow head pointing from first to second point
|
|
angle = np.arctan2(sy2 - sy1, sx2 - sx1)
|
|
arrow_length = 12
|
|
arrow_angle = np.pi / 6 # 30 degrees
|
|
|
|
# Calculate arrow head points
|
|
arrow_x1 = int(sx2 - arrow_length * np.cos(angle - arrow_angle))
|
|
arrow_y1 = int(sy2 - arrow_length * np.sin(angle - arrow_angle))
|
|
arrow_x2 = int(sx2 - arrow_length * np.cos(angle + arrow_angle))
|
|
arrow_y2 = int(sy2 - arrow_length * np.sin(angle + arrow_angle))
|
|
|
|
cv2.line(overlay, (sx2, sy2), (arrow_x1, arrow_y1), (255, 255, 0), 1)
|
|
cv2.line(overlay, (sx2, sy2), (arrow_x2, arrow_y2), (255, 255, 0), 1)
|
|
cv2.addWeighted(overlay, 0.3, canvas, 0.7, 0, canvas) # Very transparent
|
|
|
|
# Previous tracking point (red) - from the most recent frame with tracking points before current
|
|
if prev_result:
|
|
prev_frame, prev_pts = prev_result
|
|
for (rx, ry) in prev_pts:
|
|
sx, sy = self._map_rotated_to_screen(rx, ry)
|
|
# Create overlay for alpha blending (more transparent)
|
|
overlay = canvas.copy()
|
|
cv2.circle(overlay, (sx, sy), 5, (0, 0, 255), -1) # Red circle
|
|
cv2.circle(overlay, (sx, sy), 5, (255, 255, 255), 1) # White border
|
|
cv2.addWeighted(overlay, 0.4, canvas, 0.6, 0, canvas) # More transparent
|
|
|
|
# Next tracking point (magenta/purple) - from the next frame with tracking points after current
|
|
if next_result:
|
|
next_frame, next_pts = next_result
|
|
for (rx, ry) in next_pts:
|
|
sx, sy = self._map_rotated_to_screen(rx, ry)
|
|
# Create overlay for alpha blending (more transparent)
|
|
overlay = canvas.copy()
|
|
cv2.circle(overlay, (sx, sy), 5, (255, 0, 255), -1) # Magenta circle
|
|
cv2.circle(overlay, (sx, sy), 5, (255, 255, 255), 1) # White border
|
|
cv2.addWeighted(overlay, 0.4, canvas, 0.6, 0, canvas) # More transparent
|
|
if self.tracking_enabled and not self.is_image_mode:
|
|
interp = self._get_interpolated_tracking_position(self.current_frame)
|
|
if interp:
|
|
sx, sy = self._map_rotated_to_screen(interp[0], interp[1])
|
|
cv2.line(canvas, (sx - 10, sy), (sx + 10, sy), (255, 0, 0), 2)
|
|
cv2.line(canvas, (sx, sy - 10), (sx, sy + 10), (255, 0, 0), 2)
|
|
# Draw a faint outline of the effective crop to confirm follow
|
|
eff_x, eff_y, eff_w, eff_h = self._get_effective_crop_rect_for_frame(self.current_frame)
|
|
# Map rotated crop corners to screen for debug outline
|
|
tlx, tly = self._map_rotated_to_screen(eff_x, eff_y)
|
|
brx, bry = self._map_rotated_to_screen(eff_x + eff_w, eff_y + eff_h)
|
|
cv2.rectangle(canvas, (tlx, tly), (brx, bry), (255, 0, 0), 1)
|
|
|
|
# Draw timeline
|
|
self.draw_timeline(canvas)
|
|
|
|
# Draw progress bar (if visible)
|
|
self.draw_progress_bar(canvas)
|
|
|
|
# Draw feedback message (if visible)
|
|
self.draw_feedback_message(canvas)
|
|
|
|
window_title = "Image Editor" if self.is_image_mode else "Video Editor"
|
|
cv2.imshow(window_title, canvas)
|
|
|
|
    def mouse_callback(self, event, x, y, flags, _):
        """Handle mouse events"""
        # Handle timeline interaction (not for images)
        if self.timeline_rect and not self.is_image_mode:
            bar_x_start, bar_y, bar_width, bar_height = self.timeline_rect
            bar_x_end = bar_x_start + bar_width

            if bar_y <= y <= bar_y + bar_height + 10:
                if event == cv2.EVENT_LBUTTONDOWN:
                    if bar_x_start <= x <= bar_x_end:
                        self.mouse_dragging = True
                        self.seek_to_timeline_position(x, bar_x_start, bar_width)
                elif event == cv2.EVENT_MOUSEMOVE and self.mouse_dragging:
                    if bar_x_start <= x <= bar_x_end:
                        self.seek_to_timeline_position(x, bar_x_start, bar_width)
                elif event == cv2.EVENT_LBUTTONUP:
                    self.mouse_dragging = False
                return

        # Handle crop selection (Shift + click and drag)
        if flags & cv2.EVENT_FLAG_SHIFTKEY:
            if event == cv2.EVENT_LBUTTONDOWN:
                print(f"DEBUG: Crop start at screen=({x},{y}) frame={getattr(self, 'current_frame', -1)}")
                self.crop_selecting = True
                self.crop_start_point = (x, y)
                self.crop_preview_rect = None
            elif event == cv2.EVENT_MOUSEMOVE and self.crop_selecting:
                if self.crop_start_point:
                    start_x, start_y = self.crop_start_point
                    width = abs(x - start_x)
                    height = abs(y - start_y)
                    crop_x = min(start_x, x)
                    crop_y = min(start_y, y)
                    self.crop_preview_rect = (crop_x, crop_y, width, height)
            elif event == cv2.EVENT_LBUTTONUP and self.crop_selecting:
                if self.crop_start_point and self.crop_preview_rect:
                    print(f"DEBUG: Crop end screen_rect={self.crop_preview_rect}")
                    # Convert screen coordinates to video coordinates
                    self.set_crop_from_screen_coords(self.crop_preview_rect)
                self.crop_selecting = False
                self.crop_start_point = None
                self.crop_preview_rect = None

        # Handle zoom center (Ctrl + click)
        if flags & cv2.EVENT_FLAG_CTRLKEY and event == cv2.EVENT_LBUTTONDOWN:
            self.zoom_center = (x, y)

        # Handle Shift+Right-click press: start selective feature extraction
        if event == cv2.EVENT_RBUTTONDOWN and (flags & cv2.EVENT_FLAG_SHIFTKEY):
            if not self.is_image_mode:
                # Enable feature tracking if not already enabled
                if not self.feature_tracker.tracking_enabled:
                    self.feature_tracker.tracking_enabled = True
                    self.show_feedback_message("Feature tracking enabled")
                self.selective_feature_extraction_start = (x, y)
                self.selective_feature_extraction_rect = None
                print(f"DEBUG: Started selective feature extraction at ({x}, {y})")

        # Update the selective feature extraction rectangle while dragging
        if event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_SHIFTKEY) and self.selective_feature_extraction_start:
            if not self.is_image_mode:
                start_x, start_y = self.selective_feature_extraction_start
                self.selective_feature_extraction_rect = (min(start_x, x), min(start_y, y), abs(x - start_x), abs(y - start_y))

        # Handle Shift+Right-click release for selective feature extraction
        if event == cv2.EVENT_RBUTTONUP and (flags & cv2.EVENT_FLAG_SHIFTKEY) and self.selective_feature_extraction_start:
            if not self.is_image_mode and self.selective_feature_extraction_rect:
                self._extract_features_from_region(self.selective_feature_extraction_rect)
            self.selective_feature_extraction_start = None
            self.selective_feature_extraction_rect = None

        # Handle Ctrl+Right-click press: start selective feature deletion
        if event == cv2.EVENT_RBUTTONDOWN and (flags & cv2.EVENT_FLAG_CTRLKEY):
            if not self.is_image_mode and self.feature_tracker.tracking_enabled:
                self.selective_feature_deletion_start = (x, y)
                self.selective_feature_deletion_rect = None
                print(f"DEBUG: Started selective feature deletion at ({x}, {y})")

        # Update the selective feature deletion rectangle while dragging
        if event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_CTRLKEY) and self.selective_feature_deletion_start:
            if not self.is_image_mode:
                start_x, start_y = self.selective_feature_deletion_start
                self.selective_feature_deletion_rect = (min(start_x, x), min(start_y, y), abs(x - start_x), abs(y - start_y))

        # Handle Ctrl+Right-click release for selective feature deletion
        if event == cv2.EVENT_RBUTTONUP and (flags & cv2.EVENT_FLAG_CTRLKEY) and self.selective_feature_deletion_start:
            if not self.is_image_mode and self.feature_tracker.tracking_enabled and self.selective_feature_deletion_rect:
                self._delete_features_from_region(self.selective_feature_deletion_rect)
            self.selective_feature_deletion_start = None
            self.selective_feature_deletion_rect = None

        # Handle Ctrl+Left-click press: start template region selection
        if event == cv2.EVENT_LBUTTONDOWN and (flags & cv2.EVENT_FLAG_CTRLKEY):
            if not self.is_image_mode:
                self.template_selection_start = (x, y)
                self.template_selection_rect = None
                print(f"DEBUG: Started template selection at ({x}, {y})")

        # Update the template selection rectangle while dragging
        if event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_CTRLKEY) and self.template_selection_start:
            if not self.is_image_mode:
                start_x, start_y = self.template_selection_start
                self.template_selection_rect = (min(start_x, x), min(start_y, y), abs(x - start_x), abs(y - start_y))

        # Handle Ctrl+Left-click release for template region selection
        if event == cv2.EVENT_LBUTTONUP and (flags & cv2.EVENT_FLAG_CTRLKEY) and self.template_selection_start:
            if not self.is_image_mode and self.template_selection_rect:
                self._set_template_from_region(self.template_selection_rect)
            self.template_selection_start = None
            self.template_selection_rect = None

        # Handle right-click for tracking points (no modifiers)
        if event == cv2.EVENT_RBUTTONDOWN and not (flags & (cv2.EVENT_FLAG_CTRLKEY | cv2.EVENT_FLAG_SHIFTKEY)):
            if not self.is_image_mode:
                # Store tracking points in ROTATED frame coordinates (pre-crop)
                rx, ry = self._map_screen_to_rotated(x, y)
                threshold = self.TRACKING_POINT_THRESHOLD
                removed = False

                # First check for removal of existing points on current frame
                if self.current_frame in self.tracking_points:
                    pts_screen = []
                    for idx, (px, py) in enumerate(self.tracking_points[self.current_frame]):
                        sxp, syp = self._map_rotated_to_screen(px, py)
                        pts_screen.append((idx, sxp, syp))
                    for idx, sxp, syp in pts_screen:
                        if (sxp - x) ** 2 + (syp - y) ** 2 <= threshold ** 2:
                            del self.tracking_points[self.current_frame][idx]
                            if not self.tracking_points[self.current_frame]:
                                del self.tracking_points[self.current_frame]
                            # self.show_feedback_message("Tracking point removed")
                            removed = True
                            break

                # If not removed, check for snapping to nearby points or lines from other frames
                if not removed:
                    snapped = False
                    best_snap_distance = float('inf')
                    best_snap_point = None

                    # Check all tracking points from all frames for point snapping
                    for _, points in self.tracking_points.items():
                        for (px, py) in points:
                            sxp, syp = self._map_rotated_to_screen(px, py)
                            distance = ((sxp - x) ** 2 + (syp - y) ** 2) ** 0.5
                            if distance <= threshold and distance < best_snap_distance:
                                best_snap_distance = distance
                                best_snap_point = (int(px), int(py))

                    # Check for line snapping - either previous->next OR previous->current
                    prev_result = self._get_previous_tracking_point()
                    next_result = self._get_next_tracking_point()

                    print(f"DEBUG: Line snapping - prev_result: {prev_result}, next_result: {next_result}")

                    # Determine which line to check: previous->current OR previous->next
                    line_to_check = None
                    if prev_result and self.current_frame in self.tracking_points:
                        # Check previous->current line (we're on a frame with tracking points)
                        line_to_check = ("prev_current", prev_result, (self.current_frame, self.tracking_points[self.current_frame]))
                        print(f"DEBUG: Checking prev->current line")
                    elif prev_result and next_result:
                        # Check previous->next line (we're between frames)
                        line_to_check = ("prev_next", prev_result, next_result)
                        print(f"DEBUG: Checking prev->next line")

                    if line_to_check:
                        line_type, (_, pts1), (_, pts2) = line_to_check

                        # Check each corresponding pair of points
                        for j in range(min(len(pts1), len(pts2))):
                            px1, py1 = pts1[j]
                            px2, py2 = pts2[j]

                            # Convert to screen coordinates
                            sx1, sy1 = self._map_rotated_to_screen(px1, py1)
                            sx2, sy2 = self._map_rotated_to_screen(px2, py2)

                            # Calculate distance to infinite line and foot of perpendicular
                            line_distance, (foot_x, foot_y) = self._point_to_line_distance_and_foot(x, y, sx1, sy1, sx2, sy2)

                            print(f"DEBUG: {line_type} Line {j}: ({sx1},{sy1}) to ({sx2},{sy2}), distance to click ({x},{y}) = {line_distance:.2f}, foot = ({foot_x:.1f}, {foot_y:.1f})")

                            if line_distance <= threshold and line_distance < best_snap_distance:
                                print(f"DEBUG: Line snap found! Distance {line_distance:.2f} <= threshold {threshold}")

                                # Convert foot of perpendicular back to rotated coordinates (no clamping - infinite line)
                                closest_rx, closest_ry = self._map_screen_to_rotated(int(foot_x), int(foot_y))

                                best_snap_distance = line_distance
                                best_snap_point = (int(closest_rx), int(closest_ry))
                                print(f"DEBUG: Best line snap point: ({closest_rx}, {closest_ry})")
                    else:
                        print(f"DEBUG: No line found for snapping")

                    # Apply the best snap if found
                    if best_snap_point:
                        print(f"DEBUG: Final best_snap_point: {best_snap_point} (distance: {best_snap_distance:.2f})")
                        self.tracking_points.setdefault(self.current_frame, []).append(best_snap_point)
                        snapped = True
                    else:
                        print(f"DEBUG: No snap found, adding new point at: ({rx}, {ry})")

                    # If no snapping, add new point at clicked location
                    if not snapped:
                        print(f"DEBUG: Click was at screen coords: ({x}, {y})")
                        print(f"DEBUG: Converted to rotated coords: ({rx}, {ry})")
                        # Verify the conversion
                        verify_sx, verify_sy = self._map_rotated_to_screen(rx, ry)
                        print(f"DEBUG: Verification - rotated ({rx}, {ry}) -> screen ({verify_sx}, {verify_sy})")
                        self.tracking_points.setdefault(self.current_frame, []).append((int(rx), int(ry)))
                        # self.show_feedback_message("Tracking point added")

                self.clear_transformation_cache()
                self.save_state()

                # Force immediate display update to recalculate previous/next points and arrows
                self.display_current_frame()

        # Handle scroll wheel: Ctrl+scroll -> zoom; plain scroll -> seek +/-1 frame (independent of multiplier)
        if event == cv2.EVENT_MOUSEWHEEL:
            if flags & cv2.EVENT_FLAG_CTRLKEY:
                if flags > 0:  # Scroll up -> zoom in
                    self.zoom_factor = min(self.MAX_ZOOM, self.zoom_factor + self.ZOOM_INCREMENT)
                else:  # Scroll down -> zoom out
                    self.zoom_factor = max(self.MIN_ZOOM, self.zoom_factor - self.ZOOM_INCREMENT)
                self.clear_transformation_cache()
            else:
                if not self.is_image_mode:
                    direction = 1 if flags > 0 else -1
                    self.seek_video_exact_frame(direction)

    def set_crop_from_screen_coords(self, screen_rect):
        """Convert a screen-space rectangle to ROTATED frame coordinates and set the crop"""
        x, y, w, h = screen_rect

        if self.current_display_frame is None:
            return

        # Debug context for crop mapping
        print("DEBUG: set_crop_from_screen_coords")
        print(f"DEBUG: input screen_rect=({x},{y},{w},{h})")
        print(f"DEBUG: state rotation={self.rotation_angle} zoom={self.zoom_factor} window=({self.window_width},{self.window_height})")
        print(f"DEBUG: display_offset={self.display_offset} is_image_mode={self.is_image_mode}")
        print(f"DEBUG: current crop_rect={self.crop_rect}")
        eff = self._get_effective_crop_rect_for_frame(getattr(self, 'current_frame', 0)) if self.crop_rect else None
        print(f"DEBUG: effective_crop_for_frame={eff}")

        # Map both corners from screen to ROTATED space, then derive crop in rotated coords
        x2 = x + w
        y2 = y + h
        rx1, ry1 = self._map_screen_to_rotated(x, y)
        rx2, ry2 = self._map_screen_to_rotated(x2, y2)
        print(f"DEBUG: mapped ROTATED corners -> ({rx1},{ry1}) and ({rx2},{ry2})")
        left_r = min(rx1, rx2)
        top_r = min(ry1, ry2)
        right_r = max(rx1, rx2)
        bottom_r = max(ry1, ry2)
        crop_x = left_r
        crop_y = top_r
        crop_w = max(10, right_r - left_r)
        crop_h = max(10, bottom_r - top_r)

        # Clamp to rotated frame bounds
        if self.rotation_angle in (90, 270):
            rot_w, rot_h = self.frame_height, self.frame_width
        else:
            rot_w, rot_h = self.frame_width, self.frame_height
        crop_x = max(0, min(crop_x, rot_w - 1))
        crop_y = max(0, min(crop_y, rot_h - 1))
        crop_w = min(crop_w, rot_w - crop_x)
        crop_h = min(crop_h, rot_h - crop_y)

        print(f"DEBUG: final ROTATED_rect=({crop_x},{crop_y},{crop_w},{crop_h}) rotated_size=({rot_w},{rot_h})")

        # Snap to full rotated frame if selection covers it
        if crop_w >= int(0.9 * rot_w) and crop_h >= int(0.9 * rot_h):
            if self.crop_rect:
                self.crop_history.append(self.crop_rect)
            self.crop_rect = None
            self.clear_transformation_cache()
            self.save_state()
            print("DEBUG: selection ~full frame -> clearing crop (use full frame)")
            return

        if crop_w > 10 and crop_h > 10:
            if self.crop_rect:
                self.crop_history.append(self.crop_rect)
            # Store crop in ROTATED frame coordinates
            self.crop_rect = (crop_x, crop_y, crop_w, crop_h)
            self.clear_transformation_cache()
            self.save_state()
            print(f"DEBUG: crop_rect (ROTATED space) set -> {self.crop_rect}")
            # Disable motion tracking upon explicit crop set to avoid unintended offsets
            if self.tracking_enabled:
                self.tracking_enabled = False
                print("DEBUG: tracking disabled due to manual crop set")
                self.save_state()
        else:
            print("DEBUG: rejected small crop (<=10px)")

    def seek_to_timeline_position(self, mouse_x, bar_x_start, bar_width):
        """Seek to position based on mouse click on timeline"""
        relative_x = mouse_x - bar_x_start
        position_ratio = max(0, min(1, relative_x / bar_width))
        target_frame = int(position_ratio * (self.total_frames - 1))
        self.seek_to_frame(target_frame)

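    # Worked example of the timeline mapping above (illustrative numbers only):
    # with total_frames = 300 and a click 25% of the way along the bar,
    # position_ratio = 0.25 and target_frame = int(0.25 * 299) = 74.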
    def undo_crop(self):
        """Undo the last crop operation"""
        if self.crop_history:
            self.crop_rect = self.crop_history.pop()
        else:
            self.crop_rect = None
        self.clear_transformation_cache()
        self.save_state()  # Save state when crop is undone

    def complete_reset(self):
        """Complete reset of all transformations and settings"""
        # Reset crop
        if self.crop_rect:
            self.crop_history.append(self.crop_rect)
        self.crop_rect = None

        # Reset zoom
        self.zoom_factor = 1.0
        self.zoom_center = None

        # Reset rotation
        self.rotation_angle = 0

        # Reset brightness and contrast
        self.brightness = 0
        self.contrast = 1.0

        # Reset motion tracking
        self.tracking_enabled = False
        self.tracking_points = {}

        # Reset feature tracking
        self.feature_tracker.clear_features()

        # Reset cut markers
        self.cut_start_frame = None
        self.cut_end_frame = None
        self.looping_between_markers = False

        # Reset display offset
        self.display_offset = [0, 0]

        # Clear transformation cache
        self.clear_transformation_cache()

        # Save state
        self.save_state()

        print("Complete reset applied - all transformations and markers cleared")

    def toggle_marker_looping(self):
        """Toggle looping between cut markers"""
        # Check if both markers are set
        if self.cut_start_frame is None or self.cut_end_frame is None:
            print("Both markers must be set to enable looping. Use '1' and '2' to set markers.")
            return False

        if self.cut_start_frame >= self.cut_end_frame:
            print("Invalid marker range - start frame must be before end frame")
            return False

        self.looping_between_markers = not self.looping_between_markers

        if self.looping_between_markers:
            print(f"Marker looping ENABLED: frames {self.cut_start_frame} - {self.cut_end_frame}")
            # Jump to start marker when enabling
            self.seek_to_frame(self.cut_start_frame)
        else:
            print("Marker looping DISABLED")

        self.save_state()  # Save state when looping is toggled
        return True

    def adjust_crop_size(self, direction: str, expand: bool, amount: Optional[int] = None):
        """
        Adjust crop size in given direction
        direction: 'up', 'down', 'left', 'right'
        expand: True to expand, False to contract
        amount: pixels to adjust by (uses self.crop_size_step if None)

        Note: the bounds below use the unrotated frame size even though the crop is
        stored in rotated coordinates, so with 90/270 degree rotation the width and
        height limits are effectively swapped.
        """
        if amount is None:
            amount = self.crop_size_step
        if not self.crop_rect:
            # If no crop exists, create a default one in the center
            center_x = self.frame_width // 2
            center_y = self.frame_height // 2
            default_size = min(self.frame_width, self.frame_height) // 4
            self.crop_rect = (
                center_x - default_size // 2,
                center_y - default_size // 2,
                default_size,
                default_size
            )
            return

        x, y, w, h = self.crop_rect

        if direction == 'up':
            if expand:
                # Expand upward - decrease y, increase height
                new_y = max(0, y - amount)
                new_h = h + (y - new_y)
                self.crop_rect = (x, new_y, w, new_h)
            else:
                # Contract from bottom - decrease height
                new_h = max(10, h - amount)  # Minimum size of 10 pixels
                self.crop_rect = (x, y, w, new_h)

        elif direction == 'down':
            if expand:
                # Expand downward - increase height
                new_h = min(self.frame_height - y, h + amount)
                self.crop_rect = (x, y, w, new_h)
            else:
                # Contract from top - increase y, decrease height
                amount = min(amount, h - 10)  # Don't make it smaller than 10 pixels
                new_y = y + amount
                new_h = h - amount
                self.crop_rect = (x, new_y, w, new_h)

        elif direction == 'left':
            if expand:
                # Expand leftward - decrease x, increase width
                new_x = max(0, x - amount)
                new_w = w + (x - new_x)
                self.crop_rect = (new_x, y, new_w, h)
            else:
                # Contract from right - decrease width
                new_w = max(10, w - amount)  # Minimum size of 10 pixels
                self.crop_rect = (x, y, new_w, h)

        elif direction == 'right':
            if expand:
                # Expand rightward - increase width
                new_w = min(self.frame_width - x, w + amount)
                self.crop_rect = (x, y, new_w, h)
            else:
                # Contract from left - increase x, decrease width
                amount = min(amount, w - 10)  # Don't make it smaller than 10 pixels
                new_x = x + amount
                new_w = w - amount
                self.crop_rect = (new_x, y, new_w, h)

        self.clear_transformation_cache()
        self.save_state()  # Save state when crop is adjusted

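    # Worked example for adjust_crop_size (illustrative numbers only): with
    # crop_rect = (100, 100, 200, 200) and amount = 10, expanding 'up' yields
    # (100, 90, 200, 210), while contracting 'up' yields (100, 100, 200, 190).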
    def render_video(self, output_path: str):
        """Render video or save image with current edits applied"""
        if self.is_image_mode:
            return self._render_image(output_path)
        else:
            return self._render_video_threaded(output_path)

    def _render_video_threaded(self, output_path: str):
        """Start video rendering in a separate thread"""
        # Check if already rendering
        if self.render_thread and self.render_thread.is_alive():
            print("Render already in progress! Use 'x' to cancel first.")
            return False

        # Reset render state
        self.render_cancelled = False

        # Start render thread
        self.render_thread = threading.Thread(
            target=self._render_video_worker,
            args=(output_path,),
            daemon=True
        )
        self.render_thread.start()

        print(f"Started rendering to {output_path} in background thread...")
        print("You can continue editing while rendering. Press 'x' to cancel.")
        return True

    def _render_video_worker(self, output_path: str):
        """Worker method that runs in the render thread"""
        try:
            if not output_path.endswith(".mp4"):
                output_path += ".mp4"

            # Send progress update to main thread
            self.render_progress_queue.put(("init", "Initializing render...", 0.0, 0.0))

            # Frames are read by a dedicated VideoCapture inside the FFmpeg helper,
            # so no capture is opened here.

            # Determine frame range
            start_frame = self.cut_start_frame if self.cut_start_frame is not None else 0
            end_frame = (
                self.cut_end_frame
                if self.cut_end_frame is not None
                else self.total_frames - 1
            )

            if start_frame >= end_frame:
                self.render_progress_queue.put(("error", "Invalid cut range!", 1.0, 0.0))
                return False

            # Send progress update
            self.render_progress_queue.put(("progress", "Calculating output dimensions...", 0.05, 0.0))

            # Calculate output dimensions to MATCH preview visible region
            params = self._get_display_params()
            output_width = max(2, params['visible_w'] - (params['visible_w'] % 2))
            output_height = max(2, params['visible_h'] - (params['visible_h'] % 2))

            # Ensure dimensions are divisible by 2 for H.264 encoding
            output_width = output_width - (output_width % 2)
            output_height = output_height - (output_height % 2)

            # Send progress update
            self.render_progress_queue.put(("progress", "Setting up FFmpeg encoder...", 0.1, 0.0))

            # Debug output dimensions
            print(f"Output dimensions (match preview): {output_width}x{output_height}")
            print(f"Zoom factor: {self.zoom_factor}")
            eff_x, eff_y, eff_w, eff_h = self._get_effective_crop_rect_for_frame(start_frame)
            print(f"Effective crop (rotated): {eff_x},{eff_y} {eff_w}x{eff_h}")

            # Skip OpenCV's VideoWriter codec selection and encode with FFmpeg directly
            print("Using FFmpeg for encoding with OpenCV transformations...")
            return self._render_with_ffmpeg_pipe(output_path, start_frame, end_frame, output_width, output_height)

        except Exception as e:
            error_msg = str(e)
            # Handle specific FFmpeg threading errors
            if "async_lock" in error_msg or "pthread_frame" in error_msg:
                error_msg = "FFmpeg threading error - try restarting the application"
            elif "Assertion" in error_msg:
                error_msg = "Video codec error - the video file may be corrupted or incompatible"

            self.render_progress_queue.put(("error", f"Render error: {error_msg}", 1.0, 0.0))
            print(f"Render error: {error_msg}")
            return False

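    # Progress updates flow from the render worker to the UI thread as tuples of
    # (update_type, text, progress, fps) on self.render_progress_queue, where
    # update_type is one of "init", "progress", "complete", "error", or "cancelled".
    # update_render_progress() below drains that queue without blocking.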
    def update_render_progress(self):
        """Process progress updates from the render thread"""
        try:
            while True:
                # Non-blocking get from queue
                update_type, text, progress, fps = self.render_progress_queue.get_nowait()

                if update_type == "init":
                    self.show_progress_bar(text)
                elif update_type == "progress":
                    self.update_progress_bar(progress, text, fps)
                elif update_type == "complete":
                    self.update_progress_bar(progress, text, fps)
                    # Handle file overwrite if this was an overwrite operation
                    if hasattr(self, 'overwrite_temp_path') and self.overwrite_temp_path:
                        self._handle_overwrite_completion()
                elif update_type == "error":
                    self.update_progress_bar(progress, text, fps)
                    # Also show error as feedback message for better visibility
                    self.show_feedback_message(f"ERROR: {text}")
                elif update_type == "cancelled":
                    self.hide_progress_bar()
                    self.show_feedback_message("Render cancelled")

        except queue.Empty:
            # No more updates in queue
            pass

    def _handle_overwrite_completion(self):
        """Handle file replacement after successful render"""
        try:
            print("Replacing original file...")
            # Release current video capture before replacing the file
            if hasattr(self, 'cap') and self.cap:
                self.cap.release()

            # Replace the original file with the temporary file
            import shutil
            print(f"DEBUG: Moving {self.overwrite_temp_path} to {self.overwrite_target_path}")
            try:
                shutil.move(self.overwrite_temp_path, self.overwrite_target_path)
                print("DEBUG: File move successful")
            except Exception as e:
                print(f"DEBUG: File move failed: {e}")
                # Try to clean up temp file
                if os.path.exists(self.overwrite_temp_path):
                    os.remove(self.overwrite_temp_path)
                raise

            # Small delay to ensure file system operations are complete
            time.sleep(0.1)

            try:
                self._load_video(self.video_path)
                self.load_current_frame()
                print("File reloaded successfully")
            except Exception as e:
                print(f"Warning: Could not reload file after overwrite: {e}")
                print("The file was saved successfully, but you may need to restart the editor to continue editing it.")
        except Exception as e:
            print(f"Error during file overwrite: {e}")
        finally:
            # Clean up overwrite state
            self.overwrite_temp_path = None
            self.overwrite_target_path = None

    def cancel_render(self):
        """Cancel the current render operation"""
        if self.render_thread and self.render_thread.is_alive():
            self.render_cancelled = True
            print("Render cancellation requested...")
            return True
        return False

    def is_rendering(self):
        """Check if a render operation is currently active"""
        return self.render_thread and self.render_thread.is_alive()

    def cleanup_render_thread(self):
        """Clean up render thread resources"""
        if self.render_thread and self.render_thread.is_alive():
            self.render_cancelled = True
            # Terminate FFmpeg process if running
            if self.ffmpeg_process:
                try:
                    self.ffmpeg_process.terminate()
                    self.ffmpeg_process.wait(timeout=1.0)
                except Exception:
                    try:
                        self.ffmpeg_process.kill()
                    except Exception:
                        pass
                self.ffmpeg_process = None
            # Wait a bit for the thread to finish gracefully
            self.render_thread.join(timeout=2.0)
            if self.render_thread.is_alive():
                print("Warning: Render thread did not finish gracefully")
        self.render_thread = None
        self.render_cancelled = False

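    # Note: unlike load_image_utf8() at the top of this file (which goes through PIL),
    # cv2.imwrite used below may fail on some platforms when the output path contains
    # non-ASCII characters; the boolean it returns is checked for that reason.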
    def _render_image(self, output_path: str):
        """Save image with current edits applied"""
        # Get the appropriate file extension
        original_ext = self.video_path.suffix.lower()
        if not output_path.endswith(original_ext):
            output_path += original_ext

        print(f"Saving image to {output_path}...")

        # Apply all transformations to the image
        processed_image = self.apply_crop_zoom_and_rotation(self.static_image.copy())

        if processed_image is not None:
            # Save the image with high quality settings
            success = cv2.imwrite(output_path, processed_image, [cv2.IMWRITE_JPEG_QUALITY, 95])
            if success:
                print(f"Image saved successfully to {output_path}")
                return True
            else:
                print(f"Error: Could not save image to {output_path}")
                return False
        else:
            print("Error: Could not process image")
            return False

    def _process_frame_for_render(self, frame, output_width: int, output_height: int, frame_number: int = None):
        """Process a single frame for rendering (optimized for speed)"""
        try:
            # Apply rotation first to work in rotated space
            if self.rotation_angle != 0:
                frame = self.apply_rotation(frame)

            # Apply EFFECTIVE crop regardless of whether a base crop exists, to enable follow and out-of-frame pad
            # (frame 0 is a valid frame number, so avoid a plain `or` fallback here)
            lookup_frame = frame_number if frame_number is not None else self.current_frame
            x, y, w, h = self._get_effective_crop_rect_for_frame(lookup_frame)

            # Allow out-of-bounds by padding with black so center can remain when near edges
            h_frame, w_frame = frame.shape[:2]
            pad_left = max(0, -x)
            pad_top = max(0, -y)
            pad_right = max(0, (x + w) - w_frame)
            pad_bottom = max(0, (y + h) - h_frame)
            if any(p > 0 for p in (pad_left, pad_top, pad_right, pad_bottom)):
                frame = cv2.copyMakeBorder(
                    frame,
                    pad_top,
                    pad_bottom,
                    pad_left,
                    pad_right,
                    borderType=cv2.BORDER_CONSTANT,
                    value=(0, 0, 0),
                )
                x = x + pad_left
                y = y + pad_top
                w_frame, h_frame = frame.shape[1], frame.shape[0]

            # Clamp crop to padded frame
            x = max(0, min(x, w_frame - 1))
            y = max(0, min(y, h_frame - 1))
            w = min(w, w_frame - x)
            h = min(h, h_frame - y)
            if w <= 0 or h <= 0:
                return None
            frame = frame[y : y + h, x : x + w]

            # Apply brightness and contrast
            frame = self.apply_brightness_contrast(frame)

            # Apply zoom and resize directly to final output dimensions
            if self.zoom_factor != 1.0:
                height, width = frame.shape[:2]
                # Calculate what the zoomed dimensions would be
                zoomed_width = int(width * self.zoom_factor)
                zoomed_height = int(height * self.zoom_factor)

                # If zoomed dimensions match output, use them; otherwise resize directly to output
                if zoomed_width == output_width and zoomed_height == output_height:
                    frame = cv2.resize(
                        frame, (zoomed_width, zoomed_height), interpolation=cv2.INTER_LINEAR
                    )
                else:
                    # Resize directly to final output dimensions
                    frame = cv2.resize(
                        frame, (output_width, output_height), interpolation=cv2.INTER_LINEAR
                    )
            else:
                # No zoom, just resize to output dimensions if needed
                if frame.shape[1] != output_width or frame.shape[0] != output_height:
                    frame = cv2.resize(
                        frame, (output_width, output_height), interpolation=cv2.INTER_LINEAR
                    )

            return frame

        except Exception as e:
            print(f"Error processing frame: {e}")
            return None

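    # Worked example of the padding step above (illustrative numbers only): for a
    # 1920x1080 rotated frame and an effective crop of (-50, 0, 400, 300), pad_left
    # is 50, the frame is widened to 1970 px, and the crop origin shifts to (0, 0),
    # so the follow window keeps its full size instead of being clipped at the edge.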
    def _render_with_ffmpeg_pipe(self, output_path: str, start_frame: int, end_frame: int, output_width: int, output_height: int):
        """Render by writing processed raw frames to a temp file and encoding them with FFmpeg"""
        try:
            # Test FFmpeg with a simple command first
            try:
                test_result = subprocess.run(['ffmpeg', '-version'], capture_output=True, text=True, timeout=10)
                if test_result.returncode != 0:
                    print(f"FFmpeg test failed with return code {test_result.returncode}")
                    print(f"FFmpeg stderr: {test_result.stderr}")
                    error_msg = "FFmpeg is not working properly"
                    self.render_progress_queue.put(("error", error_msg, 1.0, 0.0))
                    return False
            except (subprocess.CalledProcessError, FileNotFoundError, subprocess.TimeoutExpired) as e:
                error_msg = f"FFmpeg not found or not working: {e}"
                print(error_msg)
                self.render_progress_queue.put(("error", error_msg, 1.0, 0.0))
                return False

            self.render_progress_queue.put(("progress", "Starting encoder...", 0.0, 0.0))

            import tempfile

            temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.raw')
            temp_file.close()

            # Raw BGR frames are written to the temp file, then encoded to H.264 in one pass.
            # This is simpler and more Windows-friendly than piping into FFmpeg's stdin.
            ffmpeg_cmd = [
                'ffmpeg', '-y',
                '-f', 'rawvideo',
                '-s', f'{output_width}x{output_height}',
                '-pix_fmt', 'bgr24',
                '-r', str(self.fps),
                '-i', temp_file.name,
                '-c:v', 'libx264',
                '-preset', 'veryslow',
                '-crf', '12',
                '-pix_fmt', 'yuv420p',
                '-profile:v', 'high',
                '-level', '4.2',
                '-x264-params', 'ref=5:bframes=8:deblock=1,1',
                output_path
            ]
            self.temp_file_name = temp_file.name

            render_cap = cv2.VideoCapture(str(self.video_path))
            render_cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)

            total_frames = end_frame - start_frame + 1
            frames_written = 0
            start_time = time.time()
            last_progress_update = 0

            self.render_progress_queue.put(("progress", f"Processing {total_frames} frames...", 0.1, 0.0))
            with open(self.temp_file_name, 'wb') as temp_file:
                for i in range(total_frames):
                    if self.render_cancelled:
                        render_cap.release()
                        self.render_progress_queue.put(("cancelled", "Render cancelled", 0.0, 0.0))
                        return False

                    ret, frame = render_cap.read()
                    if not ret:
                        break

                    processed_frame = self._process_frame_for_render(frame, output_width, output_height, start_frame + i)
                    if processed_frame is not None:
                        if i == 0:
                            print(f"Processed frame dimensions: {processed_frame.shape[1]}x{processed_frame.shape[0]}")
                            print(f"Expected dimensions: {output_width}x{output_height}")

                        temp_file.write(processed_frame.tobytes())
                        frames_written += 1

                    current_time = time.time()
                    progress = 0.1 + (0.8 * (i + 1) / total_frames)

                    if current_time - last_progress_update > 0.5:
                        elapsed = current_time - start_time
                        fps_rate = frames_written / elapsed if elapsed > 0 else 0
                        self.render_progress_queue.put(("progress", f"Processed {i+1}/{total_frames} frames", progress, fps_rate))
                        last_progress_update = current_time

            render_cap.release()

            self.render_progress_queue.put(("progress", "Encoding...", 0.9, 0.0))

            # Use subprocess.run() with timeout for better Windows reliability
            result = subprocess.run(
                ffmpeg_cmd,
                capture_output=True,
                text=True,
                timeout=300,  # 5 minute timeout
                creationflags=subprocess.CREATE_NO_WINDOW if hasattr(subprocess, 'CREATE_NO_WINDOW') else 0
            )

            return_code = result.returncode
            stdout = result.stdout
            stderr = result.stderr

            # Debug output
            print(f"FFmpeg return code: {return_code}")
            if stdout:
                print(f"FFmpeg stdout: {stdout}")
            if stderr:
                print(f"FFmpeg stderr: {stderr}")

            if os.path.exists(self.temp_file_name):
                try:
                    os.unlink(self.temp_file_name)
                except OSError:
                    pass

            if return_code == 0:
                total_time = time.time() - start_time
                avg_fps = frames_written / total_time if total_time > 0 else 0
                self.render_progress_queue.put(("complete", f"Rendered {frames_written} frames", 1.0, avg_fps))
                print(f"Successfully rendered {frames_written} frames (avg {avg_fps:.1f} FPS)")
                return True
            else:
                error_details = stderr if stderr else "No error details available"
                print(f"Encoding failed with return code {return_code}")
                print(f"Error: {error_details}")
                self.render_progress_queue.put(("error", f"Encoding failed: {error_details}", 1.0, 0.0))
                return False

        except Exception as e:
            error_msg = str(e)
            print(f"Rendering exception: {error_msg}")
            print(f"Exception type: {type(e).__name__}")

            if "Errno 22" in error_msg or "invalid argument" in error_msg.lower():
                error_msg = "File system error - try using a different output path"
            elif "BrokenPipeError" in error_msg:
                error_msg = "Process terminated unexpectedly"
            elif "FileNotFoundError" in error_msg or "ffmpeg" in error_msg.lower():
                error_msg = "FFmpeg not found - please install FFmpeg and ensure it's in your PATH"

            self.render_progress_queue.put(("error", f"Rendering failed: {error_msg}", 1.0, 0.0))
            return False

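    # The main loop below redraws the current frame, drains render-progress updates,
    # and routes key presses to either the editor window or the "Project View" window
    # depending on which one get_active_window_title() (provided elsewhere in this
    # program) reports as focused.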
    def run(self):
        """Main editor loop"""
        if self.is_image_mode:
            print("Image Editor Controls:")
            print("  E/Shift+E: Increase/Decrease brightness")
            print("  R/Shift+R: Increase/Decrease contrast")
            print("  -: Rotate clockwise 90°")
            print()
            print("Crop Controls:")
            print("  Shift+Click+Drag: Select crop area")
            print("  h/j/k/l: Contract crop (from right/top/bottom/left)")
            print("  J/K/L: Expand crop (up/down/left)")
            print("  u: Undo crop")
            print("  c: Clear crop")
            print("  C: Complete reset (crop, zoom, rotation, brightness, contrast, tracking)")
            print()
            print("Motion Tracking:")
            print("  Right-click: Add/remove tracking point (at current frame)")
            print("  v: Toggle motion tracking on/off")
            print("  V: Clear all tracking points")
            print()
            print("Other Controls:")
            print("  Ctrl+Scroll: Zoom in/out")
            print("  Shift+S: Save screenshot")
            print("  f: Toggle fullscreen")
            print("  p: Toggle project view")
            if len(self.video_files) > 1:
                print("  N: Previous file")
                print("  n: Next file")
            print("  Enter: Save image (overwrites if '_edited_' in name)")
            print("  b: Save image as a new '_edited_NNNNN' file")
            print("  q/ESC: Quit")
            print()
        else:
            print("Video Editor Controls:")
            print("  Space: Play/Pause")
            print("  a/d: Seek backward/forward (1 frame)")
            print("  Shift+A/D: Seek backward/forward (10 frames)")
            print("  Ctrl+A/D: Seek backward/forward (60 frames)")
            print("  w/s: Increase/Decrease speed")
            print("  Q/Y: Increase/Decrease seek multiplier")
            print("  E/Shift+E: Increase/Decrease brightness")
            print("  R/Shift+R: Increase/Decrease contrast")
            print("  -: Rotate clockwise 90°")
            print()
            print("Crop Controls:")
            print("  Shift+Click+Drag: Select crop area")
            print("  h/j/k/l: Contract crop (from right/top/bottom/left)")
            print("  J/K/L: Expand crop (up/down/left)")
            print("  u: Undo crop")
            print("  c: Clear crop")
            print("  C: Complete reset (crop, zoom, rotation, brightness, contrast, tracking)")
            print()
            print("Other Controls:")
            print("  Ctrl+Scroll: Zoom in/out")
            print("  Shift+S: Save screenshot")
            print("  f: Toggle fullscreen")
            print("  p: Toggle project view")
            print("  1: Set cut start point")
            print("  2: Set cut end point")
            print("  t: Toggle loop between markers")
            print("  ,: Jump to previous marker")
            print("  .: Jump to next marker")
            print("  F: Toggle feature tracking")
            print("  Shift+T: Extract features from current frame")
            print("  g: Toggle auto feature extraction")
            print("  G: Clear all feature data")
            print("  H: Switch detector (SIFT/ORB)")
            print("  o: Toggle optical flow tracking")
            print("  m: Toggle template matching tracking")
            print("  Shift+Right-click+drag: Extract features from selected region")
            print("  Ctrl+Right-click+drag: Delete features from selected region")
            print("  Ctrl+Left-click+drag: Set template region for tracking")
            if len(self.video_files) > 1:
                print("  N: Previous video")
                print("  n: Next video")
            print("  Enter: Render video (overwrites if '_edited_' in name)")
            print("  b: Render video to a new '_edited_NNNNN' file")
            print("  x: Cancel render")
            print("  q/ESC: Quit")
            print()

        window_title = "Image Editor" if self.is_image_mode else "Video Editor"
        cv2.namedWindow(window_title, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(window_title, self.window_width, self.window_height)
        cv2.setMouseCallback(window_title, self.mouse_callback)

        self.load_current_frame()

        while True:
            # Update auto-repeat seeking if active
            self.update_auto_repeat_seek()

            # Update render progress from background thread
            self.update_render_progress()

            # Update display
            self.display_current_frame()

            # Handle project view window if it exists
            if self.project_view_mode and self.project_view:
                # Draw project view in its own window
                project_canvas = self.project_view.draw()
                cv2.imshow("Project View", project_canvas)

            # Calculate appropriate delay based on playback state
            if self.is_playing and not self.is_image_mode:
                # Use calculated frame delay for proper playback speed
                delay_ms = self.calculate_frame_delay()
            else:
                # Use minimal delay for immediate responsiveness when not playing
                delay_ms = 1

            # Auto advance frame when playing (videos only)
            if self.is_playing and not self.is_image_mode:
                self.advance_frame()

            # Key capture with appropriate delay
            key = cv2.waitKey(delay_ms) & 0xFF

            # Route keys based on window focus
            if key != 255:  # Key was pressed
                active_window = get_active_window_title()

                if "Project View" in active_window:
                    # Project view window has focus - handle project view keys
                    if self.project_view_mode and self.project_view:
                        action = self.project_view.handle_key(key)
                        if action == "back_to_editor":
                            self.toggle_project_view()
                        elif action == "quit":
                            return  # Exit the main loop
                        elif action and action.startswith("open_video:"):
                            video_path_str = action.split(":", 1)[1]
                            video_path = Path(video_path_str)
                            self.open_video_from_project_view(video_path)
                    continue  # Skip main window key handling

                elif "Video Editor" in active_window or "Image Editor" in active_window:
                    # Main window has focus - handle editor keys
                    pass  # Continue to main window key handling below
                else:
                    # Neither window has focus, ignore key
                    continue

            # Handle auto-repeat - stop if no key is pressed
            if key == 255 and self.auto_repeat_active:  # 255 means no key pressed
                self.stop_auto_repeat_seek()

            if key == ord("q") or key == 27:  # ESC
                self.stop_auto_repeat_seek()
                self.save_state()
                break
            elif key == ord("p"):  # p - Toggle project view
                self.toggle_project_view()
            elif key == ord(" "):
                # Don't allow play/pause for images
                if not self.is_image_mode:
                    self.stop_auto_repeat_seek()  # Stop seeking when toggling play/pause
                    self.is_playing = not self.is_playing
            elif key == ord("a") or key == ord("A"):
                # Seeking only for videos
                if not self.is_image_mode:
                    # Check if it's uppercase A (Shift+A)
                    if key == ord("A"):
                        if not self.auto_repeat_active:
                            self.start_auto_repeat_seek(-1, True, False)  # Shift+A: -10 frames
                    else:
                        if not self.auto_repeat_active:
                            self.start_auto_repeat_seek(-1, False, False)  # a: -1 frame
            elif key == ord("d") or key == ord("D"):
                # Seeking only for videos
                if not self.is_image_mode:
                    # Check if it's uppercase D (Shift+D)
                    if key == ord("D"):
                        if not self.auto_repeat_active:
                            self.start_auto_repeat_seek(1, True, False)  # Shift+D: +10 frames
                    else:
                        if not self.auto_repeat_active:
                            self.start_auto_repeat_seek(1, False, False)  # d: +1 frame
            elif key == 1:  # Ctrl+A
                # Seeking only for videos
                if not self.is_image_mode:
                    if not self.auto_repeat_active:
                        self.start_auto_repeat_seek(-1, False, True)  # Ctrl+A: -60 frames
            elif key == 4:  # Ctrl+D
                # Seeking only for videos
                if not self.is_image_mode:
                    if not self.auto_repeat_active:
                        self.start_auto_repeat_seek(1, False, True)  # Ctrl+D: +60 frames
            elif key == ord(","):
                # Jump to previous marker (cut start or end)
                if not self.is_image_mode:
                    self.jump_to_previous_marker()
            elif key == ord("."):
                # Jump to next marker (cut start or end)
                if not self.is_image_mode:
                    self.jump_to_next_marker()
            elif key == ord("-") or key == ord("_"):
                self.rotate_clockwise()
                print(f"Rotated to {self.rotation_angle}°")
            elif key == ord("f"):
                self.toggle_fullscreen()
            elif key == ord("S"):  # Shift+S - Save screenshot
                self.save_current_frame()
            elif key == ord("w"):
                # Speed control only for videos
                if not self.is_image_mode:
                    self.playback_speed = min(
                        self.MAX_PLAYBACK_SPEED, self.playback_speed + self.SPEED_INCREMENT
                    )
            elif key == ord("s"):
                # Speed control only for videos
                if not self.is_image_mode:
                    self.playback_speed = max(
                        self.MIN_PLAYBACK_SPEED, self.playback_speed - self.SPEED_INCREMENT
                    )
            elif key == ord("Q"):
                # Seek multiplier control only for videos
                if not self.is_image_mode:
                    self.seek_multiplier = min(
                        self.MAX_SEEK_MULTIPLIER, self.seek_multiplier + self.SEEK_MULTIPLIER_INCREMENT
                    )
                    print(f"Seek multiplier: {self.seek_multiplier:.1f}x")
            elif key == ord("Y"):
                # Seek multiplier control only for videos
                if not self.is_image_mode:
                    self.seek_multiplier = max(
                        self.MIN_SEEK_MULTIPLIER, self.seek_multiplier - self.SEEK_MULTIPLIER_INCREMENT
                    )
                    print(f"Seek multiplier: {self.seek_multiplier:.1f}x")
            elif key == ord("e") or key == ord("E"):
                # Brightness adjustment: e (increase), Shift+E (decrease)
                if key == ord("E"):
                    self.adjust_brightness(-5)
                    print(f"Brightness: {self.brightness}")
                else:
                    self.adjust_brightness(5)
                    print(f"Brightness: {self.brightness}")
            elif key == ord("r") or key == ord("R"):
                # Contrast adjustment: r (increase), Shift+R (decrease)
                if key == ord("R"):
                    self.adjust_contrast(-0.1)
                    print(f"Contrast: {self.contrast:.1f}")
                else:
                    self.adjust_contrast(0.1)
                    print(f"Contrast: {self.contrast:.1f}")
            elif key == ord("u"):
                self.undo_crop()
            elif key == ord("c"):
                if self.crop_rect:
                    self.crop_history.append(self.crop_rect)
                self.crop_rect = None
                self.zoom_factor = 1.0
                self.clear_transformation_cache()
                self.save_state()  # Save state when crop is cleared
            elif key == ord("C"):
                self.complete_reset()
            elif key == ord("1"):
                # Cut markers only for videos
                if not self.is_image_mode:
                    self.cut_start_frame = self.current_frame
                    print(f"Set cut start at frame {self.current_frame}")
                    self.save_state()  # Save state when cut start is set
            elif key == ord("2"):
                # Cut markers only for videos
                if not self.is_image_mode:
                    self.cut_end_frame = self.current_frame
                    print(f"Set cut end at frame {self.current_frame}")
                    self.save_state()  # Save state when cut end is set
            elif key == ord("N"):
                if len(self.video_files) > 1:
                    self.previous_video()
            elif key == ord("n"):
                if len(self.video_files) > 1:
                    self.next_video()
            elif key == ord("b"):
                directory = self.video_path.parent
                base_name = self.video_path.stem
                extension = self.video_path.suffix

                # Remove any existing _edited_ suffix to get clean base name
                clean_base = base_name.replace("_edited", "")

                # Find next available number
                counter = 1
                while True:
                    new_name = f"{clean_base}_edited_{counter:05d}{extension}"
                    output_path = directory / new_name
                    if not output_path.exists():
                        break
                    counter += 1

                success = self.render_video(str(output_path))

            elif key == 13:  # Enter
                # Only overwrite if file already contains "_edited_" in name
                print(f"DEBUG: Checking if '{self.video_path.stem}' contains '_edited_'")
                if "_edited_" in self.video_path.stem:
                    print("DEBUG: File contains '_edited_', proceeding with overwrite")
                    print(f"DEBUG: Original file path: {self.video_path}")
                    print(f"DEBUG: Original file exists: {self.video_path.exists()}")
                    output_path = str(self.video_path)

                    # If we're overwriting the same file, use a temporary file first
                    import tempfile
                    temp_dir = self.video_path.parent
                    temp_fd, temp_path = tempfile.mkstemp(suffix=self.video_path.suffix, dir=temp_dir)
                    os.close(temp_fd)  # Close the file descriptor, we just need the path

                    print(f"DEBUG: Created temp file: {temp_path}")
                    print("Rendering to temporary file first...")

                    success = self.render_video(temp_path)

                    # Store the temp path so we can replace the file when render completes
                    self.overwrite_temp_path = temp_path
                    self.overwrite_target_path = str(self.video_path)
                else:
                    print(f"DEBUG: File '{self.video_path.stem}' does not contain '_edited_'")
                    print("Enter key only overwrites files with '_edited_' in the name. Use 'b' to create new files.")
            elif key == ord("v"):
                # Toggle motion tracking on/off
                self.tracking_enabled = not self.tracking_enabled
                self.show_feedback_message(f"Motion tracking {'ON' if self.tracking_enabled else 'OFF'}")
                self.save_state()
            elif key == ord("V"):
                # Clear all tracking points
                self.tracking_points = {}
                self.show_feedback_message("Tracking points cleared")
                self.save_state()
            elif key == ord("F"):
                # Toggle feature tracking on/off
                self.feature_tracker.tracking_enabled = not self.feature_tracker.tracking_enabled
                self.show_feedback_message(f"Feature tracking {'ON' if self.feature_tracker.tracking_enabled else 'OFF'}")
                self.save_state()
            elif key == ord("T"):
                # Extract features from current frame (Shift+T)
                if not self.is_image_mode and self.current_display_frame is not None:
                    # Extract features from the transformed frame (what the user sees);
                    # this handles all transformations (crop, zoom, rotation) correctly
                    display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)
                    if display_frame is not None:
                        # Map coordinates from the transformed frame back to rotated frame
                        # coordinates using the existing coordinate transformation system
                        def coord_mapper(x, y):
                            # The transformed frame coordinates are in the display frame space.
                            # Map them to screen coordinates first, then reuse the existing
                            # _map_screen_to_rotated function.

                            # The transformed frame is centered on the canvas
                            frame_height, frame_width = display_frame.shape[:2]
                            available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
                            start_y = (available_height - frame_height) // 2
                            start_x = (self.window_width - frame_width) // 2

                            # Convert to screen coordinates
                            screen_x = x + start_x
                            screen_y = y + start_y

                            # Use the existing coordinate transformation system
                            return self._map_screen_to_rotated(screen_x, screen_y)

                        success = self.feature_tracker.extract_features(display_frame, self.current_frame, coord_mapper)
                        if success:
                            count = self.feature_tracker.get_feature_count(self.current_frame)
                            self.show_feedback_message(f"Extracted {count} features from visible area")
                        else:
                            self.show_feedback_message("Failed to extract features")
                    else:
                        self.show_feedback_message("No display frame available")
                    self.save_state()
                else:
                    self.show_feedback_message("No frame data available")
            elif key == ord("g"):
                # Toggle auto tracking
                self.feature_tracker.auto_tracking = not self.feature_tracker.auto_tracking
                print(f"DEBUG: Auto tracking toggled to {self.feature_tracker.auto_tracking}")
                self.show_feedback_message(f"Auto tracking {'ON' if self.feature_tracker.auto_tracking else 'OFF'}")
                self.save_state()
            elif key == ord("G"):
                # Clear all feature tracking data
                self.feature_tracker.clear_features()
                self.show_feedback_message("Feature tracking data cleared")
                self.save_state()
            elif key == ord("H"):
                # Switch detector type (SIFT -> ORB -> SIFT) - SURF not available
                current_type = self.feature_tracker.detector_type
                if current_type == 'SIFT':
                    new_type = 'ORB'
                elif current_type == 'ORB':
                    new_type = 'SIFT'
                else:
                    new_type = 'SIFT'
                self.feature_tracker.set_detector_type(new_type)
                self.show_feedback_message(f"Detector switched to {new_type}")
                self.save_state()
            elif key == ord("o"):
                # Toggle optical flow tracking
                self.optical_flow_enabled = not self.optical_flow_enabled
                print(f"DEBUG: Optical flow toggled to {self.optical_flow_enabled}")

                # If enabling optical flow, fill all gaps between existing features
                if self.optical_flow_enabled:
                    self._fill_all_gaps_with_interpolation()

                self.show_feedback_message(f"Optical flow {'ON' if self.optical_flow_enabled else 'OFF'}")
                self.save_state()
            elif key == ord("m"):
                # Toggle template matching tracking
                self.template_matching_enabled = not self.template_matching_enabled
                print(f"DEBUG: Template matching toggled to {self.template_matching_enabled}")
                self.show_feedback_message(f"Template matching {'ON' if self.template_matching_enabled else 'OFF'}")
                self.save_state()
            elif key == ord("t"):
                # Marker looping only for videos
                if not self.is_image_mode:
                    self.toggle_marker_looping()
            elif key == ord("x"):
                # Cancel render if active
                if self.is_rendering():
                    self.cancel_render()
                    print("Render cancellation requested")
                else:
                    print("No render operation to cancel")

            # Individual direction controls using shift combinations we can detect
            elif key == ord("J"):  # Shift+J - expand up
                self.adjust_crop_size('up', True)
                print(f"Expanded crop upward by {self.crop_size_step}px")
            elif key == ord("K"):  # Shift+K - expand down
                self.adjust_crop_size('down', True)
                print(f"Expanded crop downward by {self.crop_size_step}px")
            elif key == ord("L"):  # Shift+L - expand left
                self.adjust_crop_size('left', True)
                print(f"Expanded crop leftward by {self.crop_size_step}px")
            elif key == ord("H"):  # Shift+H - expand right (unreachable: 'H' is consumed by the detector switch above)
                self.adjust_crop_size('right', True)
                print(f"Expanded crop rightward by {self.crop_size_step}px")

            # Contract in specific directions
            elif key == ord("k"):  # k - contract from bottom (reduce height from bottom)
                self.adjust_crop_size('up', False)
                print(f"Contracted crop from bottom by {self.crop_size_step}px")
            elif key == ord("j"):  # j - contract from top (reduce height from top)
                self.adjust_crop_size('down', False)
                print(f"Contracted crop from top by {self.crop_size_step}px")
            elif key == ord("h"):  # h - contract from right (reduce width from right)
                self.adjust_crop_size('left', False)
                print(f"Contracted crop from right by {self.crop_size_step}px")
            elif key == ord("l"):  # l - contract from left (reduce width from left)
                self.adjust_crop_size('right', False)
                print(f"Contracted crop from left by {self.crop_size_step}px")

        self.save_state()
        self.cleanup_render_thread()
        if hasattr(self, 'cap') and self.cap:
            self.cap.release()
        cv2.destroyAllWindows()

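# A typical invocation, assuming this file is saved as video_editor.py (the actual
# filename is not fixed by this module):
#
#   python video_editor.py path/to/clip.mp4      # edit a single video or image
#   python video_editor.py path/to/media_folder  # browse the videos/images in a folder
#
# The positional argument is validated in main() below.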
def main():
    parser = argparse.ArgumentParser(
        description="Fast Media Editor - Crop, Zoom, and Edit videos and images"
    )
    parser.add_argument(
        "media", help="Path to media file or directory containing videos/images"
    )

    try:
        args = parser.parse_args()
    except SystemExit:
        # If launched from context menu without arguments, this might fail
        input("Argument parsing failed. Press Enter to exit...")
        return

    if not os.path.exists(args.media):
        error_msg = f"Error: {args.media} does not exist"
        print(error_msg)
        input("Press Enter to exit...")  # Keep window open in context menu
        sys.exit(1)

    try:
        editor = VideoEditor(args.media)
        editor.run()
    except Exception as e:
        error_msg = f"Error initializing media editor: {e}"
        print(error_msg)
        import traceback
        traceback.print_exc()  # Full error trace for debugging
        input("Press Enter to exit...")  # Keep window open in context menu
        sys.exit(1)


if __name__ == "__main__":
    main()