Introduce a new constant, CROP_DRAG_MIN_DISTANCE, defining the minimum drag distance required before crop adjustments are applied. This makes crop border dragging more robust: minor, accidental mouse movements no longer trigger unintended adjustments.
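The constant acts as a small hysteresis on the drag gesture. As a minimal, hypothetical sketch of the kind of check it enables (the helper name and call shape are illustrative assumptions, not the editor's actual mouse-event code, which is outside this excerpt):

import math

CROP_DRAG_MIN_DISTANCE = 10  # pixels, mirroring the constant added below

def drag_exceeds_min_distance(start_pos, current_pos):
    """Hypothetical helper: True once the cursor has moved far enough to count as a drag."""
    dx = current_pos[0] - start_pos[0]
    dy = current_pos[1] - start_pos[1]
    return math.hypot(dx, dy) >= CROP_DRAG_MIN_DISTANCE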
4865 lines · 224 KiB · Python
import os
import sys
import cv2
import argparse
import numpy as np
from pathlib import Path
from typing import List, Dict, Any
import time
import re
import threading
import json
import subprocess
import queue
import ctypes
from collections import OrderedDict
from datetime import datetime
from PIL import Image

from utils import load_image_utf8, get_active_window_title
from tracking import FeatureTracker
from capture import Cv2BufferedCap
from project_view import ProjectView


class VideoEditor:
    # Configuration constants
    TARGET_FPS = 80  # Target FPS for speed calculations
    SPEED_INCREMENT = 0.1
    MIN_PLAYBACK_SPEED = 0.05
    MAX_PLAYBACK_SPEED = 1.0

    # Seek multiplier configuration
    SEEK_MULTIPLIER_INCREMENT = 4.0
    MIN_SEEK_MULTIPLIER = 1.0
    MAX_SEEK_MULTIPLIER = 1000.0

    # Auto-repeat seeking configuration
    AUTO_REPEAT_DISPLAY_RATE = 0.1

    # Timeline configuration
    TIMELINE_HEIGHT = 60
    TIMELINE_MARGIN = 20
    TIMELINE_BAR_HEIGHT = 12
    TIMELINE_HANDLE_SIZE = 12
    TIMELINE_COLOR_BG = (80, 80, 80)
    TIMELINE_COLOR_PROGRESS = (0, 120, 255)
    TIMELINE_COLOR_HANDLE = (255, 255, 255)
    TIMELINE_COLOR_BORDER = (200, 200, 200)
    TIMELINE_COLOR_CUT_POINT = (255, 0, 0)

    # Progress bar configuration
    PROGRESS_BAR_HEIGHT = 30
    PROGRESS_BAR_MARGIN_PERCENT = 5  # 5% margin on each side
    PROGRESS_BAR_TOP_MARGIN = 20  # Fixed top margin
    PROGRESS_BAR_FADE_DURATION = 3.0  # seconds to fade out after completion
    PROGRESS_BAR_COLOR_BG = (50, 50, 50)
    PROGRESS_BAR_COLOR_FILL = (0, 255, 0)  # Green when complete
    PROGRESS_BAR_COLOR_PROGRESS = (0, 120, 255)  # Blue during progress
    PROGRESS_BAR_COLOR_BORDER = (200, 200, 200)

    # Zoom and crop settings
    MIN_ZOOM = 0.1
    MAX_ZOOM = 10.0
    ZOOM_INCREMENT = 0.1

    # Supported video extensions
    VIDEO_EXTENSIONS = {".mp4", ".avi", ".mov", ".mkv", ".wmv", ".flv", ".webm", ".m4v"}

    # Supported image extensions
    IMAGE_EXTENSIONS = {".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".tif", ".webp", ".jp2", ".pbm", ".pgm", ".ppm", ".sr", ".ras"}

    # Crop adjustment settings
    CROP_SIZE_STEP = 5  # pixels to expand/contract crop
    CROP_MIN_SIZE = 10  # minimum crop width/height in pixels
    CROP_BORDER_DETECTION_MAX_DISTANCE = 8000  # pixels - maximum distance for border hit detection
    CROP_DRAG_MIN_DISTANCE = 10  # pixels - minimum drag distance before applying crop adjustment

    # Motion tracking settings
    TRACKING_POINT_THRESHOLD = 10  # pixels for delete/snap radius

    # Seek frame counts
    SEEK_FRAMES_CTRL = 60  # Ctrl modifier: 60 frames
    SEEK_FRAMES_SHIFT = 10  # Shift modifier: 10 frames
    SEEK_FRAMES_DEFAULT = 1  # Default: 1 frame

    # Brightness and contrast settings
    MIN_BRIGHTNESS = -100
    MAX_BRIGHTNESS = 100
    MIN_CONTRAST = 0.1
    MAX_CONTRAST = 3.0

    # Image/video quality settings
    JPEG_QUALITY = 95  # JPEG quality for screenshots (0-100)
    IMAGE_MODE_FPS = 30  # Dummy FPS for image mode
    HIGH_FPS_THRESHOLD = 60  # FPS threshold for high FPS detection

    # Frame difference detection settings
    FRAME_DIFFERENCE_THRESHOLD_DEFAULT = 10.0  # Percentage threshold for frame difference
    FRAME_DIFFERENCE_GAP_DEFAULT = 10  # Number of frames between comparisons
    FRAME_DIFFERENCE_PIXEL_THRESHOLD = 30  # Pixel threshold for binary thresholding

    # Template matching settings
    TEMPLATE_MATCH_HISTORY_SIZE = 20  # Number of recent matches to keep
    TEMPLATE_MATCH_AVERAGE_SIZE = 10  # Number of recent matches for average calculation
    TEMPLATE_MATCH_MIN_THRESHOLD = 0.3  # Minimum confidence threshold
    TEMPLATE_MATCH_AVERAGE_FACTOR = 0.8  # Factor for adaptive threshold (80% of average)
    TEMPLATE_MATCH_DEFAULT_THRESHOLD = 0.5  # Default confidence threshold

    # Search/update intervals
    INTERESTING_POINT_SEARCH_UPDATE_INTERVAL = 10  # Frames between progress updates

    # UI display settings
    FONT_SCALE_SMALL = 0.5  # Small font scale for UI text
    OVERLAY_ALPHA_LOW = 0.3  # Low alpha for transparent overlays
    OVERLAY_ALPHA_HIGH = 0.7  # High alpha for semi-transparent overlays

    def __init__(self, path: str):
        self.path = Path(path)

        # Video file management
        self.video_files = []
        self.current_video_index = 0

        # Media type tracking
        self.is_image_mode = False  # True if current file is an image

        # Determine if path is file or directory
        if self.path.is_file():
            self.video_files = [self.path]
        elif self.path.is_dir():
            # Load all media files from directory
            self.video_files = self._get_media_files_from_directory(self.path)
            if not self.video_files:
                raise ValueError(f"No media files found in directory: {path}")
        else:
            raise ValueError(f"Path does not exist: {path}")

        # Mouse and keyboard interaction
        self.mouse_dragging = False
        self.timeline_rect = None
        self.window_width = 1920  # Increased to accommodate 1080p videos
        self.window_height = 1200

        # Auto-repeat seeking state
        self.auto_repeat_active = False
        self.auto_repeat_direction = 0
        self.auto_repeat_shift = False
        self.auto_repeat_ctrl = False
        self.last_display_update = 0

        # Crop settings
        self.crop_rect = None  # (x, y, width, height)
        self.crop_selecting = False
        self.crop_start_point = None
        self.crop_preview_rect = None
        self.crop_history = []  # For undo
        self.crop_border_dragging = False
        self.crop_border_drag_start_pos = None  # (screen_x, screen_y) when drag started
        self.crop_border_drag_start_rect = None  # (x, y, w, h) in rotated coords when drag started
        self.crop_border_drag_inside = None  # True if drag started inside crop area, False if outside
        self.crop_border_drag_outside_side = None  # 'left', 'right', 'top', 'bottom' if outside

        # Zoom settings
        self.zoom_factor = 1.0
        self.zoom_center = None  # (x, y) center point for zoom

        # Rotation settings
        self.rotation_angle = 0  # 0, 90, 180, 270 degrees

        # Brightness and contrast settings
        self.brightness = 0  # -100 to 100
        self.contrast = 1.0  # 0.1 to 3.0

        # Marker looping state
        self.looping_between_markers = False

        # Display offset for panning when zoomed
        self.display_offset = [0, 0]

        # Fullscreen state
        self.is_fullscreen = False

        # Progress bar state
        self.progress_bar_visible = False
        self.progress_bar_progress = 0.0  # 0.0 to 1.0
        self.progress_bar_complete = False
        self.progress_bar_complete_time = None
        self.progress_bar_text = ""
        self.progress_bar_fps = 0.0  # Current rendering FPS

        # Feedback message state
        self.feedback_message = ""
        self.feedback_message_time = None
        self.feedback_message_duration = 0.2  # seconds to show message

        # Crop adjustment settings
        self.crop_size_step = self.CROP_SIZE_STEP

        # Render thread management
        self.render_thread = None
        self.render_cancelled = False
        self.render_progress_queue = queue.Queue()
        self.ffmpeg_process = None  # Track FFmpeg process for cancellation

        # Display optimization - track when redraw is needed
        self.display_needs_update = True
        self.last_display_state = None

        # Cached transformations for performance
        self.cached_transformed_frame = None
        self.cached_frame_number = None
        self.cached_transform_hash = None

        # Motion tracking state
        self.tracking_points = {}  # {frame_number: [(x, y), ...]} in original frame coords
        self.tracking_enabled = False

        # Feature tracking system
        self.feature_tracker = FeatureTracker()

        # Initialize selective feature extraction/deletion
        self.selective_feature_extraction_start = None
        self.selective_feature_extraction_rect = None
        self.selective_feature_deletion_start = None
        self.selective_feature_deletion_rect = None

        # Optical flow tracking
        self.optical_flow_enabled = False
        self.previous_frame_for_flow = None

        # Template matching tracking
        self.template_match_history = []  # Store recent match confidences for adaptive thresholding
        # (x, y, w, h) in rotated frame coordinates
        self.template_selection_start = None
        self.template_selection_rect = None

        # Simple template system - list of (start_frame, region, template_image) tuples sorted by start_frame
        self.templates = []  # [(start_frame, region, template_image), ...] sorted by start_frame

        # Template matching modes
        self.template_matching_full_frame = False  # Toggle for full frame vs cropped template matching

        # Frame difference for interesting point detection
        self.frame_difference_threshold = self.FRAME_DIFFERENCE_THRESHOLD_DEFAULT
        self.frame_difference_gap = self.FRAME_DIFFERENCE_GAP_DEFAULT

        # Region selection for interesting point detection
        self.interesting_region = None  # (x, y, width, height) or None for full frame
        self.selecting_interesting_region = False
        self.region_selection_start = None
        self.region_selection_current = None

        # Search state for interesting point detection
        self.searching_interesting_point = False
        self.search_progress_text = ""
        self.search_progress_percent = 0.0
        self.search_state = None  # For non-blocking search state

        # Project view mode
        self.project_view_mode = False
        self.project_view = None

        # Initialize with first video
        self._load_video(self.video_files[0])

        # Load saved state after all attributes are initialized
        self.load_state()

    def _get_state_file_path(self) -> Path:
        """Get the state file path for the current media file"""
        if not hasattr(self, 'video_path') or not self.video_path:
            return None
        state_path = self.video_path.with_suffix('.json')
        return state_path

    def save_state(self):
        """Save current editor state to JSON file"""
        state_file = self._get_state_file_path()
        if not state_file:
            print("No state file path available")
            return False

        try:
            state = {
                'timestamp': time.time(),
                'current_frame': getattr(self, 'current_frame', 0),
                'crop_rect': self.crop_rect,
                'zoom_factor': self.zoom_factor,
                'zoom_center': self.zoom_center,
                'rotation_angle': self.rotation_angle,
                'brightness': self.brightness,
                'contrast': self.contrast,
                'cut_start_frame': self.cut_start_frame,
                'cut_end_frame': self.cut_end_frame,
                'looping_between_markers': self.looping_between_markers,
                'display_offset': self.display_offset,
                'playback_speed': getattr(self, 'playback_speed', 1.0),
                'seek_multiplier': getattr(self, 'seek_multiplier', 1.0),
                'is_playing': getattr(self, 'is_playing', False),
                'tracking_enabled': self.tracking_enabled,
                'tracking_points': {str(k): v for k, v in self.tracking_points.items()},
                'feature_tracker': self.feature_tracker.get_state_dict(),
                'template_matching_full_frame': self.template_matching_full_frame,
                'frame_difference_threshold': self.frame_difference_threshold,
                'frame_difference_gap': self.frame_difference_gap,
                'interesting_region': self.interesting_region,
                'templates': [{
                    'start_frame': start_frame,
                    'region': region
                } for start_frame, region, template_image in self.templates]
            }

            with open(state_file, 'w') as f:
                json.dump(state, f, indent=2)
            print(f"State saved to {state_file}")

            # Also save dated copy
            iso_date = datetime.now().isoformat().replace(':', '-').split('.')[0]
            dated_state_file = self.video_path.parent / f"{self.video_path.stem}-{iso_date}.json"
            with open(dated_state_file, 'w') as f:
                json.dump(state, f, indent=2)

            # Refresh project view progress data if project view is active
            if self.project_view_mode and self.project_view:
                self.project_view.refresh_progress_data()

            return True
        except Exception as e:
            print(f"Error saving state: {e}")
            return False

    def load_state(self) -> bool:
        """Load editor state from JSON file"""
        state_file = self._get_state_file_path()
        if not state_file:
            print("No state file path available")
            return False
        if not state_file.exists():
            print(f"State file does not exist: {state_file}")
            return False

        print(f"Loading state from: {state_file}")
        try:
            with open(state_file, 'r') as f:
                state = json.load(f)

            print(f"State file contents: {state}")

            # Restore state values
            if 'current_frame' in state:
                self.current_frame = state['current_frame']
                print(f"Loaded current_frame: {self.current_frame}")
            if 'crop_rect' in state and state['crop_rect'] is not None:
                self.crop_rect = tuple(state['crop_rect'])
                print(f"Loaded crop_rect: {self.crop_rect}")
            if 'zoom_factor' in state:
                self.zoom_factor = state['zoom_factor']
                print(f"Loaded zoom_factor: {self.zoom_factor}")
            if 'zoom_center' in state and state['zoom_center'] is not None:
                self.zoom_center = tuple(state['zoom_center'])
                print(f"Loaded zoom_center: {self.zoom_center}")
            if 'rotation_angle' in state:
                self.rotation_angle = state['rotation_angle']
                print(f"Loaded rotation_angle: {self.rotation_angle}")
            if 'brightness' in state:
                self.brightness = state['brightness']
                print(f"Loaded brightness: {self.brightness}")
            if 'contrast' in state:
                self.contrast = state['contrast']
                print(f"Loaded contrast: {self.contrast}")
            if 'cut_start_frame' in state:
                self.cut_start_frame = state['cut_start_frame']
                print(f"Loaded cut_start_frame: {self.cut_start_frame}")
            if 'cut_end_frame' in state:
                self.cut_end_frame = state['cut_end_frame']
                print(f"Loaded cut_end_frame: {self.cut_end_frame}")
            if 'looping_between_markers' in state:
                self.looping_between_markers = state['looping_between_markers']
                print(f"Loaded looping_between_markers: {self.looping_between_markers}")
            if 'display_offset' in state:
                self.display_offset = state['display_offset']
                print(f"Loaded display_offset: {self.display_offset}")
            if 'playback_speed' in state:
                self.playback_speed = state['playback_speed']
                print(f"Loaded playback_speed: {self.playback_speed}")
            if 'seek_multiplier' in state:
                self.seek_multiplier = state['seek_multiplier']
                print(f"Loaded seek_multiplier: {self.seek_multiplier}")
            if 'is_playing' in state:
                self.is_playing = state['is_playing']
                print(f"Loaded is_playing: {self.is_playing}")
            if 'tracking_enabled' in state:
                self.tracking_enabled = state['tracking_enabled']
                print(f"Loaded tracking_enabled: {self.tracking_enabled}")
            if 'tracking_points' in state and isinstance(state['tracking_points'], dict):
                self.tracking_points = {int(k): v for k, v in state['tracking_points'].items()}
                print(f"Loaded tracking_points: {sum(len(v) for v in self.tracking_points.values())} points")

            # Load feature tracker state
            if 'feature_tracker' in state:
                self.feature_tracker.load_state_dict(state['feature_tracker'])
                print("Loaded feature tracker state")

            # Load template matching state
            if 'template_matching_full_frame' in state:
                self.template_matching_full_frame = state['template_matching_full_frame']

            # Load frame difference threshold
            if 'frame_difference_threshold' in state:
                self.frame_difference_threshold = state['frame_difference_threshold']
                print(f"Loaded frame difference threshold: {self.frame_difference_threshold:.1f}%")

            # Load frame difference gap
            if 'frame_difference_gap' in state:
                self.frame_difference_gap = state['frame_difference_gap']
                print(f"Loaded frame difference gap: {self.frame_difference_gap} frames")

            # Load interesting region
            if 'interesting_region' in state and state['interesting_region'] is not None:
                self.interesting_region = tuple(state['interesting_region'])
                x, y, w, h = self.interesting_region
                print(f"Loaded interesting region: ({x}, {y}, {w}, {h})")
            else:
                self.interesting_region = None

            # Load simple templates state
            if 'templates' in state:
                self.templates = []
                for template_data in state['templates']:
                    start_frame = template_data['start_frame']
                    region = template_data['region']
                    # We'll recreate the template image when needed
                    self.templates.append((start_frame, region, None))
                # Sort by start_frame
                self.templates.sort(key=lambda x: x[0])
                print(f"Loaded {len(self.templates)} templates")

                # Recreate template images by seeking to capture frames
                self._recreate_template_images()

            # Validate cut markers against current video length
            if self.cut_start_frame is not None and self.cut_start_frame >= self.total_frames:
                print(f"DEBUG: cut_start_frame {self.cut_start_frame} is beyond video length {self.total_frames}, clearing")
                self.cut_start_frame = None
            if self.cut_end_frame is not None and self.cut_end_frame >= self.total_frames:
                print(f"DEBUG: cut_end_frame {self.cut_end_frame} is beyond video length {self.total_frames}, clearing")
                self.cut_end_frame = None

            # Calculate and show marker positions on timeline
            if self.cut_start_frame is not None and self.cut_end_frame is not None:
                start_progress = self.cut_start_frame / max(1, self.total_frames - 1)
                end_progress = self.cut_end_frame / max(1, self.total_frames - 1)
                print(f"Markers will be drawn at: Start {start_progress:.4f} ({self.cut_start_frame}/{self.total_frames}), End {end_progress:.4f} ({self.cut_end_frame}/{self.total_frames})")

            # Validate and clamp values
            self.current_frame = max(0, min(self.current_frame, getattr(self, 'total_frames', 1) - 1))
            self.zoom_factor = max(self.MIN_ZOOM, min(self.MAX_ZOOM, self.zoom_factor))
            self.brightness = max(self.MIN_BRIGHTNESS, min(self.MAX_BRIGHTNESS, self.brightness))
            self.contrast = max(self.MIN_CONTRAST, min(self.MAX_CONTRAST, self.contrast))
            self.playback_speed = max(self.MIN_PLAYBACK_SPEED, min(self.MAX_PLAYBACK_SPEED, self.playback_speed))
            self.seek_multiplier = max(self.MIN_SEEK_MULTIPLIER, min(self.MAX_SEEK_MULTIPLIER, self.seek_multiplier))

            # Apply loaded settings
            self.clear_transformation_cache()
            self.load_current_frame()

            print("Successfully loaded and applied all settings from state file")
            return True
        except Exception as e:
            print(f"Error loading state: {e}")
            return False

    def _is_video_file(self, file_path: Path) -> bool:
        """Check if file is a supported video format"""
        return file_path.suffix.lower() in self.VIDEO_EXTENSIONS

    def _is_image_file(self, file_path: Path) -> bool:
        """Check if file is a supported image format"""
        return file_path.suffix.lower() in self.IMAGE_EXTENSIONS

    def _is_media_file(self, file_path: Path) -> bool:
        """Check if file is a supported media format (video or image)"""
        return self._is_video_file(file_path) or self._is_image_file(file_path)

    def _get_next_screenshot_filename(self, video_path: Path) -> str:
        """Generate the next available screenshot filename: video_frame_00001.jpg, video_frame_00002.jpg, etc."""
        directory = video_path.parent
        base_name = video_path.stem

        # Pattern to match existing screenshot files: video_frame_00001.jpg, video_frame_00002.jpg, etc.
        pattern = re.compile(rf"^{re.escape(base_name)}_frame_(\d{{5}})\.(jpg|jpeg|png)$")

        existing_numbers = set()
        for file_path in directory.iterdir():
            if file_path.is_file():
                match = pattern.match(file_path.name)
                if match:
                    existing_numbers.add(int(match.group(1)))

        # Find the next available number starting from 1
        next_number = 1
        while next_number in existing_numbers:
            next_number += 1

        return f"{base_name}_frame_{next_number:05d}.jpg"

    def save_current_frame(self):
        """Save the current frame as a screenshot"""
        if self.current_display_frame is None:
            print("No frame to save")
            return False

        # Generate the next available screenshot filename
        screenshot_name = self._get_next_screenshot_filename(self.video_path)
        screenshot_path = self.video_path.parent / screenshot_name

        # Apply current transformations (crop, zoom, rotation, brightness/contrast) to the frame
        processed_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame.copy())

        if processed_frame is not None:
            # Save the processed frame with high quality settings
            success = cv2.imwrite(str(screenshot_path), processed_frame, [cv2.IMWRITE_JPEG_QUALITY, self.JPEG_QUALITY])
            if success:
                print(f"Screenshot saved: {screenshot_name}")
                self.show_feedback_message(f"Screenshot saved: {screenshot_name}")
                return True
            else:
                print(f"Error: Could not save screenshot to {screenshot_path}")
                self.show_feedback_message("Error: Could not save screenshot")
                return False
        else:
            print("Error: Could not process frame for screenshot")
            self.show_feedback_message("Error: Could not process frame")
            return False

    def _get_media_files_from_directory(self, directory: Path) -> List[Path]:
        """Get all media files (video and image) from a directory, sorted by name"""
        media_files = set()
        for file_path in directory.iterdir():
            if (
                file_path.is_file()
                and self._is_media_file(file_path)
            ):
                media_files.add(file_path)

        # Pattern to match edited files: basename_edited_001.ext, basename_edited_002.ext, etc.
        edited_pattern = re.compile(r"^(.+)_edited_\d{3}$")

        edited_base_names = set()
        for file_path in media_files:
            match = edited_pattern.match(file_path.stem)
            if match:
                edited_base_names.add(match.group(1))

        non_edited_media = set()
        for file_path in media_files:
            # Skip if this is an edited file
            if edited_pattern.match(file_path.stem):
                continue

            # Skip if there's already an edited version of this file
            if file_path.stem in edited_base_names:
                continue

            non_edited_media.add(file_path)

        return sorted(non_edited_media)

    def _load_video(self, media_path: Path):
        """Load a media file (video or image) and initialize properties"""
        if hasattr(self, "cap") and self.cap:
            self.cap.release()

        self.video_path = media_path
        self.is_image_mode = self._is_image_file(media_path)

        if self.is_image_mode:
            # Load static image with UTF-8 support
            self.static_image = load_image_utf8(media_path)

            # Set up image properties to mimic video interface
            self.frame_height, self.frame_width = self.static_image.shape[:2]
            self.total_frames = 1
            self.fps = self.IMAGE_MODE_FPS
            self.cap = None

            print(f"Loaded image: {self.video_path.name}")
            print(f"  Resolution: {self.frame_width}x{self.frame_height}")
        else:
            # Try different backends for better performance
            # Order of preference: FFmpeg (best for video files), DirectShow (cameras), any available
            backends_to_try = []
            if hasattr(cv2, 'CAP_FFMPEG'):  # FFmpeg - best for video files
                backends_to_try.append(cv2.CAP_FFMPEG)
            if hasattr(cv2, 'CAP_DSHOW'):  # DirectShow - usually for cameras
                backends_to_try.append(cv2.CAP_DSHOW)
            backends_to_try.append(cv2.CAP_ANY)  # Fallback

            self.cap = None
            for backend in backends_to_try:
                try:
                    self.cap = Cv2BufferedCap(self.video_path, backend)
                    if self.cap.isOpened():
                        break
                except Exception:
                    continue

            if not self.cap or not self.cap.isOpened():
                raise ValueError(f"Could not open video file: {media_path}")

            # Video properties from buffered cap
            self.total_frames = self.cap.total_frames
            self.fps = self.cap.fps
            self.frame_width = self.cap.frame_width
            self.frame_height = self.cap.frame_height

            # Get codec information for debugging
            fourcc = int(self.cap.cap.get(cv2.CAP_PROP_FOURCC))
            codec = "".join([chr((fourcc >> 8 * i) & 0xFF) for i in range(4)])

            # Get backend information
            backend_name = "FFmpeg" if hasattr(cv2, 'CAP_FFMPEG') and backend == cv2.CAP_FFMPEG else "Other"

            print(f"Loaded video: {self.current_video_index + 1}/{len(self.video_files)}")
            print(f"  Codec: {codec} | Backend: {backend_name} | Resolution: {self.frame_width}x{self.frame_height}")
            print(f"  FPS: {self.fps:.2f} | Frames: {self.total_frames} | Duration: {self.total_frames/self.fps:.1f}s")

            # Performance warnings for known problematic cases
            if codec in ['H264', 'H.264', 'AVC1', 'avc1'] and self.total_frames > 10000:
                print("  Warning: Large H.264 video detected - seeking may be slow")
            if self.frame_width * self.frame_height > 1920 * 1080:
                print("  Warning: High resolution video - decoding may be slow")
            if self.fps > self.HIGH_FPS_THRESHOLD:
                print("  Warning: High framerate video - may impact playback smoothness")

        # Set default values for video-specific properties
        self.current_frame = 0
        self.is_playing = False  # Playback always starts paused
        self.playback_speed = 1.0
        self.seek_multiplier = 1.0
        self.cut_start_frame = None
        self.cut_end_frame = None

        # Always reset these regardless of state
        self.current_display_frame = None

    def switch_to_video(self, index: int):
        """Switch to a specific video by index"""
        if 0 <= index < len(self.video_files):
            self.current_video_index = index
            self._load_video(self.video_files[index])
            self.load_current_frame()

    def next_video(self):
        """Switch to the next video"""
        self.save_state()  # Save current video state before switching
        next_index = (self.current_video_index + 1) % len(self.video_files)
        self.switch_to_video(next_index)

    def previous_video(self):
        """Switch to the previous video"""
        self.save_state()  # Save current video state before switching
        prev_index = (self.current_video_index - 1) % len(self.video_files)
        self.switch_to_video(prev_index)

    def load_current_frame(self) -> bool:
        """Load the current frame into display cache"""
        if self.is_image_mode:
            # For images, just copy the static image
            self.current_display_frame = self.static_image.copy()
            return True
        else:
            # Use buffered cap to get frame
            try:
                self.current_display_frame = self.cap.get_frame(self.current_frame)
                return True
            except Exception as e:
                print(f"Failed to load frame {self.current_frame}: {e}")
                return False

    def calculate_frame_delay(self) -> int:
        """Calculate frame delay in milliseconds based on playback speed"""
        # Round to 2 decimals to handle floating point precision issues
        speed = round(self.playback_speed, 2)
        if speed >= 1.0:
            # Speed >= 1: maximum FPS (no delay)
            return 1
        else:
            # Speed < 1: scale FPS based on speed
            # Formula: fps = TARGET_FPS * speed, so delay = 1000 / fps
            target_fps = self.TARGET_FPS * speed
            delay_ms = int(1000 / target_fps)
            return max(1, delay_ms)

    def seek_video(self, frames_delta: int):
        """Seek video by specified number of frames"""
        target_frame = max(
            0, min(self.current_frame + frames_delta, self.total_frames - 1)
        )
        self.current_frame = target_frame
        self.load_current_frame()
        self.display_needs_update = True

    def seek_video_with_modifier(
        self, direction: int, shift_pressed: bool, ctrl_pressed: bool
    ):
        """Seek video with different frame counts based on modifiers and seek multiplier"""
        if ctrl_pressed:
            base_frames = self.SEEK_FRAMES_CTRL
        elif shift_pressed:
            base_frames = self.SEEK_FRAMES_SHIFT
        else:
            base_frames = self.SEEK_FRAMES_DEFAULT

        # Apply seek multiplier to the base frame count
        frames = direction * int(base_frames * self.seek_multiplier)
        self.seek_video(frames)

    def seek_video_exact_frame(self, direction: int):
        """Seek video by exactly 1 frame, unaffected by seek multiplier"""
        if self.is_image_mode:
            return

        frames = direction  # Always exactly 1 frame
        self.seek_video(frames)

    def start_auto_repeat_seek(self, direction: int, shift_pressed: bool, ctrl_pressed: bool):
        """Start auto-repeat seeking"""
        if self.is_image_mode:
            return

        self.auto_repeat_active = True
        self.auto_repeat_direction = direction
        self.auto_repeat_shift = shift_pressed
        self.auto_repeat_ctrl = ctrl_pressed

        # Initialize last_display_update to prevent immediate auto-repeat
        self.last_display_update = time.time()

        self.seek_video_with_modifier(direction, shift_pressed, ctrl_pressed)

    def stop_auto_repeat_seek(self):
        """Stop auto-repeat seeking"""
        self.auto_repeat_active = False
        self.auto_repeat_direction = 0
        self.auto_repeat_shift = False
        self.auto_repeat_ctrl = False

    def update_auto_repeat_seek(self):
        """Update auto-repeat seeking"""
        if not self.auto_repeat_active or self.is_image_mode:
            return

        current_time = time.time()

        if current_time - self.last_display_update >= self.AUTO_REPEAT_DISPLAY_RATE:
            self.seek_video_with_modifier(
                self.auto_repeat_direction,
                self.auto_repeat_shift,
                self.auto_repeat_ctrl
            )
            self.last_display_update = current_time

    def seek_to_frame(self, frame_number: int):
        """Seek to specific frame"""
        old_frame = self.current_frame
        self.current_frame = max(0, min(frame_number, self.total_frames - 1))
        self.load_current_frame()

        # Only log when we actually change frames
        if old_frame != self.current_frame:
            print(f"DEBUG: === LOADED NEW FRAME {self.current_frame} ===")
            print(f"DEBUG: Features available for frames: {sorted(self.feature_tracker.features.keys())}")
            if self.current_frame in self.feature_tracker.features:
                feature_count = len(self.feature_tracker.features[self.current_frame]['positions'])
                print(f"DEBUG: Frame {self.current_frame} has {feature_count} features")
            else:
                print(f"DEBUG: Frame {self.current_frame} has NO features")

        # Select the best template for the new frame
        if self.templates:
            self._select_best_template_for_frame(self.current_frame)

        # Auto-extract features if feature tracking is enabled and auto-tracking is on
        print(f"DEBUG: seek_to_frame {frame_number}: is_image_mode={self.is_image_mode}, tracking_enabled={self.feature_tracker.tracking_enabled}, auto_tracking={self.feature_tracker.auto_tracking}, display_frame={self.current_display_frame is not None}")

        if (not self.is_image_mode and
                self.feature_tracker.tracking_enabled and
                self.feature_tracker.auto_tracking and
                self.current_display_frame is not None):

            print(f"DEBUG: Auto-tracking conditions met for frame {self.current_frame}")
            # Only extract if we don't already have features for this frame
            if self.current_frame not in self.feature_tracker.features:
                print(f"DEBUG: Extracting features for frame {self.current_frame}")
                # Extract features from the transformed frame (what user sees)
                # This handles all transformations (crop, zoom, rotation) correctly
                display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)
                if display_frame is not None:
                    # Map coordinates from transformed frame to rotated frame coordinates
                    # Use the existing coordinate transformation system
                    def coord_mapper(x, y):
                        # Map from transformed frame coordinates to screen coordinates
                        frame_height, frame_width = display_frame.shape[:2]
                        available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
                        start_y = (available_height - frame_height) // 2
                        start_x = (self.window_width - frame_width) // 2

                        # Convert to screen coordinates
                        screen_x = x + start_x
                        screen_y = y + start_y

                        # Use the existing coordinate transformation system
                        return self._map_screen_to_rotated(screen_x, screen_y)

                    self.feature_tracker.extract_features(display_frame, self.current_frame, coord_mapper)
            else:
                print(f"DEBUG: Frame {self.current_frame} already has features, skipping")

        # Optical flow tracking - track features from previous frame
        if (not self.is_image_mode and
                self.optical_flow_enabled and
                self.feature_tracker.tracking_enabled and
                self.previous_frame_for_flow is not None and
                self.current_display_frame is not None):

            self._track_with_optical_flow()

        # Store current frame for next optical flow iteration
        if not self.is_image_mode and self.current_display_frame is not None:
            self.previous_frame_for_flow = self.current_display_frame.copy()

    def jump_to_previous_marker(self):
        """Jump to the previous tracking marker (frame with tracking points)."""
        if self.is_image_mode:
            return
        self.stop_auto_repeat_seek()
        tracking_frames = sorted(k for k, v in self.tracking_points.items() if v)
        if not tracking_frames:
            print("DEBUG: No tracking markers; prev jump ignored")
            return
        current = self.current_frame
        candidates = [f for f in tracking_frames if f < current]
        if candidates:
            target = candidates[-1]
            print(f"DEBUG: Jump prev tracking from {current} -> {target}; tracking_frames={tracking_frames}")
            self.seek_to_frame(target)
        else:
            target = tracking_frames[0]
            print(f"DEBUG: Jump prev tracking to first marker from {current} -> {target}; tracking_frames={tracking_frames}")
            self.seek_to_frame(target)

    def jump_to_next_marker(self):
        """Jump to the next tracking marker (frame with tracking points)."""
        if self.is_image_mode:
            return
        self.stop_auto_repeat_seek()
        tracking_frames = sorted(k for k, v in self.tracking_points.items() if v)
        if not tracking_frames:
            print("DEBUG: No tracking markers; next jump ignored")
            return
        current = self.current_frame
        for f in tracking_frames:
            if f > current:
                print(f"DEBUG: Jump next tracking from {current} -> {f}; tracking_frames={tracking_frames}")
                self.seek_to_frame(f)
                return
        target = tracking_frames[-1]
        print(f"DEBUG: Jump next tracking to last marker from {current} -> {target}; tracking_frames={tracking_frames}")
        self.seek_to_frame(target)

    def continue_interesting_point_search(self):
        """Continue non-blocking search for interesting point - called from main loop"""
        if not self.search_state or self.search_state.get('completed', False):
            return

        try:
            # Process a small number of steps per call
            steps_per_call = 3
            update_interval = 5

            for _ in range(steps_per_call):
                if self.search_state['target_frame'] >= self.total_frames:
                    # End of video reached
                    self.search_state['completed'] = True
                    print("Reached end of video")
                    break

                # Read comparison frame
                comparison_frame_num = min(self.search_state['target_frame'] + self.frame_difference_gap, self.total_frames - 1)
                self.cap.cap.set(cv2.CAP_PROP_POS_FRAMES, comparison_frame_num)
                ret, comparison_frame = self.cap.cap.read()
                if not ret:
                    break

                self.search_state['frames_checked'] += 1

                # Calculate difference using full resolution
                diff_percentage = self.calculate_frame_difference(self.search_state['base_frame'], comparison_frame)

                # Update OSD
                if self.search_state['frames_checked'] % update_interval == 0 or diff_percentage >= self.frame_difference_threshold:
                    progress_percent = (self.search_state['frames_checked'] / max(1, (self.total_frames - self.search_state['current_frame_backup']) // self.frame_difference_gap)) * 100
                    self.search_progress_percent = progress_percent
                    self.search_progress_text = f"Gap search: {self.search_state['target_frame']}↔{comparison_frame_num} ({diff_percentage:.1f}% change, gap: {self.frame_difference_gap}) - Press ; to cancel"

                    # Update display frame
                    self.current_frame = comparison_frame_num
                    self.current_display_frame = comparison_frame
                    self.display_needs_update = True

                # Check if found interesting point
                if diff_percentage >= self.frame_difference_threshold:
                    full_diff = self.calculate_frame_difference(self.search_state['base_frame'], comparison_frame)
                    print(f"Found interesting point between frames {self.search_state['target_frame']} and {comparison_frame_num} ({full_diff:.1f}% change)")
                    self.show_feedback_message(f"Interesting: {full_diff:.1f}% change over {self.frame_difference_gap} frames")

                    self.current_frame = comparison_frame_num
                    self.current_display_frame = comparison_frame

                    # Clean up search state
                    self.search_state = None
                    self.searching_interesting_point = False
                    self.search_progress_text = ""
                    self.display_needs_update = True
                    self.tracking_points.setdefault(comparison_frame_num, []).append((self.frame_width // 2, self.frame_height // 2))
                    self.go_to_next_interesting_point()
                    return

                # Move to next comparison
                self.search_state['target_frame'] += self.frame_difference_gap
                self.search_state['base_frame'] = comparison_frame.copy() if comparison_frame is not None else None

        except Exception as e:
            print(f"Error during search: {e}")
            self.search_state['completed'] = True

    def go_to_next_interesting_point(self):
        """Go to the next frame where the difference from the previous frame exceeds the threshold"""
        if self.is_image_mode:
            return

        self.stop_auto_repeat_seek()

        if self.current_frame >= self.total_frames - 1:
            print("Already at last frame")
            return

        # Store current frame for comparison
        current_frame_backup = self.current_frame
        current_display_frame = self.current_display_frame.copy() if self.current_display_frame is not None else None

        print(f"Searching for next interesting point from frame {current_frame_backup + 1} with threshold {self.frame_difference_threshold:.1f}% (gap: {self.frame_difference_gap} frames)")

        # Initialize search state for main loop processing instead of blocking
        self.search_state = {
            'current_frame_backup': current_frame_backup,
            'target_frame': current_frame_backup + 1,
            'frames_checked': 0,
            'base_frame': None,
            'base_frame_num': None,
            'search_cancelled': False,
            'update_interval': self.INTERESTING_POINT_SEARCH_UPDATE_INTERVAL
        }

        # Enable search mode for OSD display
        self.searching_interesting_point = True
        self.search_progress_text = f"Starting search from frame {current_frame_backup + 1} (threshold: {self.frame_difference_threshold:.1f}%, gap: {self.frame_difference_gap} frames) - Press ; to cancel"
        self.search_progress_percent = 0
        self.display_needs_update = True

        # Read the first frame to start comparisons
        self.cap.cap.set(cv2.CAP_PROP_POS_FRAMES, current_frame_backup)
        ret, base_frame = self.cap.cap.read()
        if not ret:
            self.search_state['search_cancelled'] = True
            print("Could not read base frame")
            return

        self.search_state['base_frame'] = base_frame
        self.search_state['base_frame_num'] = current_frame_backup

        # Let main loop handle the search - don't block here
        return

    def _get_previous_tracking_point(self):
        """Get the tracking point from the previous frame that has tracking points (like jump_to_previous_marker)."""
        if self.is_image_mode or not self.tracking_points:
            return None

        tracking_frames = sorted(k for k, v in self.tracking_points.items() if v and 0 <= k < self.total_frames)
        if not tracking_frames:
            return None

        current = self.current_frame
        candidates = [f for f in tracking_frames if f < current]

        if candidates:
            # Use the most recent frame before current (like jump_to_previous_marker)
            prev_frame = candidates[-1]
            return prev_frame, self.tracking_points[prev_frame]
        else:
            # If no previous frames, use the first frame with tracking points
            prev_frame = tracking_frames[0]
            return prev_frame, self.tracking_points[prev_frame]

    def _get_next_tracking_point(self):
        """Get the tracking point from the next frame that has tracking points."""
        if self.is_image_mode or not self.tracking_points:
            return None

        tracking_frames = sorted(k for k, v in self.tracking_points.items() if v and 0 <= k < self.total_frames)
        if not tracking_frames:
            return None

        # Find the first frame with tracking points that's after current frame
        next_frames = [f for f in tracking_frames if f > self.current_frame]
        if not next_frames:
            return None

        next_frame = min(next_frames)
        return next_frame, self.tracking_points[next_frame]

    def _point_to_line_distance_and_foot(self, px, py, x1, y1, x2, y2):
        """Calculate distance from point (px, py) to infinite line (x1, y1) to (x2, y2) and return foot of perpendicular"""
        # Convert line to general form: Ax + By + C = 0
        # (y2 - y1)(x - x1) - (x2 - x1)(y - y1) = 0
        A = y2 - y1
        B = -(x2 - x1)  # Note the negative sign
        C = -(A * x1 + B * y1)

        # Calculate distance: d = |Ax + By + C| / sqrt(A^2 + B^2)
        denominator = (A * A + B * B) ** 0.5
        if denominator == 0:
            # Line is actually a point
            distance = ((px - x1) ** 2 + (py - y1) ** 2) ** 0.5
            return distance, (x1, y1)

        distance = abs(A * px + B * py + C) / denominator

        # Calculate foot of perpendicular: (xf, yf)
        # xf = xu - A(Axu + Byu + C)/(A^2 + B^2)
        # yf = yu - B(Axu + Byu + C)/(A^2 + B^2)
        numerator = A * px + B * py + C
        xf = px - A * numerator / (A * A + B * B)
        yf = py - B * numerator / (A * A + B * B)

        return distance, (xf, yf)

    def advance_frame(self) -> bool:
        """Advance to next frame - handles playback speed and marker looping"""
        if not self.is_playing:
            return True

        # Always advance by 1 frame - speed is controlled by delay timing
        new_frame = self.current_frame + 1

        # Handle marker looping bounds
        if self.looping_between_markers and self.cut_start_frame is not None and self.cut_end_frame is not None:
            if new_frame >= self.cut_end_frame:
                # Loop back to start marker
                new_frame = self.cut_start_frame
        elif new_frame >= self.total_frames:
            # Loop to beginning
            new_frame = 0

        # Update current frame and load it
        self.current_frame = new_frame
        return self.load_current_frame()

    def apply_crop_zoom_and_rotation(self, frame):
        """Apply current crop, zoom, rotation, and brightness/contrast settings to frame"""
        if frame is None:
            return None

        # Create a hash of the transformation parameters for caching
        transform_hash = hash((
            self.crop_rect,
            self.zoom_factor,
            self.rotation_angle,
            self.brightness,
            self.contrast,
            tuple(self.display_offset)
        ))

        # Check if we can use cached transformation during auto-repeat seeking
        if (self.auto_repeat_active and
                self.cached_transformed_frame is not None and
                self.cached_frame_number == self.current_frame and
                self.cached_transform_hash == transform_hash):
            return self.cached_transformed_frame.copy()

        # Work in-place when possible to avoid unnecessary copying
        processed_frame = frame

        # Apply brightness/contrast first (to original frame for best quality)
        processed_frame = self.apply_brightness_contrast(processed_frame)

        # Apply rotation before cropping so crop_rect is interpreted in ROTATED frame coordinates
        if self.rotation_angle != 0:
            processed_frame = self.apply_rotation(processed_frame)

        # Apply crop (interpreted in rotated frame coordinates) using EFFECTIVE rect
        eff_x, eff_y, eff_w, eff_h = self._get_effective_crop_rect_for_frame(getattr(self, 'current_frame', 0))
        if eff_w > 0 and eff_h > 0:
            eff_x = max(0, min(eff_x, processed_frame.shape[1] - 1))
            eff_y = max(0, min(eff_y, processed_frame.shape[0] - 1))
            eff_w = min(eff_w, processed_frame.shape[1] - eff_x)
            eff_h = min(eff_h, processed_frame.shape[0] - eff_y)
            processed_frame = processed_frame[eff_y : eff_y + eff_h, eff_x : eff_x + eff_w]

        # Apply zoom
        if self.zoom_factor != 1.0:
            height, width = processed_frame.shape[:2]
            new_width = int(width * self.zoom_factor)
            new_height = int(height * self.zoom_factor)
            processed_frame = cv2.resize(
                processed_frame, (new_width, new_height), interpolation=cv2.INTER_LINEAR
            )

            # Handle zoom center and display offset
            if new_width > self.window_width or new_height > self.window_height:
                # Calculate crop from zoomed image to fit window
                start_x = max(0, self.display_offset[0])
                start_y = max(0, self.display_offset[1])
                end_x = min(new_width, start_x + self.window_width)
                end_y = min(new_height, start_y + self.window_height)
                processed_frame = processed_frame[start_y:end_y, start_x:end_x]

        # Cache the result for auto-repeat seeking performance
        if self.auto_repeat_active:
            self.cached_transformed_frame = processed_frame.copy()
            self.cached_frame_number = self.current_frame
            self.cached_transform_hash = transform_hash

        return processed_frame

    def calculate_frame_difference(self, frame1, frame2) -> float:
        """Calculate percentage difference between two frames, optionally within a region"""
        if frame1 is None or frame2 is None:
            return 0.0

        try:
            # Ensure frames are the same size
            if frame1.shape != frame2.shape:
                # Resize frame2 to match frame1
                frame2 = cv2.resize(frame2, (frame1.shape[1], frame1.shape[0]))

            # Apply region selection if set
            if self.interesting_region is not None:
                x, y, w, h = self.interesting_region

                # Ensure region is within frame bounds
                x = max(0, min(x, frame1.shape[1] - 1))
                y = max(0, min(y, frame1.shape[0] - 1))
                w = min(w, frame1.shape[1] - x)
                h = min(h, frame1.shape[0] - y)

                if w <= 0 or h <= 0:
                    return 0.0

                frame1_region = frame1[y:y+h, x:x+w]
                frame2_region = frame2[y:y+h, x:x+w]
            else:
                # Use full frames
                frame1_region = frame1
                frame2_region = frame2

            # Convert to grayscale for difference calculation
            if len(frame1_region.shape) == 3:
                gray1 = cv2.cvtColor(frame1_region, cv2.COLOR_BGR2GRAY)
            else:
                gray1 = frame1_region

            if len(frame2_region.shape) == 3:
                gray2 = cv2.cvtColor(frame2_region, cv2.COLOR_BGR2GRAY)
            else:
                gray2 = frame2_region

            # Calculate absolute difference
            diff = cv2.absdiff(gray1, gray2)

            # Calculate percentage of pixels that changed significantly
            # Use threshold to ignore minor noise
            _, thresh_diff = cv2.threshold(diff, self.FRAME_DIFFERENCE_PIXEL_THRESHOLD, 255, cv2.THRESH_BINARY)

            # Count changed pixels
            changed_pixels = cv2.countNonZero(thresh_diff)
            total_pixels = gray1.size

            if total_pixels == 0:
                return 0.0

            # Calculate percentage
            difference_percentage = (changed_pixels / total_pixels) * 100.0

            return difference_percentage

        except Exception as e:
            print(f"Error calculating frame difference: {e}")
            return 0.0

    # --- Motion tracking helpers ---
    def _get_effective_crop_rect_for_frame(self, frame_number):
        """Return EFFECTIVE crop_rect in ROTATED frame coords for this frame (applies tracking follow)."""
        # Rotated base dims
        if self.rotation_angle in (90, 270):
            rot_w, rot_h = self.frame_height, self.frame_width
        else:
            rot_w, rot_h = self.frame_width, self.frame_height
        # Default full-frame
        if not self.crop_rect:
            return (0, 0, rot_w, rot_h)
        x, y, w, h = map(int, self.crop_rect)
        # Tracking follow: center crop on interpolated rotated position
        if self.tracking_enabled:
            pos = self._get_interpolated_tracking_position(frame_number)
            if pos:
                cx, cy = pos
                x = int(round(cx - w / 2))
                y = int(round(cy - h / 2))
        # Clamp in rotated space
        x = max(0, min(x, rot_w - 1))
        y = max(0, min(y, rot_h - 1))
        w = min(w, rot_w - x)
        h = min(h, rot_h - y)
        return (x, y, w, h)

    def toggle_interesting_region_selection(self):
        """Toggle region selection mode for interesting point detection"""
        # If a region is already defined and we're not currently selecting, clear the region
        if self.interesting_region is not None and not self.selecting_interesting_region:
            self.interesting_region = None
            print("Interesting point region cleared")
            self.show_feedback_message("Region cleared")
            self.display_needs_update = True
            return

        if self.selecting_interesting_region:
            # Finish region selection
            self.selecting_interesting_region = False
            if (self.region_selection_start is not None and
                    self.region_selection_current is not None):

                # Calculate region rectangle
                x1, y1 = self.region_selection_start
                x2, y2 = self.region_selection_current

                x = min(x1, x2)
                y = min(y1, y2)
                w = abs(x2 - x1)
                h = abs(y2 - y1)

                if w > 5 and h > 5:  # Minimum size threshold
                    # Get raw frame dimensions for direct coordinate mapping
                    frame_height, frame_width = self.current_display_frame.shape[:2]

                    # Calculate display scaling (how much the frame is scaled to fit on screen)
                    available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)

                    # Use a simple approach - just proportionally map screen coords to frame coords
                    # This assumes the frame is centered and scaled to fit
                    display_scale = min(self.window_width / frame_width, available_height / frame_height)
                    if display_scale > 1:
                        display_scale = 1  # Frame is smaller than window, no scaling

                    # Calculate displayed dimensions
                    display_width = int(frame_width * display_scale)
                    display_height = int(frame_height * display_scale)

                    # Calculate offset (frame is centered on screen)
                    offset_x = (self.window_width - display_width) // 2
                    offset_y = (available_height - display_height) // 2

                    # Map screen coordinates to frame coordinates
                    # Adjust for the offset and scale
                    frame_x = int((x - offset_x) / display_scale)
                    frame_y = int((y - offset_y) / display_scale)
                    frame_x2 = int((x + w - offset_x) / display_scale)
                    frame_y2 = int((y + h - offset_y) / display_scale)

                    frame_w = frame_x2 - frame_x
                    frame_h = frame_y2 - frame_y

                    # Ensure coordinates are within frame bounds
                    frame_x = max(0, min(frame_x, frame_width - 1))
                    frame_y = max(0, min(frame_y, frame_height - 1))
                    frame_x2 = max(0, min(frame_x2, frame_width - 1))
                    frame_y2 = max(0, min(frame_y2, frame_height - 1))
                    frame_w = max(1, frame_x2 - frame_x)
                    frame_h = max(1, frame_y2 - frame_y)

                    self.interesting_region = (frame_x, frame_y, frame_w, frame_h)
                    print(f"Interesting point region set: ({frame_x}, {frame_y}, {frame_w}, {frame_h})")
                    self.show_feedback_message(f"Region set: {frame_w}x{frame_h}")
                else:
                    # Region too small, clear it
                    self.interesting_region = None
                    print("Region too small, cleared")
                    self.show_feedback_message("Region cleared")

            # Reset selection state
            self.region_selection_start = None
            self.region_selection_current = None
            self.display_needs_update = True

        else:
            # Start region selection
            self.selecting_interesting_region = True
            self.region_selection_start = None
            self.region_selection_current = None
            print("Select region for interesting point detection (click and drag)")
            self.show_feedback_message("Select region (click and drag)")

    def _get_interpolated_tracking_position(self, frame_number):
        """Linear interpolation in ROTATED frame coords. Returns (rx, ry) or None."""
        # Get base position from manual tracking points
        base_pos = self._get_manual_tracking_position(frame_number)

        # Calculate offset from template matching if enabled
        template_offset = None
        if self.templates:
            if self.current_display_frame is not None:
                if self.template_matching_full_frame:
                    # Full frame mode - use the entire original frame
                    result = self.track_template(self.current_display_frame)
                    if result:
                        center_x, center_y, confidence = result
                        # print(f"DEBUG: Template match found at ({center_x}, {center_y}) with confidence {confidence:.2f}")
                        template_offset = (center_x, center_y)
                else:
                    # Cropped mode - use only the cropped region for faster template matching
                    if self.crop_rect:
                        crop_x, crop_y, crop_w, crop_h = self.crop_rect
                        # Extract only the cropped region from raw frame
                        cropped_frame = self.current_display_frame[crop_y:crop_y+crop_h, crop_x:crop_x+crop_w]
                        if cropped_frame is not None and cropped_frame.size > 0:
                            # Apply motion tracking offset to the cropped frame
                            offset_frame = self._apply_motion_tracking_offset(cropped_frame, base_pos)
                            if offset_frame is not None:
                                # Track template in cropped and offset frame (much faster!)
                                result = self.track_template(offset_frame)
                                if result:
                                    center_x, center_y, confidence = result
                                    print(f"DEBUG: Template match found at ({center_x}, {center_y}) with confidence {confidence:.2f}")

                                    # Map from cropped frame coordinates to raw frame coordinates
                                    # Add crop offset back
                                    raw_x = center_x + crop_x
                                    raw_y = center_y + crop_y
                                    template_offset = (raw_x, raw_y)
                    else:
                        # No crop - use full frame with offset
                        offset_frame = self._apply_motion_tracking_offset(self.current_display_frame, base_pos)
                        if offset_frame is not None:
                            result = self.track_template(offset_frame)
                            if result:
                                center_x, center_y, confidence = result
                                template_offset = (center_x, center_y)

        # Calculate offset from feature tracking if enabled
        feature_offset = None
        if self.feature_tracker.tracking_enabled:
            # Get the nearest frames with features for smooth interpolation
            feature_frames = sorted(self.feature_tracker.features.keys())
            if feature_frames:
                # Find the two nearest frames for interpolation
                if frame_number <= feature_frames[0]:
                    # Before first feature frame - use first frame
                    feature_offset = self._get_feature_center(feature_frames[0])
                elif frame_number >= feature_frames[-1]:
                    # After last feature frame - use last frame
                    feature_offset = self._get_feature_center(feature_frames[-1])
                else:
                    # Between two feature frames - interpolate smoothly
                    for i in range(len(feature_frames) - 1):
                        if feature_frames[i] <= frame_number <= feature_frames[i + 1]:
                            feature_offset = self._interpolate_feature_positions(
                                feature_frames[i], feature_frames[i + 1], frame_number
                            )
                            break

        # Combine tracking methods: average of all available positions
        positions = []

        # Add manual tracking position
        if base_pos:
            positions.append(base_pos)
            # print(f"DEBUG: Manual tracking: ({base_pos[0]:.1f}, {base_pos[1]:.1f})")

        # Add template matching position
        if template_offset:
            positions.append(template_offset)
            # print(f"DEBUG: Template matching: ({template_offset[0]:.1f}, {template_offset[1]:.1f})")

        # Add feature tracking position
        if feature_offset:
            positions.append(feature_offset)
            print(f"DEBUG: Feature tracking: ({feature_offset[0]:.1f}, {feature_offset[1]:.1f})")

        # Calculate average of all available positions
        if positions:
            avg_x = sum(pos[0] for pos in positions) / len(positions)
            avg_y = sum(pos[1] for pos in positions) / len(positions)
            # print(f"DEBUG: Average of {len(positions)} positions: ({avg_x:.1f}, {avg_y:.1f})")
            return (avg_x, avg_y)

        # Fall back to individual tracking methods if no base position
        if template_offset:
            return template_offset
        elif feature_offset:
            return feature_offset
        else:
            return None

    def _get_manual_tracking_position(self, frame_number):
        """Get manual tracking position for a frame"""
        if not self.tracking_points:
            return None
        frames = sorted(self.tracking_points.keys())
        if not frames:
            return None
        if frame_number in self.tracking_points and self.tracking_points[frame_number]:
            pts = self.tracking_points[frame_number]
            return (sum(p[0] for p in pts) / len(pts), sum(p[1] for p in pts) / len(pts))
        if frame_number < frames[0]:
            pts = self.tracking_points[frames[0]]
            return (sum(p[0] for p in pts) / len(pts), sum(p[1] for p in pts) / len(pts)) if pts else None
        if frame_number > frames[-1]:
            pts = self.tracking_points[frames[-1]]
            return (sum(p[0] for p in pts) / len(pts), sum(p[1] for p in pts) / len(pts)) if pts else None
        for i in range(len(frames) - 1):
            f1, f2 = frames[i], frames[i + 1]
            if f1 <= frame_number <= f2:
                pts1 = self.tracking_points.get(f1) or []
                pts2 = self.tracking_points.get(f2) or []
                if not pts1 or not pts2:
                    continue
                x1 = sum(p[0] for p in pts1) / len(pts1)
                y1 = sum(p[1] for p in pts1) / len(pts1)
                x2 = sum(p[0] for p in pts2) / len(pts2)
                y2 = sum(p[1] for p in pts2) / len(pts2)
                t = (frame_number - f1) / (f2 - f1) if f2 != f1 else 0.0
                return (x1 + t * (x2 - x1), y1 + t * (y2 - y1))
        return None

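    # Worked example (hypothetical keyframes): with tracking points averaging
    # to (0, 0) at frame 10 and (10, 10) at frame 20, a query at frame 15
    # gives t = (15 - 10) / (20 - 10) = 0.5, so the interpolated position is
    # (0 + 0.5 * 10, 0 + 0.5 * 10) = (5.0, 5.0).
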
    def _apply_motion_tracking_offset(self, frame, base_pos):
        """Apply motion tracking offset to frame for template matching"""
        if base_pos is None:
            return frame

        try:
            # Get the motion tracking offset
            offset_x, offset_y = base_pos

            # Create offset frame by shifting the content
            h, w = frame.shape[:2]
            offset_frame = np.zeros_like(frame)

            # Calculate the shift
            shift_x = int(offset_x)
            shift_y = int(offset_y)

            # Apply the offset
            if shift_x != 0 or shift_y != 0:
                # Calculate source and destination regions
                src_x1 = max(0, -shift_x)
                src_y1 = max(0, -shift_y)
                src_x2 = min(w, w - shift_x)
                src_y2 = min(h, h - shift_y)

                dst_x1 = max(0, shift_x)
                dst_y1 = max(0, shift_y)
                dst_x2 = min(w, w + shift_x)
                dst_y2 = min(h, h + shift_y)

                if src_x2 > src_x1 and src_y2 > src_y1 and dst_x2 > dst_x1 and dst_y2 > dst_y1:
                    offset_frame[dst_y1:dst_y2, dst_x1:dst_x2] = frame[src_y1:src_y2, src_x1:src_x2]
                else:
                    offset_frame = frame.copy()
            else:
                offset_frame = frame.copy()

            return offset_frame

        except Exception as e:
            print(f"Error applying motion tracking offset: {e}")
            return frame

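    # Shift sketch (hypothetical numbers): for a 100x100 frame and
    # shift_x = 5, source columns [0:95] are copied into destination columns
    # [5:100]; the 5 columns that scroll in on the left stay zero (black)
    # rather than wrapping around, which is why this is not an np.roll.
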
    def _get_template_matching_position(self, frame_number):
        """Get template matching position and confidence for a frame"""
        if not self.templates:
            return None

        if self.current_display_frame is not None:
            # Get base position for motion tracking offset
            base_pos = self._get_manual_tracking_position(frame_number)

            if self.template_matching_full_frame:
                # Full frame mode - use the entire original frame
                result = self.track_template(self.current_display_frame)
                if result:
                    center_x, center_y, confidence = result
                    return (center_x, center_y, confidence)
            else:
                # Cropped mode - use only the cropped region for faster template matching
                if self.crop_rect:
                    crop_x, crop_y, crop_w, crop_h = self.crop_rect
                    # Extract only the cropped region from raw frame
                    cropped_frame = self.current_display_frame[crop_y:crop_y+crop_h, crop_x:crop_x+crop_w]
                    if cropped_frame is not None and cropped_frame.size > 0:
                        # Apply motion tracking offset to the cropped frame
                        offset_frame = self._apply_motion_tracking_offset(cropped_frame, base_pos)
                        if offset_frame is not None:
                            # Track template in cropped and offset frame (much faster!)
                            result = self.track_template(offset_frame)
                            if result:
                                center_x, center_y, confidence = result
                                # Map from cropped frame coordinates to raw frame coordinates
                                # Add crop offset back
                                raw_x = center_x + crop_x
                                raw_y = center_y + crop_y
                                return (raw_x, raw_y, confidence)
                else:
                    # No crop - use full frame with offset
                    offset_frame = self._apply_motion_tracking_offset(self.current_display_frame, base_pos)
                    if offset_frame is not None:
                        result = self.track_template(offset_frame)
                        if result:
                            center_x, center_y, confidence = result
                            return (center_x, center_y, confidence)

        return None

    def _get_display_params(self):
        """Unified display transform parameters for current frame in rotated space."""
        eff_x, eff_y, eff_w, eff_h = self._get_effective_crop_rect_for_frame(getattr(self, 'current_frame', 0))
        new_w = int(eff_w * self.zoom_factor)
        new_h = int(eff_h * self.zoom_factor)
        cropped_due_to_zoom = (self.zoom_factor != 1.0) and (new_w > self.window_width or new_h > self.window_height)
        if cropped_due_to_zoom:
            offx_max = max(0, new_w - self.window_width)
            offy_max = max(0, new_h - self.window_height)
            offx = max(0, min(int(self.display_offset[0]), offx_max))
            offy = max(0, min(int(self.display_offset[1]), offy_max))
            visible_w = min(new_w, self.window_width)
            visible_h = min(new_h, self.window_height)
        else:
            offx = 0
            offy = 0
            visible_w = new_w
            visible_h = new_h
        available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
        scale_raw = min(self.window_width / max(1, visible_w), available_height / max(1, visible_h))
        scale = scale_raw if scale_raw < 1.0 else 1.0
        final_w = int(visible_w * scale)
        final_h = int(visible_h * scale)
        start_x = (self.window_width - final_w) // 2
        start_y = (available_height - final_h) // 2
        return {
            'eff_x': eff_x, 'eff_y': eff_y, 'eff_w': eff_w, 'eff_h': eff_h,
            'offx': offx, 'offy': offy,
            'scale': scale,
            'start_x': start_x, 'start_y': start_y,
            'visible_w': visible_w, 'visible_h': visible_h,
            'available_h': available_height
        }

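    # Scale example (hypothetical sizes): with a 1920x1080 window, the 60 px
    # timeline and a 1280x720 visible region, scale_raw =
    # min(1920 / 1280, (1080 - 60) / 720) = min(1.5, 1.4167), which is > 1.0
    # and so clamps to 1.0: the frame is centered unscaled at
    # start_x = (1920 - 1280) // 2 = 320, start_y = (1020 - 720) // 2 = 150.
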
    def _map_rotated_to_screen(self, rx, ry):
        """Map a point in ROTATED frame coords to canvas screen coords (post-crop)."""
        # Subtract crop offset in rotated space (EFFECTIVE crop at current frame)
        cx, cy, cw, ch = self._get_effective_crop_rect_for_frame(getattr(self, 'current_frame', 0))
        rx2 = rx - cx
        ry2 = ry - cy
        # Zoomed dimensions of cropped-rotated frame
        new_w = int(cw * self.zoom_factor)
        new_h = int(ch * self.zoom_factor)
        cropped_due_to_zoom = (self.zoom_factor != 1.0) and (new_w > self.window_width or new_h > self.window_height)
        if cropped_due_to_zoom:
            offx_max = max(0, new_w - self.window_width)
            offy_max = max(0, new_h - self.window_height)
            offx = max(0, min(int(self.display_offset[0]), offx_max))
            offy = max(0, min(int(self.display_offset[1]), offy_max))
        else:
            offx = 0
            offy = 0
        zx = rx2 * self.zoom_factor - offx
        zy = ry2 * self.zoom_factor - offy
        visible_w = new_w if not cropped_due_to_zoom else min(new_w, self.window_width)
        visible_h = new_h if not cropped_due_to_zoom else min(new_h, self.window_height)
        available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
        scale_raw = min(self.window_width / max(1, visible_w), available_height / max(1, visible_h))
        scale_canvas = scale_raw if scale_raw < 1.0 else 1.0
        final_w = int(visible_w * scale_canvas)
        final_h = int(visible_h * scale_canvas)
        start_x_canvas = (self.window_width - final_w) // 2
        start_y_canvas = (available_height - final_h) // 2
        sx = int(round(start_x_canvas + zx * scale_canvas))
        sy = int(round(start_y_canvas + zy * scale_canvas))
        return sx, sy

    def _map_screen_to_rotated(self, sx, sy):
        """Map a point on canvas screen coords back to ROTATED frame coords (pre-crop)."""
        # Use unified display params
        params = self._get_display_params()
        # Back to processed (zoomed+cropped) space
        zx = (sx - params['start_x']) / max(1e-6, params['scale'])
        zy = (sy - params['start_y']) / max(1e-6, params['scale'])
        zx += params['offx']
        zy += params['offy']
        # Reverse zoom
        rx = zx / max(1e-6, self.zoom_factor)
        ry = zy / max(1e-6, self.zoom_factor)
        # Unapply current EFFECTIVE crop to get PRE-crop rotated coords
        rx = rx + params['eff_x']
        ry = ry + params['eff_y']
        return int(round(rx)), int(round(ry))

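    # Round-trip note: _map_screen_to_rotated is intended as the inverse of
    # _map_rotated_to_screen up to integer rounding, so
    # _map_screen_to_rotated(*_map_rotated_to_screen(rx, ry)) should give back
    # (rx, ry) within roughly a pixel; larger drift usually means the two
    # paths computed different scale or offset values.
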
    def clear_transformation_cache(self):
        """Clear the cached transformation to force recalculation"""
        self.cached_transformed_frame = None
        self.cached_frame_number = None
        self.cached_transform_hash = None

    def _extract_features_from_region(self, screen_rect):
        """Extract features from a specific screen region"""
        x, y, w, h = screen_rect
        print(f"DEBUG: Extracting features from region ({x}, {y}, {w}, {h})")

        # Map screen coordinates to rotated frame coordinates
        rx1, ry1 = self._map_screen_to_rotated(x, y)
        rx2, ry2 = self._map_screen_to_rotated(x + w, y + h)

        # Get the region in rotated frame coordinates
        left_r = min(rx1, rx2)
        top_r = min(ry1, ry2)
        right_r = max(rx1, rx2)
        bottom_r = max(ry1, ry2)

        # Extract features from this region of the original frame
        if self.rotation_angle in (90, 270):
            # For rotated frames, we need to map back to original frame coordinates
            if self.rotation_angle == 90:
                orig_x = top_r
                orig_y = self.frame_height - right_r
                orig_w = bottom_r - top_r
                orig_h = right_r - left_r
            else:  # 270
                orig_x = self.frame_width - bottom_r
                orig_y = left_r
                orig_w = bottom_r - top_r
                orig_h = right_r - left_r
        else:
            orig_x, orig_y = left_r, top_r
            orig_w, orig_h = right_r - left_r, bottom_r - top_r

        # Extract features from this region
        if (orig_x >= 0 and orig_y >= 0 and
                orig_x + orig_w <= self.frame_width and
                orig_y + orig_h <= self.frame_height):

            if self.current_display_frame is not None:
                region_frame = self.current_display_frame[orig_y:orig_y+orig_h, orig_x:orig_x+orig_w]
                if region_frame is not None and region_frame.size > 0:
                    # Map coordinates from region to rotated frame coordinates
                    def coord_mapper(px, py):
                        # Map from region coordinates to rotated frame coordinates
                        if self.rotation_angle == 90:
                            rot_x = orig_x + py
                            rot_y = self.frame_height - (orig_y + px)
                        elif self.rotation_angle == 270:
                            rot_x = self.frame_width - (orig_y + py)
                            rot_y = orig_x + px
                        else:
                            rot_x = orig_x + px
                            rot_y = orig_y + py
                        return (int(rot_x), int(rot_y))

                    # Extract features and add them to existing features
                    success = self.feature_tracker.extract_features_from_region(region_frame, self.current_frame, coord_mapper)
                    if success:
                        count = self.feature_tracker.get_feature_count(self.current_frame)
                        self.show_feedback_message(f"Added features from selected region (total: {count})")
                    else:
                        self.show_feedback_message("Failed to extract features from region")
                else:
                    self.show_feedback_message("Region too small")
        else:
            self.show_feedback_message("Region outside frame bounds")

    def _delete_features_from_region(self, screen_rect):
        """Delete features from a specific screen region"""
        x, y, w, h = screen_rect
        print(f"DEBUG: Deleting features from region ({x}, {y}, {w}, {h})")

        if self.current_frame not in self.feature_tracker.features:
            self.show_feedback_message("No features to delete")
            return

        # Map screen coordinates to rotated frame coordinates
        rx1, ry1 = self._map_screen_to_rotated(x, y)
        rx2, ry2 = self._map_screen_to_rotated(x + w, y + h)

        # Get the region in rotated frame coordinates
        left_r = min(rx1, rx2)
        top_r = min(ry1, ry2)
        right_r = max(rx1, rx2)
        bottom_r = max(ry1, ry2)

        # Remove features within this region
        features = self.feature_tracker.features[self.current_frame]
        original_count = len(features['positions'])

        # Filter out features within the region
        filtered_positions = []
        for fx, fy in features['positions']:
            if not (left_r <= fx <= right_r and top_r <= fy <= bottom_r):
                filtered_positions.append((fx, fy))

        # Update the features
        features['positions'] = filtered_positions
        removed_count = original_count - len(filtered_positions)

        if removed_count > 0:
            self.show_feedback_message(f"Removed {removed_count} features from selected region")
            self.save_state()
        else:
            self.show_feedback_message("No features found in selected region")

    def _track_with_optical_flow(self):
        """Track features using optical flow from previous frame"""
        try:
            # Get previous frame features
            prev_frame_number = self.current_frame - 1
            if prev_frame_number not in self.feature_tracker.features:
                print(f"DEBUG: No features on previous frame {prev_frame_number} for optical flow")
                return

            prev_features = self.feature_tracker.features[prev_frame_number]
            prev_positions = np.array(prev_features['positions'], dtype=np.float32).reshape(-1, 1, 2)

            if len(prev_positions) == 0:
                print(f"DEBUG: No positions on previous frame {prev_frame_number} for optical flow")
                return

            print(f"DEBUG: Optical flow tracking from frame {prev_frame_number} to {self.current_frame}")

            # Apply transformations to get the display frames
            prev_display_frame = self.apply_crop_zoom_and_rotation(self.previous_frame_for_flow)
            curr_display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)

            if prev_display_frame is None or curr_display_frame is None:
                print("DEBUG: Could not get display frames for optical flow")
                return

            # Map previous positions to display frame coordinates
            display_prev_positions = []
            for px, py in prev_positions.reshape(-1, 2):
                # Map from rotated frame coordinates to screen coordinates
                sx, sy = self._map_rotated_to_screen(px, py)

                # Map from screen coordinates to display frame coordinates
                frame_height, frame_width = prev_display_frame.shape[:2]
                available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
                start_y = (available_height - frame_height) // 2
                start_x = (self.window_width - frame_width) // 2

                display_x = sx - start_x
                display_y = sy - start_y

                if 0 <= display_x < frame_width and 0 <= display_y < frame_height:
                    display_prev_positions.append([display_x, display_y])

            if len(display_prev_positions) == 0:
                print("DEBUG: No valid display positions for optical flow")
                return

            display_prev_positions = np.array(display_prev_positions, dtype=np.float32).reshape(-1, 1, 2)
            print(f"DEBUG: Tracking {len(display_prev_positions)} points with optical flow")

            # Track using optical flow
            new_points, good_old, status = self.feature_tracker.track_features_optical_flow(
                prev_display_frame, curr_display_frame, display_prev_positions
            )

            if new_points is not None and len(new_points) > 0:
                print(f"DEBUG: Optical flow found {len(new_points)} tracked points")

                # Map new positions back to rotated frame coordinates
                mapped_positions = []
                for point in new_points.reshape(-1, 2):
                    # Map from display frame coordinates to screen coordinates
                    frame_height, frame_width = curr_display_frame.shape[:2]
                    available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
                    start_y = (available_height - frame_height) // 2
                    start_x = (self.window_width - frame_width) // 2

                    screen_x = point[0] + start_x
                    screen_y = point[1] + start_y

                    # Map from screen coordinates to rotated frame coordinates
                    rx, ry = self._map_screen_to_rotated(screen_x, screen_y)
                    mapped_positions.append((int(rx), int(ry)))

                # Store tracked features
                self.feature_tracker.features[self.current_frame] = {
                    'keypoints': [],  # Optical flow doesn't use keypoints
                    'descriptors': np.array([]),  # Optical flow doesn't use descriptors
                    'positions': mapped_positions
                }

                print(f"Optical flow tracked {len(mapped_positions)} features to frame {self.current_frame}")
            else:
                print("DEBUG: Optical flow failed to track any points")

        except Exception as e:
            print(f"Error in optical flow tracking: {e}")

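    # Pipeline note: optical flow runs in DISPLAY space, so every point makes
    # the round trip rotated -> screen (_map_rotated_to_screen) -> display
    # (subtract start_x/start_y) before tracking, and the inverse trip
    # afterwards. Points that fall outside the visible display frame are
    # dropped on the way in, so the tracked count can shrink between frames.
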
    def _interpolate_features_between_frames(self, start_frame, end_frame):
        """Interpolate features between two frames using linear interpolation"""
        try:
            print(f"DEBUG: Starting interpolation between frame {start_frame} and {end_frame}")

            if start_frame not in self.feature_tracker.features or end_frame not in self.feature_tracker.features:
                print(f"DEBUG: Missing features on start_frame={start_frame} or end_frame={end_frame}")
                return

            start_features = self.feature_tracker.features[start_frame]['positions']
            end_features = self.feature_tracker.features[end_frame]['positions']

            print(f"DEBUG: Start frame {start_frame} has {len(start_features)} features")
            print(f"DEBUG: End frame {end_frame} has {len(end_features)} features")

            if len(start_features) != len(end_features):
                print(f"DEBUG: Feature count mismatch between frames {start_frame} and {end_frame} ({len(start_features)} vs {len(end_features)})")
                print("DEBUG: Using minimum count for interpolation")
                # Use the minimum count to avoid index errors
                min_count = min(len(start_features), len(end_features))
                start_features = start_features[:min_count]
                end_features = end_features[:min_count]

            # Interpolate for all frames between start and end
            frames_to_interpolate = []
            for frame_num in range(start_frame + 1, end_frame):
                if frame_num in self.feature_tracker.features:
                    print(f"DEBUG: Frame {frame_num} already has features, skipping")
                    continue  # Skip if already has features
                frames_to_interpolate.append(frame_num)

            print(f"DEBUG: Will interpolate {len(frames_to_interpolate)} frames: {frames_to_interpolate}")

            for frame_num in frames_to_interpolate:
                # Linear interpolation
                alpha = (frame_num - start_frame) / (end_frame - start_frame)
                interpolated_positions = []

                for i in range(len(start_features)):
                    start_x, start_y = start_features[i]
                    end_x, end_y = end_features[i]

                    interp_x = start_x + alpha * (end_x - start_x)
                    interp_y = start_y + alpha * (end_y - start_y)

                    interpolated_positions.append((int(interp_x), int(interp_y)))

                # Store interpolated features
                self.feature_tracker.features[frame_num] = {
                    'keypoints': [],
                    'descriptors': np.array([]),
                    'positions': interpolated_positions
                }

                print(f"DEBUG: Interpolated {len(interpolated_positions)} features for frame {frame_num}")

            print(f"DEBUG: Finished interpolation between frame {start_frame} and {end_frame}")

        except Exception as e:
            print(f"Error interpolating features: {e}")

    def _fill_all_gaps_with_interpolation(self):
        """Fill all gaps between existing features with linear interpolation"""
        try:
            print("=== FILLING ALL GAPS WITH INTERPOLATION ===")
            print(f"DEBUG: Total features stored: {len(self.feature_tracker.features)}")

            if not self.feature_tracker.features:
                print("DEBUG: No features to interpolate between")
                return

            # Get all frames with features, sorted
            frames_with_features = sorted(self.feature_tracker.features.keys())
            print(f"DEBUG: Frames with features: {frames_with_features}")

            if len(frames_with_features) < 2:
                print("DEBUG: Need at least 2 frames with features to interpolate")
                return

            # Fill gaps between each pair of consecutive frames with features
            for i in range(len(frames_with_features) - 1):
                start_frame = frames_with_features[i]
                end_frame = frames_with_features[i + 1]

                print(f"DEBUG: Interpolating between frame {start_frame} and {end_frame}")
                self._interpolate_features_between_frames(start_frame, end_frame)

            print(f"DEBUG: After interpolation, total features stored: {len(self.feature_tracker.features)}")
            print("=== FINISHED FILLING ALL GAPS ===")

        except Exception as e:
            print(f"Error filling all gaps: {e}")

    def _get_feature_center(self, frame_number):
        """Get the center of features for a frame (smooth, not jarring)"""
        if frame_number not in self.feature_tracker.features:
            return None

        positions = self.feature_tracker.features[frame_number]['positions']
        if not positions:
            return None

        # Mean of all feature positions (the centroid of the point set)
        center_x = sum(pos[0] for pos in positions) / len(positions)
        center_y = sum(pos[1] for pos in positions) / len(positions)

        return (center_x, center_y)

    def _interpolate_feature_positions(self, start_frame, end_frame, target_frame):
        """Smoothly interpolate between feature centers of two frames"""
        start_center = self._get_feature_center(start_frame)
        end_center = self._get_feature_center(end_frame)

        if not start_center or not end_center:
            return None

        # Linear interpolation between centers
        alpha = (target_frame - start_frame) / (end_frame - start_frame)

        interp_x = start_center[0] + alpha * (end_center[0] - start_center[0])
        interp_y = start_center[1] + alpha * (end_center[1] - start_center[1])

        return (interp_x, interp_y)

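    # Worked example (hypothetical values): with feature centers (0, 0) at
    # frame 100 and (20, 10) at frame 110, a target of frame 103 gives
    # alpha = 3 / 10 = 0.3 and an interpolated center of (6.0, 3.0).
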
    def track_template(self, frame):
        """Track the template in the current frame"""
        if not self.templates:
            return None

        # Get the template for current frame
        template_index = self.get_template_for_frame(self.current_frame)
        if template_index is None:
            return None

        start_frame, region, template_image = self.templates[template_index]

        # Use the stored template image from when it was captured
        tracking_template = template_image

        try:
            # Apply image preprocessing for better template matching
            gray_frame, gray_template = self._improve_template_matching(frame, tracking_template)

            # Single-scale template matching (faster)
            result = cv2.matchTemplate(gray_frame, gray_template, cv2.TM_CCOEFF_NORMED)
            _, max_val, _, max_loc = cv2.minMaxLoc(result)

            if max_val > 0.6:  # Higher threshold for single-scale
                template_h, template_w = gray_template.shape
                center_x = max_loc[0] + template_w // 2
                center_y = max_loc[1] + template_h // 2
                best_match = (center_x, center_y, max_val)
                best_confidence = max_val
            else:
                best_match = None
                best_confidence = 0.0

            # Adaptive thresholding based on recent match history
            if len(self.template_match_history) > 0:
                # Use average of recent matches as baseline
                recent = self.template_match_history[-self.TEMPLATE_MATCH_AVERAGE_SIZE:]
                avg_confidence = sum(recent) / len(recent)
                threshold = max(self.TEMPLATE_MATCH_MIN_THRESHOLD, avg_confidence * self.TEMPLATE_MATCH_AVERAGE_FACTOR)
            else:
                threshold = self.TEMPLATE_MATCH_DEFAULT_THRESHOLD

            # Only accept matches above adaptive threshold
            if best_confidence > threshold:
                # Store confidence for adaptive thresholding
                self.template_match_history.append(best_confidence)
                if len(self.template_match_history) > self.TEMPLATE_MATCH_HISTORY_SIZE:
                    self.template_match_history.pop(0)
                return best_match
            else:
                return None

        except Exception as e:
            print(f"Error in template tracking: {e}")
            return None

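    # Adaptive threshold example (hypothetical constant values): with a recent
    # history of [0.80, 0.70, 0.75], TEMPLATE_MATCH_AVERAGE_FACTOR = 0.9 and
    # TEMPLATE_MATCH_MIN_THRESHOLD = 0.5, the threshold becomes
    # max(0.5, 0.75 * 0.9) = 0.675, so a match at 0.62 would be rejected even
    # though it cleared the fixed 0.6 single-scale gate above.
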
    def _improve_template_matching(self, frame, template):
        """Apply image preprocessing to improve template matching"""
        try:
            # Convert to grayscale if needed
            if len(frame.shape) == 3:
                gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            else:
                gray_frame = frame

            if len(template.shape) == 3:
                gray_template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
            else:
                gray_template = template

            # Apply histogram equalization for better contrast
            gray_frame = cv2.equalizeHist(gray_frame)
            gray_template = cv2.equalizeHist(gray_template)

            # Apply Gaussian blur to reduce noise
            gray_frame = cv2.GaussianBlur(gray_frame, (3, 3), 0)
            gray_template = cv2.GaussianBlur(gray_template, (3, 3), 0)

            return gray_frame, gray_template
        except Exception as e:
            print(f"Error improving template matching: {e}")
            return frame, template

    def _set_template_from_region(self, screen_rect):
        """Set template from selected region"""
        x, y, w, h = screen_rect
        print(f"DEBUG: Setting template from region ({x}, {y}, {w}, {h})")

        if self.current_display_frame is not None:
            # Map screen coordinates to rotated frame coordinates (raw frame)
            # This is what we need for template matching during rendering
            rot_x, rot_y = self._map_screen_to_rotated(x, y)
            rot_x2, rot_y2 = self._map_screen_to_rotated(x + w, y + h)

            # Calculate region in rotated frame coordinates
            raw_x = min(rot_x, rot_x2)
            raw_y = min(rot_y, rot_y2)
            raw_w = abs(rot_x2 - rot_x)
            raw_h = abs(rot_y2 - rot_y)

            print(f"DEBUG: Mapped to raw frame coordinates ({raw_x}, {raw_y}, {raw_w}, {raw_h})")

            # Ensure region is within raw frame bounds
            if (raw_x >= 0 and raw_y >= 0 and
                    raw_x + raw_w <= self.frame_width and
                    raw_y + raw_h <= self.frame_height):

                # Extract template from raw frame
                template = self.current_display_frame[raw_y:raw_y+raw_h, raw_x:raw_x+raw_w]
                if template.size > 0:
                    # Add template to collection
                    template_id = self.add_template(template, (raw_x, raw_y, raw_w, raw_h))
                    self.show_feedback_message(f"Template {template_id} set from region ({raw_w}x{raw_h})")
                    print(f"DEBUG: Template {template_id} set with size {template.shape}")
                else:
                    self.show_feedback_message("Template region too small")
            else:
                self.show_feedback_message("Template region outside frame bounds")

    def add_template(self, template, region, start_frame=None):
        """Add a new template to the collection"""
        if start_frame is None:
            start_frame = self.current_frame

        # Add template to list with the actual template image
        new_entry = (start_frame, region, template.copy())
        self.templates.append(new_entry)

        # Sort by start_frame
        self.templates.sort(key=lambda x: x[0])

        self.show_feedback_message(f"Template added at frame {start_frame}")
        # Return the index of the entry we just added; after sorting, the new
        # template is not necessarily the last element
        return next(i for i, t in enumerate(self.templates) if t is new_entry)

    def remove_template(self, template_id):
        """Remove the template active at the current frame"""
        if not self.templates:
            return False

        # Use the existing function to find the template to remove
        template_to_remove = self.get_template_for_frame(self.current_frame)

        if template_to_remove is not None:
            removed_template = self.templates.pop(template_to_remove)
            self.show_feedback_message(f"Template removed (was at frame {removed_template[0]})")
            return True
        else:
            self.show_feedback_message("No template to remove")
            return False

    def get_template_for_frame(self, frame_number):
        """Get the index of the template active at the given frame"""
        if not self.templates:
            return None

        # Find template with start_frame > current frame
        for i, (start_frame, region, template_image) in enumerate(self.templates):
            if start_frame > frame_number:
                # Found template with start_frame > current frame
                # Use the previous one (if it exists)
                if i > 0:
                    return i - 1
                else:
                    return None
            elif start_frame == frame_number:
                # Found template with start_frame == current frame
                # Use THIS template
                return i

        # If no template found with start_frame > current frame, use the last one
        return len(self.templates) - 1 if self.templates else None

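    # Selection example (hypothetical markers): with templates starting at
    # frames [10, 50], get_template_for_frame returns None for frame 5
    # (before the first marker), 0 for frames 10-49, and 1 from frame 50
    # onwards (the last marker stays active to the end of the video).
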
    def _select_best_template_for_frame(self, frame_number):
        """Select the best template for the current frame"""
        template_index = self.get_template_for_frame(frame_number)
        return template_index is not None

    def _recreate_template_images(self):
        """Recreate template images by seeking to their capture frames"""
        if not self.templates:
            return

        current_frame_backup = self.current_frame

        for i, (start_frame, region, template_image) in enumerate(self.templates):
            if template_image is None:  # Only recreate if missing
                try:
                    # Seek to the capture frame
                    self.seek_to_frame(start_frame)

                    # Extract template from current frame
                    x, y, w, h = region
                    if (y + h <= self.current_display_frame.shape[0] and
                            x + w <= self.current_display_frame.shape[1]):
                        template_image = self.current_display_frame[y:y+h, x:x+w].copy()
                        # Update the template in the list
                        self.templates[i] = (start_frame, region, template_image)
                        print(f"DEBUG: Recreated template {i} from frame {start_frame}")
                    else:
                        print(f"DEBUG: Failed to recreate template {i} - region outside frame bounds")
                except Exception as e:
                    print(f"DEBUG: Failed to recreate template {i}: {e}")

        # Restore original frame
        self.seek_to_frame(current_frame_backup)

    def jump_to_previous_template(self):
        """Jump to the previous template marker (frame where template was created)."""
        if self.is_image_mode:
            return
        self.stop_auto_repeat_seek()
        if not self.templates:
            print("DEBUG: No template markers; prev jump ignored")
            return
        current = self.current_frame
        candidates = [start_frame for start_frame, region, template_image in self.templates if start_frame < current]
        if candidates:
            target = candidates[-1]
            print(f"DEBUG: Jump prev template from {current} -> {target}")
            self.seek_to_frame(target)
        else:
            target = self.templates[0][0]
            print(f"DEBUG: Jump prev template to first marker from {current} -> {target}")
            self.seek_to_frame(target)

    def jump_to_next_template(self):
        """Jump to the next template marker (frame where template was created)."""
        if self.is_image_mode:
            return
        self.stop_auto_repeat_seek()
        if not self.templates:
            print("DEBUG: No template markers; next jump ignored")
            return
        current = self.current_frame
        for start_frame, region, template_image in self.templates:
            if start_frame > current:
                print(f"DEBUG: Jump next template from {current} -> {start_frame}")
                self.seek_to_frame(start_frame)
                return
        target = self.templates[-1][0]
        print(f"DEBUG: Jump next template to last marker from {current} -> {target}")
        self.seek_to_frame(target)

    def apply_rotation(self, frame):
        """Apply rotation to frame"""
        if self.rotation_angle == 0:
            return frame
        elif self.rotation_angle == 90:
            return cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
        elif self.rotation_angle == 180:
            return cv2.rotate(frame, cv2.ROTATE_180)
        elif self.rotation_angle == 270:
            return cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
        return frame

    def rotate_clockwise(self):
        """Rotate video 90 degrees clockwise"""
        self.rotation_angle = (self.rotation_angle + 90) % 360
        self.clear_transformation_cache()

    def apply_brightness_contrast(self, frame):
        """Apply brightness and contrast adjustments to frame"""
        if self.brightness == 0 and self.contrast == 1.0:
            return frame

        # Convert brightness from -100/100 range to -255/255 range
        brightness_value = self.brightness * 2.55

        # Apply brightness and contrast: new_pixel = contrast * old_pixel + brightness
        adjusted = cv2.convertScaleAbs(
            frame, alpha=self.contrast, beta=brightness_value
        )
        return adjusted

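    # Mapping example: brightness is user-facing in [-100, 100] but OpenCV's
    # beta works on pixel values, so brightness = 50 becomes
    # beta = 50 * 2.55 = 127.5 and each pixel is computed by
    # cv2.convertScaleAbs as contrast * pixel + 127.5, saturated to [0, 255].
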
    def adjust_brightness(self, delta: int):
        """Adjust brightness by delta (-100 to 100)"""
        self.brightness = max(self.MIN_BRIGHTNESS, min(self.MAX_BRIGHTNESS, self.brightness + delta))
        self.clear_transformation_cache()
        self.display_needs_update = True

    def adjust_contrast(self, delta: float):
        """Adjust contrast by delta (0.1 to 3.0)"""
        self.contrast = max(self.MIN_CONTRAST, min(self.MAX_CONTRAST, self.contrast + delta))
        self.clear_transformation_cache()
        self.display_needs_update = True

    def show_progress_bar(self, text: str = "Processing..."):
        """Show progress bar with given text"""
        self.progress_bar_visible = True
        self.progress_bar_progress = 0.0
        self.progress_bar_complete = False
        self.progress_bar_complete_time = None
        self.progress_bar_text = text
        self.display_needs_update = True

    def update_progress_bar(self, progress: float, text: str = None, fps: float = None):
        """Update progress bar progress (0.0 to 1.0) and optionally text and FPS"""
        if self.progress_bar_visible:
            self.progress_bar_progress = max(0.0, min(1.0, progress))
            if text is not None:
                self.progress_bar_text = text
            if fps is not None:
                self.progress_bar_fps = fps

            # Mark as complete when reaching 100%
            if self.progress_bar_progress >= 1.0 and not self.progress_bar_complete:
                self.progress_bar_complete = True
                self.progress_bar_complete_time = time.time()

    def hide_progress_bar(self):
        """Hide progress bar"""
        self.progress_bar_visible = False
        self.progress_bar_complete = False
        self.progress_bar_complete_time = None
        self.progress_bar_fps = 0.0

    def show_feedback_message(self, message: str):
        """Show a feedback message on screen for a few seconds"""
        self.feedback_message = message
        self.feedback_message_time = time.time()
        self.display_needs_update = True

    def toggle_fullscreen(self):
        """Toggle between windowed and fullscreen mode"""
        window_title = "Image Editor" if self.is_image_mode else "Video Editor"

        if self.is_fullscreen:
            # Switch to windowed mode
            self.is_fullscreen = False
            cv2.setWindowProperty(window_title, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)
            cv2.resizeWindow(window_title, 1200, 800)
            print("Switched to windowed mode")
        else:
            # Switch to fullscreen mode
            self.is_fullscreen = True
            cv2.setWindowProperty(window_title, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
            print("Switched to fullscreen mode")

        self.display_needs_update = True

    def toggle_project_view(self):
        """Toggle between editor and project view mode"""
        if self.project_view_mode:
            # Switch back to editor mode
            self.project_view_mode = False
            if self.project_view:
                cv2.destroyWindow("Project View")
                self.project_view = None
            print("Switched to editor mode")
        else:
            # Switch to project view mode
            self.project_view_mode = True
            # Create project view for the current directory
            if self.path.is_dir():
                project_dir = self.path
            else:
                project_dir = self.path.parent
            self.project_view = ProjectView(project_dir, self)
            # Create separate window for project view
            cv2.namedWindow("Project View", cv2.WINDOW_AUTOSIZE)
            print("Switched to project view mode")

        self.display_needs_update = True

    def open_video_from_project_view(self, video_path: Path):
        """Open a video from project view in editor mode"""
        print(f"Attempting to open video: {video_path}")
        print(f"Video path exists: {video_path.exists()}")

        # Save current state before switching
        self.save_state()

        # Find the video in our video_files list
        try:
            video_index = self.video_files.index(video_path)
            self.current_video_index = video_index
            self._load_video(video_path)
            self.load_current_frame()
            # Load the saved state for this video (same logic as normal video loading)
            self.load_state()
            print(f"Opened video: {video_path.name}")
        except ValueError:
            print(f"Video not found in current session: {video_path.name}")
            # If video not in current session, reload the directory
            self.path = video_path.parent
            self.video_files = self._get_media_files_from_directory(self.path)
            if video_path in self.video_files:
                video_index = self.video_files.index(video_path)
                self.current_video_index = video_index
                self._load_video(video_path)
                self.load_current_frame()
                # Load the saved state for this video (same logic as normal video loading)
                self.load_state()
                print(f"Opened video: {video_path.name}")
            else:
                print(f"Could not find video: {video_path.name}")
                return

        # Keep project view open but switch focus to video editor
        # Don't destroy the project view window - just let the user switch between them

    def draw_feedback_message(self, frame):
        """Draw feedback message on frame if visible"""
        if not self.feedback_message or not self.feedback_message_time:
            return

        # Check if message should still be shown
        elapsed = time.time() - self.feedback_message_time
        if elapsed > self.feedback_message_duration:
            self.feedback_message = ""
            self.feedback_message_time = None
            return

        height, width = frame.shape[:2]

        # Calculate message position (center of frame)
        font = cv2.FONT_HERSHEY_SIMPLEX
        font_scale = 1.0
        thickness = 2

        # Get text size
        text_size = cv2.getTextSize(self.feedback_message, font, font_scale, thickness)[0]
        text_x = (width - text_size[0]) // 2
        text_y = (height + text_size[1]) // 2

        # Draw background rectangle
        padding = 10
        rect_x1 = text_x - padding
        rect_y1 = text_y - text_size[1] - padding
        rect_x2 = text_x + text_size[0] + padding
        rect_y2 = text_y + padding

        # Semi-transparent background
        overlay = frame.copy()
        cv2.rectangle(overlay, (rect_x1, rect_y1), (rect_x2, rect_y2), (0, 0, 0), -1)
        alpha = self.OVERLAY_ALPHA_HIGH
        cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0, frame)

        # Draw text with shadow
        cv2.putText(frame, self.feedback_message, (text_x + 2, text_y + 2), font, font_scale, (0, 0, 0), thickness + 1)
        cv2.putText(frame, self.feedback_message, (text_x, text_y), font, font_scale, (255, 255, 255), thickness)

    def draw_progress_bar(self, frame):
        """Draw progress bar on frame if visible - positioned at top with full width"""
        if not self.progress_bar_visible:
            return

        # Check if we should fade out
        if self.progress_bar_complete and self.progress_bar_complete_time:
            elapsed = time.time() - self.progress_bar_complete_time
            if elapsed > self.PROGRESS_BAR_FADE_DURATION:
                self.hide_progress_bar()
                return

            # Calculate fade alpha (1.0 at start, 0.0 at end)
            fade_alpha = max(0.0, 1.0 - (elapsed / self.PROGRESS_BAR_FADE_DURATION))
        else:
            fade_alpha = 1.0

        height, width = frame.shape[:2]

        # Calculate progress bar position (top of frame with 5% margins)
        margin_width = int(width * self.PROGRESS_BAR_MARGIN_PERCENT / 100)
        bar_width = width - (2 * margin_width)
        bar_x = margin_width
        bar_y = self.PROGRESS_BAR_TOP_MARGIN

        # Apply fade alpha to colors
        bg_color = tuple(int(c * fade_alpha) for c in self.PROGRESS_BAR_COLOR_BG)
        border_color = tuple(
            int(c * fade_alpha) for c in self.PROGRESS_BAR_COLOR_BORDER
        )

        if self.progress_bar_complete:
            fill_color = tuple(
                int(c * fade_alpha) for c in self.PROGRESS_BAR_COLOR_FILL
            )
        else:
            fill_color = tuple(
                int(c * fade_alpha) for c in self.PROGRESS_BAR_COLOR_PROGRESS
            )

        # Draw background
        cv2.rectangle(
            frame,
            (bar_x, bar_y),
            (bar_x + bar_width, bar_y + self.PROGRESS_BAR_HEIGHT),
            bg_color,
            -1,
        )

        # Draw progress fill
        fill_width = int(bar_width * self.progress_bar_progress)
        if fill_width > 0:
            cv2.rectangle(
                frame,
                (bar_x, bar_y),
                (bar_x + fill_width, bar_y + self.PROGRESS_BAR_HEIGHT),
                fill_color,
                -1,
            )

        # Draw border
        cv2.rectangle(
            frame,
            (bar_x, bar_y),
            (bar_x + bar_width, bar_y + self.PROGRESS_BAR_HEIGHT),
            border_color,
            2,
        )

        # Draw progress percentage on the left
        percentage_text = f"{self.progress_bar_progress * 100:.1f}%"
        text_color = tuple(int(255 * fade_alpha) for _ in range(3))
        cv2.putText(
            frame,
            percentage_text,
            (bar_x + 12, bar_y + 22),
            cv2.FONT_HERSHEY_SIMPLEX,
            self.FONT_SCALE_SMALL,
            (0, 0, 0),
            4,
        )
        cv2.putText(
            frame,
            percentage_text,
            (bar_x + 10, bar_y + 20),
            cv2.FONT_HERSHEY_SIMPLEX,
            self.FONT_SCALE_SMALL,
            text_color,
            2,
        )

        # Draw FPS on the right if available
        if self.progress_bar_fps > 0:
            fps_text = f"{self.progress_bar_fps:.1f} FPS"
            fps_text_size = cv2.getTextSize(fps_text, cv2.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE_SMALL, 1)[0]
            fps_x = bar_x + bar_width - fps_text_size[0] - 10
            cv2.putText(
                frame,
                fps_text,
                (fps_x + 2, bar_y + 22),
                cv2.FONT_HERSHEY_SIMPLEX,
                self.FONT_SCALE_SMALL,
                (0, 0, 0),
                4,
            )
            cv2.putText(
                frame,
                fps_text,
                (fps_x, bar_y + 20),
                cv2.FONT_HERSHEY_SIMPLEX,
                self.FONT_SCALE_SMALL,
                text_color,
                2,
            )

        # Draw main text in center
        if self.progress_bar_text:
            text_size = cv2.getTextSize(
                self.progress_bar_text, cv2.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE_SMALL, 1
            )[0]
            text_x = bar_x + (bar_width - text_size[0]) // 2
            text_y = bar_y + 20

            # Draw text shadow for better visibility
            cv2.putText(
                frame,
                self.progress_bar_text,
                (text_x + 2, text_y + 2),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (0, 0, 0),
                4,
            )
            cv2.putText(
                frame,
                self.progress_bar_text,
                (text_x, text_y),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                text_color,
                2,
            )

    def draw_timeline(self, frame):
        """Draw timeline at the bottom of the frame"""
        # Don't draw timeline for images
        if self.is_image_mode:
            return

        height, width = frame.shape[:2]

        # Timeline background area
        timeline_y = height - self.TIMELINE_HEIGHT
        cv2.rectangle(frame, (0, timeline_y), (width, height), (40, 40, 40), -1)

        # Calculate timeline bar position
        bar_y = timeline_y + (self.TIMELINE_HEIGHT - self.TIMELINE_BAR_HEIGHT) // 2
        bar_x_start = self.TIMELINE_MARGIN
        bar_x_end = width - self.TIMELINE_MARGIN
        bar_width = bar_x_end - bar_x_start

        self.timeline_rect = (bar_x_start, bar_y, bar_width, self.TIMELINE_BAR_HEIGHT)

        # Draw timeline background
        cv2.rectangle(
            frame,
            (bar_x_start, bar_y),
            (bar_x_end, bar_y + self.TIMELINE_BAR_HEIGHT),
            self.TIMELINE_COLOR_BG,
            -1,
        )
        cv2.rectangle(
            frame,
            (bar_x_start, bar_y),
            (bar_x_end, bar_y + self.TIMELINE_BAR_HEIGHT),
            self.TIMELINE_COLOR_BORDER,
            1,
        )

        # Draw progress
        if self.total_frames > 0:
            progress = self.current_frame / max(1, self.total_frames - 1)
            progress_width = int(bar_width * progress)
            if progress_width > 0:
                cv2.rectangle(
                    frame,
                    (bar_x_start, bar_y),
                    (bar_x_start + progress_width, bar_y + self.TIMELINE_BAR_HEIGHT),
                    self.TIMELINE_COLOR_PROGRESS,
                    -1,
                )

            # Draw current position handle
            handle_x = bar_x_start + progress_width
            handle_y = bar_y + self.TIMELINE_BAR_HEIGHT // 2
            cv2.circle(
                frame,
                (handle_x, handle_y),
                self.TIMELINE_HANDLE_SIZE // 2,
                self.TIMELINE_COLOR_HANDLE,
                -1,
            )
            cv2.circle(
                frame,
                (handle_x, handle_y),
                self.TIMELINE_HANDLE_SIZE // 2,
                self.TIMELINE_COLOR_BORDER,
                2,
            )

        # Draw cut points
        if self.cut_start_frame is not None:
            cut_start_progress = self.cut_start_frame / max(
                1, self.total_frames - 1
            )
            cut_start_x = bar_x_start + int(bar_width * cut_start_progress)
            cv2.line(
                frame,
                (cut_start_x, bar_y),
                (cut_start_x, bar_y + self.TIMELINE_BAR_HEIGHT),
                self.TIMELINE_COLOR_CUT_POINT,
                3,
            )
            cv2.putText(
                frame,
                "1",
                (cut_start_x - 5, bar_y - 5),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.4,
                self.TIMELINE_COLOR_CUT_POINT,
                1,
            )

        if self.cut_end_frame is not None:
            cut_end_progress = self.cut_end_frame / max(1, self.total_frames - 1)
            cut_end_x = bar_x_start + int(bar_width * cut_end_progress)
            cv2.line(
                frame,
                (cut_end_x, bar_y),
                (cut_end_x, bar_y + self.TIMELINE_BAR_HEIGHT),
                self.TIMELINE_COLOR_CUT_POINT,
                3,
            )
            cv2.putText(
                frame,
                "2",
                (cut_end_x - 5, bar_y - 5),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.4,
                self.TIMELINE_COLOR_CUT_POINT,
                1,
            )

        # Draw template markers
        for start_frame, region, template_image in self.templates:
            # Draw template start point
            start_progress = start_frame / max(1, self.total_frames - 1)
            start_x = bar_x_start + int(bar_width * start_progress)

            # Template color: green for the active marker, blue for inactive ones
            template_index = self.get_template_for_frame(self.current_frame)
            is_active = (template_index is not None and self.templates[template_index][0] == start_frame)
            template_color = (0, 255, 0) if is_active else (255, 0, 0)  # BGR: green if active, blue if inactive

            # Draw template start marker
            cv2.rectangle(
                frame,
                (start_x, bar_y + 2),
                (start_x + 4, bar_y + self.TIMELINE_BAR_HEIGHT - 2),
                template_color,
                -1,
            )

            # Draw template start frame number
            cv2.putText(
                frame,
                str(start_frame),
                (start_x + 2, bar_y + 10),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.3,
                (255, 255, 255),
                1,
            )

        # Draw frame difference threshold info
        region_status = "region" if self.interesting_region else "full frame"
        threshold_text = f"Interesting: {self.frame_difference_threshold:.0f}% (gap: {self.frame_difference_gap}, {region_status})"
        cv2.putText(
            frame,
            threshold_text,
            (bar_x_start, bar_y - 15),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.4,
            (200, 200, 200),
            1,
        )

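    # Mapping example (hypothetical sizes): on a 1000 px-wide window with
    # TIMELINE_MARGIN = 20 the bar spans 960 px, so frame 250 of a 1001-frame
    # video has progress 250 / 1000 = 0.25 and the handle lands at
    # bar_x_start + int(960 * 0.25) = 20 + 240 = 260 px from the left.
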
def display_current_frame(self):
|
|
"""Display the current frame with all overlays"""
|
|
if self.current_display_frame is None:
|
|
return
|
|
|
|
# Check if display needs update (optimization)
|
|
current_state = (
|
|
self.current_frame,
|
|
self.crop_rect,
|
|
self.zoom_factor,
|
|
self.rotation_angle,
|
|
self.brightness,
|
|
self.contrast,
|
|
self.display_offset,
|
|
self.progress_bar_visible,
|
|
self.feedback_message,
|
|
self.searching_interesting_point,
|
|
self.search_progress_text,
|
|
self.search_progress_percent
|
|
)
|
|
|
|
# Always update display when paused or when searching to ensure UI elements are visible
|
|
if not self.display_needs_update and current_state == self.last_display_state and self.is_playing and not self.searching_interesting_point:
|
|
return # Skip redraw if nothing changed and playing (but not when searching)
|
|
|
|
self.last_display_state = current_state
|
|
self.display_needs_update = False
|
|
|
|
# Apply crop, zoom, and rotation transformations for preview
|
|
display_frame = self.apply_crop_zoom_and_rotation(
|
|
self.current_display_frame
|
|
)
|
|
|
|
if display_frame is None:
|
|
return
|
|
|
|
# Resize to fit window while maintaining aspect ratio
|
|
height, width = display_frame.shape[:2]
|
|
available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
|
|
|
|
# Scale video to fit screen bounds
|
|
scale = min(self.window_width / width, available_height / height)
|
|
if scale < 1.0:
|
|
# Scale down video to fit screen
|
|
new_width = int(width * scale)
|
|
new_height = int(height * scale)
|
|
display_frame = cv2.resize(display_frame, (new_width, new_height), interpolation=cv2.INTER_LINEAR)
|
|
|
|
# Create canvas with timeline space
|
|
canvas = np.zeros((self.window_height, self.window_width, 3), dtype=np.uint8)
|
|
|
|
# Center the frame on canvas
|
|
frame_height, frame_width = display_frame.shape[:2]
|
|
|
|
start_y = (available_height - frame_height) // 2
|
|
start_x = (self.window_width - frame_width) // 2
|
|
|
|
# Ensure frame fits within canvas bounds
|
|
end_y = min(start_y + frame_height, available_height)
|
|
end_x = min(start_x + frame_width, self.window_width)
|
|
actual_frame_height = end_y - start_y
|
|
actual_frame_width = end_x - start_x
|
|
|
|
if actual_frame_height > 0 and actual_frame_width > 0:
|
|
canvas[start_y:end_y, start_x:end_x] = display_frame[:actual_frame_height, :actual_frame_width]
|
|
|
|
# Draw crop selection preview during Shift+Click+Drag
|
|
if self.crop_preview_rect:
|
|
x, y, w, h = self.crop_preview_rect
|
|
cv2.rectangle(
|
|
canvas, (int(x), int(y)), (int(x + w), int(y + h)), (0, 255, 0), 2
|
|
)
|
|
|
|
# Add info overlay
|
|
rotation_text = (
|
|
f" | Rotation: {self.rotation_angle}°" if self.rotation_angle != 0 else ""
|
|
)
|
|
brightness_text = (
|
|
f" | Brightness: {self.brightness}" if self.brightness != 0 else ""
|
|
)
|
|
contrast_text = (
|
|
f" | Contrast: {self.contrast:.1f}" if self.contrast != 1.0 else ""
|
|
)
|
|
seek_multiplier_text = (
|
|
f" | Seek: {self.seek_multiplier:.1f}x" if self.seek_multiplier != 1.0 else ""
|
|
)
|
|
motion_text = (
|
|
f" | Motion: {self.tracking_enabled}" if self.tracking_enabled else ""
|
|
)
|
|
feature_text = (
|
|
f" | Features: {self.feature_tracker.tracking_enabled}" if self.feature_tracker.tracking_enabled else ""
|
|
)
|
|
if self.feature_tracker.tracking_enabled and self.current_frame in self.feature_tracker.features:
|
|
feature_count = self.feature_tracker.get_feature_count(self.current_frame)
|
|
feature_text = f" | Features: {feature_count} pts"
|
|
if self.optical_flow_enabled:
|
|
feature_text += " (OPTICAL FLOW)"
|
|
template_text = ""
|
|
if self.templates:
|
|
mode = "Full Frame" if self.template_matching_full_frame else "Cropped"
|
|
template_text = f" | Template: {mode}"
|
|
autorepeat_text = (
|
|
f" | Loop: ON" if self.looping_between_markers else ""
|
|
)
|
|
if self.is_image_mode:
|
|
info_text = f"Image | Zoom: {self.zoom_factor:.1f}x{rotation_text}{brightness_text}{contrast_text}{motion_text}{feature_text}{template_text}"
|
|
else:
|
|
info_text = f"Frame: {self.current_frame}/{self.total_frames} | Speed: {self.playback_speed:.1f}x | Zoom: {self.zoom_factor:.1f}x{seek_multiplier_text}{rotation_text}{brightness_text}{contrast_text}{motion_text}{feature_text}{template_text}{autorepeat_text} | {'Playing' if self.is_playing else 'Paused'}"
|
|
cv2.putText(
|
|
canvas,
|
|
info_text,
|
|
(10, 30),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.7,
|
|
(255, 255, 255),
|
|
2,
|
|
)
|
|
cv2.putText(
|
|
canvas, info_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 1
|
|
)
|
|
|
|
# Add video navigation info
|
|
if len(self.video_files) > 1:
|
|
video_text = f"Video: {self.current_video_index + 1}/{len(self.video_files)} - {self.video_path.name}"
|
|
cv2.putText(
|
|
canvas,
|
|
video_text,
|
|
(10, 60),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.6,
|
|
(255, 255, 255),
|
|
2,
|
|
)
|
|
cv2.putText(
|
|
canvas,
|
|
video_text,
|
|
(10, 60),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.6,
|
|
(0, 0, 0),
|
|
1,
|
|
)
|
|
y_offset = 90
|
|
else:
|
|
y_offset = 60
|
|
|
|
# Add crop info
|
|
if self.crop_rect:
|
|
crop_text = f"Crop: {int(self.crop_rect[0])},{int(self.crop_rect[1])} {int(self.crop_rect[2])}x{int(self.crop_rect[3])}"
|
|
cv2.putText(
|
|
canvas,
|
|
crop_text,
|
|
(10, y_offset),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.6,
|
|
(255, 255, 255),
|
|
2,
|
|
)
|
|
cv2.putText(
|
|
canvas,
|
|
crop_text,
|
|
(10, y_offset),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.6,
|
|
(0, 0, 0),
|
|
1,
|
|
)
|
|
y_offset += 30
|
|
|
|
# Add cut info
|
|
if self.cut_start_frame is not None or self.cut_end_frame is not None:
|
|
cut_text = (
|
|
f"Cut: {self.cut_start_frame or '?'} - {self.cut_end_frame or '?'}"
|
|
)
|
|
cv2.putText(
|
|
canvas,
|
|
cut_text,
|
|
(10, y_offset),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.6,
|
|
(255, 255, 255),
|
|
2,
|
|
)
|
|
cv2.putText(
|
|
canvas,
|
|
cut_text,
|
|
(10, y_offset),
|
|
cv2.FONT_HERSHEY_SIMPLEX,
|
|
0.6,
|
|
(0, 0, 0),
|
|
1,
|
|
)
|
|
|
|
# Draw tracking overlays (points and interpolated cross), points stored in ROTATED space
|
|
pts = self.tracking_points.get(self.current_frame, []) if not self.is_image_mode else []
|
|
for (rx, ry) in pts:
|
|
sx, sy = self._map_rotated_to_screen(rx, ry)
|
|
cv2.circle(canvas, (sx, sy), 6, (255, 0, 0), -1)
|
|
cv2.circle(canvas, (sx, sy), 6, (255, 255, 255), 1)
|
|
|
|
# Draw feature tracking points (green circles)
|
|
if (not self.is_image_mode and
|
|
self.feature_tracker.tracking_enabled and
|
|
self.current_frame in self.feature_tracker.features):
|
|
feature_positions = self.feature_tracker.features[self.current_frame]['positions']
|
|
for (fx, fy) in feature_positions:
|
|
# Features are stored in rotated frame coordinates (like existing motion tracking)
|
|
# Use the existing coordinate transformation system
|
|
sx, sy = self._map_rotated_to_screen(fx, fy)
|
|
cv2.circle(canvas, (sx, sy), 4, (0, 255, 0), -1) # Green circles for features
|
|
cv2.circle(canvas, (sx, sy), 4, (255, 255, 255), 1)
|
|
|
|
# Draw template matching point (blue circle with confidence)
|
|
if (not self.is_image_mode and
|
|
self.templates):
|
|
# Get template matching position for current frame
|
|
template_pos = self._get_template_matching_position(self.current_frame)
|
|
if template_pos:
|
|
tx, ty, confidence = template_pos
|
|
# Map to screen coordinates
|
|
sx, sy = self._map_rotated_to_screen(tx, ty)
|
|
# Draw blue circle for template matching
|
|
cv2.circle(canvas, (sx, sy), 8, (255, 0, 255), -1) # Magenta circle for template
|
|
cv2.circle(canvas, (sx, sy), 8, (255, 255, 255), 2)
|
|
# Draw confidence text
|
|
conf_text = f"{confidence:.2f}"
|
|
cv2.putText(canvas, conf_text, (sx + 10, sy - 10), cv2.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE_SMALL, (255, 255, 255), 1)
|
|
|
|
|
|
# Draw selection rectangles for feature extraction/deletion
|
|
if self.selective_feature_extraction_rect:
|
|
x, y, w, h = self.selective_feature_extraction_rect
|
|
cv2.rectangle(canvas, (x, y), (x + w, y + h), (0, 255, 255), 2) # Yellow for extraction
|
|
|
|
if self.selective_feature_deletion_rect:
|
|
x, y, w, h = self.selective_feature_deletion_rect
|
|
cv2.rectangle(canvas, (x, y), (x + w, y + h), (0, 0, 255), 2) # Red for deletion
|
|
|
|
# Draw template selection rectangle
|
|
if self.template_selection_rect:
|
|
x, y, w, h = self.template_selection_rect
|
|
cv2.rectangle(canvas, (x, y), (x + w, y + h), (255, 0, 255), 2) # Magenta for template selection
|
|
|
|
# Draw previous and next tracking points with motion path visualization
|
|
if not self.is_image_mode and self.tracking_points:
|
|
prev_result = self._get_previous_tracking_point()
|
|
next_result = self._get_next_tracking_point()
|
|
|
|
# Draw motion path - either previous→current OR previous→next
|
|
line_to_draw = None
|
|
if prev_result and self.current_frame in self.tracking_points:
|
|
# Draw previous→current line (we're on a frame with tracking points)
|
|
line_to_draw = ("prev_current", prev_result, (self.current_frame, self.tracking_points[self.current_frame]))
|
|
elif prev_result and next_result:
|
|
# Draw previous→next line (we're between frames)
|
|
line_to_draw = ("prev_next", prev_result, next_result)
|
|
|
|
if line_to_draw:
|
|
line_type, (_, pts1), (_, pts2) = line_to_draw
|
|
|
|
# Draw lines between corresponding tracking points
|
|
for i, (px1, py1) in enumerate(pts1):
|
|
if i < len(pts2):
|
|
px2, py2 = pts2[i]
|
|
sx1, sy1 = self._map_rotated_to_screen(px1, py1)
|
|
sx2, sy2 = self._map_rotated_to_screen(px2, py2)
|
|
|
|
# Draw motion path line with arrow (thin and transparent)
|
|
overlay = canvas.copy()
|
|
cv2.line(overlay, (sx1, sy1), (sx2, sy2), (255, 255, 0), 1) # Thin yellow line
|
|
|
|
# Draw arrow head pointing from first to second point
|
|
angle = np.arctan2(sy2 - sy1, sx2 - sx1)
|
|
arrow_length = 12
|
|
arrow_angle = np.pi / 6 # 30 degrees
|
|
|
|
# Calculate arrow head points
|
|
arrow_x1 = int(sx2 - arrow_length * np.cos(angle - arrow_angle))
|
|
arrow_y1 = int(sy2 - arrow_length * np.sin(angle - arrow_angle))
|
|
arrow_x2 = int(sx2 - arrow_length * np.cos(angle + arrow_angle))
|
|
arrow_y2 = int(sy2 - arrow_length * np.sin(angle + arrow_angle))
|
|
|
|
cv2.line(overlay, (sx2, sy2), (arrow_x1, arrow_y1), (255, 255, 0), 1)
|
|
cv2.line(overlay, (sx2, sy2), (arrow_x2, arrow_y2), (255, 255, 0), 1)
|
|
cv2.addWeighted(overlay, self.OVERLAY_ALPHA_LOW, canvas, self.OVERLAY_ALPHA_HIGH, 0, canvas)

            # Previous tracking point (red) - from the most recent frame with tracking points before current
            if prev_result:
                prev_frame, prev_pts = prev_result
                for (rx, ry) in prev_pts:
                    sx, sy = self._map_rotated_to_screen(rx, ry)
                    # Create overlay for alpha blending (more transparent)
                    overlay = canvas.copy()
                    cv2.circle(overlay, (sx, sy), 5, (0, 0, 255), -1)  # Red circle
                    cv2.circle(overlay, (sx, sy), 5, (255, 255, 255), 1)  # White border
                    cv2.addWeighted(overlay, 0.4, canvas, 0.6, 0, canvas)  # More transparent

            # Next tracking point (magenta/purple) - from the next frame with tracking points after current
            if next_result:
                next_frame, next_pts = next_result
                for (rx, ry) in next_pts:
                    sx, sy = self._map_rotated_to_screen(rx, ry)
                    # Create overlay for alpha blending (more transparent)
                    overlay = canvas.copy()
                    cv2.circle(overlay, (sx, sy), 5, (255, 0, 255), -1)  # Magenta circle
                    cv2.circle(overlay, (sx, sy), 5, (255, 255, 255), 1)  # White border
                    cv2.addWeighted(overlay, 0.4, canvas, 0.6, 0, canvas)  # More transparent

        if self.tracking_enabled and not self.is_image_mode:
            interp = self._get_interpolated_tracking_position(self.current_frame)
            if interp:
                sx, sy = self._map_rotated_to_screen(interp[0], interp[1])
                cv2.line(canvas, (sx - 10, sy), (sx + 10, sy), (255, 0, 0), 2)
                cv2.line(canvas, (sx, sy - 10), (sx, sy + 10), (255, 0, 0), 2)
                # Draw a faint outline of the effective crop to confirm follow
                eff_x, eff_y, eff_w, eff_h = self._get_effective_crop_rect_for_frame(self.current_frame)
                # Map rotated crop corners to screen for debug outline
                tlx, tly = self._map_rotated_to_screen(eff_x, eff_y)
                brx, bry = self._map_rotated_to_screen(eff_x + eff_w, eff_y + eff_h)
                cv2.rectangle(canvas, (tlx, tly), (brx, bry), (255, 0, 0), 1)

        # Draw timeline
        self.draw_timeline(canvas)

        # Draw progress bar (if visible)
        self.draw_progress_bar(canvas)

        # Draw feedback message (if visible)
        self.draw_feedback_message(canvas)

        # Draw search progress (if searching for interesting point)
        if self.searching_interesting_point and self.search_progress_text:
            # Draw search progress overlay
            height, width = canvas.shape[:2]

            # Background for search progress
            text_size = cv2.getTextSize(self.search_progress_text, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)[0]
            padding = 10
            bg_x = (width - text_size[0]) // 2 - padding
            bg_y = height // 2 - 50
            bg_w = text_size[0] + 2 * padding
            bg_h = 30

            # Semi-transparent background
            overlay = canvas.copy()
            cv2.rectangle(overlay, (bg_x, bg_y), (bg_x + bg_w, bg_y + bg_h), (0, 0, 0), -1)
            cv2.addWeighted(overlay, self.OVERLAY_ALPHA_HIGH, canvas, self.OVERLAY_ALPHA_LOW, 0, canvas)

            # Border
            cv2.rectangle(canvas, (bg_x, bg_y), (bg_x + bg_w, bg_y + bg_h), (255, 255, 0), 2)

            # Text
            cv2.putText(canvas, self.search_progress_text, (bg_x + padding, bg_y + 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)

            # Progress bar
            bar_width = 200
            bar_height = 6
            bar_x = (width - bar_width) // 2
            bar_y = bg_y + bg_h + 5

            # Background
            cv2.rectangle(canvas, (bar_x, bar_y), (bar_x + bar_width, bar_y + bar_height), (100, 100, 100), -1)

            # Progress fill
            fill_width = int(bar_width * (self.search_progress_percent / 100.0))
            if fill_width > 0:
                cv2.rectangle(canvas, (bar_x, bar_y), (bar_x + fill_width, bar_y + bar_height), (0, 255, 0), -1)

        # Draw interesting point region selection
        if self.interesting_region is not None:
            # Draw the selected region on screen
            x, y, w, h = self.interesting_region

            # Convert frame coordinates to screen coordinates
            sx1, sy1 = self._map_rotated_to_screen(x, y)
            sx2, sy2 = self._map_rotated_to_screen(x + w, y + h)

            # Draw region outline (cyan color for interesting point region)
            cv2.rectangle(canvas, (sx1, sy1), (sx2, sy2), (0, 255, 255), 2)

            # Draw corner indicators
            corner_size = 8
            corners = [
                (sx1, sy1), (sx2, sy1), (sx1, sy2), (sx2, sy2)
            ]
            for cx, cy in corners:
                cv2.line(canvas, (cx - corner_size // 2, cy), (cx + corner_size // 2, cy), (0, 255, 255), 2)
                cv2.line(canvas, (cx, cy - corner_size // 2), (cx, cy + corner_size // 2), (0, 255, 255), 2)

        # Draw region selection in progress
        if self.selecting_interesting_region and self.region_selection_start and self.region_selection_current:
            x1, y1 = self.region_selection_start
            x2, y2 = self.region_selection_current

            # Calculate selection rectangle
            sel_x = min(x1, x2)
            sel_y = min(y1, y2)
            sel_w = abs(x2 - x1)
            sel_h = abs(y2 - y1)

            # Draw selection rectangle (cyan)
            cv2.rectangle(canvas, (sel_x, sel_y), (sel_x + sel_w, sel_y + sel_h), (0, 255, 255), 2)

            # Draw selection info
            info_text = f"Region: {sel_w}x{sel_h}"
            cv2.putText(canvas, info_text, (sel_x, sel_y - 5), cv2.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE_SMALL, (0, 255, 255), 1)

        window_title = "Image Editor" if self.is_image_mode else "Video Editor"
        cv2.imshow(window_title, canvas)

    def mouse_callback(self, event, x, y, flags, _):
        """Handle mouse events"""
        # Handle timeline interaction (not for images)
        if self.timeline_rect and not self.is_image_mode:
            bar_x_start, bar_y, bar_width, bar_height = self.timeline_rect
            bar_x_end = bar_x_start + bar_width

            if bar_y <= y <= bar_y + bar_height + 10:
                if event == cv2.EVENT_LBUTTONDOWN:
                    if bar_x_start <= x <= bar_x_end:
                        self.mouse_dragging = True
                        self.seek_to_timeline_position(x, bar_x_start, bar_width)
                elif event == cv2.EVENT_MOUSEMOVE and self.mouse_dragging:
                    if bar_x_start <= x <= bar_x_end:
                        self.seek_to_timeline_position(x, bar_x_start, bar_width)
                elif event == cv2.EVENT_LBUTTONUP:
                    self.mouse_dragging = False
                return

        # Handle crop border dragging (only when Shift and Ctrl are NOT pressed)
        if not (flags & cv2.EVENT_FLAG_SHIFTKEY) and not (flags & cv2.EVENT_FLAG_CTRLKEY) and self.crop_rect:
            # Get effective crop in rotated coords and map to screen
            eff_x, eff_y, eff_w, eff_h = self._get_effective_crop_rect_for_frame(getattr(self, 'current_frame', 0))
            sx1, sy1 = self._map_rotated_to_screen(eff_x, eff_y)
            sx2, sy2 = self._map_rotated_to_screen(eff_x + eff_w, eff_y + eff_h)

            # Check if cursor is inside crop area
            inside_crop = sx1 <= x <= sx2 and sy1 <= y <= sy2

            # Determine which side we're on if outside (for better contraction logic)
            outside_side = None
            if not inside_crop:
                if x < sx1:
                    outside_side = 'left'
                elif x > sx2:
                    outside_side = 'right'
                elif y < sy1:
                    outside_side = 'top'
                elif y > sy2:
                    outside_side = 'bottom'

            if event == cv2.EVENT_LBUTTONDOWN:
                # Start dragging - record position and whether we're inside/outside
                self.crop_border_dragging = True
                self.crop_border_drag_start_pos = (x, y)
                self.crop_border_drag_start_rect = (eff_x, eff_y, eff_w, eff_h)
                self.crop_border_drag_inside = inside_crop
                self.crop_border_drag_outside_side = outside_side
            elif event == cv2.EVENT_MOUSEMOVE and self.crop_border_dragging:
                if self.crop_border_drag_start_pos and self.crop_border_drag_start_rect and self.crop_border_drag_inside is not None:
                    # Convert mouse movement from screen to rotated coords
                    start_sx, start_sy = self.crop_border_drag_start_pos
                    start_rx, start_ry = self._map_screen_to_rotated(start_sx, start_sy)
                    curr_rx, curr_ry = self._map_screen_to_rotated(x, y)

                    dx_r = curr_rx - start_rx
                    dy_r = curr_ry - start_ry

                    # Check minimum drag distance
                    drag_distance = (dx_r ** 2 + dy_r ** 2) ** 0.5
                    if drag_distance < self.CROP_DRAG_MIN_DISTANCE:
                        return
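
                    # CROP_DRAG_MIN_DISTANCE acts as a dead zone rather than an offset:
                    # once the cursor has moved at least that far (in rotated-frame
                    # units) from the press point, the full delta - dead-zone travel
                    # included - is applied, so the border catches up to the cursor.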

                    # Determine primary direction (horizontal vs vertical)
                    abs_dx = abs(dx_r)
                    abs_dy = abs(dy_r)

                    # Adjust the appropriate border based on direction and inside/outside
                    new_x, new_y, new_w, new_h = self.crop_border_drag_start_rect

                    # Get rotated frame dimensions
                    if self.rotation_angle in (90, 270):
                        rot_w, rot_h = self.frame_height, self.frame_width
                    else:
                        rot_w, rot_h = self.frame_width, self.frame_height

                    # Determine which border to adjust based on movement direction
                    if abs_dx > abs_dy:
                        # Horizontal movement
                        if self.crop_border_drag_inside:
                            # Inside crop: drag left -> adjust left border, drag right -> adjust right border
                            if dx_r < 0:
                                # Dragging left -> move left border left (expand left)
                                new_x = max(0, new_x + dx_r)
                                new_w = new_w - dx_r
                                if new_w < self.CROP_MIN_SIZE:
                                    new_w = self.CROP_MIN_SIZE
                                    new_x = self.crop_border_drag_start_rect[0] + self.crop_border_drag_start_rect[2] - self.CROP_MIN_SIZE
                            else:
                                # Dragging right -> move right border right (expand right)
                                new_w = max(self.CROP_MIN_SIZE, new_w + dx_r)
                                if new_x + new_w > rot_w:
                                    new_w = rot_w - new_x
                        else:
                            # Outside crop: always contract based on drag direction
                            # Drag left -> contract right (move right border left)
                            # Drag right -> contract left (move left border right)
                            if dx_r < 0:
                                # Dragging left -> move right border left (contract right);
                                # max() already enforces the minimum width here
                                new_w = max(self.CROP_MIN_SIZE, new_w + dx_r)
                            else:
                                # Dragging right -> move left border right (contract left)
                                new_x = max(0, new_x + dx_r)
                                new_w = new_w - dx_r
                                if new_w < self.CROP_MIN_SIZE:
                                    new_w = self.CROP_MIN_SIZE
                                    new_x = self.crop_border_drag_start_rect[0] + self.crop_border_drag_start_rect[2] - self.CROP_MIN_SIZE
                    else:
                        # Vertical movement
                        if self.crop_border_drag_inside:
                            # Inside crop: drag up -> adjust top border, drag down -> adjust bottom border
                            if dy_r < 0:
                                # Dragging up -> move top border up (expand up)
                                new_y = max(0, new_y + dy_r)
                                new_h = new_h - dy_r
                                if new_h < self.CROP_MIN_SIZE:
                                    new_h = self.CROP_MIN_SIZE
                                    new_y = self.crop_border_drag_start_rect[1] + self.crop_border_drag_start_rect[3] - self.CROP_MIN_SIZE
                            else:
                                # Dragging down -> move bottom border down (expand down)
                                new_h = max(self.CROP_MIN_SIZE, new_h + dy_r)
                                if new_y + new_h > rot_h:
                                    new_h = rot_h - new_y
                        else:
                            # Outside crop: always contract based on drag direction
                            # Drag up -> contract bottom (move bottom border up)
                            # Drag down -> contract top (move top border down)
                            if dy_r < 0:
                                # Dragging up -> move bottom border up (contract bottom);
                                # max() already enforces the minimum height here
                                new_h = max(self.CROP_MIN_SIZE, new_h + dy_r)
                            else:
                                # Dragging down -> move top border down (contract top)
                                new_y = max(0, new_y + dy_r)
                                new_h = new_h - dy_r
                                if new_h < self.CROP_MIN_SIZE:
                                    new_h = self.CROP_MIN_SIZE
                                    new_y = self.crop_border_drag_start_rect[1] + self.crop_border_drag_start_rect[3] - self.CROP_MIN_SIZE

                    # Convert back from rotated to original frame coords
                    self._set_crop_from_rotated_rect((new_x, new_y, new_w, new_h))
                    self.clear_transformation_cache()
                    self.display_current_frame()
            elif event == cv2.EVENT_LBUTTONUP and self.crop_border_dragging:
                self.crop_border_dragging = False
                self.crop_border_drag_start_pos = None
                self.crop_border_drag_start_rect = None
                self.crop_border_drag_inside = None
                self.crop_border_drag_outside_side = None
                self.save_state()

        # Handle crop selection (Shift + click and drag)
        if flags & cv2.EVENT_FLAG_SHIFTKEY:
            if event == cv2.EVENT_LBUTTONDOWN:
                print(f"DEBUG: Crop start at screen=({x},{y}) frame={getattr(self, 'current_frame', -1)}")
                self.crop_selecting = True
                self.crop_start_point = (x, y)
                self.crop_preview_rect = None
            elif event == cv2.EVENT_MOUSEMOVE and self.crop_selecting:
                if self.crop_start_point:
                    start_x, start_y = self.crop_start_point
                    width = abs(x - start_x)
                    height = abs(y - start_y)
                    crop_x = min(start_x, x)
                    crop_y = min(start_y, y)
                    self.crop_preview_rect = (crop_x, crop_y, width, height)
            elif event == cv2.EVENT_LBUTTONUP and self.crop_selecting:
                if self.crop_start_point and self.crop_preview_rect:
                    print(f"DEBUG: Crop end screen_rect={self.crop_preview_rect}")
                    # Convert screen coordinates to video coordinates
                    self.set_crop_from_screen_coords(self.crop_preview_rect)
                self.crop_selecting = False
                self.crop_start_point = None
                self.crop_preview_rect = None

        # Handle zoom center (Ctrl + click)
        if flags & cv2.EVENT_FLAG_CTRLKEY and event == cv2.EVENT_LBUTTONDOWN:
            self.zoom_center = (x, y)

        # Handle shift+right-click for placing tracking point at previous tracking point position
        if event == cv2.EVENT_RBUTTONDOWN and (flags & cv2.EVENT_FLAG_SHIFTKEY) and not (flags & cv2.EVENT_FLAG_CTRLKEY):
            if not self.is_image_mode:
                # Get previous tracking point position
                prev_result = self._get_previous_tracking_point()
                if prev_result:
                    prev_frame, prev_points = prev_result
                    if prev_points:
                        # Use the first tracking point from the previous frame
                        prev_x, prev_y = prev_points[0]

                        # Add tracking point at same position on current frame
                        self.tracking_points.setdefault(self.current_frame, []).append((int(prev_x), int(prev_y)))
                        print(f"DEBUG: Added tracking point at previous position ({prev_x}, {prev_y}) on frame {self.current_frame}")
                        self.show_feedback_message("Tracking point added at previous position")

                        self.clear_transformation_cache()
                        self.save_state()
                        self.display_current_frame()
                    else:
                        self.show_feedback_message("No previous tracking points found")
                else:
                    self.show_feedback_message("No previous tracking points found")

        # Handle Ctrl+Right-click+drag for selective feature deletion
        if event == cv2.EVENT_RBUTTONDOWN and (flags & cv2.EVENT_FLAG_CTRLKEY):
            if not self.is_image_mode and self.feature_tracker.tracking_enabled:
                self.selective_feature_deletion_start = (x, y)
                self.selective_feature_deletion_rect = None
                print(f"DEBUG: Started selective feature deletion at ({x}, {y})")

        # Update the deletion rectangle while dragging (Ctrl held)
        if event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_CTRLKEY) and self.selective_feature_deletion_start:
            if not self.is_image_mode:
                start_x, start_y = self.selective_feature_deletion_start
                self.selective_feature_deletion_rect = (min(start_x, x), min(start_y, y), abs(x - start_x), abs(y - start_y))

        # Handle Ctrl+Right-click release for selective feature deletion
        if event == cv2.EVENT_RBUTTONUP and (flags & cv2.EVENT_FLAG_CTRLKEY) and self.selective_feature_deletion_start:
            if not self.is_image_mode and self.feature_tracker.tracking_enabled and self.selective_feature_deletion_rect:
                self._delete_features_from_region(self.selective_feature_deletion_rect)
            self.selective_feature_deletion_start = None
            self.selective_feature_deletion_rect = None

        # Handle Ctrl+Left-click+drag for template region selection
        if event == cv2.EVENT_LBUTTONDOWN and (flags & cv2.EVENT_FLAG_CTRLKEY):
            if not self.is_image_mode:
                self.template_selection_start = (x, y)
                self.template_selection_rect = None
                print(f"DEBUG: Started template selection at ({x}, {y})")

        # Update the template rectangle while dragging (Ctrl held)
        if event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_CTRLKEY) and self.template_selection_start:
            if not self.is_image_mode:
                start_x, start_y = self.template_selection_start
                self.template_selection_rect = (min(start_x, x), min(start_y, y), abs(x - start_x), abs(y - start_y))

        # Handle Ctrl+Left-click release for template region selection
        if event == cv2.EVENT_LBUTTONUP and (flags & cv2.EVENT_FLAG_CTRLKEY) and self.template_selection_start:
            if not self.is_image_mode and self.template_selection_rect:
                self._set_template_from_region(self.template_selection_rect)
            self.template_selection_start = None
            self.template_selection_rect = None

        # Handle interesting point region selection
        if self.selecting_interesting_region:
            if event == cv2.EVENT_LBUTTONDOWN:
                self.region_selection_start = (x, y)
                self.region_selection_current = (x, y)
                self.display_needs_update = True
            elif event == cv2.EVENT_MOUSEMOVE and self.region_selection_start:
                self.region_selection_current = (x, y)
                self.display_needs_update = True
            elif event == cv2.EVENT_LBUTTONUP and self.region_selection_start:
                self.region_selection_current = (x, y)
                self.toggle_interesting_region_selection()
                self.display_needs_update = True

        # Handle right-click for selective feature extraction when mode is active
        if event == cv2.EVENT_RBUTTONDOWN and not (flags & (cv2.EVENT_FLAG_CTRLKEY | cv2.EVENT_FLAG_SHIFTKEY)):
            if not self.is_image_mode and hasattr(self, 'selective_feature_extraction_mode') and self.selective_feature_extraction_mode:
                # Start selective feature extraction
                self.selective_feature_extraction_start = (x, y)
                self.selective_feature_extraction_rect = None
                print(f"DEBUG: Started selective feature extraction at ({x}, {y})")
                return  # Don't process regular right-click functionality

        # Handle mouse move for selective feature extraction
        if event == cv2.EVENT_MOUSEMOVE and hasattr(self, 'selective_feature_extraction_start') and self.selective_feature_extraction_start:
            if not self.is_image_mode:
                start_x, start_y = self.selective_feature_extraction_start
                self.selective_feature_extraction_rect = (min(start_x, x), min(start_y, y), abs(x - start_x), abs(y - start_y))
                self.display_needs_update = True

        # Handle mouse release for selective feature extraction
        if event == cv2.EVENT_RBUTTONUP and hasattr(self, 'selective_feature_extraction_start') and self.selective_feature_extraction_start:
            if not self.is_image_mode and self.selective_feature_extraction_rect:
                self._extract_features_from_region(self.selective_feature_extraction_rect)
            self.selective_feature_extraction_start = None
            self.selective_feature_extraction_rect = None
            self.display_needs_update = True

        # Handle right-click for tracking points (no modifiers)
        if event == cv2.EVENT_RBUTTONDOWN and not (flags & (cv2.EVENT_FLAG_CTRLKEY | cv2.EVENT_FLAG_SHIFTKEY)):
            if not self.is_image_mode:
                # First check for template removal (like motion tracking points)
                if self.templates:
                    screen_x, screen_y = x, y
                    raw_x, raw_y = self._map_screen_to_rotated(screen_x, screen_y)

                    for i, (start_frame, region, template_image) in enumerate(self.templates):
                        tx, ty, tw, th = region
                        center_x = tx + tw // 2
                        center_y = ty + th // 2

                        # Check if click is within 40px of template center
                        distance = ((raw_x - center_x) ** 2 + (raw_y - center_y) ** 2) ** 0.5
                        if distance <= 40:
                            self.remove_template(i)  # Pass index instead of ID
                            self.save_state()
                            return

                # Store tracking points in ROTATED frame coordinates (pre-crop)
                rx, ry = self._map_screen_to_rotated(x, y)
                threshold = self.TRACKING_POINT_THRESHOLD
                removed = False

                # First check for removal of existing points on current frame
                if self.current_frame in self.tracking_points:
                    pts_screen = []
                    for idx, (px, py) in enumerate(self.tracking_points[self.current_frame]):
                        sxp, syp = self._map_rotated_to_screen(px, py)
                        pts_screen.append((idx, sxp, syp))
                    for idx, sxp, syp in pts_screen:
                        if (sxp - x) ** 2 + (syp - y) ** 2 <= threshold ** 2:
                            del self.tracking_points[self.current_frame][idx]
                            if not self.tracking_points[self.current_frame]:
                                del self.tracking_points[self.current_frame]
                            # self.show_feedback_message("Tracking point removed")
                            removed = True
                            break

                # If not removed, check for snapping to nearby points or lines from other frames
                if not removed:
                    snapped = False
                    best_snap_distance = float('inf')
                    best_snap_point = None

                    # Check all tracking points from all frames for point snapping
                    for _, points in self.tracking_points.items():
                        for (px, py) in points:
                            sxp, syp = self._map_rotated_to_screen(px, py)
                            distance = ((sxp - x) ** 2 + (syp - y) ** 2) ** 0.5
                            if distance <= threshold and distance < best_snap_distance:
                                best_snap_distance = distance
                                best_snap_point = (int(px), int(py))

                    # Check for line snapping - either previous→next OR previous→current
                    prev_result = self._get_previous_tracking_point()
                    next_result = self._get_next_tracking_point()

                    print(f"DEBUG: Line snapping - prev_result: {prev_result}, next_result: {next_result}")

                    # Determine which line to check: previous→current OR previous→next
                    line_to_check = None
                    if prev_result and self.current_frame in self.tracking_points:
                        # Check previous→current line (we're on a frame with tracking points)
                        line_to_check = ("prev_current", prev_result, (self.current_frame, self.tracking_points[self.current_frame]))
                        print("DEBUG: Checking prev->current line")
                    elif prev_result and next_result:
                        # Check previous→next line (we're between frames)
                        line_to_check = ("prev_next", prev_result, next_result)
                        print("DEBUG: Checking prev->next line")

                    if line_to_check:
                        line_type, (_, pts1), (_, pts2) = line_to_check

                        # Check each corresponding pair of points
                        for j in range(min(len(pts1), len(pts2))):
                            px1, py1 = pts1[j]
                            px2, py2 = pts2[j]

                            # Convert to screen coordinates
                            sx1, sy1 = self._map_rotated_to_screen(px1, py1)
                            sx2, sy2 = self._map_rotated_to_screen(px2, py2)

                            # Calculate distance to infinite line and foot of perpendicular
                            line_distance, (foot_x, foot_y) = self._point_to_line_distance_and_foot(x, y, sx1, sy1, sx2, sy2)

                            print(f"DEBUG: {line_type} Line {j}: ({sx1},{sy1}) to ({sx2},{sy2}), distance to click ({x},{y}) = {line_distance:.2f}, foot = ({foot_x:.1f}, {foot_y:.1f})")

                            if line_distance <= threshold and line_distance < best_snap_distance:
                                print(f"DEBUG: Line snap found! Distance {line_distance:.2f} <= threshold {threshold}")

                                # Convert foot of perpendicular back to rotated coordinates (no clamping - infinite line)
                                closest_rx, closest_ry = self._map_screen_to_rotated(int(foot_x), int(foot_y))

                                best_snap_distance = line_distance
                                best_snap_point = (int(closest_rx), int(closest_ry))
                                print(f"DEBUG: Best line snap point: ({closest_rx}, {closest_ry})")
                    else:
                        print("DEBUG: No line found for snapping")

                    # Apply the best snap if found
                    if best_snap_point:
                        print(f"DEBUG: Final best_snap_point: {best_snap_point} (distance: {best_snap_distance:.2f})")
                        self.tracking_points.setdefault(self.current_frame, []).append(best_snap_point)
                        snapped = True

                    # If no snapping, add new point at clicked location
                    if not snapped:
                        print(f"DEBUG: No snap found, adding new point at: ({rx}, {ry})")
                        print(f"DEBUG: Click was at screen coords: ({x}, {y})")
                        print(f"DEBUG: Converted to rotated coords: ({rx}, {ry})")
                        # Verify the conversion
                        verify_sx, verify_sy = self._map_rotated_to_screen(rx, ry)
                        print(f"DEBUG: Verification - rotated ({rx}, {ry}) -> screen ({verify_sx}, {verify_sy})")
                        self.tracking_points.setdefault(self.current_frame, []).append((int(rx), int(ry)))
                        # self.show_feedback_message("Tracking point added")

                self.clear_transformation_cache()
                self.save_state()

                # Force immediate display update to recalculate previous/next points and arrows
                self.display_current_frame()

        # Handle scroll wheel: Ctrl+scroll -> zoom; Shift+scroll -> expand/contract crop; plain scroll -> seek ±1 frame
        if event == cv2.EVENT_MOUSEWHEEL:
            if flags & cv2.EVENT_FLAG_CTRLKEY:
                if flags > 0:  # Scroll up -> zoom in
                    self.zoom_factor = min(self.MAX_ZOOM, self.zoom_factor + self.ZOOM_INCREMENT)
                else:  # Scroll down -> zoom out
                    self.zoom_factor = max(self.MIN_ZOOM, self.zoom_factor - self.ZOOM_INCREMENT)
                self.clear_transformation_cache()
            elif flags & cv2.EVENT_FLAG_SHIFTKEY:
                # Shift+scroll -> expand/contract crop uniformly
                if flags > 0:  # Scroll up -> expand
                    self.adjust_crop_size_uniform(expand=True)
                else:  # Scroll down -> contract
                    self.adjust_crop_size_uniform(expand=False)
            else:
                if not self.is_image_mode:
                    direction = 1 if flags > 0 else -1
                    self.seek_video_exact_frame(direction)
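
        # Note on the `flags > 0` tests above: for cv2.EVENT_MOUSEWHEEL the wheel
        # delta is packed into the high-order bits of `flags` (cv2.getMouseWheelDelta
        # recovers it), so the sign of `flags` tracks scroll direction even while
        # modifier-key bits are set in the low bits. This relies on Windows-style
        # positive/negative wheel deltas.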

    def _set_crop_from_rotated_rect(self, rotated_rect):
        """Set crop_rect from a rectangle in rotated frame coordinates"""
        rx, ry, rw, rh = rotated_rect

        # Convert from rotated coords to original frame coords
        # Rotation is applied clockwise: 90° means ROTATE_90_CLOCKWISE
        if self.rotation_angle == 0:
            self.crop_rect = (rx, ry, rw, rh)
        elif self.rotation_angle == 90:
            # 90° clockwise: (rx, ry, rw, rh) rotated -> (ry, frame_height - rx - rw, rh, rw) original
            self.crop_rect = (ry, self.frame_height - rx - rw, rh, rw)
        elif self.rotation_angle == 180:
            # 180°: (rx, ry, rw, rh) rotated -> (frame_width - rx - rw, frame_height - ry - rh, rw, rh) original
            self.crop_rect = (self.frame_width - rx - rw, self.frame_height - ry - rh, rw, rh)
        elif self.rotation_angle == 270:
            # 270° (90° counterclockwise): (rx, ry, rw, rh) rotated -> (frame_width - ry - rh, rx, rh, rw) original
            self.crop_rect = (self.frame_width - ry - rh, rx, rh, rw)

        # Clamp to frame bounds
        x, y, w, h = self.crop_rect
        x = max(0, min(x, self.frame_width - 1))
        y = max(0, min(y, self.frame_height - 1))
        w = min(w, self.frame_width - x)
        h = min(h, self.frame_height - y)
        self.crop_rect = (x, y, w, h)
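
        # Sanity check for the 90° branch: with a 100x200 original frame
        # (frame_width=100, frame_height=200) the rotated frame is 200x100, and
        # the full rotated rect (0, 0, 200, 100) maps back to
        # (0, 200 - 0 - 200, 100, 200) = (0, 0, 100, 200) - the full original frame.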

    def set_crop_from_screen_coords(self, screen_rect):
        """Convert screen coordinates to video frame coordinates and set crop"""
        x, y, w, h = screen_rect

        if self.current_display_frame is None:
            return

        # Debug context for crop mapping
        print("DEBUG: set_crop_from_screen_coords")
        print(f"DEBUG: input screen_rect=({x},{y},{w},{h})")
        print(f"DEBUG: state rotation={self.rotation_angle} zoom={self.zoom_factor} window=({self.window_width},{self.window_height})")
        print(f"DEBUG: display_offset={self.display_offset} is_image_mode={self.is_image_mode}")
        print(f"DEBUG: current crop_rect={self.crop_rect}")
        eff = self._get_effective_crop_rect_for_frame(getattr(self, 'current_frame', 0)) if self.crop_rect else None
        print(f"DEBUG: effective_crop_for_frame={eff}")

        # Map both corners from screen to ROTATED space, then derive crop in rotated coords
        x2 = x + w
        y2 = y + h
        rx1, ry1 = self._map_screen_to_rotated(x, y)
        rx2, ry2 = self._map_screen_to_rotated(x2, y2)
        print(f"DEBUG: mapped ROTATED corners -> ({rx1},{ry1}) and ({rx2},{ry2})")
        left_r = min(rx1, rx2)
        top_r = min(ry1, ry2)
        right_r = max(rx1, rx2)
        bottom_r = max(ry1, ry2)
        crop_x = left_r
        crop_y = top_r
        crop_w = max(self.CROP_MIN_SIZE, right_r - left_r)
        crop_h = max(self.CROP_MIN_SIZE, bottom_r - top_r)

        # Clamp to rotated frame bounds
        if self.rotation_angle in (90, 270):
            rot_w, rot_h = self.frame_height, self.frame_width
        else:
            rot_w, rot_h = self.frame_width, self.frame_height
        crop_x = max(0, min(crop_x, rot_w - 1))
        crop_y = max(0, min(crop_y, rot_h - 1))
        crop_w = min(crop_w, rot_w - crop_x)
        crop_h = min(crop_h, rot_h - crop_y)

        print(f"DEBUG: final ROTATED_rect=({crop_x},{crop_y},{crop_w},{crop_h}) rotated_size=({rot_w},{rot_h})")

        # Snap to full rotated frame if selection covers it
        if crop_w >= int(0.9 * rot_w) and crop_h >= int(0.9 * rot_h):
            if self.crop_rect:
                self.crop_history.append(self.crop_rect)
            self.crop_rect = None
            self.clear_transformation_cache()
            self.save_state()
            print("DEBUG: selection ~full frame -> clearing crop (use full frame)")
            return

        if crop_w > self.CROP_MIN_SIZE and crop_h > self.CROP_MIN_SIZE:
            if self.crop_rect:
                self.crop_history.append(self.crop_rect)
            # Store crop in ROTATED frame coordinates
            self.crop_rect = (crop_x, crop_y, crop_w, crop_h)
            self.clear_transformation_cache()
            self.save_state()
            print(f"DEBUG: crop_rect (ROTATED space) set -> {self.crop_rect}")
            # Disable motion tracking upon explicit crop set to avoid unintended offsets
            if self.tracking_enabled:
                self.tracking_enabled = False
                print("DEBUG: tracking disabled due to manual crop set")
                self.save_state()
        else:
            print(f"DEBUG: rejected small crop (<= {self.CROP_MIN_SIZE}px)")

    def seek_to_timeline_position(self, mouse_x, bar_x_start, bar_width):
        """Seek to position based on mouse click on timeline"""
        relative_x = mouse_x - bar_x_start
        position_ratio = max(0, min(1, relative_x / bar_width))
        target_frame = int(position_ratio * (self.total_frames - 1))
        self.seek_to_frame(target_frame)
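
        # e.g. with bar_x_start=100, bar_width=800 and a 1000-frame clip, a click
        # at mouse_x=500 yields position_ratio = 400/800 = 0.5 and
        # target_frame = int(0.5 * 999) = 499.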

    def undo_crop(self):
        """Undo the last crop operation"""
        if self.crop_history:
            self.crop_rect = self.crop_history.pop()
        else:
            self.crop_rect = None
        self.clear_transformation_cache()
        self.save_state()  # Save state when crop is undone

    def complete_reset(self):
        """Complete reset of all transformations and settings"""
        # Reset crop
        if self.crop_rect:
            self.crop_history.append(self.crop_rect)
        self.crop_rect = None

        # Reset zoom
        self.zoom_factor = 1.0
        self.zoom_center = None

        # Reset rotation
        self.rotation_angle = 0

        # Reset brightness and contrast
        self.brightness = 0
        self.contrast = 1.0

        # Reset motion tracking
        self.tracking_enabled = False
        self.tracking_points = {}

        # Reset feature tracking
        self.feature_tracker.clear_features()

        # Reset templates
        self.templates.clear()

        # Reset cut markers
        self.cut_start_frame = None
        self.cut_end_frame = None
        self.looping_between_markers = False

        # Reset display offset
        self.display_offset = [0, 0]

        # Clear transformation cache
        self.clear_transformation_cache()

        # Save state
        self.save_state()

        print("Complete reset applied - all transformations and markers cleared")

    def toggle_marker_looping(self):
        """Toggle looping between cut markers"""
        # Check if both markers are set
        if self.cut_start_frame is None or self.cut_end_frame is None:
            print("Both markers must be set to enable looping. Use '1' and '2' to set markers.")
            return False

        if self.cut_start_frame >= self.cut_end_frame:
            print("Invalid marker range - start frame must be before end frame")
            return False

        self.looping_between_markers = not self.looping_between_markers

        if self.looping_between_markers:
            print(f"Marker looping ENABLED: frames {self.cut_start_frame} - {self.cut_end_frame}")
            # Jump to start marker when enabling
            self.seek_to_frame(self.cut_start_frame)
        else:
            print("Marker looping DISABLED")

        self.save_state()  # Save state when looping is toggled
        return True

    def adjust_crop_size(self, direction: str, expand: bool, amount: int = None):
        """
        Adjust crop size in given direction
        direction: 'up', 'down', 'left', 'right'
        expand: True to expand, False to contract
        amount: pixels to adjust by (uses self.crop_size_step if None)
        """
        if amount is None:
            amount = self.crop_size_step
        if not self.crop_rect:
            # If no crop exists, create a default one in the center
            center_x = self.frame_width // 2
            center_y = self.frame_height // 2
            default_size = min(self.frame_width, self.frame_height) // 4
            self.crop_rect = (
                center_x - default_size // 2,
                center_y - default_size // 2,
                default_size,
                default_size
            )
            return

        x, y, w, h = self.crop_rect

        if direction == 'up':
            if expand:
                # Expand upward - decrease y, increase height
                new_y = max(0, y - amount)
                new_h = h + (y - new_y)
                self.crop_rect = (x, new_y, w, new_h)
            else:
                # Contract from bottom - decrease height
                new_h = max(self.CROP_MIN_SIZE, h - amount)
                self.crop_rect = (x, y, w, new_h)

        elif direction == 'down':
            if expand:
                # Expand downward - increase height
                new_h = min(self.frame_height - y, h + amount)
                self.crop_rect = (x, y, w, new_h)
            else:
                # Contract from top - increase y, decrease height
                amount = min(amount, h - self.CROP_MIN_SIZE)  # Don't shrink below CROP_MIN_SIZE
                new_y = y + amount
                new_h = h - amount
                self.crop_rect = (x, new_y, w, new_h)

        elif direction == 'left':
            if expand:
                # Expand leftward - decrease x, increase width
                new_x = max(0, x - amount)
                new_w = w + (x - new_x)
                self.crop_rect = (new_x, y, new_w, h)
            else:
                # Contract from right - decrease width
                new_w = max(self.CROP_MIN_SIZE, w - amount)
                self.crop_rect = (x, y, new_w, h)

        elif direction == 'right':
            if expand:
                # Expand rightward - increase width
                new_w = min(self.frame_width - x, w + amount)
                self.crop_rect = (x, y, new_w, h)
            else:
                # Contract from left - increase x, decrease width
                amount = min(amount, w - self.CROP_MIN_SIZE)  # Don't shrink below CROP_MIN_SIZE
                new_x = x + amount
                new_w = w - amount
                self.crop_rect = (new_x, y, new_w, h)

        self.clear_transformation_cache()
        self.save_state()  # Save state when crop is adjusted

    def adjust_crop_size_uniform(self, expand: bool, amount: int = None):
        """Expand or contract crop uniformly in all directions
        expand=True: expand (like uppercase HJKL)
        expand=False: contract (like lowercase hjkl)
        """
        if amount is None:
            amount = self.crop_size_step
        if not self.crop_rect:
            center_x = self.frame_width // 2
            center_y = self.frame_height // 2
            default_size = min(self.frame_width, self.frame_height) // 4
            self.crop_rect = (
                center_x - default_size // 2,
                center_y - default_size // 2,
                default_size,
                default_size
            )
            return

        x, y, w, h = self.crop_rect

        if expand:
            # Expand uniformly from center
            new_x = max(0, x - amount)
            new_y = max(0, y - amount)
            new_w = min(self.frame_width - new_x, w + (x - new_x) + amount)
            new_h = min(self.frame_height - new_y, h + (y - new_y) + amount)
        else:
            # Contract uniformly toward center
            contract_amount = min(amount, (w - self.CROP_MIN_SIZE) // 2, (h - self.CROP_MIN_SIZE) // 2)
            new_x = x + contract_amount
            new_y = y + contract_amount
            new_w = max(self.CROP_MIN_SIZE, w - contract_amount * 2)
            new_h = max(self.CROP_MIN_SIZE, h - contract_amount * 2)

        self.crop_rect = (new_x, new_y, new_w, new_h)
        self.clear_transformation_cache()
        self.save_state()
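
        # Worked example: crop (100, 100, 200, 200) with amount=5 expands to
        # (95, 95, 210, 210); contracting that result by 5 restores
        # (100, 100, 200, 200), since contract_amount is only clamped near
        # CROP_MIN_SIZE.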

    def render_video(self, output_path: str):
        """Render video or save image with current edits applied"""
        if self.is_image_mode:
            return self._render_image(output_path)
        else:
            return self._render_video_threaded(output_path)

    def _render_video_threaded(self, output_path: str):
        """Start video rendering in a separate thread"""
        # Check if already rendering
        if self.render_thread and self.render_thread.is_alive():
            print("Render already in progress! Use 'x' to cancel first.")
            return False

        # Reset render state
        self.render_cancelled = False

        # Start render thread
        self.render_thread = threading.Thread(
            target=self._render_video_worker,
            args=(output_path,),
            daemon=True
        )
        self.render_thread.start()

        print(f"Started rendering to {output_path} in background thread...")
        print("You can continue editing while rendering. Press 'x' to cancel.")
        return True
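
    # Design note: the render worker never touches the UI. It reports exclusively
    # through self.render_progress_queue (a queue.Queue), which the main loop
    # drains via update_render_progress(), keeping all drawing on the UI thread.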

    def _render_video_worker(self, output_path: str):
        """Worker method that runs in the render thread"""
        try:
            if not output_path.endswith(".mp4"):
                output_path += ".mp4"

            # Send progress update to main thread
            self.render_progress_queue.put(("init", "Initializing render...", 0.0, 0.0))

            # No need to create a VideoCapture here since we use FFmpeg directly

            # Determine frame range
            start_frame = self.cut_start_frame if self.cut_start_frame is not None else 0
            end_frame = (
                self.cut_end_frame
                if self.cut_end_frame is not None
                else self.total_frames - 1
            )

            if start_frame >= end_frame:
                self.render_progress_queue.put(("error", "Invalid cut range!", 1.0, 0.0))
                return False

            # Send progress update
            self.render_progress_queue.put(("progress", "Calculating output dimensions...", 0.05, 0.0))

            # Calculate output dimensions to MATCH preview visible region
            params = self._get_display_params()
            output_width = max(2, params['visible_w'] - (params['visible_w'] % 2))
            output_height = max(2, params['visible_h'] - (params['visible_h'] % 2))

            # Ensure dimensions are divisible by 2 for H.264 encoding
            output_width = output_width - (output_width % 2)
            output_height = output_height - (output_height % 2)

            # Send progress update
            self.render_progress_queue.put(("progress", "Setting up FFmpeg encoder...", 0.1, 0.0))

            # Debug output dimensions
            print(f"Output dimensions (match preview): {output_width}x{output_height}")
            print(f"Zoom factor: {self.zoom_factor}")
            eff_x, eff_y, eff_w, eff_h = self._get_effective_crop_rect_for_frame(start_frame)
            print(f"Effective crop (rotated): {eff_x},{eff_y} {eff_w}x{eff_h}")

            # Skip the OpenCV VideoWriter codec path entirely and encode with FFmpeg
            print("Using FFmpeg for encoding with OpenCV transformations...")
            return self._render_with_ffmpeg_pipe(output_path, start_frame, end_frame, output_width, output_height)

        except Exception as e:
            error_msg = str(e)
            # Handle specific FFmpeg threading errors
            if "async_lock" in error_msg or "pthread_frame" in error_msg:
                error_msg = "FFmpeg threading error - try restarting the application"
            elif "Assertion" in error_msg:
                error_msg = "Video codec error - the video file may be corrupted or incompatible"

            self.render_progress_queue.put(("error", f"Render error: {error_msg}", 1.0, 0.0))
            print(f"Render error: {error_msg}")
            return False
        finally:
            # No cleanup needed since we don't create a VideoCapture here
            pass

    def update_render_progress(self):
        """Process progress updates from the render thread"""
        try:
            while True:
                # Non-blocking get from queue
                update_type, text, progress, fps = self.render_progress_queue.get_nowait()

                if update_type == "init":
                    self.show_progress_bar(text)
                elif update_type == "progress":
                    self.update_progress_bar(progress, text, fps)
                elif update_type == "complete":
                    self.update_progress_bar(progress, text, fps)
                    # Handle file overwrite if this was an overwrite operation
                    if hasattr(self, 'overwrite_temp_path') and self.overwrite_temp_path:
                        self._handle_overwrite_completion()
                elif update_type == "error":
                    self.update_progress_bar(progress, text, fps)
                    # Also show error as feedback message for better visibility
                    self.show_feedback_message(f"ERROR: {text}")
                elif update_type == "cancelled":
                    self.hide_progress_bar()
                    self.show_feedback_message("Render cancelled")

        except queue.Empty:
            # No more updates in queue
            pass
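
    # Queue message protocol (producer: render worker, consumer: the method above):
    #     (update_type, text, progress, fps)
    # where update_type is one of "init" / "progress" / "complete" / "error" /
    # "cancelled", progress is a 0.0-1.0 fraction, and fps is the processing rate.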

    def _handle_overwrite_completion(self):
        """Handle file replacement after successful render"""
        try:
            print("Replacing original file...")
            # Release current video capture before replacing the file
            if hasattr(self, 'cap') and self.cap:
                self.cap.release()

            # Replace the original file with the temporary file
            import shutil
            print(f"DEBUG: Moving {self.overwrite_temp_path} to {self.overwrite_target_path}")
            try:
                shutil.move(self.overwrite_temp_path, self.overwrite_target_path)
                print("DEBUG: File move successful")
            except Exception as e:
                print(f"DEBUG: File move failed: {e}")
                # Try to clean up temp file
                if os.path.exists(self.overwrite_temp_path):
                    os.remove(self.overwrite_temp_path)
                raise

            # Small delay to ensure file system operations are complete
            time.sleep(0.1)

            try:
                self._load_video(self.video_path)
                self.load_current_frame()
                print("File reloaded successfully")
            except Exception as e:
                print(f"Warning: Could not reload file after overwrite: {e}")
                print("The file was saved successfully, but you may need to restart the editor to continue editing it.")
        except Exception as e:
            print(f"Error during file overwrite: {e}")
        finally:
            # Clean up overwrite state
            self.overwrite_temp_path = None
            self.overwrite_target_path = None

    def cancel_render(self):
        """Cancel the current render operation"""
        if self.render_thread and self.render_thread.is_alive():
            self.render_cancelled = True
            print("Render cancellation requested...")
            return True
        return False

    def is_rendering(self):
        """Check if a render operation is currently active"""
        return self.render_thread and self.render_thread.is_alive()

    def cleanup_render_thread(self):
        """Clean up render thread resources"""
        if self.render_thread and self.render_thread.is_alive():
            self.render_cancelled = True
            # Terminate FFmpeg process if running
            if self.ffmpeg_process:
                try:
                    self.ffmpeg_process.terminate()
                    self.ffmpeg_process.wait(timeout=1.0)
                except Exception:
                    try:
                        self.ffmpeg_process.kill()
                    except Exception:
                        pass
                self.ffmpeg_process = None
            # Wait a bit for the thread to finish gracefully
            self.render_thread.join(timeout=2.0)
            if self.render_thread.is_alive():
                print("Warning: Render thread did not finish gracefully")
        self.render_thread = None
        self.render_cancelled = False

    def _render_image(self, output_path: str):
        """Save image with current edits applied"""
        # Get the appropriate file extension
        original_ext = self.video_path.suffix.lower()
        if not output_path.endswith(original_ext):
            output_path += original_ext

        print(f"Saving image to {output_path}...")

        # Apply all transformations to the image
        processed_image = self.apply_crop_zoom_and_rotation(self.static_image.copy())

        if processed_image is not None:
            # Save the image with high quality settings
            success = cv2.imwrite(output_path, processed_image, [cv2.IMWRITE_JPEG_QUALITY, self.JPEG_QUALITY])
            if success:
                print(f"Image saved successfully to {output_path}")
                return True
            else:
                print(f"Error: Could not save image to {output_path}")
                return False
        else:
            print("Error: Could not process image")
            return False

    def _process_frame_for_render(self, frame, output_width: int, output_height: int, frame_number: int = None):
        """Process a single frame for rendering (optimized for speed)"""
        try:
            # Apply rotation first to work in rotated space
            if self.rotation_angle != 0:
                frame = self.apply_rotation(frame)

            # Apply EFFECTIVE crop regardless of whether a base crop exists, to enable follow and out-of-frame pad
            x, y, w, h = self._get_effective_crop_rect_for_frame(frame_number or self.current_frame)

            # Allow out-of-bounds by padding with black so center can remain when near edges
            h_frame, w_frame = frame.shape[:2]
            pad_left = max(0, -x)
            pad_top = max(0, -y)
            pad_right = max(0, (x + w) - w_frame)
            pad_bottom = max(0, (y + h) - h_frame)
            if any(p > 0 for p in (pad_left, pad_top, pad_right, pad_bottom)):
                frame = cv2.copyMakeBorder(
                    frame,
                    pad_top,
                    pad_bottom,
                    pad_left,
                    pad_right,
                    borderType=cv2.BORDER_CONSTANT,
                    value=(0, 0, 0),
                )
                x = x + pad_left
                y = y + pad_top
                w_frame, h_frame = frame.shape[1], frame.shape[0]

            # Clamp crop to padded frame
            x = max(0, min(x, w_frame - 1))
            y = max(0, min(y, h_frame - 1))
            w = min(w, w_frame - x)
            h = min(h, h_frame - y)
            if w <= 0 or h <= 0:
                return None
            frame = frame[y : y + h, x : x + w]

            # Apply brightness and contrast
            frame = self.apply_brightness_contrast(frame)

            # Apply zoom and resize directly to final output dimensions
            if self.zoom_factor != 1.0:
                height, width = frame.shape[:2]
                # Calculate what the zoomed dimensions would be
                zoomed_width = int(width * self.zoom_factor)
                zoomed_height = int(height * self.zoom_factor)

                # If zoomed dimensions match output, use them; otherwise resize directly to output
                if zoomed_width == output_width and zoomed_height == output_height:
                    frame = cv2.resize(
                        frame, (zoomed_width, zoomed_height), interpolation=cv2.INTER_LINEAR
                    )
                else:
                    # Resize directly to final output dimensions
                    frame = cv2.resize(
                        frame, (output_width, output_height), interpolation=cv2.INTER_LINEAR
                    )
            else:
                # No zoom, just resize to output dimensions if needed
                if frame.shape[1] != output_width or frame.shape[0] != output_height:
                    frame = cv2.resize(
                        frame, (output_width, output_height), interpolation=cv2.INTER_LINEAR
                    )

            return frame

        except Exception as e:
            print(f"Error processing frame: {e}")
            return None
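
    # Padding example: an effective crop of (-20, 0, 200, 200) on a 640px-wide
    # frame gives pad_left = 20; after cv2.copyMakeBorder the crop origin shifts
    # to x = 0, so the same 200x200 window is preserved with a black strip
    # standing in for the off-frame pixels.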

    def _render_with_ffmpeg_pipe(self, output_path: str, start_frame: int, end_frame: int, output_width: int, output_height: int):
        """Render video with transformations"""
        try:
            # Test FFmpeg with a simple command first
            try:
                test_result = subprocess.run(['ffmpeg', '-version'], capture_output=True, text=True, timeout=10)
                if test_result.returncode != 0:
                    print(f"FFmpeg test failed with return code {test_result.returncode}")
                    print(f"FFmpeg stderr: {test_result.stderr}")
                    error_msg = "FFmpeg is not working properly"
                    self.render_progress_queue.put(("error", error_msg, 1.0, 0.0))
                    return False
            except (subprocess.CalledProcessError, FileNotFoundError, subprocess.TimeoutExpired) as e:
                error_msg = f"FFmpeg not found or not working: {e}"
                print(error_msg)
                self.render_progress_queue.put(("error", error_msg, 1.0, 0.0))
                return False

            self.render_progress_queue.put(("progress", "Starting encoder...", 0.0, 0.0))

            import tempfile

            temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.raw')
            temp_file.close()

            # Use a simpler, more Windows-compatible FFmpeg command
            ffmpeg_cmd = [
                'ffmpeg', '-y',
                '-f', 'rawvideo',
                '-s', f'{output_width}x{output_height}',
                '-pix_fmt', 'bgr24',
                '-r', str(self.fps),
                '-i', temp_file.name,
                '-c:v', 'libx264',
                '-preset', 'veryslow',
                '-crf', '12',
                '-pix_fmt', 'yuv420p',
                '-profile:v', 'high',
                '-level', '4.2',
                '-x264-params', 'ref=5:bframes=8:deblock=1,1',
                output_path
            ]
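
            # The raw stream has no container or header, so '-s', '-pix_fmt' and
            # '-r' must describe it exactly: FFmpeg reads
            # output_width * output_height * 3 bytes per bgr24 frame
            # (e.g. 1920x1080 -> 6,220,800 bytes per frame).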
            self.temp_file_name = temp_file.name

            render_cap = cv2.VideoCapture(str(self.video_path))
            render_cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)

            total_frames = end_frame - start_frame + 1
            frames_written = 0
            start_time = time.time()
            last_progress_update = 0

            self.render_progress_queue.put(("progress", f"Processing {total_frames} frames...", 0.1, 0.0))
            with open(self.temp_file_name, 'wb') as temp_file:
                for i in range(total_frames):
                    if self.render_cancelled:
                        render_cap.release()
                        self.render_progress_queue.put(("cancelled", "Render cancelled", 0.0, 0.0))
                        return False

                    ret, frame = render_cap.read()
                    if not ret:
                        break

                    # Set current display frame for motion tracking during rendering
                    self.current_display_frame = frame.copy()
                    self.current_frame = start_frame + i

                    processed_frame = self._process_frame_for_render(frame, output_width, output_height, start_frame + i)
                    if processed_frame is not None:
                        if i == 0:
                            print(f"Processed frame dimensions: {processed_frame.shape[1]}x{processed_frame.shape[0]}")
                            print(f"Expected dimensions: {output_width}x{output_height}")

                        temp_file.write(processed_frame.tobytes())
                        frames_written += 1

                    current_time = time.time()
                    progress = 0.1 + (0.8 * (i + 1) / total_frames)

                    if current_time - last_progress_update > 0.5:
                        elapsed = current_time - start_time
                        fps_rate = frames_written / elapsed if elapsed > 0 else 0
                        self.render_progress_queue.put(("progress", f"Processed {i+1}/{total_frames} frames", progress, fps_rate))
                        last_progress_update = current_time

            render_cap.release()

            self.render_progress_queue.put(("progress", "Encoding...", 0.9, 0.0))

            # Use subprocess.run() with timeout for better Windows reliability
            result = subprocess.run(
                ffmpeg_cmd,
                capture_output=True,
                text=True,
                timeout=300,  # 5 minute timeout
                creationflags=subprocess.CREATE_NO_WINDOW if hasattr(subprocess, 'CREATE_NO_WINDOW') else 0
            )

            return_code = result.returncode
            stdout = result.stdout
            stderr = result.stderr

            # Debug output
            print(f"FFmpeg return code: {return_code}")
            if stdout:
                print(f"FFmpeg stdout: {stdout}")
            if stderr:
                print(f"FFmpeg stderr: {stderr}")

            if os.path.exists(self.temp_file_name):
                try:
                    os.unlink(self.temp_file_name)
                except OSError:
                    pass

            if return_code == 0:
                total_time = time.time() - start_time
                avg_fps = frames_written / total_time if total_time > 0 else 0
                self.render_progress_queue.put(("complete", f"Rendered {frames_written} frames", 1.0, avg_fps))
                print(f"Successfully rendered {frames_written} frames (avg {avg_fps:.1f} FPS)")
                return True
            else:
                error_details = stderr if stderr else "No error details available"
                print(f"Encoding failed with return code {return_code}")
                print(f"Error: {error_details}")
                self.render_progress_queue.put(("error", f"Encoding failed: {error_details}", 1.0, 0.0))
                return False

        except Exception as e:
            error_msg = str(e)
            print(f"Rendering exception: {error_msg}")
            print(f"Exception type: {type(e).__name__}")

            if "Errno 22" in error_msg or "invalid argument" in error_msg.lower():
                error_msg = "File system error - try using a different output path"
            elif "BrokenPipeError" in error_msg:
                error_msg = "Process terminated unexpectedly"
            elif "FileNotFoundError" in error_msg or "ffmpeg" in error_msg.lower():
                error_msg = "FFmpeg not found - please install FFmpeg and ensure it's in your PATH"

            self.render_progress_queue.put(("error", f"Rendering failed: {error_msg}", 1.0, 0.0))
            return False

    def run(self):
        """Main editor loop"""
        if self.is_image_mode:
            print("Image Editor Controls:")
            print(" E/Shift+E: Increase/Decrease brightness")
            print(" R/Shift+R: Increase/Decrease contrast")
            print(" -: Rotate clockwise 90°")
            print()
            print("Crop Controls:")
            print(" Shift+Click+Drag: Select crop area")
            print(" h/j/k/l: Contract crop (left/down/up/right)")
            print(" H/J/K/L: Expand crop (left/down/up/right)")
            print(" U: Undo crop")
            print(" c: Clear crop")
            print(" C: Complete reset (crop, zoom, rotation, brightness, contrast, tracking)")
            print()
            print("Motion Tracking:")
            print(" Right-click: Add/remove tracking point (at current frame)")
            print(" v: Toggle motion tracking on/off")
            print(" V: Clear all tracking points")
            print()
            print("Other Controls:")
            print(" Ctrl+Scroll: Zoom in/out")
            print(" Shift+S: Save screenshot")
            print(" f: Toggle fullscreen")
            print(" p: Toggle project view")
            if len(self.video_files) > 1:
                print(" N: Next file")
                print(" n: Previous file")
            print(" Enter: Save image (overwrites if '_edited_' in name)")
            print(" b: Save image as _edited_edited")
            print(" q/ESC: Quit")
            print()
        else:
            print("Video Editor Controls:")
            print(" Space: Play/Pause")
            print(" A/D: Seek backward/forward (1 frame)")
            print(" Shift+A/D: Seek backward/forward (10 frames)")
            print(" Ctrl+A/D: Seek backward/forward (60 frames)")
            print(" W/S: Increase/Decrease speed")
            print(" Q/Y: Increase/Decrease seek multiplier")
            print(" E/Shift+E: Increase/Decrease brightness")
            print(" R/Shift+R: Increase/Decrease contrast")
            print(" -: Rotate clockwise 90°")
            print()
            print("Crop Controls:")
            print(" Shift+Click+Drag: Select crop area")
            print(" h/j/k/l: Contract crop (left/down/up/right)")
            print(" H/J/K/L: Expand crop (left/down/up/right)")
            print(" U: Undo crop")
            print(" c: Clear crop")
            print(" C: Complete reset (crop, zoom, rotation, brightness, contrast, tracking)")
            print()
            print("Other Controls:")
            print(" Ctrl+Scroll: Zoom in/out")
            print(" Shift+S: Save screenshot")
            print(" f: Toggle fullscreen")
            print(" p: Toggle project view")
            print(" 1: Set cut start point")
            print(" 2: Set cut end point")
            print(" t: Toggle loop between markers")
            print(" ,: Jump to previous marker")
            print(" .: Jump to next marker")
            print(" F: Toggle feature tracking")
            print(" Shift+T: Extract features from current frame")
            print(" g: Toggle auto feature extraction")
            print(" G: Clear all feature data")
            print(" H: Switch detector (SIFT/ORB)")
            print(" o: Toggle optical flow tracking")
            print(" m: Toggle template matching tracking")
            print(" M: Toggle multi-scale template matching")
            print(" Shift+Right-click+drag: Extract features from selected region")
            print(" Ctrl+Right-click+drag: Delete features from selected region")
            print(" Ctrl+Left-click+drag: Set template region for tracking")
            if len(self.video_files) > 1:
                print(" N: Next video")
                print(" n: Previous video")
            print(" Enter: Render video (overwrites if '_edited_' in name)")
            print(" b: Render video")
            print(" x: Cancel render")
            print(" q/ESC: Quit")
            print()

        window_title = "Image Editor" if self.is_image_mode else "Video Editor"
        cv2.namedWindow(window_title, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(window_title, self.window_width, self.window_height)
        cv2.setMouseCallback(window_title, self.mouse_callback)

        self.load_current_frame()

        while True:
            # Update auto-repeat seeking if active
            self.update_auto_repeat_seek()

            # Update render progress from background thread
            self.update_render_progress()

            # Update display
            self.display_current_frame()

            # Handle project view window if it exists
            if self.project_view_mode and self.project_view:
                # Draw project view in its own window
                project_canvas = self.project_view.draw()
                cv2.imshow("Project View", project_canvas)

            # Calculate appropriate delay based on playback state
            if self.is_playing and not self.is_image_mode:
                # Use calculated frame delay for proper playback speed
                delay_ms = self.calculate_frame_delay()
            else:
                # Use minimal delay for immediate responsiveness when not playing
                delay_ms = 1

            # Auto advance frame when playing (videos only)
            if self.is_playing and not self.is_image_mode:
                self.advance_frame()

            # Continue non-blocking search if active
            if self.searching_interesting_point and self.search_state:
                self.continue_interesting_point_search()

                # Check if search completed or was cancelled
                if self.search_state and self.search_state.get('completed', False):
                    # Clean up completed search
                    self.search_state = None
                    self.searching_interesting_point = False
                    self.search_progress_text = ""
                    self.show_feedback_message("Search completed - no interesting point found")
                    self.display_needs_update = True

            # Key capture with appropriate delay
            key = cv2.waitKey(delay_ms) & 0xFF

            # Route keys based on window focus
            if key != 255:  # Key was pressed
                active_window = get_active_window_title()

                if "Project View" in active_window:
                    # Project view window has focus - handle project view keys
                    if self.project_view_mode and self.project_view:
                        action = self.project_view.handle_key(key)
                        if action == "back_to_editor":
                            self.toggle_project_view()
                        elif action == "quit":
                            return  # Exit the main loop
                        elif action.startswith("open_video:"):
                            video_path_str = action.split(":", 1)[1]
                            video_path = Path(video_path_str)
                            self.open_video_from_project_view(video_path)
                    continue  # Skip main window key handling

                elif "Video Editor" in active_window or "Image Editor" in active_window:
                    # Main window has focus - handle editor keys
                    pass  # Continue to main window key handling below
                else:
                    # Neither window has focus, ignore key
                    continue

            # Handle auto-repeat - stop if no key is pressed
            if key == 255 and self.auto_repeat_active:  # 255 means no key pressed
                self.stop_auto_repeat_seek()

            if key == ord("q") or key == 27:  # ESC
                self.stop_auto_repeat_seek()
                # If search is active, cancel it first
                if self.searching_interesting_point:
                    self.searching_interesting_point = False
                    self.search_progress_text = ""
                    print("Search cancelled")
                    self.show_feedback_message("Search cancelled")
                self.save_state()
                break
            elif key == ord("p"):  # P - Toggle project view
                self.toggle_project_view()
            elif key == ord(" "):
                # Don't allow play/pause for images
                if not self.is_image_mode:
                    self.stop_auto_repeat_seek()  # Stop seeking when toggling play/pause
                    self.is_playing = not self.is_playing
            elif key == ord("a") or key == ord("A"):
                # Seeking only for videos
                if not self.is_image_mode:
                    # Check if it's uppercase A (Shift+A)
                    if key == ord("A"):
                        if not self.auto_repeat_active:
                            self.start_auto_repeat_seek(-1, True, False)  # Shift+A: -10 frames
                    else:
                        if not self.auto_repeat_active:
                            self.start_auto_repeat_seek(-1, False, False)  # A: -1 frame
            elif key == ord("d") or key == ord("D"):
                # Seeking only for videos
                if not self.is_image_mode:
                    # Check if it's uppercase D (Shift+D)
                    if key == ord("D"):
                        if not self.auto_repeat_active:
                            self.start_auto_repeat_seek(1, True, False)  # Shift+D: +10 frames
                    else:
                        if not self.auto_repeat_active:
                            self.start_auto_repeat_seek(1, False, False)  # D: +1 frame
            elif key == 1:  # Ctrl+A
                # Seeking only for videos
                if not self.is_image_mode:
                    if not self.auto_repeat_active:
                        self.start_auto_repeat_seek(-1, False, True)  # Ctrl+A: -60 frames
            elif key == 4:  # Ctrl+D
                # Seeking only for videos
                if not self.is_image_mode:
                    if not self.auto_repeat_active:
                        self.start_auto_repeat_seek(1, False, True)  # Ctrl+D: +60 frames
|
|
            elif key == ord(","):
                # Jump to previous marker (cut start or end)
                if not self.is_image_mode:
                    self.jump_to_previous_marker()
            elif key == ord("."):
                # Jump to next marker (cut start or end)
                if not self.is_image_mode:
                    self.jump_to_next_marker()
            elif key == ord("-") or key == ord("_"):
                self.rotate_clockwise()
                print(f"Rotated to {self.rotation_angle}°")
            elif key == ord("f"):
                self.toggle_fullscreen()
            elif key == ord("S"):  # Shift+S - Save screenshot
                self.save_current_frame()
            elif key == ord("w"):
                # Speed control only for videos
                if not self.is_image_mode:
                    self.playback_speed = min(
                        self.MAX_PLAYBACK_SPEED, self.playback_speed + self.SPEED_INCREMENT
                    )
            elif key == ord("s"):
                # Speed control only for videos
                if not self.is_image_mode:
                    self.playback_speed = max(
                        self.MIN_PLAYBACK_SPEED, self.playback_speed - self.SPEED_INCREMENT
                    )
            elif key == ord("Q"):
                # Seek multiplier control only for videos
                if not self.is_image_mode:
                    self.seek_multiplier = min(
                        self.MAX_SEEK_MULTIPLIER, self.seek_multiplier + self.SEEK_MULTIPLIER_INCREMENT
                    )
                    print(f"Seek multiplier: {self.seek_multiplier:.1f}x")
            elif key == ord("Y"):
                # Seek multiplier control only for videos
                if not self.is_image_mode:
                    self.seek_multiplier = max(
                        self.MIN_SEEK_MULTIPLIER, self.seek_multiplier - self.SEEK_MULTIPLIER_INCREMENT
                    )
                    print(f"Seek multiplier: {self.seek_multiplier:.1f}x")
            elif key == ord("e") or key == ord("E"):
                # Brightness adjustment: e (increase), Shift+E (decrease)
                if key == ord("E"):
                    self.adjust_brightness(-5)
                    print(f"Brightness: {self.brightness}")
                else:
                    self.adjust_brightness(5)
                    print(f"Brightness: {self.brightness}")
            elif key == ord("r") or key == ord("R"):
                # Contrast adjustment: r (increase), Shift+R (decrease)
                if key == ord("R"):
                    self.adjust_contrast(-0.1)
                    print(f"Contrast: {self.contrast:.1f}")
                else:
                    self.adjust_contrast(0.1)
                    print(f"Contrast: {self.contrast:.1f}")
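            # Crop edits use a simple undo stack: clearing the crop pushes the
            # old rect onto crop_history so 'u' (undo_crop) can restore it.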
            elif key == ord("u"):
                self.undo_crop()
            elif key == ord("c"):
                if self.crop_rect:
                    self.crop_history.append(self.crop_rect)
                    self.crop_rect = None
                    self.zoom_factor = 1.0
                    self.clear_transformation_cache()
                    self.save_state()  # Save state when crop is cleared
            elif key == ord("C"):
                self.complete_reset()
            elif key == ord("1"):
                # Cut markers only for videos
                if not self.is_image_mode:
                    self.cut_start_frame = self.current_frame
                    print(f"Set cut start at frame {self.current_frame}")
                    self.save_state()  # Save state when cut start is set
            elif key == ord("2"):
                # Cut markers only for videos
                if not self.is_image_mode:
                    self.cut_end_frame = self.current_frame
                    print(f"Set cut end at frame {self.current_frame}")
                    self.save_state()  # Save state when cut end is set
            elif key == ord("!"):  # Shift+1 - Jump to cut start marker
                if not self.is_image_mode and self.cut_start_frame is not None:
                    self.seek_to_frame(self.cut_start_frame)
                    print(f"Jumped to cut start marker at frame {self.cut_start_frame}")
            elif key == ord("\""):  # Shift+2 - Jump to cut end marker
                if not self.is_image_mode and self.cut_end_frame is not None:
                    self.seek_to_frame(self.cut_end_frame)
                    print(f"Jumped to cut end marker at frame {self.cut_end_frame}")
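            # The "Shift+<digit>" comments below ("/" = Shift+7, "(" = Shift+8,
            # ")" = Shift+9, "=" = Shift+0) correspond to a German-style
            # keyboard layout; on a US layout these symbols sit on other keys.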
            elif key == ord(";"):  # ; - Go to next interesting point or cancel search
                if not self.is_image_mode:
                    if self.searching_interesting_point and self.search_state:
                        # Cancel ongoing search
                        self.search_state = None
                        self.searching_interesting_point = False
                        self.search_progress_text = ""
                        print("Search cancelled")
                        self.show_feedback_message("Search cancelled")
                        self.display_needs_update = True
                    else:
                        self.go_to_next_interesting_point()
            elif key == ord("'"):  # ' (apostrophe) - Toggle region selection mode
                self.toggle_interesting_region_selection()
            elif key == ord("9"):  # 9 - Decrease frame difference threshold
                self.frame_difference_threshold = max(1.0, self.frame_difference_threshold - 1.0)
                print(f"Frame difference threshold: {self.frame_difference_threshold:.1f}%")
                self.show_feedback_message(f"Threshold: {self.frame_difference_threshold:.1f}%")
            elif key == ord("0"):  # 0 - Increase frame difference threshold
                self.frame_difference_threshold = min(100.0, self.frame_difference_threshold + 1.0)
                print(f"Frame difference threshold: {self.frame_difference_threshold:.1f}%")
                self.show_feedback_message(f"Threshold: {self.frame_difference_threshold:.1f}%")
            elif key == ord(")"):  # Shift+9 - Decrease frame difference threshold by 10 percentage points
                self.frame_difference_threshold = max(1.0, self.frame_difference_threshold - 10.0)
                print(f"Frame difference threshold: {self.frame_difference_threshold:.1f}% (-10pp)")
                self.show_feedback_message(f"Threshold: {self.frame_difference_threshold:.1f}% (-10pp)")
            elif key == ord("="):  # Shift+0 - Increase frame difference threshold by 10 percentage points
                self.frame_difference_threshold = min(100.0, self.frame_difference_threshold + 10.0)  # Max 100%
                print(f"Frame difference threshold: {self.frame_difference_threshold:.1f}% (+10pp)")
                self.show_feedback_message(f"Threshold: {self.frame_difference_threshold:.1f}% (+10pp)")
            elif key == ord("7"):  # 7 - Decrease frame difference gap
                self.frame_difference_gap = max(1, self.frame_difference_gap - 1)
                print(f"Frame difference gap: {self.frame_difference_gap} frames")
                self.show_feedback_message(f"Gap: {self.frame_difference_gap} frames")
            elif key == ord("8"):  # 8 - Increase frame difference gap
                self.frame_difference_gap += 1
                print(f"Frame difference gap: {self.frame_difference_gap} frames")
                self.show_feedback_message(f"Gap: {self.frame_difference_gap} frames")
            elif key == ord("/"):  # Shift+7 - Decrease frame difference gap by 60 frames
                self.frame_difference_gap = max(1, self.frame_difference_gap - 60)
                print(f"Frame difference gap: {self.frame_difference_gap} frames (-60)")
                self.show_feedback_message(f"Gap: {self.frame_difference_gap} frames (-60)")
            elif key == ord("("):  # Shift+8 - Increase frame difference gap by 60 frames
                self.frame_difference_gap += 60
                print(f"Frame difference gap: {self.frame_difference_gap} frames (+60)")
                self.show_feedback_message(f"Gap: {self.frame_difference_gap} frames (+60)")
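            # Output naming for 'b' (render to a new file): strip any existing
            # "_edited_NNNNN" suffix, find the highest number already used in
            # the directory, and continue from there, e.g.
            #   clip.mp4 -> clip_edited_00001.mp4 -> clip_edited_00002.mp4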
            elif key == ord("N"):
                if len(self.video_files) > 1:
                    self.previous_video()
            elif key == ord("n"):
                if len(self.video_files) > 1:
                    self.next_video()
            elif key == ord("b"):
                directory = self.video_path.parent
                base_name = self.video_path.stem
                extension = self.video_path.suffix

                # Remove any existing _edited_ suffix to get clean base name
                clean_base = re.sub(r"_edited_\d{5}", "", base_name)

                # Find next available number by scanning directory for existing files
                pattern = re.compile(rf"^{re.escape(clean_base)}_edited_(\d{{5}})\.")
                max_num = 0
                for file_path in directory.iterdir():
                    if file_path.is_file():
                        match = pattern.match(file_path.name)
                        if match:
                            num = int(match.group(1))
                            max_num = max(max_num, num)

                counter = max_num + 1
                new_name = f"{clean_base}_edited_{counter:05d}{extension}"
                output_path = directory / new_name

                success = self.render_video(str(output_path))
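            # Overwrite-in-place (Enter) is restricted to files that already
            # carry an "_edited_" suffix and renders to a temp file in the
            # same directory first; temp and target paths are stashed so the
            # original is only replaced once the background render completes.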
            elif key == 13:  # Enter
                # Only overwrite if file already contains "_edited_" in name
                print(f"DEBUG: Checking if '{self.video_path.stem}' contains '_edited_'")
                if "_edited_" in self.video_path.stem:
                    print("DEBUG: File contains '_edited_', proceeding with overwrite")
                    print(f"DEBUG: Original file path: {self.video_path}")
                    print(f"DEBUG: Original file exists: {self.video_path.exists()}")
                    output_path = str(self.video_path)

                    # If we're overwriting the same file, use a temporary file first
                    import tempfile
                    temp_dir = self.video_path.parent
                    temp_fd, temp_path = tempfile.mkstemp(suffix=self.video_path.suffix, dir=temp_dir)
                    os.close(temp_fd)  # Close the file descriptor, we just need the path

                    print(f"DEBUG: Created temp file: {temp_path}")
                    print("Rendering to temporary file first...")

                    success = self.render_video(temp_path)

                    # Store the temp path so we can replace the file when render completes
                    self.overwrite_temp_path = temp_path
                    self.overwrite_target_path = str(self.video_path)
                else:
                    print(f"DEBUG: File '{self.video_path.stem}' does not contain '_edited_'")
                    print("Enter key only overwrites files with '_edited_' in the name. Use 'b' to create new files.")
            elif key == ord("v"):
                # Toggle motion tracking on/off
                self.tracking_enabled = not self.tracking_enabled
                self.show_feedback_message(f"Motion tracking {'ON' if self.tracking_enabled else 'OFF'}")
                self.save_state()
            elif key == ord("V"):
                # Clear all tracking points
                self.tracking_points = {}
                self.show_feedback_message("Tracking points cleared")
                self.save_state()
            elif key == ord("F"):
                # Toggle feature tracking on/off
                self.feature_tracker.tracking_enabled = not self.feature_tracker.tracking_enabled
                self.show_feedback_message(f"Feature tracking {'ON' if self.feature_tracker.tracking_enabled else 'OFF'}")
                self.save_state()
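            # Shift+T extracts features from the frame exactly as displayed;
            # coord_mapper converts keypoint coordinates from display space
            # back to rotated-frame space (via screen coordinates) so they
            # stay valid across crop/zoom/rotation changes.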
            elif key == ord("T"):
                # Extract features from current frame (Shift+T)
                if not self.is_image_mode and self.current_display_frame is not None:
                    # Extract features from the transformed frame (what user sees)
                    # This handles all transformations (crop, zoom, rotation) correctly
                    display_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame)
                    if display_frame is not None:
                        # Map coordinates from transformed frame to rotated frame coordinates
                        # Use the existing coordinate transformation system
                        def coord_mapper(x, y):
                            # The transformed frame coordinates are in the display frame space
                            # We need to map them to screen coordinates first, then use the
                            # existing _map_screen_to_rotated function

                            # Map from transformed frame coordinates to screen coordinates
                            # The transformed frame is centered on the canvas
                            frame_height, frame_width = display_frame.shape[:2]
                            available_height = self.window_height - (0 if self.is_image_mode else self.TIMELINE_HEIGHT)
                            start_y = (available_height - frame_height) // 2
                            start_x = (self.window_width - frame_width) // 2

                            # Convert to screen coordinates
                            screen_x = x + start_x
                            screen_y = y + start_y

                            # Use the existing coordinate transformation system
                            return self._map_screen_to_rotated(screen_x, screen_y)

                        success = self.feature_tracker.extract_features(display_frame, self.current_frame, coord_mapper)
                        if success:
                            count = self.feature_tracker.get_feature_count(self.current_frame)
                            self.show_feedback_message(f"Extracted {count} features from visible area")
                        else:
                            self.show_feedback_message("Failed to extract features")
                    else:
                        self.show_feedback_message("No display frame available")
                    self.save_state()
                else:
                    self.show_feedback_message("No frame data available")
            elif key == ord("g"):
                # Toggle auto tracking
                self.feature_tracker.auto_tracking = not self.feature_tracker.auto_tracking
                print(f"DEBUG: Auto tracking toggled to {self.feature_tracker.auto_tracking}")
                self.show_feedback_message(f"Auto tracking {'ON' if self.feature_tracker.auto_tracking else 'OFF'}")
                self.save_state()
            elif key == ord("G"):
                # Clear all feature tracking data
                self.feature_tracker.clear_features()
                self.show_feedback_message("Feature tracking data cleared")
                self.save_state()
            elif key == ord("Z"):
                # Switch detector type (SIFT -> ORB -> SIFT) - SURF not available
                current_type = self.feature_tracker.detector_type
                if current_type == 'SIFT':
                    new_type = 'ORB'
                elif current_type == 'ORB':
                    new_type = 'SIFT'
                else:
                    new_type = 'SIFT'
                self.feature_tracker.set_detector_type(new_type)
                self.show_feedback_message(f"Detector switched to {new_type}")
                self.save_state()
            elif key == ord("z"):
                # Toggle selective feature extraction mode
                if not self.is_image_mode:
                    if not hasattr(self, 'selective_feature_extraction_mode'):
                        self.selective_feature_extraction_mode = False

                    self.selective_feature_extraction_mode = not self.selective_feature_extraction_mode
                    if self.selective_feature_extraction_mode:
                        self.show_feedback_message("Selective feature extraction mode ON - Right-click and drag to select region")
                        # Enable feature tracking if not already enabled
                        if not self.feature_tracker.tracking_enabled:
                            self.feature_tracker.tracking_enabled = True
                            self.show_feedback_message("Feature tracking enabled")
                    else:
                        self.show_feedback_message("Selective feature extraction mode OFF")
                    self.save_state()
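            # Turning optical flow on first interpolates across any gaps
            # between frames that already have features
            # (_fill_all_gaps_with_interpolation), so the flow tracker starts
            # from a continuous trajectory.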
            elif key == ord("o"):
                # Toggle optical flow tracking
                self.optical_flow_enabled = not self.optical_flow_enabled
                print(f"DEBUG: Optical flow toggled to {self.optical_flow_enabled}")

                # If enabling optical flow, fill all gaps between existing features
                if self.optical_flow_enabled:
                    self._fill_all_gaps_with_interpolation()

                self.show_feedback_message(f"Optical flow {'ON' if self.optical_flow_enabled else 'OFF'}")
                self.save_state()
            elif key == ord("m"):
                # Clear all templates
                if self.templates:
                    self.templates.clear()
                    print("DEBUG: All templates cleared")
                    self.show_feedback_message("All templates cleared")
                else:
                    print("DEBUG: No templates to clear")
                    self.show_feedback_message("No templates to clear")
                self.save_state()
            elif key == ord("M"):  # Shift+M - Toggle template matching region (full frame vs. cropped)
                self.template_matching_full_frame = not self.template_matching_full_frame
                print(f"DEBUG: Template matching full frame toggled to {self.template_matching_full_frame}")
                self.show_feedback_message(f"Template matching: {'Full Frame' if self.template_matching_full_frame else 'Cropped'}")
                self.save_state()
            # NOTE: ord(";") is already consumed by the interesting-point
            # branch above, so this template-marker binding is unreachable
            # as written.
            elif key == ord(";"):  # Semicolon - Jump to previous template marker
                self.jump_to_previous_template()
            elif key == ord(":"):  # Colon - Jump to next template marker
                self.jump_to_next_template()
            elif key == ord("t"):
                # Marker looping only for videos
                if not self.is_image_mode:
                    self.toggle_marker_looping()
            elif key == ord("x"):
                # Cancel render if active
                if self.is_rendering():
                    self.cancel_render()
                    print("Render cancellation requested")
                else:
                    print("No render operation to cancel")

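            # Directional crop controls: adjust_crop_size(edge, contract)
            # grows the crop at the given edge when contract=False (the
            # Shift+letter bindings) and shrinks it when contract=True
            # (lowercase). The prints below read self.crop_size_step, assumed
            # to mirror the CROP_SIZE_STEP class constant.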
            # Individual direction controls using shift combinations we can detect
            elif key == ord("J"):  # Shift+J - expand up
                self.adjust_crop_size('up', False)
                print(f"Expanded crop upward by {self.crop_size_step}px")
            elif key == ord("K"):  # Shift+K - expand down
                self.adjust_crop_size('down', False)
                print(f"Expanded crop downward by {self.crop_size_step}px")
            elif key == ord("L"):  # Shift+L - expand left
                self.adjust_crop_size('left', False)
                print(f"Expanded crop leftward by {self.crop_size_step}px")
            elif key == ord("H"):  # Shift+H - expand right
                self.adjust_crop_size('right', False)
                print(f"Expanded crop rightward by {self.crop_size_step}px")

            # Contract in specific directions
            elif key == ord("k"):  # k - contract from bottom (reduce height from bottom)
                self.adjust_crop_size('up', True)
                print(f"Contracted crop from bottom by {self.crop_size_step}px")
            elif key == ord("j"):  # j - contract from top (reduce height from top)
                self.adjust_crop_size('down', True)
                print(f"Contracted crop from top by {self.crop_size_step}px")
            elif key == ord("h"):  # h - contract from right (reduce width from right)
                self.adjust_crop_size('left', True)
                print(f"Contracted crop from right by {self.crop_size_step}px")
            elif key == ord("l"):  # l - contract from left (reduce width from left)
                self.adjust_crop_size('right', True)
                print(f"Contracted crop from left by {self.crop_size_step}px")

        self.save_state()
        self.cleanup_render_thread()
        if hasattr(self, 'cap') and self.cap:
            self.cap.release()
        cv2.destroyAllWindows()
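

# Entry point. The input() prompts below keep the console window open when
# the script is launched from a file-manager context menu, where the window
# would otherwise close before an error message can be read.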
def main():
    parser = argparse.ArgumentParser(
        description="Fast Media Editor - Crop, Zoom, and Edit videos and images"
    )
    parser.add_argument(
        "media", help="Path to media file or directory containing videos/images"
    )

    try:
        args = parser.parse_args()
    except SystemExit:
        # If launched from context menu without arguments, this might fail
        input("Argument parsing failed. Press Enter to exit...")
        return

    if not os.path.exists(args.media):
        error_msg = f"Error: {args.media} does not exist"
        print(error_msg)
        input("Press Enter to exit...")  # Keep window open in context menu
        sys.exit(1)

    try:
        editor = VideoEditor(args.media)
        editor.run()
    except Exception as e:
        error_msg = f"Error initializing media editor: {e}"
        print(error_msg)
        import traceback
        traceback.print_exc()  # Full error trace for debugging
        input("Press Enter to exit...")  # Keep window open in context menu
        sys.exit(1)


if __name__ == "__main__":
    main()