Maybe make faster render says claude
croppa/main.py
@@ -5,6 +5,10 @@ import argparse
 import numpy as np
 from pathlib import Path
 from typing import Optional, Tuple, List
+import time
+import threading
+from concurrent.futures import ThreadPoolExecutor
+from queue import Queue
 
 
 class VideoEditor:
@@ -30,7 +34,7 @@ class VideoEditor:
     # Zoom and crop settings
     MIN_ZOOM = 0.1
     MAX_ZOOM = 10.0
-    ZOOM_INCREMENT = 0.25
+    ZOOM_INCREMENT = 0.1
 
     # Supported video extensions
     VIDEO_EXTENSIONS = {'.mp4', '.avi', '.mov', '.mkv', '.wmv', '.flv', '.webm', '.m4v'}
@@ -512,11 +516,12 @@ class VideoEditor:
         self.crop_rect = None
 
     def render_video(self, output_path: str):
-        """Render the video with current crop, zoom, and cut settings"""
+        """Optimized video rendering with multithreading and batch processing"""
         if not output_path.endswith('.mp4'):
             output_path += '.mp4'
 
         print(f"Rendering video to {output_path}...")
+        start_time = time.time()
 
         # Determine frame range
         start_frame = self.cut_start_frame if self.cut_start_frame is not None else 0
@@ -534,8 +539,7 @@ class VideoEditor:
         output_width = int(self.frame_width * self.zoom_factor)
         output_height = int(self.frame_height * self.zoom_factor)
 
-
-        # Initialize video writer
+        # Use mp4v codec (most compatible with MP4)
         fourcc = cv2.VideoWriter_fourcc(*'mp4v')
         out = cv2.VideoWriter(output_path, fourcc, self.fps, (output_width, output_height))
 
@@ -543,55 +547,138 @@ class VideoEditor:
             print("Error: Could not open video writer!")
             return False
 
-        # Process frames
         total_output_frames = end_frame - start_frame + 1
         frames_written = 0
+        last_progress_update = 0
 
-        for frame_idx in range(start_frame, end_frame + 1):
-            self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
-            ret, frame = self.cap.read()
-
-            if not ret:
-                break
+        # Batch processing configuration
+        batch_size = min(50, max(10, total_output_frames // 20))  # Adaptive batch size
+        frame_queue = Queue(maxsize=batch_size * 2)
+        processed_queue = Queue(maxsize=batch_size * 2)
+
+        def frame_reader():
+            """Background thread for reading frames"""
+            try:
+                for frame_idx in range(start_frame, end_frame + 1):
+                    self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
+                    ret, frame = self.cap.read()
+                    if ret:
+                        frame_queue.put((frame_idx, frame))
+                    else:
+                        break
+            finally:
+                frame_queue.put(None)  # Signal end of frames
+
+        def frame_processor():
+            """Background thread for processing frames"""
+            try:
+                while True:
+                    item = frame_queue.get()
+                    if item is None:  # End signal
+                        break
+
+                    frame_idx, frame = item
+                    processed_frame = self._process_frame_for_render(frame, output_width, output_height)
+
+                    if processed_frame is not None:
+                        processed_queue.put((frame_idx, processed_frame))
+
+                    frame_queue.task_done()
+            finally:
+                processed_queue.put(None)  # Signal end of processing
+
+        # Start background threads
+        reader_thread = threading.Thread(target=frame_reader, daemon=True)
+        processor_thread = threading.Thread(target=frame_processor, daemon=True)
+
+        reader_thread.start()
+        processor_thread.start()
+
+        # Main thread writes frames in order
+        expected_frame = start_frame
+        frame_buffer = {}  # Buffer for out-of-order frames
+
+        while frames_written < total_output_frames:
+            item = processed_queue.get()
+            if item is None:  # End signal
+                break
+
-            # Apply crop
+            frame_idx, processed_frame = item
+            frame_buffer[frame_idx] = processed_frame
+
+            # Write frames in order
+            while expected_frame in frame_buffer:
+                out.write(frame_buffer.pop(expected_frame))
+                frames_written += 1
+                expected_frame += 1
+
+            # Progress update (throttled to avoid too frequent updates)
+            current_time = time.time()
+            if current_time - last_progress_update > 0.5:  # Update every 0.5 seconds
+                progress = frames_written / total_output_frames * 100
+                elapsed = current_time - start_time
+                if frames_written > 0:
+                    eta = (elapsed / frames_written) * (total_output_frames - frames_written)
+                    fps_rate = frames_written / elapsed
+                    print(f"Progress: {progress:.1f}% | {frames_written}/{total_output_frames} | "
+                          f"FPS: {fps_rate:.1f} | ETA: {eta:.1f}s\r", end="")
+                last_progress_update = current_time
+
+            processed_queue.task_done()
+
+        # Wait for threads to complete
+        reader_thread.join()
+        processor_thread.join()
+
+        out.release()
+
+        total_time = time.time() - start_time
+        avg_fps = frames_written / total_time if total_time > 0 else 0
+
+        print(f"\nVideo rendered successfully to {output_path}")
+        print(f"Rendered {frames_written} frames in {total_time:.2f}s (avg {avg_fps:.1f} FPS)")
+        return True
+
+    def _process_frame_for_render(self, frame, output_width: int, output_height: int):
+        """Process a single frame for rendering (optimized for speed)"""
+        try:
+            # Apply crop (vectorized operation)
             if self.crop_rect:
-                x, y, w, h = self.crop_rect
-                x, y, w, h = int(x), int(y), int(w), int(h)
-
-                # Ensure crop coordinates are within frame bounds
-                x = max(0, min(x, frame.shape[1] - 1))
-                y = max(0, min(y, frame.shape[0] - 1))
-                w = min(w, frame.shape[1] - x)
-                h = min(h, frame.shape[0] - y)
+                x, y, w, h = map(int, self.crop_rect)
+
+                # Clamp coordinates to frame bounds
+                h_frame, w_frame = frame.shape[:2]
+                x = max(0, min(x, w_frame - 1))
+                y = max(0, min(y, h_frame - 1))
+                w = min(w, w_frame - x)
+                h = min(h, h_frame - y)
 
                 if w > 0 and h > 0:
                     frame = frame[y:y+h, x:x+w]
                 else:
                     print(f"ERROR: Invalid crop dimensions, skipping frame")
-                    continue
+                    return None
 
-            # Apply zoom
+            # Apply zoom and resize in one step for efficiency
            if self.zoom_factor != 1.0:
                 height, width = frame.shape[:2]
-                new_width = int(width * self.zoom_factor)
-                new_height = int(height * self.zoom_factor)
-                frame = cv2.resize(frame, (new_width, new_height), interpolation=cv2.INTER_LINEAR)
+                intermediate_width = int(width * self.zoom_factor)
+                intermediate_height = int(height * self.zoom_factor)
+
+                # If zoom results in different dimensions than output, resize directly to output
+                if intermediate_width != output_width or intermediate_height != output_height:
+                    frame = cv2.resize(frame, (output_width, output_height), interpolation=cv2.INTER_LINEAR)
+                else:
+                    frame = cv2.resize(frame, (intermediate_width, intermediate_height), interpolation=cv2.INTER_LINEAR)
 
-            # Ensure frame matches output dimensions
+            # Final size check and resize if needed
             if frame.shape[1] != output_width or frame.shape[0] != output_height:
                 frame = cv2.resize(frame, (output_width, output_height), interpolation=cv2.INTER_LINEAR)
 
-            out.write(frame)
+            return frame
 
-            # Progress indicator
-            progress = (frame_idx - start_frame + 1) / total_output_frames * 100
-            print(f"Progress: {progress:.1f}%\r", end="")
-
-        out.release()
-        print(f"\nVideo rendered successfully to {output_path}")
-        return True
+        except Exception as e:
+            print(f"Error processing frame: {e}")
+            return None
 
     def run(self):
         """Main editor loop"""
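
Note (not part of the commit): the sketch below is a minimal, self-contained illustration of the reader -> processor -> ordered-writer pipeline that the new render_video sets up, with strings standing in for frames and print standing in for out.write. All names in the sketch are hypothetical and not from croppa/main.py.

# Illustrative sketch only -- not from the commit. Same pattern: a reader thread
# feeds a queue, a processor thread transforms items, and the main thread emits
# results strictly in index order via a small reordering buffer.
import threading
from queue import Queue

NUM_FRAMES = 20
frame_queue = Queue(maxsize=8)       # raw frames from the reader
processed_queue = Queue(maxsize=8)   # processed frames waiting for the writer

def reader():
    # Producer: push (index, frame) pairs, then a None sentinel.
    for idx in range(NUM_FRAMES):
        frame_queue.put((idx, f"frame-{idx}"))
    frame_queue.put(None)

def processor():
    # Transform each frame (stand-in for crop/zoom/resize), forward it, then signal the end.
    while True:
        item = frame_queue.get()
        if item is None:
            break
        idx, frame = item
        processed_queue.put((idx, frame.upper()))
    processed_queue.put(None)

threading.Thread(target=reader, daemon=True).start()
threading.Thread(target=processor, daemon=True).start()

# Writer: buffer results and emit strictly by index, like frame_buffer in the commit.
expected = 0
buffer = {}
while expected < NUM_FRAMES:
    item = processed_queue.get()
    if item is None:
        break
    idx, frame = item
    buffer[idx] = frame
    while expected in buffer:
        print(buffer.pop(expected))   # out.write(...) in the real code
        expected += 1

With a single processor thread consuming a FIFO queue, results arrive already in order and the buffer never holds more than the current item; the reordering dict only becomes essential if the processing stage is parallelized later (for example with ThreadPoolExecutor, which the commit imports but does not use in the hunks shown).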