From 15382b8fe7d2d9d64db556082f8e180c356de516 Mon Sep 17 00:00:00 2001
From: PhatPhuckDave
Date: Mon, 8 Sep 2025 16:39:43 +0200
Subject: [PATCH] Add FFmpeg-based frame seeking and rendering fallbacks

---
 croppa/main.py | 355 ++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 336 insertions(+), 19 deletions(-)

diff --git a/croppa/main.py b/croppa/main.py
index 10e147e..be26e89 100644
--- a/croppa/main.py
+++ b/croppa/main.py
@@ -10,6 +10,8 @@ import re
 import json
 import threading
 import queue
+import subprocess
+import tempfile
 
 class VideoEditor:
     # Configuration constants
@@ -491,7 +493,7 @@ class VideoEditor:
             self.current_display_frame = self.static_image.copy()
             return True
         else:
-            # For videos, seek and read frame
+            # For videos, use OpenCV for reliable seeking
            self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.current_frame)
             ret, frame = self.cap.read()
             if ret:
@@ -499,6 +501,50 @@ class VideoEditor:
                 self.current_display_frame = frame
                 return True
         return False
 
+    def _load_frame_with_ffmpeg(self, frame_number: int) -> bool:
+        """Load a specific frame using FFmpeg - much faster than OpenCV seeking"""
+        try:
+            # Calculate timestamp
+            timestamp = frame_number / self.fps
+
+            # Use FFmpeg to extract the specific frame
+            cmd = [
+                'ffmpeg', '-y', '-v', 'quiet',
+                '-ss', str(timestamp),
+                '-i', str(self.video_path),
+                '-vframes', '1',
+                '-f', 'image2pipe',
+                '-vcodec', 'png',
+                '-'
+            ]
+
+            result = subprocess.run(cmd, capture_output=True)
+            if result.returncode == 0:
+                # Decode the PNG data
+                nparr = np.frombuffer(result.stdout, np.uint8)
+                frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+                if frame is not None:
+                    self.current_display_frame = frame
+                    return True
+
+            # Fallback to OpenCV if FFmpeg fails
+            self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
+            ret, frame = self.cap.read()
+            if ret:
+                self.current_display_frame = frame
+                return True
+            return False
+
+        except Exception as e:
+            print(f"FFmpeg frame loading failed: {e}, falling back to OpenCV")
+            # Fallback to OpenCV
+            self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
+            ret, frame = self.cap.read()
+            if ret:
+                self.current_display_frame = frame
+                return True
+            return False
+
     def calculate_frame_delay(self) -> int:
         """Calculate frame delay in milliseconds based on playback speed"""
         delay_ms = int(self.BASE_FRAME_DELAY_MS / self.playback_speed)
@@ -1602,38 +1648,125 @@ class VideoEditor:
         # Send progress update
         self.render_progress_queue.put(("progress", "Setting up video writer...", 0.1, 0.0))
 
+        # Check for AMD GPU (result is informational only)
+        try:
+            result = subprocess.run(['wmic', 'path', 'win32_VideoController', 'get', 'name'],
+                                    capture_output=True, text=True, timeout=5)
+            if 'AMD' in result.stdout or 'Radeon' in result.stdout:
+                print("AMD GPU detected - attempting hardware acceleration")
+            else:
+                print("AMD GPU not detected in system info")
+        except Exception:
+            print("Could not detect GPU info, proceeding with codec tests")
 
-        # Try hardware-accelerated codecs first, fallback to mp4v
+        # Use codecs that are actually available in this OpenCV build
+        # Based on the reported codec list, these should work
         codecs_to_try = [
-            ("h264_nvenc", "H264"),  # NVIDIA hardware acceleration
-            ("h264_qsv", "H264"),    # Intel Quick Sync
-            ("h264", "H264"),        # Software H.264
-            ("mp4v", "MP4V")         # Fallback
+            ("mjpg", "MJPG"),  # Motion JPEG - very reliable
+            ("xvid", "XVID"),  # Xvid codec
+            ("divx", "DIVX"),  # DivX codec
+            ("mp4v", "mp4v"),  # MPEG-4 Part 2 (lowercase fourcc)
+            ("mp4v", "MP4V"),  # MPEG-4 Part 2 (uppercase fourcc)
+            ("h264", "avc1"),  # H.264 with the avc1 fourcc (from the codec list)
         ]
 
         out = None
+        successful_codec = None
+
         for codec_name, fourcc_code in codecs_to_try:
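+            # Probe each candidate: open a VideoWriter, write one test frame,
+            # and keep the first writer that survives the probe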
             try:
+                print(f"Trying {codec_name} codec with {fourcc_code} fourcc...")
                 fourcc = cv2.VideoWriter_fourcc(*fourcc_code)
-                out = cv2.VideoWriter(
+                test_writer = cv2.VideoWriter(
                     output_path, fourcc, self.fps, (output_width, output_height)
                 )
-                if out.isOpened():
-                    print(f"Using {codec_name} codec for rendering")
-                    break
+
+                if test_writer.isOpened():
+                    # Test if the writer actually works by writing a test frame.
+                    # VideoWriter.write() returns None in the Python bindings, so
+                    # success is re-checked via isOpened() instead of a return value.
+                    import numpy as np
+                    test_frame = np.zeros((output_height, output_width, 3), dtype=np.uint8)
+                    test_writer.write(test_frame)
+                    test_success = test_writer.isOpened()
+
+                    if test_success:
+                        out = test_writer
+                        successful_codec = f"{codec_name} ({fourcc_code})"
+                        print(f"✅ Successfully initialized {codec_name} codec with {fourcc_code} fourcc")
+                        if "amf" in codec_name.lower():
+                            print("🚀 Using AMD hardware acceleration!")
+                        break
+                    else:
+                        test_writer.release()
+                        print(f"❌ {codec_name} codec opened but test write failed")
                 else:
-                    out.release()
-                    out = None
-            except Exception:
+                    test_writer.release()
+                    print(f"❌ Failed to open {codec_name} codec")
+            except Exception as e:
+                print(f"❌ Error with {codec_name} codec: {e}")
+                if 'test_writer' in locals():
+                    test_writer.release()
                 continue
 
         if out is None:
-            # Final fallback to mp4v
-            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
-            out = cv2.VideoWriter(
-                output_path, fourcc, self.fps, (output_width, output_height)
-            )
-            print("Using mp4v codec (fallback)")
+            # Try AVI format as last resort
+            print("MP4 format failed, trying AVI format...")
+            avi_path = output_path.replace('.mp4', '.avi')
+
+            # Try basic codecs with AVI
+            avi_codecs = [("mjpg", "MJPG"), ("xvid", "XVID"), ("mp4v", "MP4V")]
+
+            for codec_name, fourcc_code in avi_codecs:
+                try:
+                    print(f"Trying {codec_name} codec with AVI format...")
+                    fourcc = cv2.VideoWriter_fourcc(*fourcc_code)
+                    test_writer = cv2.VideoWriter(
+                        avi_path, fourcc, self.fps, (output_width, output_height)
+                    )
+
+                    # Test write (again checked via isOpened(), since write() returns None)
+                    import numpy as np
+                    test_frame = np.zeros((output_height, output_width, 3), dtype=np.uint8)
+                    test_writer.write(test_frame)
+                    test_success = test_writer.isOpened()
+
+                    if test_writer.isOpened() and test_success:
+                        out = test_writer
+                        successful_codec = f"{codec_name} (AVI)"
+                        output_path = avi_path  # Update output path
+                        print(f"Successfully initialized {codec_name} codec with AVI format")
+                        break
+                    else:
+                        test_writer.release()
+                        print(f"Failed to initialize {codec_name} codec with AVI")
+                except Exception as e:
+                    print(f"Error with {codec_name} AVI codec: {e}")
+                    if 'test_writer' in locals():
+                        test_writer.release()
+                    continue
+
+            if out is None:
+                # Last resort: try the most basic approach without specifying codec
+                print("All codecs failed, trying basic VideoWriter without codec specification...")
+                try:
+                    # Try without specifying fourcc - let OpenCV choose
+                    out = cv2.VideoWriter(
+                        avi_path, -1, self.fps, (output_width, output_height)
+                    )
+                    if out.isOpened():
+                        successful_codec = "auto (AVI)"
+                        output_path = avi_path
+                        print("Successfully initialized auto codec with AVI format")
+                    else:
+                        out.release()
+                        out = None
+                except Exception as e:
+                    print(f"Auto codec failed: {e}")
+                    out = None
+
+            if out is None:
+                # Use FFmpeg instead of OpenCV for rendering
+                print("OpenCV codecs failed. Using FFmpeg for rendering...")
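+                # _render_with_ffmpeg (added below) extracts the selected frame range to PNGs,
+                # re-applies the per-frame transformations, and encodes the result with libx264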
+                return self._render_with_ffmpeg(output_path, start_frame, end_frame, output_width, output_height)
 
         if not out.isOpened():
             self.render_progress_queue.put(("error", "Could not open video writer!", 1.0, 0.0))
@@ -1887,6 +2020,190 @@ class VideoEditor:
             print(f"Error processing frame: {e}")
             return None
 
+    def _render_with_ffmpeg(self, output_path: str, start_frame: int, end_frame: int, output_width: int, output_height: int):
+        """Use FFmpeg directly for rendering - much faster than OpenCV"""
+        try:
+            # Calculate time range
+            start_time = start_frame / self.fps
+            duration = (end_frame - start_frame + 1) / self.fps
+
+            # Create temporary directory for processed frames
+            temp_dir = tempfile.mkdtemp()
+            frame_pattern = os.path.join(temp_dir, "frame_%06d.png")
+
+            self.render_progress_queue.put(("progress", "Extracting frames with FFmpeg...", 0.1, 0.0))
+
+            # Extract frames using FFmpeg
+            extract_cmd = [
+                'ffmpeg', '-y', '-v', 'quiet',
+                '-ss', str(start_time),
+                '-i', str(self.video_path),
+                '-t', str(duration),
+                '-vf', f'scale={output_width}:{output_height}',
+                frame_pattern
+            ]
+
+            result = subprocess.run(extract_cmd, capture_output=True, text=True)
+            if result.returncode != 0:
+                self.render_progress_queue.put(("error", f"FFmpeg extraction failed: {result.stderr}", 1.0, 0.0))
+                return False
+
+            # Count extracted frames
+            frame_files = sorted([f for f in os.listdir(temp_dir) if f.startswith('frame_')])
+            total_frames = len(frame_files)
+
+            if total_frames == 0:
+                self.render_progress_queue.put(("error", "No frames extracted", 1.0, 0.0))
+                return False
+
+            self.render_progress_queue.put(("progress", f"Processing {total_frames} frames...", 0.2, 0.0))
+
+            # Process frames with OpenCV (for transformations)
+            processed_frames = []
+            for i, frame_file in enumerate(frame_files):
+                if self.render_cancelled:
+                    self.render_progress_queue.put(("cancelled", "Render cancelled", 0.0, 0.0))
+                    return False
+
+                frame_path = os.path.join(temp_dir, frame_file)
+                frame = cv2.imread(frame_path)
+
+                if frame is not None:
+                    # Apply transformations
+                    processed_frame = self._process_frame_for_render(frame, output_width, output_height)
+                    if processed_frame is not None:
+                        processed_frames.append(processed_frame)
+
+                # Update progress
+                if i % 10 == 0:
+                    progress = 0.2 + (0.6 * (i / total_frames))
+                    self.render_progress_queue.put(("progress", f"Processing frame {i}/{total_frames}", progress, 0.0))
+
+            self.render_progress_queue.put(("progress", "Encoding with FFmpeg...", 0.8, 0.0))
+
+            # Save processed frames and encode with FFmpeg
+            processed_dir = os.path.join(temp_dir, "processed")
+            os.makedirs(processed_dir, exist_ok=True)
+
+            for i, frame in enumerate(processed_frames):
+                cv2.imwrite(os.path.join(processed_dir, f"frame_{i:06d}.png"), frame)
+
+            # Encode final video with FFmpeg
+            processed_pattern = os.path.join(processed_dir, "frame_%06d.png")
+            encode_cmd = [
+                'ffmpeg', '-y', '-v', 'info',  # Changed from quiet to info for progress
+                '-framerate', str(self.fps),
+                '-i', processed_pattern,
+                '-c:v', 'libx264',             # Use x264 encoder
+                '-preset', 'fast',             # Fast encoding
+                '-crf', '18',                  # Good quality
+                '-pix_fmt', 'yuv420p',
+                '-progress', 'pipe:1',         # Output progress to stdout
+                output_path
+            ]
+
+            # Run FFmpeg with progress monitoring
+            process = subprocess.Popen(encode_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+
+            # Monitor progress
+            while True:
+                if self.render_cancelled:
+                    process.terminate()
+                    self.render_progress_queue.put(("cancelled", "Render cancelled", 0.0, 0.0))
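+                    # The encoder was terminated above; abandon the render without
+                    # reading any further progress output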
+                    return False
+
+                output = process.stdout.readline()
+                if output == '' and process.poll() is not None:
+                    break
+
+                if 'out_time_ms=' in output:
+                    # Parse progress from FFmpeg output
+                    try:
+                        time_ms = int(output.split('out_time_ms=')[1].split()[0])
+                        total_time_ms = len(processed_frames) * (1000 / self.fps)
+                        progress = min(0.95, 0.8 + (0.15 * (time_ms / total_time_ms)))
+                        self.render_progress_queue.put(("progress", f"Encoding: {progress*100:.1f}%", progress, 0.0))
+                    except Exception:
+                        pass
+
+            stdout, stderr = process.communicate()
+            result = type('Result', (), {'returncode': process.returncode, 'stderr': stderr})()
+
+            # Cleanup
+            import shutil
+            shutil.rmtree(temp_dir, ignore_errors=True)
+
+            if result.returncode == 0:
+                self.render_progress_queue.put(("complete", f"Rendered {len(processed_frames)} frames with FFmpeg", 1.0, 0.0))
+                print(f"Successfully rendered {len(processed_frames)} frames using FFmpeg")
+                return True
+            else:
+                self.render_progress_queue.put(("error", f"FFmpeg encoding failed: {result.stderr}", 1.0, 0.0))
+                return False
+
+        except Exception as e:
+            self.render_progress_queue.put(("error", f"FFmpeg rendering failed: {e}", 1.0, 0.0))
+            return False
+
+    def _render_as_frames(self, output_path: str, start_frame: int, end_frame: int, output_width: int, output_height: int):
+        """Fallback method to save individual frames when video encoding fails"""
+        try:
+            # Create output directory for frames
+            frames_dir = output_path.replace('.mp4', '_frames').replace('.avi', '_frames')
+            import os
+            os.makedirs(frames_dir, exist_ok=True)
+
+            self.render_progress_queue.put(("progress", "Saving individual frames...", 0.1, 0.0))
+
+            # Create a separate VideoCapture for the render thread
+            render_cap = cv2.VideoCapture(str(self.video_path))
+            if not render_cap.isOpened():
+                self.render_progress_queue.put(("error", "Could not open video for frame extraction!", 1.0, 0.0))
+                return False
+
+            # Seek once to the start frame
+            render_cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
+
+            total_frames = end_frame - start_frame + 1
+            frames_saved = 0
+            start_time = time.time()  # needed for the frames-per-second progress figure
+
+            for frame_idx in range(start_frame, end_frame + 1):
+                # Check for cancellation
+                if self.render_cancelled:
+                    render_cap.release()
+                    self.render_progress_queue.put(("cancelled", "Frame extraction cancelled", 0.0, 0.0))
+                    return False
+
+                # Read frame sequentially
+                ret, frame = render_cap.read()
+                if not ret:
+                    break
+
+                # Process frame
+                processed_frame = self._process_frame_for_render(frame, output_width, output_height)
+                if processed_frame is not None:
+                    # Save as individual frame
+                    frame_filename = f"frame_{frame_idx:06d}.jpg"
+                    frame_path = os.path.join(frames_dir, frame_filename)
+                    cv2.imwrite(frame_path, processed_frame)
+                    frames_saved += 1
+
+                # Update progress
+                if frames_saved % 10 == 0:  # Update every 10 frames
+                    progress = 0.1 + (0.9 * (frames_saved / total_frames))
+                    fps_rate = frames_saved / (time.time() - start_time) if frames_saved > 0 else 0
+                    self.render_progress_queue.put(("progress", f"Saving frames: {frames_saved}/{total_frames}", progress, fps_rate))
+
+            render_cap.release()
+
+            self.render_progress_queue.put(("complete", f"Saved {frames_saved} frames to {frames_dir}", 1.0, 0.0))
+            print(f"Saved {frames_saved} individual frames to: {frames_dir}")
+            return True
+
+        except Exception as e:
+            self.render_progress_queue.put(("error", f"Frame extraction failed: {e}", 1.0, 0.0))
+            return False
+
     def run(self):
         """Main editor loop"""
         if self.is_image_mode: