diff --git a/croppa/main.py b/croppa/main.py index be26e89..6c51158 100644 --- a/croppa/main.py +++ b/croppa/main.py @@ -19,12 +19,12 @@ class VideoEditor: SPEED_INCREMENT = 0.2 MIN_PLAYBACK_SPEED = 0.1 MAX_PLAYBACK_SPEED = 10.0 - + # Seek multiplier configuration SEEK_MULTIPLIER_INCREMENT = 2.0 MIN_SEEK_MULTIPLIER = 1.0 MAX_SEEK_MULTIPLIER = 100.0 - + # Auto-repeat seeking configuration AUTO_REPEAT_DISPLAY_RATE = 1.0 @@ -56,10 +56,10 @@ class VideoEditor: # Supported video extensions VIDEO_EXTENSIONS = {".mp4", ".avi", ".mov", ".mkv", ".wmv", ".flv", ".webm", ".m4v"} - + # Supported image extensions IMAGE_EXTENSIONS = {".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".tif", ".webp", ".jp2", ".pbm", ".pgm", ".ppm", ".sr", ".ras"} - + # Crop adjustment settings CROP_SIZE_STEP = 15 # pixels to expand/contract crop @@ -69,7 +69,7 @@ class VideoEditor: # Video file management self.video_files = [] self.current_video_index = 0 - + # Media type tracking self.is_image_mode = False # True if current file is an image @@ -131,20 +131,21 @@ class VideoEditor: self.progress_bar_complete_time = None self.progress_bar_text = "" self.progress_bar_fps = 0.0 # Current rendering FPS - + # Feedback message state self.feedback_message = "" self.feedback_message_time = None self.feedback_message_duration = 0.5 # seconds to show message - + # Crop adjustment settings self.crop_size_step = self.CROP_SIZE_STEP - + # Render thread management self.render_thread = None self.render_cancelled = False self.render_progress_queue = queue.Queue() - + self.ffmpeg_process = None # Track FFmpeg process for cancellation + # Display optimization - track when redraw is needed self.display_needs_update = True self.last_display_state = None @@ -183,7 +184,7 @@ class VideoEditor: 'seek_multiplier': getattr(self, 'seek_multiplier', 1.0), 'is_playing': getattr(self, 'is_playing', False) } - + with open(state_file, 'w') as f: json.dump(state, f, indent=2) print(f"State saved to {state_file}") @@ -206,9 +207,9 @@ 
class VideoEditor: try: with open(state_file, 'r') as f: state = json.load(f) - + print(f"State file contents: {state}") - + # Restore state values if 'current_frame' in state: self.current_frame = state['current_frame'] @@ -231,7 +232,7 @@ class VideoEditor: if 'cut_end_frame' in state: self.cut_end_frame = state['cut_end_frame'] print(f"Restored cut_end_frame: {self.cut_end_frame}") - + # Validate cut markers against current video length if self.cut_start_frame is not None and self.cut_start_frame >= self.total_frames: print(f"DEBUG: cut_start_frame {self.cut_start_frame} is beyond video length {self.total_frames}, clearing") @@ -239,7 +240,7 @@ class VideoEditor: if self.cut_end_frame is not None and self.cut_end_frame >= self.total_frames: print(f"DEBUG: cut_end_frame {self.cut_end_frame} is beyond video length {self.total_frames}, clearing") self.cut_end_frame = None - + # Calculate and show marker positions on timeline if self.cut_start_frame is not None and self.cut_end_frame is not None: start_progress = self.cut_start_frame / max(1, self.total_frames - 1) @@ -255,7 +256,7 @@ class VideoEditor: self.seek_multiplier = state['seek_multiplier'] if 'is_playing' in state: self.is_playing = state['is_playing'] - + # Validate and clamp values self.current_frame = max(0, min(self.current_frame, getattr(self, 'total_frames', 1) - 1)) self.zoom_factor = max(self.MIN_ZOOM, min(self.MAX_ZOOM, self.zoom_factor)) @@ -263,7 +264,7 @@ class VideoEditor: self.contrast = max(0.1, min(3.0, self.contrast)) self.playback_speed = max(self.MIN_PLAYBACK_SPEED, min(self.MAX_PLAYBACK_SPEED, self.playback_speed)) self.seek_multiplier = max(self.MIN_SEEK_MULTIPLIER, min(self.MAX_SEEK_MULTIPLIER, self.seek_multiplier)) - + return True except Exception as e: print(f"Error loading state: {e}") @@ -272,11 +273,11 @@ class VideoEditor: def _is_video_file(self, file_path: Path) -> bool: """Check if file is a supported video format""" return file_path.suffix.lower() in self.VIDEO_EXTENSIONS 
- + def _is_image_file(self, file_path: Path) -> bool: """Check if file is a supported image format""" return file_path.suffix.lower() in self.IMAGE_EXTENSIONS - + def _is_media_file(self, file_path: Path) -> bool: """Check if file is a supported media format (video or image)""" return self._is_video_file(file_path) or self._is_image_file(file_path) @@ -286,22 +287,22 @@ class VideoEditor: """Generate the next available screenshot filename: video_frame_00001.jpg, video_frame_00002.jpg, etc.""" directory = video_path.parent base_name = video_path.stem - + # Pattern to match existing screenshot files: video_frame_00001.jpg, video_frame_00002.jpg, etc. pattern = re.compile(rf"^{re.escape(base_name)}_frame_(\d{{5}})\.(jpg|jpeg|png)$") - + existing_numbers = set() for file_path in directory.iterdir(): if file_path.is_file(): match = pattern.match(file_path.name) if match: existing_numbers.add(int(match.group(1))) - + # Find the next available number starting from 1 next_number = 1 while next_number in existing_numbers: next_number += 1 - + return f"{base_name}_frame_{next_number:05d}.jpg" def save_current_frame(self): @@ -309,14 +310,14 @@ class VideoEditor: if self.current_display_frame is None: print("No frame to save") return False - + # Generate the next available screenshot filename screenshot_name = self._get_next_screenshot_filename(self.video_path) screenshot_path = self.video_path.parent / screenshot_name - + # Apply current transformations (crop, zoom, rotation, brightness/contrast) to the frame processed_frame = self.apply_crop_zoom_and_rotation(self.current_display_frame.copy()) - + if processed_frame is not None: # Save the processed frame success = cv2.imwrite(str(screenshot_path), processed_frame) @@ -345,7 +346,7 @@ class VideoEditor: # Pattern to match edited files: basename_edited_001.ext, basename_edited_002.ext, etc. 
edited_pattern = re.compile(r"^(.+)_edited_\d{3}$") - + edited_base_names = set() for file_path in media_files: match = edited_pattern.match(file_path.stem) @@ -357,11 +358,11 @@ class VideoEditor: # Skip if this is an edited file if edited_pattern.match(file_path.stem): continue - + # Skip if there's already an edited version of this file if file_path.stem in edited_base_names: continue - + non_edited_media.add(file_path) return sorted(non_edited_media) @@ -372,19 +373,19 @@ class VideoEditor: self.video_path = media_path self.is_image_mode = self._is_image_file(media_path) - + if self.is_image_mode: # Load static image self.static_image = cv2.imread(str(media_path)) if self.static_image is None: raise ValueError(f"Could not load image file: {media_path}") - + # Set up image properties to mimic video interface self.frame_height, self.frame_width = self.static_image.shape[:2] self.total_frames = 1 self.fps = 30 # Dummy FPS for image mode self.cap = None - + print(f"Loaded image: {self.video_path.name}") print(f" Resolution: {self.frame_width}x{self.frame_height}") else: @@ -396,7 +397,7 @@ class VideoEditor: if hasattr(cv2, 'CAP_DSHOW'): # DirectShow - usually for cameras backends_to_try.append(cv2.CAP_DSHOW) backends_to_try.append(cv2.CAP_ANY) # Fallback - + self.cap = None for backend in backends_to_try: try: @@ -411,10 +412,10 @@ class VideoEditor: self.cap.release() except Exception: continue - + if not self.cap or not self.cap.isOpened(): raise ValueError(f"Could not open video file: {media_path}") - + # Video properties self.total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) self.fps = self.cap.get(cv2.CAP_PROP_FPS) @@ -424,14 +425,14 @@ class VideoEditor: # Get codec information for debugging fourcc = int(self.cap.get(cv2.CAP_PROP_FOURCC)) codec = "".join([chr((fourcc >> 8 * i) & 0xFF) for i in range(4)]) - + # Get backend information backend = self.cap.getBackendName() - + print(f"Loaded video: {self.video_path.name} ({self.current_video_index + 
1}/{len(self.video_files)})") print(f" Codec: {codec} | Backend: {backend} | Resolution: {self.frame_width}x{self.frame_height}") print(f" FPS: {self.fps:.2f} | Frames: {self.total_frames} | Duration: {self.total_frames/self.fps:.1f}s") - + # Performance warning for known problematic cases if codec in ['H264', 'H.264', 'AVC1', 'avc1'] and self.total_frames > 10000: print(" Warning: Large H.264 video detected - seeking may be slow") @@ -506,7 +507,7 @@ class VideoEditor: try: # Calculate timestamp timestamp = frame_number / self.fps - + # Use FFmpeg to extract the specific frame cmd = [ 'ffmpeg', '-y', '-v', 'quiet', @@ -517,7 +518,7 @@ class VideoEditor: '-vcodec', 'png', '-' ] - + result = subprocess.run(cmd, capture_output=True) if result.returncode == 0: # Decode the PNG data @@ -526,7 +527,7 @@ class VideoEditor: if frame is not None: self.current_display_frame = frame return True - + # Fallback to OpenCV if FFmpeg fails self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number) ret, frame = self.cap.read() @@ -534,7 +535,7 @@ class VideoEditor: self.current_display_frame = frame return True return False - + except Exception as e: print(f"FFmpeg frame loading failed: {e}, falling back to OpenCV") # Fallback to OpenCV @@ -579,15 +580,15 @@ class VideoEditor: """Start auto-repeat seeking""" if self.is_image_mode: return - + self.auto_repeat_active = True self.auto_repeat_direction = direction self.auto_repeat_shift = shift_pressed self.auto_repeat_ctrl = ctrl_pressed - + # Initialize last_display_update to prevent immediate auto-repeat self.last_display_update = time.time() - + self.seek_video_with_modifier(direction, shift_pressed, ctrl_pressed) def stop_auto_repeat_seek(self): @@ -601,13 +602,13 @@ class VideoEditor: """Update auto-repeat seeking""" if not self.auto_repeat_active or self.is_image_mode: return - + current_time = time.time() - + if current_time - self.last_display_update >= self.AUTO_REPEAT_DISPLAY_RATE: self.seek_video_with_modifier( - 
self.auto_repeat_direction, - self.auto_repeat_shift, + self.auto_repeat_direction, + self.auto_repeat_shift, self.auto_repeat_ctrl ) self.last_display_update = current_time @@ -626,9 +627,9 @@ class VideoEditor: # Calculate how many frames to advance based on speed # For speeds > 1.0, we skip frames. For speeds < 1.0, we delay in main loop frames_to_advance = max(1, int(self.playback_speed)) - + new_frame = self.current_frame + frames_to_advance - + # Handle marker looping bounds if self.looping_between_markers and self.cut_start_frame is not None and self.cut_end_frame is not None: if new_frame >= self.cut_end_frame: @@ -673,14 +674,14 @@ class VideoEditor: self.current_frame = 0 # Loop back to start self.load_current_frame() return True - + # Handle marker looping after successful frame load if self.looping_between_markers and self.cut_start_frame is not None and self.cut_end_frame is not None: if self.current_frame >= self.cut_end_frame: self.current_frame = self.cut_start_frame self.load_current_frame() return True - + return success def apply_crop_zoom_and_rotation(self, frame): @@ -694,7 +695,7 @@ class VideoEditor: # Apply brightness/contrast first (to original frame for best quality) processed_frame = self.apply_brightness_contrast(processed_frame) - + # Apply crop if self.crop_rect: x, y, w, h = self.crop_rect @@ -811,39 +812,39 @@ class VideoEditor: """Draw feedback message on frame if visible""" if not self.feedback_message or not self.feedback_message_time: return - + # Check if message should still be shown elapsed = time.time() - self.feedback_message_time if elapsed > self.feedback_message_duration: self.feedback_message = "" self.feedback_message_time = None return - + height, width = frame.shape[:2] - + # Calculate message position (center of frame) font = cv2.FONT_HERSHEY_SIMPLEX font_scale = 1.0 thickness = 2 - + # Get text size text_size = cv2.getTextSize(self.feedback_message, font, font_scale, thickness)[0] text_x = (width - text_size[0]) // 
2 text_y = (height + text_size[1]) // 2 - + # Draw background rectangle padding = 10 rect_x1 = text_x - padding rect_y1 = text_y - text_size[1] - padding rect_x2 = text_x + text_size[0] + padding rect_y2 = text_y + padding - + # Semi-transparent background overlay = frame.copy() cv2.rectangle(overlay, (rect_x1, rect_y1), (rect_x2, rect_y2), (0, 0, 0), -1) alpha = 0.7 cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0, frame) - + # Draw text with shadow cv2.putText(frame, self.feedback_message, (text_x + 2, text_y + 2), font, font_scale, (0, 0, 0), thickness + 1) cv2.putText(frame, self.feedback_message, (text_x, text_y), font, font_scale, (255, 255, 255), thickness) @@ -998,7 +999,7 @@ class VideoEditor: # Don't draw timeline for images if self.is_image_mode: return - + height, width = frame.shape[:2] # Timeline background area @@ -1120,10 +1121,11 @@ class VideoEditor: self.progress_bar_visible, self.feedback_message ) - - if not self.display_needs_update and current_state == self.last_display_state: - return # Skip redraw if nothing changed - + + # Always update display when paused to ensure UI elements are visible + if not self.display_needs_update and current_state == self.last_display_state and self.is_playing: + return # Skip redraw if nothing changed and playing + self.last_display_state = current_state self.display_needs_update = False @@ -1385,7 +1387,7 @@ class VideoEditor: # Now we need to convert from the display frame coordinates back to original frame coordinates # The display frame is the result of: original -> crop -> rotation -> zoom - + # Step 1: Reverse zoom if self.zoom_factor != 1.0: display_x = display_x / self.zoom_factor @@ -1400,7 +1402,7 @@ class VideoEditor: crop_w, crop_h = int(self.crop_rect[2]), int(self.crop_rect[3]) else: crop_w, crop_h = original_width, original_height - + # Apply inverse rotation to coordinates # The key insight: we need to use the dimensions of the ROTATED frame for the coordinate transformation # because the 
coordinates we have are in the rotated coordinate system @@ -1428,7 +1430,7 @@ class VideoEditor: new_h = display_w else: new_x, new_y, new_w, new_h = display_x, display_y, display_w, display_h - + display_x, display_y, display_w, display_h = new_x, new_y, new_w, new_h # Step 3: Convert from cropped frame coordinates to original frame coordinates @@ -1477,20 +1479,20 @@ class VideoEditor: if self.cut_start_frame is None or self.cut_end_frame is None: print("Both markers must be set to enable looping. Use '1' and '2' to set markers.") return False - + if self.cut_start_frame >= self.cut_end_frame: print("Invalid marker range - start frame must be before end frame") return False - + self.looping_between_markers = not self.looping_between_markers - + if self.looping_between_markers: print(f"Marker looping ENABLED: frames {self.cut_start_frame} - {self.cut_end_frame}") # Jump to start marker when enabling self.seek_to_frame(self.cut_start_frame) else: print("Marker looping DISABLED") - + self.save_state() # Save state when looping is toggled return True @@ -1519,7 +1521,7 @@ class VideoEditor: return x, y, w, h = self.crop_rect - + if direction == 'up': if expand: # Expand upward - decrease y, increase height @@ -1530,7 +1532,7 @@ class VideoEditor: # Contract from bottom - decrease height new_h = max(10, h - amount) # Minimum size of 10 pixels self.crop_rect = (x, y, w, new_h) - + elif direction == 'down': if expand: # Expand downward - increase height @@ -1542,7 +1544,7 @@ class VideoEditor: new_y = y + amount new_h = h - amount self.crop_rect = (x, new_y, w, new_h) - + elif direction == 'left': if expand: # Expand leftward - decrease x, increase width @@ -1553,7 +1555,7 @@ class VideoEditor: # Contract from right - decrease width new_w = max(10, w - amount) # Minimum size of 10 pixels self.crop_rect = (x, y, new_w, h) - + elif direction == 'right': if expand: # Expand rightward - increase width @@ -1565,7 +1567,7 @@ class VideoEditor: new_x = x + amount new_w = w - 
amount self.crop_rect = (new_x, y, new_w, h) - + self.save_state() # Save state when crop is adjusted def render_video(self, output_path: str): @@ -1574,17 +1576,17 @@ class VideoEditor: return self._render_image(output_path) else: return self._render_video_threaded(output_path) - + def _render_video_threaded(self, output_path: str): """Start video rendering in a separate thread""" # Check if already rendering if self.render_thread and self.render_thread.is_alive(): print("Render already in progress! Use 'X' to cancel first.") return False - + # Reset render state self.render_cancelled = False - + # Start render thread self.render_thread = threading.Thread( target=self._render_video_worker, @@ -1592,11 +1594,11 @@ class VideoEditor: daemon=True ) self.render_thread.start() - + print(f"Started rendering to {output_path} in background thread...") print("You can continue editing while rendering. Press 'X' to cancel.") return True - + def _render_video_worker(self, output_path: str): """Worker method that runs in the render thread""" render_cap = None @@ -1647,193 +1649,11 @@ class VideoEditor: output_height = int(crop_height * self.zoom_factor) # Send progress update - self.render_progress_queue.put(("progress", "Setting up video writer...", 0.1, 0.0)) + self.render_progress_queue.put(("progress", "Setting up FFmpeg encoder...", 0.1, 0.0)) - # Check for AMD GPU - try: - import subprocess - result = subprocess.run(['wmic', 'path', 'win32_VideoController', 'get', 'name'], - capture_output=True, text=True, timeout=5) - if 'AMD' in result.stdout or 'Radeon' in result.stdout: - print("AMD GPU detected - attempting hardware acceleration") - else: - print(" AMD GPU not detected in system info") - except: - print(" Could not detect GPU info, proceeding with codec tests") - - # Use codecs that are actually available in your OpenCV installation - # Based on the codec list, these should work - codecs_to_try = [ - ("mjpg", "MJPG"), # Motion JPEG - very reliable - ("xvid", 
"XVID"), # Xvid codec - ("divx", "DIVX"), # DivX codec - ("mp4v", "mp4v"), # MPEG-4 Part 2 (lowercase) - ("mp4v", "MP4V"), # MPEG-4 Part 2 (uppercase) - ("h264", "avc1"), # H.264 with avc1 fourcc (from the list) - ] - - out = None - successful_codec = None - - for codec_name, fourcc_code in codecs_to_try: - try: - print(f"Trying {codec_name} codec with {fourcc_code} fourcc...") - fourcc = cv2.VideoWriter_fourcc(*fourcc_code) - test_writer = cv2.VideoWriter( - output_path, fourcc, self.fps, (output_width, output_height) - ) - - if test_writer.isOpened(): - # Test if the writer actually works by trying to write a test frame - import numpy as np - test_frame = np.zeros((output_height, output_width, 3), dtype=np.uint8) - test_success = test_writer.write(test_frame) - - if test_success: - out = test_writer - successful_codec = f"{codec_name} ({fourcc_code})" - print(f"✅ Successfully initialized {codec_name} codec with {fourcc_code} fourcc") - if "amf" in codec_name.lower(): - print("🚀 Using AMD hardware acceleration!") - break - else: - test_writer.release() - print(f"❌ {codec_name} codec opened but test write failed") - else: - test_writer.release() - print(f"❌ Failed to open {codec_name} codec") - except Exception as e: - print(f"❌ Error with {codec_name} codec: {e}") - if 'test_writer' in locals(): - test_writer.release() - continue - - if out is None: - # Try AVI format as last resort - print("MP4 format failed, trying AVI format...") - avi_path = output_path.replace('.mp4', '.avi') - - # Try basic codecs with AVI - avi_codecs = [("mjpg", "MJPG"), ("xvid", "XVID"), ("mp4v", "MP4V")] - - for codec_name, fourcc_code in avi_codecs: - try: - print(f"Trying {codec_name} codec with AVI format...") - fourcc = cv2.VideoWriter_fourcc(*fourcc_code) - test_writer = cv2.VideoWriter( - avi_path, fourcc, self.fps, (output_width, output_height) - ) - - # Test write - import numpy as np - test_frame = np.zeros((output_height, output_width, 3), dtype=np.uint8) - test_success = 
test_writer.write(test_frame) - - if test_writer.isOpened() and test_success: - out = test_writer - successful_codec = f"{codec_name} (AVI)" - output_path = avi_path # Update output path - print(f"Successfully initialized {codec_name} codec with AVI format") - break - else: - test_writer.release() - print(f"Failed to initialize {codec_name} codec with AVI") - except Exception as e: - print(f"Error with {codec_name} AVI codec: {e}") - if 'test_writer' in locals(): - test_writer.release() - continue - - if out is None: - # Last resort: try the most basic approach without specifying codec - print("All codecs failed, trying basic VideoWriter without codec specification...") - try: - # Try without specifying fourcc - let OpenCV choose - out = cv2.VideoWriter( - avi_path, -1, self.fps, (output_width, output_height) - ) - if out.isOpened(): - successful_codec = "auto (AVI)" - output_path = avi_path - print("Successfully initialized auto codec with AVI format") - else: - out.release() - out = None - except Exception as e: - print(f"Auto codec failed: {e}") - out = None - - if out is None: - # Use FFmpeg instead of OpenCV for rendering - print("OpenCV codecs failed. 
Using FFmpeg for rendering...") - return self._render_with_ffmpeg(output_path, start_frame, end_frame, output_width, output_height) - - if not out.isOpened(): - self.render_progress_queue.put(("error", "Could not open video writer!", 1.0, 0.0)) - return False - - # Optimized sequential processing - seek once, then read sequentially - total_output_frames = end_frame - start_frame + 1 - last_progress_update = 0 - - # Seek once to the start frame - render_cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame) - - for frame_idx in range(start_frame, end_frame + 1): - # Check for cancellation - if self.render_cancelled: - out.release() - self.render_progress_queue.put(("cancelled", "Render cancelled", 0.0, 0.0)) - return False - - # Read frame sequentially - ret, frame = render_cap.read() - - if not ret: - break - - # Process and write frame directly (minimize memory copies) - processed_frame = self._process_frame_for_render( - frame, output_width, output_height - ) - - if processed_frame is not None: - out.write(processed_frame) - - frames_written = frame_idx - start_frame + 1 - current_time = time.time() - - # Update progress bar (10% to 95% of progress reserved for frame processing) - progress = 0.1 + (0.85 * (frames_written / total_output_frames)) - - # Throttled progress update - if current_time - last_progress_update > 0.5: - elapsed = current_time - start_time - fps_rate = frames_written / elapsed - eta = (elapsed / frames_written) * ( - total_output_frames - frames_written - ) - - progress_text = f"Rendering {frames_written}/{total_output_frames} frames (ETA: {eta:.1f}s)" - self.render_progress_queue.put(("progress", progress_text, progress, fps_rate)) - last_progress_update = current_time - - out.release() - - # Ensure the video writer is completely closed and file handles are freed - del out - time.sleep(0.1) # Small delay to ensure file is unlocked - - total_time = time.time() - start_time - total_frames_written = end_frame - start_frame + 1 - avg_fps = 
total_frames_written / total_time if total_time > 0 else 0 - - # Complete the progress bar - self.render_progress_queue.put(("complete", f"Complete! Rendered {total_frames_written} frames in {total_time:.1f}s", 1.0, avg_fps)) - - print(f"\nVideo rendered successfully to {output_path}") - print(f"Rendered {total_frames_written} frames in {total_time:.2f}s (avg {avg_fps:.1f} FPS)") - return True + # Skip all the OpenCV codec fallbacks and go straight to FFmpeg + print("Using FFmpeg for encoding with OpenCV transformations...") + return self._render_with_ffmpeg_pipe(output_path, start_frame, end_frame, output_width, output_height) except Exception as e: error_msg = str(e) @@ -1842,7 +1662,7 @@ class VideoEditor: error_msg = "FFmpeg threading error - try restarting the application" elif "Assertion" in error_msg: error_msg = "Video codec error - the video file may be corrupted or incompatible" - + self.render_progress_queue.put(("error", f"Render error: {error_msg}", 1.0, 0.0)) print(f"Render error: {error_msg}") return False @@ -1850,14 +1670,14 @@ class VideoEditor: # Always clean up the render VideoCapture if render_cap: render_cap.release() - + def update_render_progress(self): """Process progress updates from the render thread""" try: while True: # Non-blocking get from queue update_type, text, progress, fps = self.render_progress_queue.get_nowait() - + if update_type == "init": self.show_progress_bar(text) elif update_type == "progress": @@ -1872,11 +1692,11 @@ class VideoEditor: elif update_type == "cancelled": self.hide_progress_bar() self.show_feedback_message("Render cancelled") - + except queue.Empty: # No more updates in queue pass - + def _handle_overwrite_completion(self): """Handle file replacement after successful render""" try: @@ -1884,7 +1704,7 @@ class VideoEditor: # Release current video capture before replacing the file if hasattr(self, 'cap') and self.cap: self.cap.release() - + # Replace the original file with the temporary file import shutil
print(f"DEBUG: Moving {self.overwrite_temp_path} to {self.overwrite_target_path}") @@ -1897,10 +1717,10 @@ class VideoEditor: if os.path.exists(self.overwrite_temp_path): os.remove(self.overwrite_temp_path) raise - + # Small delay to ensure file system operations are complete time.sleep(0.1) - + try: self._load_video(self.video_path) self.load_current_frame() @@ -1914,7 +1734,7 @@ class VideoEditor: # Clean up overwrite state self.overwrite_temp_path = None self.overwrite_target_path = None - + def cancel_render(self): """Cancel the current render operation""" if self.render_thread and self.render_thread.is_alive(): @@ -1922,22 +1742,33 @@ class VideoEditor: print("Render cancellation requested...") return True return False - + def is_rendering(self): """Check if a render operation is currently active""" return self.render_thread and self.render_thread.is_alive() - + def cleanup_render_thread(self): """Clean up render thread resources""" if self.render_thread and self.render_thread.is_alive(): self.render_cancelled = True + # Terminate FFmpeg process if running + if self.ffmpeg_process: + try: + self.ffmpeg_process.terminate() + self.ffmpeg_process.wait(timeout=1.0) + except: + try: + self.ffmpeg_process.kill() + except: + pass + self.ffmpeg_process = None # Wait a bit for the thread to finish gracefully self.render_thread.join(timeout=2.0) if self.render_thread.is_alive(): print("Warning: Render thread did not finish gracefully") self.render_thread = None self.render_cancelled = False - + def _render_image(self, output_path: str): """Save image with current edits applied""" # Get the appropriate file extension @@ -1949,7 +1780,7 @@ class VideoEditor: # Apply all transformations to the image processed_image = self.apply_crop_zoom_and_rotation(self.static_image.copy()) - + if processed_image is not None: # Save the image success = cv2.imwrite(output_path, processed_image) @@ -1996,7 +1827,7 @@ class VideoEditor: # Calculate what the zoomed dimensions would be 
zoomed_width = int(width * self.zoom_factor) zoomed_height = int(height * self.zoom_factor) - + # If zoomed dimensions match output, use them; otherwise resize directly to output if zoomed_width == output_width and zoomed_height == output_height: frame = cv2.resize( @@ -2020,129 +1851,111 @@ class VideoEditor: print(f"Error processing frame: {e}") return None - def _render_with_ffmpeg(self, output_path: str, start_frame: int, end_frame: int, output_width: int, output_height: int): - """Use FFmpeg directly for rendering - much faster than OpenCV""" + def _render_with_ffmpeg_pipe(self, output_path: str, start_frame: int, end_frame: int, output_width: int, output_height: int): + """Hybrid approach: OpenCV transformations + FFmpeg encoding via pipe""" try: - # Calculate time range - start_time = start_frame / self.fps - duration = (end_frame - start_frame + 1) / self.fps - - # Create temporary directory for processed frames - temp_dir = tempfile.mkdtemp() - frame_pattern = os.path.join(temp_dir, "frame_%06d.png") - - self.render_progress_queue.put(("progress", "Extracting frames with FFmpeg...", 0.1, 0.0)) - - # Extract frames using FFmpeg - extract_cmd = [ + self.render_progress_queue.put(("progress", "Starting FFmpeg encoder...", 0.0, 0.0)) + + # Start FFmpeg process to receive frames via pipe + # Use Windows-friendly approach with explicit binary mode + ffmpeg_cmd = [ 'ffmpeg', '-y', '-v', 'quiet', - '-ss', str(start_time), - '-i', str(self.video_path), - '-t', str(duration), - '-vf', f'scale={output_width}:{output_height}', - frame_pattern - ] - - result = subprocess.run(extract_cmd, capture_output=True, text=True) - if result.returncode != 0: - self.render_progress_queue.put(("error", f"FFmpeg extraction failed: {result.stderr}", 1.0, 0.0)) - return False - - # Count extracted frames - frame_files = sorted([f for f in os.listdir(temp_dir) if f.startswith('frame_')]) - total_frames = len(frame_files) - - if total_frames == 0: - 
self.render_progress_queue.put(("error", "No frames extracted", 1.0, 0.0)) - return False - - self.render_progress_queue.put(("progress", f"Processing {total_frames} frames...", 0.2, 0.0)) - - # Process frames with OpenCV (for transformations) - processed_frames = [] - for i, frame_file in enumerate(frame_files): - if self.render_cancelled: - self.render_progress_queue.put(("cancelled", "Render cancelled", 0.0, 0.0)) - return False - - frame_path = os.path.join(temp_dir, frame_file) - frame = cv2.imread(frame_path) - - if frame is not None: - # Apply transformations - processed_frame = self._process_frame_for_render(frame, output_width, output_height) - if processed_frame is not None: - processed_frames.append(processed_frame) - - # Update progress - if i % 10 == 0: - progress = 0.2 + (0.6 * (i / total_frames)) - self.render_progress_queue.put(("progress", f"Processing frame {i}/{total_frames}", progress, 0.0)) - - self.render_progress_queue.put(("progress", "Encoding with FFmpeg...", 0.8, 0.0)) - - # Save processed frames and encode with FFmpeg - processed_dir = os.path.join(temp_dir, "processed") - os.makedirs(processed_dir, exist_ok=True) - - for i, frame in enumerate(processed_frames): - cv2.imwrite(os.path.join(processed_dir, f"frame_{i:06d}.png"), frame) - - # Encode final video with FFmpeg - processed_pattern = os.path.join(processed_dir, "frame_%06d.png") - encode_cmd = [ - 'ffmpeg', '-y', '-v', 'info', # Changed from quiet to info for progress - '-framerate', str(self.fps), - '-i', processed_pattern, - '-c:v', 'libx264', # Use x264 encoder - '-preset', 'fast', # Fast encoding - '-crf', '18', # Good quality + '-f', 'rawvideo', + '-vcodec', 'rawvideo', + '-s', f'{output_width}x{output_height}', + '-pix_fmt', 'bgr24', + '-r', str(self.fps), + '-i', '-', # Read from stdin + '-c:v', 'libx264', + '-preset', 'fast', + '-crf', '18', '-pix_fmt', 'yuv420p', - '-progress', 'pipe:1', # Output progress to stdout output_path ] - - # Run FFmpeg with progress monitoring - 
process = subprocess.Popen(encode_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - - # Monitor progress - while True: + + # Start FFmpeg process with Windows-friendly settings + self.ffmpeg_process = subprocess.Popen( + ffmpeg_cmd, + stdin=subprocess.PIPE, + stderr=subprocess.PIPE, + bufsize=0, # Unbuffered for better pipe performance + universal_newlines=False # Binary mode for Windows + ) + + # OpenCV for frame reading and transformations + render_cap = cv2.VideoCapture(str(self.video_path)) + render_cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame) + + total_frames = end_frame - start_frame + 1 + frames_written = 0 + start_time = time.time() + last_progress_update = 0 + + self.render_progress_queue.put(("progress", f"Processing {total_frames} frames...", 0.1, 0.0)) + + for i in range(total_frames): if self.render_cancelled: - process.terminate() + self.ffmpeg_process.stdin.close() + self.ffmpeg_process.terminate() + self.ffmpeg_process.wait() + render_cap.release() + self.ffmpeg_process = None self.render_progress_queue.put(("cancelled", "Render cancelled", 0.0, 0.0)) return False - - output = process.stdout.readline() - if output == '' and process.poll() is not None: + + ret, frame = render_cap.read() + if not ret: break - - if 'out_time_ms=' in output: - # Parse progress from FFmpeg output + + # Apply transformations with OpenCV + processed_frame = self._process_frame_for_render(frame, output_width, output_height) + if processed_frame is not None: + # Write frame to FFmpeg via pipe try: - time_ms = int(output.split('out_time_ms=')[1].split()[0]) - total_time_ms = len(processed_frames) * (1000 / self.fps) - progress = min(0.95, 0.8 + (0.15 * (time_ms / total_time_ms))) - self.render_progress_queue.put(("progress", f"Encoding: {progress*100:.1f}%", progress, 0.0)) - except: - pass - - stdout, stderr = process.communicate() - result = type('Result', (), {'returncode': process.returncode, 'stderr': stderr})() - - # Cleanup - import shutil - 
shutil.rmtree(temp_dir, ignore_errors=True) - - if result.returncode == 0: - self.render_progress_queue.put(("complete", f"Rendered {len(processed_frames)} frames with FFmpeg", 1.0, 0.0)) - print(f"Successfully rendered {len(processed_frames)} frames using FFmpeg") + self.ffmpeg_process.stdin.write(processed_frame.tobytes()) + frames_written += 1 + except BrokenPipeError: + # FFmpeg process died + break + + # Update progress with FPS calculation + current_time = time.time() + progress = 0.1 + (0.8 * (i + 1) / total_frames) + + # Calculate FPS and update progress (throttled) + if current_time - last_progress_update > 0.5: + elapsed = current_time - start_time + fps_rate = frames_written / elapsed if elapsed > 0 else 0 + self.render_progress_queue.put(("progress", f"Processed {i+1}/{total_frames} frames", progress, fps_rate)) + last_progress_update = current_time + + # Close FFmpeg input and wait for completion + self.ffmpeg_process.stdin.close() + stderr = self.ffmpeg_process.communicate()[1] + return_code = self.ffmpeg_process.returncode + self.ffmpeg_process = None + + render_cap.release() + + if return_code == 0: + total_time = time.time() - start_time + avg_fps = frames_written / total_time if total_time > 0 else 0 + self.render_progress_queue.put(("complete", f"Rendered {frames_written} frames with FFmpeg", 1.0, avg_fps)) + print(f"Successfully rendered {frames_written} frames using FFmpeg pipe (avg {avg_fps:.1f} FPS)") return True else: - self.render_progress_queue.put(("error", f"FFmpeg encoding failed: {result.stderr}", 1.0, 0.0)) + self.render_progress_queue.put(("error", f"FFmpeg encoding failed: {stderr.decode()}", 1.0, 0.0)) return False - + except Exception as e: - self.render_progress_queue.put(("error", f"FFmpeg rendering failed: {e}", 1.0, 0.0)) + error_msg = str(e) + # Handle specific Windows pipe errors + if "Errno 22" in error_msg or "invalid argument" in error_msg.lower(): + error_msg = "Windows pipe error - try using a different output path or 
restart the application" + elif "BrokenPipeError" in error_msg: + error_msg = "FFmpeg process terminated unexpectedly - check if FFmpeg is installed correctly" + + self.render_progress_queue.put(("error", f"FFmpeg pipe rendering failed: {error_msg}", 1.0, 0.0)) return False def _render_as_frames(self, output_path: str, start_frame: int, end_frame: int, output_width: int, output_height: int): @@ -2152,9 +1965,9 @@ class VideoEditor: frames_dir = output_path.replace('.mp4', '_frames').replace('.avi', '_frames') import os os.makedirs(frames_dir, exist_ok=True) - + self.render_progress_queue.put(("progress", "Saving individual frames...", 0.1, 0.0)) - + # Create a separate VideoCapture for the render thread render_cap = cv2.VideoCapture(str(self.video_path)) if not render_cap.isOpened(): @@ -2163,10 +1976,10 @@ class VideoEditor: # Seek once to the start frame render_cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame) - + total_frames = end_frame - start_frame + 1 frames_saved = 0 - + for frame_idx in range(start_frame, end_frame + 1): # Check for cancellation if self.render_cancelled: @@ -2195,11 +2008,11 @@ class VideoEditor: self.render_progress_queue.put(("progress", f"Saving frames: {frames_saved}/{total_frames}", progress, fps_rate)) render_cap.release() - + self.render_progress_queue.put(("complete", f"Saved {frames_saved} frames to {frames_dir}", 1.0, 0.0)) print(f"Saved {frames_saved} individual frames to: {frames_dir}") return True - + except Exception as e: self.render_progress_queue.put(("error", f"Frame extraction failed: {e}", 1.0, 0.0)) return False @@ -2273,17 +2086,17 @@ class VideoEditor: while True: # Update auto-repeat seeking if active self.update_auto_repeat_seek() - + # Update render progress from background thread self.update_render_progress() - + # Update display self.display_current_frame() delay = self.calculate_frame_delay() if self.is_playing else 1 # Very short delay for responsive key detection key = cv2.waitKey(delay) & 0xFF - - + + # Handle 
auto-repeat - stop if no key is pressed if key == 255 and self.auto_repeat_active: # 255 means no key pressed self.stop_auto_repeat_seek() @@ -2409,10 +2222,10 @@ class VideoEditor: directory = self.video_path.parent base_name = self.video_path.stem extension = self.video_path.suffix - + # Remove any existing _edited_ suffix to get clean base name clean_base = base_name.replace("_edited", "") - + # Find next available number counter = 1 while True: @@ -2421,7 +2234,7 @@ class VideoEditor: if not output_path.exists(): break counter += 1 - + success = self.render_video(str(output_path)) elif key == 13: # Enter # Only overwrite if file already contains "_edited_" in name @@ -2431,18 +2244,18 @@ class VideoEditor: print(f"DEBUG: Original file path: {self.video_path}") print(f"DEBUG: Original file exists: {self.video_path.exists()}") output_path = str(self.video_path) - + # If we're overwriting the same file, use a temporary file first import tempfile temp_dir = self.video_path.parent temp_fd, temp_path = tempfile.mkstemp(suffix=self.video_path.suffix, dir=temp_dir) os.close(temp_fd) # Close the file descriptor, we just need the path - + print(f"DEBUG: Created temp file: {temp_path}") print("Rendering to temporary file first...") - + success = self.render_video(temp_path) - + # Store the temp path so we can replace the file when render completes self.overwrite_temp_path = temp_path self.overwrite_target_path = str(self.video_path) @@ -2460,7 +2273,7 @@ class VideoEditor: print("Render cancellation requested") else: print("No render operation to cancel") - + # Individual direction controls using shift combinations we can detect elif key == ord("J"): # Shift+i - expand up self.adjust_crop_size('up', False) @@ -2474,7 +2287,7 @@ class VideoEditor: elif key == ord("H"): # Shift+l - expand right self.adjust_crop_size('right', False) print(f"Expanded crop rightward by {self.crop_size_step}px") - + # Contract in specific directions elif key == ord("k"): # i - contract from 
bottom (reduce height from bottom) self.adjust_crop_size('up', True) @@ -2488,7 +2301,7 @@ class VideoEditor: elif key == ord("l"): # l - contract from left (reduce width from left) self.adjust_crop_size('right', True) print(f"Contracted crop from left by {self.crop_size_step}px") - + # Auto advance frame when playing (videos only) if self.is_playing and not self.is_image_mode: