diff --git a/croppa/main.py b/croppa/main.py
index 4fd2693..900dd11 100644
--- a/croppa/main.py
+++ b/croppa/main.py
@@ -1843,9 +1843,8 @@ class VideoEditor:
         return None
 
     def _render_with_ffmpeg_pipe(self, output_path: str, start_frame: int, end_frame: int, output_width: int, output_height: int):
-        """Hybrid approach: OpenCV transformations + FFmpeg encoding via pipe"""
+        """Render video with transformations"""
         try:
-            # Verify FFmpeg is available
             try:
                 subprocess.run(['ffmpeg', '-version'], capture_output=True, check=True)
             except (subprocess.CalledProcessError, FileNotFoundError):
@@ -1854,9 +1853,7 @@ class VideoEditor:
                 self.render_progress_queue.put(("error", error_msg, 1.0, 0.0))
                 return False
 
-            self.render_progress_queue.put(("progress", "Starting FFmpeg encoder...", 0.0, 0.0))
-
-            # Start FFmpeg process to receive frames via pipe
+            self.render_progress_queue.put(("progress", "Starting encoder...", 0.0, 0.0))
             ffmpeg_cmd = [
                 'ffmpeg', '-y', '-v', 'quiet',
                 '-f', 'rawvideo',
@@ -1872,20 +1869,13 @@ class VideoEditor:
                 output_path
             ]
 
-            # Use a more robust approach: write to a temporary file in chunks
             import tempfile
             import os
 
-            # Create a temporary file for the raw video data
             temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.raw')
             temp_file.close()
 
-            print(f"Using temporary file approach: {temp_file.name}")
-
-            # Modify FFmpeg command to read from file
             ffmpeg_cmd[ffmpeg_cmd.index('-i') + 1] = temp_file.name
-
-            # Start FFmpeg process
             self.ffmpeg_process = subprocess.Popen(
                 ffmpeg_cmd,
                 stderr=subprocess.PIPE,
@@ -1895,7 +1885,6 @@ class VideoEditor:
 
             self.temp_file_name = temp_file.name
 
-            # OpenCV for frame reading and transformations
             render_cap = cv2.VideoCapture(str(self.video_path))
             render_cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
 
@@ -1905,8 +1894,6 @@ class VideoEditor:
             last_progress_update = 0
 
             self.render_progress_queue.put(("progress", f"Processing {total_frames} frames...", 0.1, 0.0))
-
-            # Write all frames to temporary file first
             with open(self.temp_file_name, 'wb') as temp_file:
                 for i in range(total_frames):
                     if self.render_cancelled:
@@ -1918,23 +1905,18 @@ class VideoEditor:
                     if not ret:
                         break
 
-                    # Apply transformations with OpenCV
                     processed_frame = self._process_frame_for_render(frame, output_width, output_height)
                     if processed_frame is not None:
-                        # Debug frame dimensions
-                        if i == 0:  # Only print for first frame
+                        if i == 0:
                             print(f"Processed frame dimensions: {processed_frame.shape[1]}x{processed_frame.shape[0]}")
                             print(f"Expected dimensions: {output_width}x{output_height}")
 
-                        # Write frame to temporary file
                         temp_file.write(processed_frame.tobytes())
                         frames_written += 1
 
-                    # Update progress with FPS calculation
                     current_time = time.time()
                     progress = 0.1 + (0.8 * (i + 1) / total_frames)
 
-                    # Calculate FPS and update progress (throttled)
                    if current_time - last_progress_update > 0.5:
                         elapsed = current_time - start_time
                         fps_rate = frames_written / elapsed if elapsed > 0 else 0
@@ -1943,15 +1925,12 @@ class VideoEditor:
 
             render_cap.release()
 
-            # Now wait for FFmpeg to finish encoding
-            self.render_progress_queue.put(("progress", "Encoding with FFmpeg...", 0.9, 0.0))
+            self.render_progress_queue.put(("progress", "Encoding...", 0.9, 0.0))
 
-            # Wait for FFmpeg to finish encoding
             stderr = self.ffmpeg_process.communicate()[1]
             return_code = self.ffmpeg_process.returncode
             self.ffmpeg_process = None
 
-            # Clean up temporary file
             if hasattr(self, 'temp_file_name') and os.path.exists(self.temp_file_name):
                 try:
                     os.unlink(self.temp_file_name)
@@ -1961,30 +1940,29 @@ class VideoEditor:
             if return_code == 0:
                 total_time = time.time() - start_time
                 avg_fps = frames_written / total_time if total_time > 0 else 0
-                self.render_progress_queue.put(("complete", f"Rendered {frames_written} frames with FFmpeg", 1.0, avg_fps))
-                print(f"Successfully rendered {frames_written} frames using FFmpeg pipe (avg {avg_fps:.1f} FPS)")
+                self.render_progress_queue.put(("complete", f"Rendered {frames_written} frames", 1.0, avg_fps))
+                print(f"Successfully rendered {frames_written} frames (avg {avg_fps:.1f} FPS)")
                 return True
             else:
                 error_details = stderr.decode() if stderr else "No error details available"
-                print(f"FFmpeg encoding failed with return code {return_code}")
-                print(f"FFmpeg stderr: {error_details}")
-                self.render_progress_queue.put(("error", f"FFmpeg encoding failed: {error_details}", 1.0, 0.0))
+                print(f"Encoding failed with return code {return_code}")
+                print(f"Error: {error_details}")
+                self.render_progress_queue.put(("error", f"Encoding failed: {error_details}", 1.0, 0.0))
                 return False
 
         except Exception as e:
             error_msg = str(e)
-            print(f"FFmpeg pipe rendering exception: {error_msg}")
+            print(f"Rendering exception: {error_msg}")
             print(f"Exception type: {type(e).__name__}")
 
-            # Handle specific Windows pipe errors
             if "Errno 22" in error_msg or "invalid argument" in error_msg.lower():
-                error_msg = "Windows pipe error - try using a different output path or restart the application"
+                error_msg = "File system error - try using a different output path"
             elif "BrokenPipeError" in error_msg:
-                error_msg = "FFmpeg process terminated unexpectedly - check if FFmpeg is installed correctly"
+                error_msg = "Process terminated unexpectedly"
             elif "FileNotFoundError" in error_msg or "ffmpeg" in error_msg.lower():
                 error_msg = "FFmpeg not found - please install FFmpeg and ensure it's in your PATH"
 
-            self.render_progress_queue.put(("error", f"FFmpeg pipe rendering failed: {error_msg}", 1.0, 0.0))
+            self.render_progress_queue.put(("error", f"Rendering failed: {error_msg}", 1.0, 0.0))
             return False
 
     def run(self):
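For context, the patched function writes raw BGR frames to a temporary file and lets FFmpeg encode that file, rather than streaming through a live pipe. Below is a minimal standalone sketch of that approach; the helper name, paths, and settings are hypothetical and not part of the patch.

# Sketch only: feed OpenCV-processed frames to FFmpeg via a raw temp file.
# Names and parameters here are illustrative, not taken from croppa/main.py.
import os
import subprocess
import tempfile

import cv2


def render_raw_to_mp4(video_path: str, output_path: str, width: int, height: int, fps: float) -> None:
    """Write BGR24 frames to a headerless raw file, then have FFmpeg encode it."""
    cap = cv2.VideoCapture(video_path)
    with tempfile.NamedTemporaryFile(suffix='.raw', delete=False) as raw:
        raw_name = raw.name
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            frame = cv2.resize(frame, (width, height))  # stand-in for the real transforms
            raw.write(frame.tobytes())  # raw BGR bytes, one frame after another
    cap.release()

    # FFmpeg parses the headerless stream; -pix_fmt/-s/-r must match what was written.
    subprocess.run([
        'ffmpeg', '-y',
        '-f', 'rawvideo', '-pix_fmt', 'bgr24',
        '-s', f'{width}x{height}', '-r', str(fps),
        '-i', raw_name,
        '-c:v', 'libx264', '-pix_fmt', 'yuv420p',
        output_path,
    ], check=True)

    os.unlink(raw_name)  # clean up the intermediate raw file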