Compare commits
108 Commits
b123b12d0d
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
| 035c4910a4 | |||
| b4b2921217 | |||
| 1aaf5259e2 | |||
| 6e8fd4aa42 | |||
| 00a17c9102 | |||
| 2898b649bb | |||
| c7c9012ef1 | |||
| a965987ab9 | |||
| bd8066c471 | |||
| 43feae622e | |||
| 88630bbcbc | |||
| a77edb5fa0 | |||
| 9647ae6345 | |||
| 53549ebee9 | |||
| 238b139b10 | |||
| bf32bb98ae | |||
| 73e72bcb3b | |||
| 4db35616af | |||
| 914ae29073 | |||
| 0dc724405b | |||
| 25112d496b | |||
| 958e066042 | |||
| 99fbfa3201 | |||
| a369b84d39 | |||
| 5c44d147b0 | |||
| 4b9e8ecf45 | |||
| 3d36a36f26 | |||
| 53af41b181 | |||
| cd7cc426ae | |||
| 2537a5ffe4 | |||
| c6b285ae18 | |||
| 91165056d7 | |||
| 24dc67b8ca | |||
| 66d3fa6893 | |||
| a78ad45013 | |||
| f27061b0ef | |||
| bd1824a7ca | |||
| 4806c95095 | |||
| 16c841d14d | |||
| bfb9ed54d9 | |||
| 3ac725c2aa | |||
| b5a0811cbd | |||
| 1ac8cd04b3 | |||
| 203d036a92 | |||
| fa2ac22f9f | |||
| 2013ccf627 | |||
| e1d94f2b24 | |||
| 9df6d73db8 | |||
| 01340a0a81 | |||
| 44ed4220b9 | |||
| 151744d144 | |||
| e823a11929 | |||
| c1c01e86ca | |||
| 184aceeee3 | |||
| db2aa57ce5 | |||
| 92c2e62166 | |||
| 86c31a49d9 | |||
| f5b8656bc2 | |||
| b9c60ffc25 | |||
| b6c7863b77 | |||
| 612d024161 | |||
| 840440eb1a | |||
| c3bf49f301 | |||
| 192a5c7124 | |||
| 2246ef9f45 | |||
| c52d9b9399 | |||
| 10284dad81 | |||
| a2dc4a2186 | |||
| 5d76681ded | |||
| f8acef2da4 | |||
| 65b80034cb | |||
| 5400592afd | |||
| e6616ed1b1 | |||
| 048e8ef033 | |||
| c08d5c5999 | |||
| 8c1efb1b05 | |||
| f942392fb3 | |||
| c749d9af80 | |||
| 71e5870306 | |||
| e813be2890 | |||
| 80fb35cced | |||
| d8b4439382 | |||
| 463228baf5 | |||
| e7571a78f4 | |||
| ea008ba23c | |||
| 366c338c5d | |||
| 0d26ffaca4 | |||
| aaf78bf0da | |||
| 43d350fff2 | |||
| d1b9e7c470 | |||
| c50234f5c1 | |||
| 171155e528 | |||
| 710a1f7de3 | |||
| 13fbc45b74 | |||
| 8b4f8026cc | |||
| 5c66935157 | |||
| bae760837c | |||
| 4a1649a568 | |||
| ea1a6e58f4 | |||
| 0c3e5e21bf | |||
| 472efbb9d9 | |||
| dd2f40460b | |||
| b2c7cf11e9 | |||
| e0e5c8d933 | |||
| 04e391551e | |||
| f9f442a2d0 | |||
| 0fd108bc9a | |||
| 83ef71934b |
1
.gitignore
vendored
1
.gitignore
vendored
@@ -2,3 +2,4 @@ __pycache__
|
||||
croppa/build/lib
|
||||
croppa/croppa.egg-info
|
||||
*.log
|
||||
*.mp4
|
||||
|
||||
@@ -1,3 +1,14 @@
|
||||
module tcleaner
|
||||
|
||||
go 1.23.6
|
||||
|
||||
require git.site.quack-lab.dev/dave/cylogger v1.4.0
|
||||
|
||||
require (
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/hexops/valast v1.5.0 // indirect
|
||||
golang.org/x/mod v0.7.0 // indirect
|
||||
golang.org/x/sys v0.3.0 // indirect
|
||||
golang.org/x/tools v0.4.0 // indirect
|
||||
mvdan.cc/gofumpt v0.4.0 // indirect
|
||||
)
|
||||
|
||||
28
cleaner/go.sum
Normal file
28
cleaner/go.sum
Normal file
@@ -0,0 +1,28 @@
|
||||
git.site.quack-lab.dev/dave/cylogger v1.4.0 h1:3Ca7V5JWvruARJd5S8xDFwW9LnZ9QInqkYLRdrEFvuY=
|
||||
git.site.quack-lab.dev/dave/cylogger v1.4.0/go.mod h1:wctgZplMvroA4X6p8f4B/LaCKtiBcT1Pp+L14kcS8jk=
|
||||
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
|
||||
github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/hexops/autogold v0.8.1 h1:wvyd/bAJ+Dy+DcE09BoLk6r4Fa5R5W+O+GUzmR985WM=
|
||||
github.com/hexops/autogold v0.8.1/go.mod h1:97HLDXyG23akzAoRYJh/2OBs3kd80eHyKPvZw0S5ZBY=
|
||||
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
|
||||
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
|
||||
github.com/hexops/valast v1.5.0 h1:FBTuvVi0wjTngtXJRZXMbkN/Dn6DgsUsBwch2DUJU8Y=
|
||||
github.com/hexops/valast v1.5.0/go.mod h1:Jcy1pNH7LNraVaAZDLyv21hHg2WBv9Nf9FL6fGxU7o4=
|
||||
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
|
||||
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4=
|
||||
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
|
||||
mvdan.cc/gofumpt v0.4.0 h1:JVf4NN1mIpHogBj7ABpgOyZc65/UUOkKQFkoURsz4MM=
|
||||
mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ=
|
||||
@@ -1,7 +1,7 @@
|
||||
Windows Registry Editor Version 5.00
|
||||
|
||||
[HKEY_CURRENT_USER\Software\Classes\*\shell\Clean video name]
|
||||
@="Clean video name"
|
||||
[HKEY_CURRENT_USER\Software\Classes\*\shell\Clean name]
|
||||
@="Clean name"
|
||||
|
||||
[HKEY_CURRENT_USER\Software\Classes\*\shell\Clean video name\command]
|
||||
[HKEY_CURRENT_USER\Software\Classes\*\shell\Clean name\command]
|
||||
@="C:\\Users\\administrator\\go\\bin\\tcleaner.exe \"%1\""
|
||||
@@ -6,51 +6,71 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
logger "git.site.quack-lab.dev/dave/cylogger"
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
logger.InitFlag()
|
||||
if flag.NArg() == 0 {
|
||||
fmt.Println("Usage: cleaner <files>")
|
||||
os.Exit(1)
|
||||
}
|
||||
// regex to match " - 2025-07-08 01h31m45s - "
|
||||
re := regexp.MustCompile(` - (\d{4}-\d{2}-\d{2} \d{2}h\d{2}m\d{2}s) - `)
|
||||
// regex to match "2025-07-08"
|
||||
re := regexp.MustCompile(`\d{4}-\d{2}-\d{2}`)
|
||||
editedRe := regexp.MustCompile(`_edited_\d{5}`)
|
||||
|
||||
for _, file := range flag.Args() {
|
||||
filelog := logger.Default.WithPrefix(file)
|
||||
filelog.Info("Processing file")
|
||||
|
||||
info, err := os.Stat(file)
|
||||
if err != nil {
|
||||
fmt.Printf("ERROR: %v\n", err)
|
||||
filelog.Error("ERROR: %v\n", err)
|
||||
continue
|
||||
}
|
||||
if info.IsDir() {
|
||||
fmt.Printf("SKIP (directory): %s\n", file)
|
||||
filelog.Info("SKIP (directory): %s\n", file)
|
||||
continue
|
||||
}
|
||||
|
||||
name := filepath.Base(file)
|
||||
match := re.FindStringSubmatch(name)
|
||||
filelog.Debug("Match: %v", match)
|
||||
if match == nil {
|
||||
fmt.Printf("SKIP (no date pattern): %s\n", name)
|
||||
filelog.Info("SKIP (no date pattern): %s\n", name)
|
||||
continue
|
||||
}
|
||||
|
||||
newName := match[1] + filepath.Ext(name)
|
||||
namePart := match[0]
|
||||
editMatch := editedRe.FindStringSubmatch(name)
|
||||
filelog.Debug("Edit match: %v", editMatch)
|
||||
if editMatch != nil {
|
||||
namePart = namePart + editMatch[0]
|
||||
filelog.Info("Video has edited part, new name: %s", namePart)
|
||||
}
|
||||
|
||||
newName := namePart + filepath.Ext(name)
|
||||
filelog.Debug("New name: %s", newName)
|
||||
if name == newName {
|
||||
fmt.Printf("SKIP (already named): %s\n", name)
|
||||
filelog.Info("SKIP (already named): %s\n", name)
|
||||
continue
|
||||
}
|
||||
|
||||
filelog.Debug("Checking if target exists: %s", newName)
|
||||
if _, err := os.Stat(newName); err == nil {
|
||||
fmt.Printf("SKIP (target exists): %s -> %s\n", name, newName)
|
||||
filelog.Info("SKIP (target exists): %s -> %s\n", name, newName)
|
||||
continue
|
||||
}
|
||||
|
||||
filelog.Info("Renaming to: %s", newName)
|
||||
err = os.Rename(name, newName)
|
||||
if err != nil {
|
||||
fmt.Printf("ERROR renaming %s: %v\n", name, err)
|
||||
filelog.Error("ERROR renaming %s: %v\n", name, err)
|
||||
} else {
|
||||
fmt.Printf("RENAMED: %s -> %s\n", name, newName)
|
||||
filelog.Info("RENAMED: %s -> %s\n", name, newName)
|
||||
}
|
||||
filelog.Info("All done")
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
68
croppa/capture.py
Normal file
68
croppa/capture.py
Normal file
@@ -0,0 +1,68 @@
|
||||
import cv2
|
||||
from collections import OrderedDict
|
||||
|
||||
|
||||
class Cv2BufferedCap:
    """Buffered wrapper around cv2.VideoCapture that handles frame loading, seeking, and caching correctly"""

    def __init__(self, video_path, backend=None, cache_size=10000):
        """Open the video file and read its basic properties.

        Args:
            video_path: Path to the video file (anything ``str()`` accepts).
            backend: Optional OpenCV capture backend constant (e.g.
                ``cv2.CAP_FFMPEG``). ``None`` means "let OpenCV choose"
                (``cv2.CAP_ANY``) — passing ``None`` straight through to
                ``cv2.VideoCapture`` raises a TypeError because the
                apiPreference argument must be an int.
            cache_size: Maximum number of decoded frames kept in the LRU cache.

        Raises:
            ValueError: If the video cannot be opened.
        """
        self.video_path = video_path
        # Translate None into CAP_ANY; apiPreference must be an int.
        if backend is None:
            backend = cv2.CAP_ANY
        self.cap = cv2.VideoCapture(str(video_path), backend)
        if not self.cap.isOpened():
            raise ValueError(f"Could not open video: {video_path}")

        # Video properties
        self.total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        self.fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.frame_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.frame_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # Current position tracking (index of the last frame actually decoded)
        self.current_frame = 0

        # Frame cache (LRU): keys are frame numbers, ordered oldest-access first
        self.cache_size = cache_size
        self.frame_cache = OrderedDict()

    def get_frame(self, frame_number):
        """Get frame at specific index - always accurate.

        Clamps ``frame_number`` into ``[0, total_frames - 1]``, serves from
        the LRU cache when possible, and otherwise decodes — seeking only
        when the request is not the next sequential frame.

        Raises:
            ValueError: If the frame cannot be decoded.
        """
        # Clamp frame number to valid range
        frame_number = max(0, min(frame_number, self.total_frames - 1))

        # Check cache first
        if frame_number in self.frame_cache:
            self.frame_cache.move_to_end(frame_number)
            return self.frame_cache[frame_number]

        # Optimize for sequential reading (next frame): no seek needed
        if frame_number == self.current_frame + 1:
            ret, frame = self.cap.read()
        else:
            # Seek for non-sequential access
            self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
            ret, frame = self.cap.read()

        if not ret:
            raise ValueError(f"Failed to read frame {frame_number}")

        self.current_frame = frame_number
        # Store in cache, evicting the least recently used entry when full.
        # A fresh insert lands at the most-recent end, so no move_to_end needed.
        if len(self.frame_cache) >= self.cache_size:
            self.frame_cache.popitem(last=False)
        self.frame_cache[frame_number] = frame
        return frame

    def advance_frame(self, frames=1):
        """Advance by specified number of frames (get_frame clamps the result)."""
        return self.get_frame(self.current_frame + frames)

    def release(self):
        """Release the video capture"""
        if self.cap:
            self.cap.release()

    def isOpened(self):
        """Check if capture is opened"""
        return self.cap and self.cap.isOpened()
|
||||
3213
croppa/main.py
3213
croppa/main.py
File diff suppressed because it is too large
Load Diff
351
croppa/project_view.py
Normal file
351
croppa/project_view.py
Normal file
@@ -0,0 +1,351 @@
|
||||
import cv2
|
||||
import json
|
||||
import numpy as np
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
class ProjectView:
    """Project view that displays videos in current directory with progress bars"""

    # Project view configuration
    THUMBNAIL_SIZE = (200, 150)  # Width, Height
    THUMBNAIL_MARGIN = 20
    PROGRESS_BAR_HEIGHT = 8
    TEXT_HEIGHT = 30

    # Colors (OpenCV BGR tuples)
    BG_COLOR = (40, 40, 40)
    THUMBNAIL_BG_COLOR = (60, 60, 60)
    PROGRESS_BG_COLOR = (80, 80, 80)
    PROGRESS_FILL_COLOR = (0, 120, 255)
    TEXT_COLOR = (255, 255, 255)
    SELECTED_COLOR = (255, 165, 0)

    def __init__(self, directory: Path, video_editor):
        """Scan ``directory`` for videos and load their saved progress.

        Args:
            directory: Directory whose video files are listed.
            video_editor: Editor object; this class reads its
                ``VIDEO_EXTENSIONS`` collection to filter files.
        """
        self.directory = directory
        self.video_editor = video_editor
        self.video_files = []
        self.thumbnails = {}     # video path -> original-size thumbnail image
        self.progress_data = {}  # video path -> {'current_frame', 'total_frames', 'progress'}
        self.selected_index = 0
        self.scroll_offset = 0   # first visible grid row
        self.items_per_row = 2  # Default to 2 items per row
        self.window_width = 1920  # Increased to accommodate 1080p videos
        self.window_height = 1200

        self._load_video_files()
        self._load_progress_data()

    def _get_window_size(self) -> tuple:
        """Return the actual (width, height) of the "Project View" window,
        falling back to the configured defaults when OpenCV cannot report it.

        Shared by draw() and _update_scroll(), which previously duplicated
        this probing logic verbatim.
        """
        try:
            window_rect = cv2.getWindowImageRect("Project View")
            if window_rect[2] > 0 and window_rect[3] > 0:  # width and height > 0
                return window_rect[2], window_rect[3]
        except Exception:
            pass
        return self.window_width, self.window_height

    def _calculate_thumbnail_size(self, window_width: int) -> tuple:
        """Calculate thumbnail size based on items per row and window width"""
        available_width = window_width - self.THUMBNAIL_MARGIN
        item_width = (available_width - (self.items_per_row - 1) * self.THUMBNAIL_MARGIN) // self.items_per_row
        thumbnail_width = max(50, item_width)  # Minimum 50px width
        thumbnail_height = int(thumbnail_width * self.THUMBNAIL_SIZE[1] / self.THUMBNAIL_SIZE[0])  # Maintain aspect ratio
        return (thumbnail_width, thumbnail_height)

    def _load_video_files(self):
        """Load all video files from directory"""
        self.video_files = []
        for file_path in self.directory.iterdir():
            if (file_path.is_file() and
                    file_path.suffix.lower() in self.video_editor.VIDEO_EXTENSIONS):
                self.video_files.append(file_path)
        self.video_files.sort(key=lambda x: x.name)

    def _load_progress_data(self):
        """Load progress data from JSON state files"""
        self.progress_data = {}
        for video_path in self.video_files:
            state_file = video_path.with_suffix('.json')
            if not state_file.exists():
                continue
            try:
                with open(state_file, 'r') as f:
                    state = json.load(f)
                current_frame = state.get('current_frame', 0)

                # Get total frames from video; always release the capture,
                # even when it fails to open.
                cap = cv2.VideoCapture(str(video_path))
                try:
                    if not cap.isOpened():
                        continue
                    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                finally:
                    cap.release()

                if total_frames > 0:
                    # max(..., 1) guards single-frame videos, which would
                    # otherwise divide by zero.
                    progress = current_frame / max(total_frames - 1, 1)
                    self.progress_data[video_path] = {
                        'current_frame': current_frame,
                        'total_frames': total_frames,
                        'progress': progress
                    }
            except Exception as e:
                print(f"Error loading progress for {video_path.name}: {e}")

    def refresh_progress_data(self):
        """Refresh progress data from JSON files (call when editor state changes)"""
        self._load_progress_data()

    def get_progress_for_video(self, video_path: Path) -> float:
        """Get progress (0.0 to 1.0) for a video"""
        if video_path in self.progress_data:
            return self.progress_data[video_path]['progress']
        return 0.0

    def get_thumbnail_for_video(self, video_path: Path, size: tuple = None) -> np.ndarray:
        """Get thumbnail for a video, generating it if needed"""
        if size is None:
            size = self.THUMBNAIL_SIZE

        # Cache the original thumbnail by video path only (not size)
        if video_path in self.thumbnails:
            # Resize the cached thumbnail to the requested size
            return cv2.resize(self.thumbnails[video_path], size)

        # Generate original thumbnail on demand (only once per video)
        try:
            cap = cv2.VideoCapture(str(video_path))
            try:
                if cap.isOpened():
                    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                    if total_frames > 0:
                        # Sample the middle frame as a representative image
                        middle_frame = total_frames // 2
                        cap.set(cv2.CAP_PROP_POS_FRAMES, middle_frame)
                        ret, frame = cap.read()
                        if ret:
                            # Store original thumbnail at original size
                            original_thumbnail = cv2.resize(frame, self.THUMBNAIL_SIZE)
                            self.thumbnails[video_path] = original_thumbnail
                            # Return resized version
                            return cv2.resize(original_thumbnail, size)
            finally:
                # Single release point; also covers the not-opened case,
                # which previously leaked the capture.
                cap.release()
        except Exception as e:
            print(f"Error generating thumbnail for {video_path.name}: {e}")

        # Return a placeholder if thumbnail generation failed
        return np.full((size[1], size[0], 3),
                       self.THUMBNAIL_BG_COLOR, dtype=np.uint8)

    def draw(self) -> np.ndarray:
        """Draw the project view"""
        # Get actual window size dynamically (falls back to defaults)
        actual_width, actual_height = self._get_window_size()

        canvas = np.full((actual_height, actual_width, 3), self.BG_COLOR, dtype=np.uint8)

        if not self.video_files:
            # No videos message
            text = "No videos found in directory"
            font = cv2.FONT_HERSHEY_SIMPLEX
            text_size = cv2.getTextSize(text, font, 1.0, 2)[0]
            text_x = (actual_width - text_size[0]) // 2
            text_y = (actual_height - text_size[1]) // 2
            cv2.putText(canvas, text, (text_x, text_y), font, 1.0, self.TEXT_COLOR, 2)
            return canvas

        # Calculate layout - use fixed items_per_row and calculate thumbnail size to fit
        items_per_row = min(self.items_per_row, len(self.video_files))  # Don't exceed number of videos

        # Calculate thumbnail size to fit the desired number of items per row
        thumbnail_width, thumbnail_height = self._calculate_thumbnail_size(actual_width)

        # Calculate item height dynamically based on thumbnail size
        item_height = thumbnail_height + self.PROGRESS_BAR_HEIGHT + self.TEXT_HEIGHT + self.THUMBNAIL_MARGIN

        item_width = (actual_width - (items_per_row + 1) * self.THUMBNAIL_MARGIN) // items_per_row

        # Draw videos in grid
        for i, video_path in enumerate(self.video_files):
            row = i // items_per_row
            col = i % items_per_row

            # Skip if scrolled out of view
            if row < self.scroll_offset:
                continue
            if row > self.scroll_offset + (actual_height // item_height):
                break

            # Calculate position
            x = self.THUMBNAIL_MARGIN + col * (item_width + self.THUMBNAIL_MARGIN)
            y = self.THUMBNAIL_MARGIN + (row - self.scroll_offset) * item_height

            # Draw thumbnail background
            cv2.rectangle(canvas,
                          (x, y),
                          (x + thumbnail_width, y + thumbnail_height),
                          self.THUMBNAIL_BG_COLOR, -1)

            # Draw selection highlight
            if i == self.selected_index:
                cv2.rectangle(canvas,
                              (x - 2, y - 2),
                              (x + thumbnail_width + 2, y + thumbnail_height + 2),
                              self.SELECTED_COLOR, 3)

            # Draw thumbnail (already requested at the correct size)
            thumbnail = self.get_thumbnail_for_video(video_path, (thumbnail_width, thumbnail_height))
            resized_thumbnail = thumbnail

            # Ensure thumbnail doesn't exceed canvas bounds
            end_y = min(y + thumbnail_height, actual_height)
            end_x = min(x + thumbnail_width, actual_width)
            thumb_height = end_y - y
            thumb_width = end_x - x

            if thumb_height > 0 and thumb_width > 0:
                # Resize thumbnail to fit within bounds if necessary
                if thumb_height != thumbnail_height or thumb_width != thumbnail_width:
                    resized_thumbnail = cv2.resize(thumbnail, (thumb_width, thumb_height))

                canvas[y:end_y, x:end_x] = resized_thumbnail

            # Draw progress bar
            progress_y = y + thumbnail_height + 5
            progress_width = thumbnail_width
            progress = self.get_progress_for_video(video_path)

            # Progress background
            cv2.rectangle(canvas,
                          (x, progress_y),
                          (x + progress_width, progress_y + self.PROGRESS_BAR_HEIGHT),
                          self.PROGRESS_BG_COLOR, -1)

            # Progress fill
            if progress > 0:
                fill_width = int(progress_width * progress)
                cv2.rectangle(canvas,
                              (x, progress_y),
                              (x + fill_width, progress_y + self.PROGRESS_BAR_HEIGHT),
                              self.PROGRESS_FILL_COLOR, -1)

            # Draw filename, truncated if too long
            filename = video_path.name
            if len(filename) > 25:
                filename = filename[:22] + "..."

            text_y = progress_y + self.PROGRESS_BAR_HEIGHT + 20
            cv2.putText(canvas, filename, (x, text_y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, self.TEXT_COLOR, 2)

            # Draw progress percentage (right-aligned over the bar)
            if video_path in self.progress_data:
                progress_text = f"{progress * 100:.0f}%"
                text_size = cv2.getTextSize(progress_text, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1)[0]
                progress_text_x = x + progress_width - text_size[0]
                cv2.putText(canvas, progress_text, (progress_text_x, text_y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.4, self.TEXT_COLOR, 1)

        # Draw instructions
        instructions = [
            "Project View - Videos in current directory",
            "WASD: Navigate | E: Open video | Q: Fewer items per row | Y: More items per row | q: Quit | ESC: Back to editor",
            f"Showing {len(self.video_files)} videos | {items_per_row} per row | Thumbnail: {thumbnail_width}x{thumbnail_height}"
        ]

        for i, instruction in enumerate(instructions):
            y_pos = actual_height - 60 + i * 20
            cv2.putText(canvas, instruction, (10, y_pos),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, self.TEXT_COLOR, 1)

        return canvas

    def handle_key(self, key: int) -> str:
        """Handle keyboard input, returns action taken"""
        if key == 27:  # ESC
            return "back_to_editor"
        elif key == ord('q'):  # lowercase q - Quit
            return "quit"
        elif key == ord('e') or key == ord('E'):  # E - Open video
            if self.video_files and 0 <= self.selected_index < len(self.video_files):
                return f"open_video:{self.video_files[self.selected_index]}"
        elif key == ord('w') or key == ord('W'):  # W - Up
            current_items_per_row = min(self.items_per_row, len(self.video_files))
            if self.selected_index >= current_items_per_row:
                self.selected_index -= current_items_per_row
            else:
                self.selected_index = 0
            self._update_scroll()
        elif key == ord('s') or key == ord('S'):  # S - Down
            current_items_per_row = min(self.items_per_row, len(self.video_files))
            if self.selected_index + current_items_per_row < len(self.video_files):
                self.selected_index += current_items_per_row
            else:
                self.selected_index = len(self.video_files) - 1
            self._update_scroll()
        elif key == ord('a') or key == ord('A'):  # A - Left
            if self.selected_index > 0:
                self.selected_index -= 1
                self._update_scroll()
        elif key == ord('d') or key == ord('D'):  # D - Right
            if self.selected_index < len(self.video_files) - 1:
                self.selected_index += 1
                self._update_scroll()
        elif key == ord('Q'):  # uppercase Q - Fewer items per row (larger thumbnails)
            if self.items_per_row > 1:
                self.items_per_row -= 1
                print(f"Items per row: {self.items_per_row}")
        elif key == ord('y') or key == ord('Y'):  # Y - More items per row (smaller thumbnails)
            self.items_per_row += 1
            print(f"Items per row: {self.items_per_row}")

        return "none"

    def _update_scroll(self):
        """Update scroll offset based on selected item"""
        if not self.video_files:
            return

        # Use fixed items per row
        items_per_row = min(self.items_per_row, len(self.video_files))

        # Get window dimensions for calculations
        window_width, window_height = self._get_window_size()

        # Calculate thumbnail size and item height dynamically
        thumbnail_width, thumbnail_height = self._calculate_thumbnail_size(window_width)
        item_height = thumbnail_height + self.PROGRESS_BAR_HEIGHT + self.TEXT_HEIGHT + self.THUMBNAIL_MARGIN

        selected_row = self.selected_index // items_per_row
        visible_rows = max(1, window_height // item_height)

        # Calculate how many rows we can actually show
        total_rows = (len(self.video_files) + items_per_row - 1) // items_per_row

        # If we can show all rows, no scrolling needed
        if total_rows <= visible_rows:
            self.scroll_offset = 0
            return

        # Update scroll to keep selected item visible
        if selected_row < self.scroll_offset:
            self.scroll_offset = selected_row
        elif selected_row >= self.scroll_offset + visible_rows:
            self.scroll_offset = selected_row - visible_rows + 1

        # Ensure scroll offset doesn't go negative or beyond available content
        self.scroll_offset = max(0, min(self.scroll_offset, total_rows - visible_rows))
|
||||
@@ -2,12 +2,15 @@
|
||||
name = "croppa"
|
||||
version = "0.1.0"
|
||||
description = "Fast and lightweight video editor for cropping, zooming, and cutting"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.13"
|
||||
dependencies = [
|
||||
"opencv-python>=4.8.0",
|
||||
"numpy>=1.24.0"
|
||||
"numpy>=1.24.0",
|
||||
"Pillow>=10.0.0"
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
croppa = "main:main"
|
||||
|
||||
[tool.setuptools]
|
||||
py-modules = ["capture", "main", "project_view", "tracking", "utils"]
|
||||
@@ -68,8 +68,8 @@ Be careful to save and load settings when navigating this way
|
||||
- **Display** Points are rendered as blue dots per frame, in addition the previous tracking point (red) and next tracking point (magenta) are shown with yellow arrows indicating motion direction
|
||||
|
||||
#### Motion Tracking Navigation
|
||||
- **,**: Jump to previous tracking marker (previous frame that has one or more tracking points). Wrap-around supported.
|
||||
- **.**: Jump to next tracking marker (next frame that has one or more tracking points). Wrap-around supported.
|
||||
- **,**: Jump to previous tracking marker (previous frame that has one or more tracking points). Goes to first marker if at beginning.
|
||||
- **.**: Jump to next tracking marker (next frame that has one or more tracking points). Goes to last marker if at end.
|
||||
|
||||
### Markers and Looping
|
||||
- **1**: Set cut start marker at current frame
|
||||
|
||||
248
croppa/tracking.py
Normal file
248
croppa/tracking.py
Normal file
@@ -0,0 +1,248 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
from typing import Dict, Any
|
||||
|
||||
|
||||
class FeatureTracker:
|
||||
"""Semi-automatic feature tracking with SIFT/SURF/ORB support and full state serialization"""
|
||||
|
||||
def __init__(self):
|
||||
# Feature detection parameters
|
||||
self.detector_type = 'SIFT' # 'SIFT', 'SURF', 'ORB'
|
||||
self.max_features = 1000
|
||||
self.match_threshold = 0.7
|
||||
|
||||
# Tracking state
|
||||
self.features = {} # {frame_number: {'keypoints': [...], 'descriptors': [...], 'positions': [...]}}
|
||||
self.tracking_enabled = False
|
||||
self.auto_tracking = False
|
||||
|
||||
# Initialize detectors
|
||||
self._init_detectors()
|
||||
|
||||
def _init_detectors(self):
|
||||
"""Initialize feature detectors based on type"""
|
||||
try:
|
||||
if self.detector_type == 'SIFT':
|
||||
self.detector = cv2.SIFT_create(nfeatures=self.max_features)
|
||||
elif self.detector_type == 'SURF':
|
||||
# SURF requires opencv-contrib-python, fallback to SIFT
|
||||
print("Warning: SURF requires opencv-contrib-python package. Using SIFT instead.")
|
||||
self.detector = cv2.SIFT_create(nfeatures=self.max_features)
|
||||
self.detector_type = 'SIFT'
|
||||
elif self.detector_type == 'ORB':
|
||||
self.detector = cv2.ORB_create(nfeatures=self.max_features)
|
||||
else:
|
||||
raise ValueError(f"Unknown detector type: {self.detector_type}")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not initialize {self.detector_type} detector: {e}")
|
||||
# Fallback to ORB
|
||||
self.detector_type = 'ORB'
|
||||
self.detector = cv2.ORB_create(nfeatures=self.max_features)
|
||||
|
||||
def set_detector_type(self, detector_type: str):
|
||||
"""Change detector type and reinitialize"""
|
||||
if detector_type in ['SIFT', 'SURF', 'ORB']:
|
||||
self.detector_type = detector_type
|
||||
self._init_detectors()
|
||||
print(f"Switched to {detector_type} detector")
|
||||
else:
|
||||
print(f"Invalid detector type: {detector_type}")
|
||||
|
||||
def extract_features(self, frame: np.ndarray, frame_number: int, coord_mapper=None) -> bool:
|
||||
"""Extract features from a frame and store them"""
|
||||
try:
|
||||
# Convert to grayscale if needed
|
||||
if len(frame.shape) == 3:
|
||||
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
|
||||
else:
|
||||
gray = frame
|
||||
|
||||
# Extract keypoints and descriptors
|
||||
keypoints, descriptors = self.detector.detectAndCompute(gray, None)
|
||||
|
||||
if keypoints is None or descriptors is None:
|
||||
return False
|
||||
|
||||
# Map coordinates back to original frame space if mapper provided
|
||||
if coord_mapper:
|
||||
mapped_positions = []
|
||||
for kp in keypoints:
|
||||
orig_x, orig_y = coord_mapper(kp.pt[0], kp.pt[1])
|
||||
mapped_positions.append((int(orig_x), int(orig_y)))
|
||||
else:
|
||||
mapped_positions = [(int(kp.pt[0]), int(kp.pt[1])) for kp in keypoints]
|
||||
|
||||
# Store features
|
||||
self.features[frame_number] = {
|
||||
'keypoints': keypoints,
|
||||
'descriptors': descriptors,
|
||||
'positions': mapped_positions
|
||||
}
|
||||
|
||||
print(f"Extracted {len(keypoints)} features from frame {frame_number}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error extracting features from frame {frame_number}: {e}")
|
||||
return False
|
||||
|
||||
def extract_features_from_region(self, frame: np.ndarray, frame_number: int, coord_mapper=None) -> bool:
|
||||
"""Extract features from a frame and ADD them to existing features"""
|
||||
try:
|
||||
# Convert to grayscale if needed
|
||||
if len(frame.shape) == 3:
|
||||
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
|
||||
else:
|
||||
gray = frame
|
||||
|
||||
# Extract keypoints and descriptors
|
||||
keypoints, descriptors = self.detector.detectAndCompute(gray, None)
|
||||
|
||||
if keypoints is None or descriptors is None:
|
||||
return False
|
||||
|
||||
# Map coordinates back to original frame space if mapper provided
|
||||
if coord_mapper:
|
||||
mapped_positions = []
|
||||
for kp in keypoints:
|
||||
orig_x, orig_y = coord_mapper(kp.pt[0], kp.pt[1])
|
||||
mapped_positions.append((int(orig_x), int(orig_y)))
|
||||
else:
|
||||
mapped_positions = [(int(kp.pt[0]), int(kp.pt[1])) for kp in keypoints]
|
||||
|
||||
# Add to existing features or create new entry
|
||||
if frame_number in self.features:
|
||||
# Check if descriptor dimensions match
|
||||
existing_features = self.features[frame_number]
|
||||
if existing_features['descriptors'].shape[1] != descriptors.shape[1]:
|
||||
print(f"Warning: Descriptor dimension mismatch ({existing_features['descriptors'].shape[1]} vs {descriptors.shape[1]}). Cannot concatenate. Replacing features.")
|
||||
# Replace instead of concatenate when dimensions don't match
|
||||
existing_features['keypoints'] = keypoints
|
||||
existing_features['descriptors'] = descriptors
|
||||
existing_features['positions'] = mapped_positions
|
||||
else:
|
||||
# Append to existing features
|
||||
existing_features['keypoints'] = np.concatenate([existing_features['keypoints'], keypoints])
|
||||
existing_features['descriptors'] = np.concatenate([existing_features['descriptors'], descriptors])
|
||||
existing_features['positions'].extend(mapped_positions)
|
||||
print(f"Added {len(keypoints)} features to frame {frame_number} (total: {len(existing_features['positions'])})")
|
||||
else:
|
||||
# Create new features entry
|
||||
self.features[frame_number] = {
|
||||
'keypoints': keypoints,
|
||||
'descriptors': descriptors,
|
||||
'positions': mapped_positions
|
||||
}
|
||||
print(f"Extracted {len(keypoints)} features from frame {frame_number}")
|
||||
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error extracting features from frame {frame_number}: {e}")
|
||||
return False
|
||||
|
||||
def track_features_optical_flow(self, prev_frame, curr_frame, prev_points):
    """Track feature points between two frames with pyramidal Lucas-Kanade.

    Args:
        prev_frame: previous frame (BGR or already grayscale).
        curr_frame: current frame (BGR or already grayscale).
        prev_points: point array in the format expected by
            cv2.calcOpticalFlowPyrLK (e.g. float32 Nx1x2).

    Returns:
        (good_new, good_old, status) — successfully tracked points in the
        current frame, the corresponding source points, and the raw
        per-point status array. Returns (None, None, None) on any error.
    """
    try:
        def _to_gray(img):
            # LK operates on single-channel images; convert only if needed.
            if len(img.shape) == 3:
                return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            return img

        prev_gray = _to_gray(prev_frame)
        curr_gray = _to_gray(curr_frame)

        # Pyramidal LK settings: 15x15 search window, 2 pyramid levels,
        # terminate after 10 iterations or epsilon 0.03.
        lk_params = dict(
            winSize=(15, 15),
            maxLevel=2,
            criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03),
        )

        new_points, status, _ = cv2.calcOpticalFlowPyrLK(
            prev_gray, curr_gray, prev_points, None, **lk_params
        )

        # Keep only the points whose tracking succeeded (status == 1).
        good_new = new_points[status == 1]
        good_old = prev_points[status == 1]

        return good_new, good_old, status

    except Exception as e:
        print(f"Error in optical flow tracking: {e}")
        return None, None, None
|
||||
|
||||
def clear_features(self):
    """Drop every stored per-frame feature entry."""
    # Clear in place so any external references to the dict stay valid.
    self.features.clear()
    print("All features cleared")
|
||||
|
||||
def get_feature_count(self, frame_number: int) -> int:
    """Return the number of stored feature positions for a frame.

    Frames with no stored features yield 0.
    """
    # Guard clause: unknown frame means no features.
    if frame_number not in self.features:
        return 0
    return len(self.features[frame_number]['positions'])
|
||||
|
||||
def serialize_features(self) -> Dict[str, Any]:
    """Build a serialization-friendly snapshot of the feature table.

    Frame numbers become string keys. Only the pixel positions survive;
    keypoints and descriptors are intentionally dropped because they are
    too large to persist.
    """
    return {
        str(frame_num): {
            'positions': frame_data['positions'],
            'keypoints': None,    # Keypoints are not serialized (too large)
            'descriptors': None,  # Descriptors are not serialized (too large)
        }
        for frame_num, frame_data in self.features.items()
    }
|
||||
|
||||
def deserialize_features(self, serialized_data: Dict[str, Any]):
    """Restore the position-only feature table written by serialize_features.

    Existing features are discarded. String frame keys are converted back
    to ints; keypoints/descriptors were never persisted and come back None.
    """
    self.features.clear()

    for frame_key, frame_data in serialized_data.items():
        entry = {
            'positions': frame_data['positions'],
            'keypoints': None,
            'descriptors': None,
        }
        self.features[int(frame_key)] = entry

    print(f"Deserialized features for {len(self.features)} frames")
|
||||
|
||||
def get_state_dict(self) -> Dict[str, Any]:
    """Snapshot the tracker configuration plus its serialized features."""
    state = {
        'detector_type': self.detector_type,
        'max_features': self.max_features,
        'match_threshold': self.match_threshold,
        'tracking_enabled': self.tracking_enabled,
        'auto_tracking': self.auto_tracking,
    }
    # Features are reduced to their serializable form (positions only).
    state['features'] = self.serialize_features()
    return state
|
||||
|
||||
def load_state_dict(self, state_dict: Dict[str, Any]):
    """Restore tracker state produced by get_state_dict.

    Keys absent from state_dict leave the corresponding attribute
    untouched, so partial state dicts are accepted.
    """
    if 'detector_type' in state_dict:
        self.detector_type = state_dict['detector_type']
        # Detector objects are not serializable; rebuild them for the
        # restored detector type.
        self._init_detectors()

    # Plain scalar settings restore uniformly.
    for attr in ('max_features', 'match_threshold', 'tracking_enabled', 'auto_tracking'):
        if attr in state_dict:
            setattr(self, attr, state_dict[attr])

    if 'features' in state_dict:
        self.deserialize_features(state_dict['features'])

    print("Feature tracker state loaded")
|
||||
34
croppa/utils.py
Normal file
34
croppa/utils.py
Normal file
@@ -0,0 +1,34 @@
|
||||
import cv2
|
||||
import ctypes
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
|
||||
|
||||
def load_image_utf8(image_path):
    """Load an image from a path that may contain non-ASCII characters.

    cv2.imread cannot handle UTF-8 paths on Windows, so the file is decoded
    with PIL and then converted to OpenCV's BGR channel order.

    Args:
        image_path: path to the image file (str or Path).

    Returns:
        The image as a BGR numpy array.

    Raises:
        ValueError: if the file cannot be opened or decoded.
    """
    try:
        # Use PIL to load the image with UTF-8 path support.
        pil_image = Image.open(image_path)
        # Normalize palette ('P'), grayscale ('L'), and RGBA images to
        # 3-channel RGB first: COLOR_RGB2BGR assumes exactly three channels,
        # so feeding it a 1- or 4-channel array would fail or corrupt colors.
        rgb_image = pil_image.convert('RGB')
        # Convert to OpenCV's BGR layout.
        cv_image = cv2.cvtColor(np.array(rgb_image), cv2.COLOR_RGB2BGR)
        return cv_image
    except Exception as e:
        raise ValueError(f"Could not load image file: {image_path} - {e}")
|
||||
|
||||
|
||||
def get_active_window_title():
    """Return the title of the currently active (foreground) window.

    Windows-only: uses the Win32 user32 API via ctypes. Returns an empty
    string on any failure, including non-Windows platforms where
    ctypes.windll does not exist.
    """
    try:
        # Handle to the foreground window.
        hwnd = ctypes.windll.user32.GetForegroundWindow()

        # Title length in characters (excluding the terminating NUL).
        length = ctypes.windll.user32.GetWindowTextLengthW(hwnd)

        # Allocate length + 1 for the NUL terminator and copy the title in.
        buffer = ctypes.create_unicode_buffer(length + 1)
        ctypes.windll.user32.GetWindowTextW(hwnd, buffer, length + 1)

        return buffer.value
    except Exception:
        # A bare `except:` here would also swallow KeyboardInterrupt and
        # SystemExit; Exception covers the expected failures (AttributeError
        # off Windows, OSError from the API).
        return ""
|
||||
306
main.py
306
main.py
@@ -7,23 +7,78 @@ import argparse
|
||||
import shutil
|
||||
import time
|
||||
import threading
|
||||
import subprocess
|
||||
import json
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
|
||||
class Cv2BufferedCap:
    """Buffered wrapper around cv2.VideoCapture.

    Tracks the current frame index so sequential reads avoid a seek, while
    random access stays accurate by seeking before reading.
    """

    def __init__(self, video_path, backend=None):
        """Open the video and cache its basic properties.

        Args:
            video_path: path to the video file (str or Path).
            backend: optional cv2.CAP_* backend id; None selects CAP_ANY.

        Raises:
            ValueError: if the video cannot be opened.
        """
        self.video_path = video_path
        # cv2.VideoCapture's apiPreference must be an int; the previous code
        # passed None straight through, which the binding rejects. Map the
        # None default to CAP_ANY (backward compatible for int callers).
        api = cv2.CAP_ANY if backend is None else backend
        self.cap = cv2.VideoCapture(str(video_path), api)
        if not self.cap.isOpened():
            raise ValueError(f"Could not open video: {video_path}")

        # Cached video properties.
        self.total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        self.fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.frame_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.frame_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # Index of the most recently returned frame.
        self.current_frame = 0

    def __getattr__(self, name):
        """Delegate unknown attributes to the underlying cv2.VideoCapture."""
        return getattr(self.cap, name)

    def get_frame(self, frame_number):
        """Return the frame at `frame_number` (clamped to the valid range).

        Sequential access (current_frame + 1) reads directly; any other
        index seeks first so the result is always accurate.

        Raises:
            ValueError: if the decoder fails to produce the frame.
        """
        # Clamp to [0, total_frames - 1].
        frame_number = max(0, min(frame_number, self.total_frames - 1))

        if frame_number == self.current_frame + 1:
            # Fast path: the decoder is already positioned on this frame.
            ret, frame = self.cap.read()
        else:
            # Random access: position the decoder, then read.
            self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
            ret, frame = self.cap.read()

        if not ret:
            raise ValueError(f"Failed to read frame {frame_number}")

        self.current_frame = frame_number
        return frame

    def advance_frame(self, frames=1):
        """Advance by `frames` (may be negative) and return that frame."""
        return self.get_frame(self.current_frame + frames)

    def release(self):
        """Release the underlying video capture, if any."""
        if self.cap:
            self.cap.release()

    def isOpened(self):
        """Return True when the underlying capture exists and is open.

        The previous `self.cap and self.cap.isOpened()` returned None when
        cap was None; always return a real bool (still falsy-compatible).
        """
        return self.cap is not None and self.cap.isOpened()
|
||||
|
||||
|
||||
class MediaGrader:
|
||||
BASE_FRAME_DELAY_MS = 16
|
||||
# Configuration constants - matching croppa implementation
|
||||
TARGET_FPS = 80 # Target FPS for speed calculations
|
||||
SPEED_INCREMENT = 0.1
|
||||
MIN_PLAYBACK_SPEED = 0.05
|
||||
MAX_PLAYBACK_SPEED = 1.0
|
||||
|
||||
# Legacy constants for compatibility
|
||||
KEY_REPEAT_RATE_SEC = 0.5
|
||||
FAST_SEEK_ACTIVATION_TIME = 2.0
|
||||
FRAME_RENDER_TIME_MS = 50
|
||||
SPEED_INCREMENT = 0.2
|
||||
MIN_PLAYBACK_SPEED = 0.1
|
||||
MAX_PLAYBACK_SPEED = 100.0
|
||||
FAST_SEEK_MULTIPLIER = 60
|
||||
IMAGE_DISPLAY_DELAY_MS = 100
|
||||
|
||||
MONITOR_WIDTH = 2560
|
||||
MONITOR_HEIGHT = 1440
|
||||
@@ -108,30 +163,53 @@ class MediaGrader:
|
||||
# Get frame dimensions
|
||||
frame_height, frame_width = frame.shape[:2]
|
||||
|
||||
# Calculate aspect ratio
|
||||
frame_aspect_ratio = frame_width / frame_height
|
||||
monitor_aspect_ratio = self.MONITOR_WIDTH / self.MONITOR_HEIGHT
|
||||
# Calculate available height (subtract timeline height for videos)
|
||||
timeline_height = self.TIMELINE_HEIGHT if self.is_video(self.media_files[self.current_index]) else 0
|
||||
available_height = self.MONITOR_HEIGHT - timeline_height
|
||||
|
||||
# Determine if frame is vertical or horizontal relative to monitor
|
||||
if frame_aspect_ratio < monitor_aspect_ratio:
|
||||
# Frame is more vertical than monitor - maximize height
|
||||
display_height = self.MONITOR_HEIGHT
|
||||
display_width = int(display_height * frame_aspect_ratio)
|
||||
# Calculate scale to fit within monitor bounds while maintaining aspect ratio
|
||||
scale_x = self.MONITOR_WIDTH / frame_width
|
||||
scale_y = available_height / frame_height
|
||||
scale = min(scale_x, scale_y)
|
||||
|
||||
# Calculate display dimensions
|
||||
display_width = int(frame_width * scale)
|
||||
display_height = int(frame_height * scale)
|
||||
|
||||
# Resize the frame to maintain aspect ratio
|
||||
if scale != 1.0:
|
||||
resized_frame = cv2.resize(frame, (display_width, display_height), interpolation=cv2.INTER_AREA)
|
||||
else:
|
||||
# Frame is more horizontal than monitor - maximize width
|
||||
display_width = self.MONITOR_WIDTH
|
||||
display_height = int(display_width / frame_aspect_ratio)
|
||||
resized_frame = frame
|
||||
|
||||
# Resize window to calculated dimensions
|
||||
cv2.resizeWindow("Media Grader", display_width, display_height)
|
||||
# Create canvas with proper dimensions
|
||||
canvas_height = self.MONITOR_HEIGHT
|
||||
canvas_width = self.MONITOR_WIDTH
|
||||
canvas = np.zeros((canvas_height, canvas_width, 3), dtype=np.uint8)
|
||||
|
||||
# Center the resized frame on canvas
|
||||
start_y = (available_height - display_height) // 2
|
||||
start_x = (self.MONITOR_WIDTH - display_width) // 2
|
||||
|
||||
# Ensure frame fits within canvas bounds
|
||||
end_y = min(start_y + display_height, available_height)
|
||||
end_x = min(start_x + display_width, self.MONITOR_WIDTH)
|
||||
actual_height = end_y - start_y
|
||||
actual_width = end_x - start_x
|
||||
|
||||
if actual_height > 0 and actual_width > 0:
|
||||
canvas[start_y:end_y, start_x:end_x] = resized_frame[:actual_height, :actual_width]
|
||||
|
||||
# Resize window to full monitor size
|
||||
cv2.resizeWindow("Media Grader", self.MONITOR_WIDTH, self.MONITOR_HEIGHT)
|
||||
|
||||
# Center the window on screen
|
||||
x_position = (self.MONITOR_WIDTH - display_width) // 2
|
||||
y_position = (self.MONITOR_HEIGHT - display_height) // 2
|
||||
x_position = 0
|
||||
y_position = 0
|
||||
cv2.moveWindow("Media Grader", x_position, y_position)
|
||||
|
||||
# Display the frame
|
||||
cv2.imshow("Media Grader", frame)
|
||||
# Display the canvas with properly aspect-ratioed frame
|
||||
cv2.imshow("Media Grader", canvas)
|
||||
|
||||
def find_media_files(self) -> List[Path]:
|
||||
"""Find all media files recursively in the directory"""
|
||||
@@ -158,19 +236,18 @@ class MediaGrader:
|
||||
|
||||
def calculate_frame_delay(self) -> int:
    """Frame delay in milliseconds for the current playback speed.

    The base delay is divided by the speed multiplier and floored at 1 ms
    so very high speeds never produce a zero/negative wait.
    """
    return max(1, int(self.BASE_FRAME_DELAY_MS / self.playback_speed))
|
||||
|
||||
def calculate_frames_to_skip(self) -> int:
|
||||
"""Calculate how many frames to skip for high-speed playback"""
|
||||
if self.playback_speed <= 1.0:
|
||||
return 0
|
||||
elif self.playback_speed <= 2.0:
|
||||
return 0
|
||||
elif self.playback_speed <= 5.0:
|
||||
return int(self.playback_speed - 1)
|
||||
# Round to 2 decimals to handle floating point precision issues
|
||||
speed = round(self.playback_speed, 2)
|
||||
if speed >= 1.0:
|
||||
# Speed >= 1: maximum FPS (no delay)
|
||||
return 1
|
||||
else:
|
||||
return int(self.playback_speed * 2)
|
||||
# Speed < 1: scale FPS based on speed
|
||||
# Formula: fps = TARGET_FPS * speed, so delay = 1000 / fps
|
||||
target_fps = self.TARGET_FPS * speed
|
||||
delay_ms = int(1000 / target_fps)
|
||||
return max(1, delay_ms)
|
||||
|
||||
|
||||
def load_media(self, file_path: Path) -> bool:
|
||||
"""Load media file for display"""
|
||||
@@ -178,44 +255,18 @@ class MediaGrader:
|
||||
self.current_cap.release()
|
||||
|
||||
if self.is_video(file_path):
|
||||
# Try different backends for better performance
|
||||
# For video files: FFmpeg is usually best, DirectShow is for cameras
|
||||
backends_to_try = []
|
||||
if hasattr(cv2, 'CAP_FFMPEG'): # FFmpeg - best for video files
|
||||
backends_to_try.append(cv2.CAP_FFMPEG)
|
||||
if hasattr(cv2, 'CAP_DSHOW'): # DirectShow - usually for cameras, but try as fallback
|
||||
backends_to_try.append(cv2.CAP_DSHOW)
|
||||
backends_to_try.append(cv2.CAP_ANY) # Final fallback
|
||||
try:
|
||||
# Use Cv2BufferedCap for better frame handling
|
||||
self.current_cap = Cv2BufferedCap(file_path)
|
||||
self.total_frames = self.current_cap.total_frames
|
||||
self.current_frame = 0
|
||||
|
||||
self.current_cap = None
|
||||
for backend in backends_to_try:
|
||||
try:
|
||||
self.current_cap = cv2.VideoCapture(str(file_path), backend)
|
||||
if self.current_cap.isOpened():
|
||||
# Optimize buffer settings for better performance
|
||||
self.current_cap.set(cv2.CAP_PROP_BUFFERSIZE, 1) # Minimize buffer to reduce latency
|
||||
# Try to set hardware acceleration if available
|
||||
if hasattr(cv2, 'CAP_PROP_HW_ACCELERATION'):
|
||||
self.current_cap.set(cv2.CAP_PROP_HW_ACCELERATION, cv2.VIDEO_ACCELERATION_ANY)
|
||||
break
|
||||
self.current_cap.release()
|
||||
except:
|
||||
continue
|
||||
print(f"Loaded: {file_path.name} | Frames: {self.total_frames} | FPS: {self.current_cap.fps:.2f}")
|
||||
|
||||
if not self.current_cap or not self.current_cap.isOpened():
|
||||
print(f"Warning: Could not open video file {file_path.name} (unsupported codec)")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not open video file {file_path.name}: {e}")
|
||||
return False
|
||||
|
||||
self.total_frames = int(self.current_cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
||||
self.current_frame = 0
|
||||
|
||||
# Get codec information for debugging
|
||||
fourcc = int(self.current_cap.get(cv2.CAP_PROP_FOURCC))
|
||||
codec = "".join([chr((fourcc >> 8 * i) & 0xFF) for i in range(4)])
|
||||
backend = self.current_cap.getBackendName()
|
||||
|
||||
print(f"Loaded: {file_path.name} | Codec: {codec} | Backend: {backend} | Frames: {self.total_frames}")
|
||||
|
||||
else:
|
||||
self.current_cap = None
|
||||
self.total_frames = 1
|
||||
@@ -235,12 +286,13 @@ class MediaGrader:
|
||||
if not self.current_cap:
|
||||
return False
|
||||
|
||||
ret, frame = self.current_cap.read()
|
||||
if ret:
|
||||
self.current_display_frame = frame
|
||||
self.current_frame = int(self.current_cap.get(cv2.CAP_PROP_POS_FRAMES))
|
||||
try:
|
||||
# Use Cv2BufferedCap to get frame
|
||||
self.current_display_frame = self.current_cap.get_frame(self.current_frame)
|
||||
return True
|
||||
return False
|
||||
except Exception as e:
|
||||
print(f"Failed to load frame {self.current_frame}: {e}")
|
||||
return False
|
||||
else:
|
||||
frame = cv2.imread(str(self.media_files[self.current_index]))
|
||||
if frame is not None:
|
||||
@@ -623,14 +675,9 @@ class MediaGrader:
|
||||
# Advance to next frame in this segment
|
||||
self.segment_current_frames[i] += 1
|
||||
|
||||
# Get the segment boundaries
|
||||
start_frame = self.segment_positions[i]
|
||||
end_frame = self.segment_end_positions[i]
|
||||
|
||||
# Loop within the segment bounds
|
||||
if self.segment_current_frames[i] > end_frame:
|
||||
# Loop back to start of segment
|
||||
self.segment_current_frames[i] = start_frame
|
||||
# Loop over the entire video, starting from the segment's initial position
|
||||
if self.segment_current_frames[i] >= self.total_frames:
|
||||
self.segment_current_frames[i] = 0
|
||||
|
||||
# Ensure we don't go beyond the video cache
|
||||
if self.segment_current_frames[i] < len(self.video_frame_cache):
|
||||
@@ -672,7 +719,7 @@ class MediaGrader:
|
||||
# Draw timeline
|
||||
self.draw_timeline(frame)
|
||||
|
||||
# Maintain aspect ratio when displaying
|
||||
# Display with proper aspect ratio
|
||||
self.display_with_aspect_ratio(frame)
|
||||
|
||||
def display_multi_segment_frame(self):
|
||||
@@ -789,7 +836,7 @@ class MediaGrader:
|
||||
# Draw multi-segment timeline
|
||||
self.draw_multi_segment_timeline(combined_frame)
|
||||
|
||||
# Maintain aspect ratio when displaying
|
||||
# Display with proper aspect ratio
|
||||
self.display_with_aspect_ratio(combined_frame)
|
||||
|
||||
def draw_multi_segment_timeline(self, frame):
|
||||
@@ -904,38 +951,30 @@ class MediaGrader:
|
||||
target_frame = max(0, min(target_frame, self.total_frames - 1))
|
||||
|
||||
# Seek to target frame
|
||||
self.current_cap.set(cv2.CAP_PROP_POS_FRAMES, target_frame)
|
||||
self.current_frame = target_frame
|
||||
self.load_current_frame()
|
||||
|
||||
def advance_frame(self):
|
||||
"""Advance to next frame(s) based on playback speed"""
|
||||
if (
|
||||
not self.is_video(self.media_files[self.current_index])
|
||||
or not self.is_playing
|
||||
):
|
||||
return
|
||||
"""Advance to next frame - handles playback speed and marker looping"""
|
||||
if not self.is_playing:
|
||||
return True
|
||||
|
||||
if self.multi_segment_mode:
|
||||
self.update_segment_frames()
|
||||
return True
|
||||
else:
|
||||
frames_to_skip = self.calculate_frames_to_skip()
|
||||
# Always advance by 1 frame - speed is controlled by delay timing
|
||||
new_frame = self.current_frame + 1
|
||||
|
||||
for _ in range(frames_to_skip + 1):
|
||||
ret, frame = self.current_cap.read()
|
||||
if not ret:
|
||||
actual_frame = int(self.current_cap.get(cv2.CAP_PROP_POS_FRAMES))
|
||||
if actual_frame < self.total_frames - 5:
|
||||
print(f"Frame count mismatch! Reported: {self.total_frames}, Actual: {actual_frame}")
|
||||
self.total_frames = actual_frame
|
||||
return False
|
||||
|
||||
self.current_display_frame = frame
|
||||
self.current_frame = int(self.current_cap.get(cv2.CAP_PROP_POS_FRAMES))
|
||||
# Handle looping bounds
|
||||
if new_frame >= self.total_frames:
|
||||
# Loop to beginning
|
||||
new_frame = 0
|
||||
|
||||
# Update current frame and load it
|
||||
self.current_frame = new_frame
|
||||
self.update_watch_tracking()
|
||||
|
||||
return True
|
||||
return self.load_current_frame()
|
||||
|
||||
def seek_video(self, frames_delta: int):
|
||||
"""Seek video by specified number of frames"""
|
||||
@@ -952,7 +991,7 @@ class MediaGrader:
|
||||
0, min(self.current_frame + frames_delta, self.total_frames - 1)
|
||||
)
|
||||
|
||||
self.current_cap.set(cv2.CAP_PROP_POS_FRAMES, target_frame)
|
||||
self.current_frame = target_frame
|
||||
self.load_current_frame()
|
||||
|
||||
def process_seek_key(self, key: int) -> bool:
|
||||
@@ -1165,32 +1204,42 @@ class MediaGrader:
|
||||
cv2.setWindowTitle("Media Grader", window_title)
|
||||
|
||||
while True:
|
||||
# Update display
|
||||
self.display_current_frame()
|
||||
|
||||
if self.is_video(current_file):
|
||||
if self.is_seeking:
|
||||
delay = self.FRAME_RENDER_TIME_MS
|
||||
else:
|
||||
delay = self.calculate_frame_delay()
|
||||
# Calculate appropriate delay based on playback state
|
||||
if self.is_playing and self.is_video(current_file):
|
||||
# Use calculated frame delay for proper playback speed
|
||||
delay_ms = self.calculate_frame_delay()
|
||||
else:
|
||||
delay = self.IMAGE_DISPLAY_DELAY_MS
|
||||
# Use minimal delay for immediate responsiveness when not playing
|
||||
delay_ms = 1
|
||||
|
||||
key = cv2.waitKey(delay) & 0xFF
|
||||
# Auto advance frame when playing (videos only)
|
||||
if self.is_playing and self.is_video(current_file):
|
||||
self.advance_frame()
|
||||
|
||||
# Key capture with appropriate delay
|
||||
key = cv2.waitKey(delay_ms) & 0xFF
|
||||
|
||||
if key == ord("q") or key == 27:
|
||||
return
|
||||
elif key == ord(" "):
|
||||
self.is_playing = not self.is_playing
|
||||
elif key == ord("s"):
|
||||
self.playback_speed = max(
|
||||
self.MIN_PLAYBACK_SPEED,
|
||||
self.playback_speed - self.SPEED_INCREMENT,
|
||||
)
|
||||
# Speed control only for videos
|
||||
if self.is_video(current_file):
|
||||
self.playback_speed = max(
|
||||
self.MIN_PLAYBACK_SPEED,
|
||||
self.playback_speed - self.SPEED_INCREMENT,
|
||||
)
|
||||
elif key == ord("w"):
|
||||
self.playback_speed = min(
|
||||
self.MAX_PLAYBACK_SPEED,
|
||||
self.playback_speed + self.SPEED_INCREMENT,
|
||||
)
|
||||
# Speed control only for videos
|
||||
if self.is_video(current_file):
|
||||
self.playback_speed = min(
|
||||
self.MAX_PLAYBACK_SPEED,
|
||||
self.playback_speed + self.SPEED_INCREMENT,
|
||||
)
|
||||
elif self.process_seek_key(key):
|
||||
continue
|
||||
elif key == ord("n"):
|
||||
@@ -1229,17 +1278,6 @@ class MediaGrader:
|
||||
if self.is_seeking and self.current_seek_key is not None:
|
||||
self.process_seek_key(self.current_seek_key)
|
||||
|
||||
if (
|
||||
self.is_playing
|
||||
and self.is_video(current_file)
|
||||
and not self.is_seeking
|
||||
):
|
||||
if not self.advance_frame():
|
||||
# Video reached the end, restart it instead of navigating
|
||||
self.current_cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
|
||||
self.current_frame = 0
|
||||
self.load_current_frame()
|
||||
|
||||
if key not in [ord("p"), ord("u"), ord("1"), ord("2"), ord("3"), ord("4"), ord("5")]:
|
||||
print("Navigating to (pu12345): ", self.current_index)
|
||||
self.current_index += 1
|
||||
|
||||
57
uv.lock
generated
57
uv.lock
generated
@@ -15,12 +15,14 @@ source = { virtual = "croppa" }
|
||||
dependencies = [
|
||||
{ name = "numpy" },
|
||||
{ name = "opencv-python" },
|
||||
{ name = "pillow" },
|
||||
]
|
||||
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "numpy", specifier = ">=1.24.0" },
|
||||
{ name = "opencv-python", specifier = ">=4.8.0" },
|
||||
{ name = "pillow", specifier = ">=10.0.0" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -85,6 +87,61 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/fa/80/eb88edc2e2b11cd2dd2e56f1c80b5784d11d6e6b7f04a1145df64df40065/opencv_python-4.12.0.88-cp37-abi3-win_amd64.whl", hash = "sha256:d98edb20aa932fd8ebd276a72627dad9dc097695b3d435a4257557bbb49a79d2", size = 39000307, upload-time = "2025-07-07T09:14:16.641Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pillow"
|
||||
version = "11.3.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/d0d6dea55cd152ce3d6767bb38a8fc10e33796ba4ba210cbab9354b6d238/pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523", size = 47113069, upload-time = "2025-07-01T09:16:30.666Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/1e/93/0952f2ed8db3a5a4c7a11f91965d6184ebc8cd7cbb7941a260d5f018cd2d/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd", size = 2128328, upload-time = "2025-07-01T09:14:35.276Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4b/e8/100c3d114b1a0bf4042f27e0f87d2f25e857e838034e98ca98fe7b8c0a9c/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8", size = 2170652, upload-time = "2025-07-01T09:14:37.203Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/aa/86/3f758a28a6e381758545f7cdb4942e1cb79abd271bea932998fc0db93cb6/pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f", size = 2227443, upload-time = "2025-07-01T09:14:39.344Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/01/f4/91d5b3ffa718df2f53b0dc109877993e511f4fd055d7e9508682e8aba092/pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c", size = 5278474, upload-time = "2025-07-01T09:14:41.843Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f9/0e/37d7d3eca6c879fbd9dba21268427dffda1ab00d4eb05b32923d4fbe3b12/pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd", size = 4686038, upload-time = "2025-07-01T09:14:44.008Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/b0/3426e5c7f6565e752d81221af9d3676fdbb4f352317ceafd42899aaf5d8a/pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e", size = 5864407, upload-time = "2025-07-03T13:10:15.628Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fc/c1/c6c423134229f2a221ee53f838d4be9d82bab86f7e2f8e75e47b6bf6cd77/pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1", size = 7639094, upload-time = "2025-07-03T13:10:21.857Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ba/c9/09e6746630fe6372c67c648ff9deae52a2bc20897d51fa293571977ceb5d/pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805", size = 5973503, upload-time = "2025-07-01T09:14:45.698Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d5/1c/a2a29649c0b1983d3ef57ee87a66487fdeb45132df66ab30dd37f7dbe162/pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8", size = 6642574, upload-time = "2025-07-01T09:14:47.415Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/36/de/d5cc31cc4b055b6c6fd990e3e7f0f8aaf36229a2698501bcb0cdf67c7146/pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2", size = 6084060, upload-time = "2025-07-01T09:14:49.636Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d5/ea/502d938cbaeec836ac28a9b730193716f0114c41325db428e6b280513f09/pillow-11.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b", size = 6721407, upload-time = "2025-07-01T09:14:51.962Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/45/9c/9c5e2a73f125f6cbc59cc7087c8f2d649a7ae453f83bd0362ff7c9e2aee2/pillow-11.3.0-cp313-cp313-win32.whl", hash = "sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3", size = 6273841, upload-time = "2025-07-01T09:14:54.142Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/23/85/397c73524e0cd212067e0c969aa245b01d50183439550d24d9f55781b776/pillow-11.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51", size = 6978450, upload-time = "2025-07-01T09:14:56.436Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/17/d2/622f4547f69cd173955194b78e4d19ca4935a1b0f03a302d655c9f6aae65/pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580", size = 2423055, upload-time = "2025-07-01T09:14:58.072Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/dd/80/a8a2ac21dda2e82480852978416cfacd439a4b490a501a288ecf4fe2532d/pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e", size = 5281110, upload-time = "2025-07-01T09:14:59.79Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/44/d6/b79754ca790f315918732e18f82a8146d33bcd7f4494380457ea89eb883d/pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d", size = 4689547, upload-time = "2025-07-01T09:15:01.648Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/49/20/716b8717d331150cb00f7fdd78169c01e8e0c219732a78b0e59b6bdb2fd6/pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced", size = 5901554, upload-time = "2025-07-03T13:10:27.018Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/74/cf/a9f3a2514a65bb071075063a96f0a5cf949c2f2fce683c15ccc83b1c1cab/pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c", size = 7669132, upload-time = "2025-07-03T13:10:33.01Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/98/3c/da78805cbdbee9cb43efe8261dd7cc0b4b93f2ac79b676c03159e9db2187/pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8", size = 6005001, upload-time = "2025-07-01T09:15:03.365Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6c/fa/ce044b91faecf30e635321351bba32bab5a7e034c60187fe9698191aef4f/pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59", size = 6668814, upload-time = "2025-07-01T09:15:05.655Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7b/51/90f9291406d09bf93686434f9183aba27b831c10c87746ff49f127ee80cb/pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe", size = 6113124, upload-time = "2025-07-01T09:15:07.358Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/cd/5a/6fec59b1dfb619234f7636d4157d11fb4e196caeee220232a8d2ec48488d/pillow-11.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c", size = 6747186, upload-time = "2025-07-01T09:15:09.317Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/49/6b/00187a044f98255225f172de653941e61da37104a9ea60e4f6887717e2b5/pillow-11.3.0-cp313-cp313t-win32.whl", hash = "sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788", size = 6277546, upload-time = "2025-07-01T09:15:11.311Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e8/5c/6caaba7e261c0d75bab23be79f1d06b5ad2a2ae49f028ccec801b0e853d6/pillow-11.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31", size = 6985102, upload-time = "2025-07-01T09:15:13.164Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f3/7e/b623008460c09a0cb38263c93b828c666493caee2eb34ff67f778b87e58c/pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e", size = 2424803, upload-time = "2025-07-01T09:15:15.695Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/73/f4/04905af42837292ed86cb1b1dabe03dce1edc008ef14c473c5c7e1443c5d/pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12", size = 5278520, upload-time = "2025-07-01T09:15:17.429Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/41/b0/33d79e377a336247df6348a54e6d2a2b85d644ca202555e3faa0cf811ecc/pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a", size = 4686116, upload-time = "2025-07-01T09:15:19.423Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/49/2d/ed8bc0ab219ae8768f529597d9509d184fe8a6c4741a6864fea334d25f3f/pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632", size = 5864597, upload-time = "2025-07-03T13:10:38.404Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b5/3d/b932bb4225c80b58dfadaca9d42d08d0b7064d2d1791b6a237f87f661834/pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673", size = 7638246, upload-time = "2025-07-03T13:10:44.987Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/09/b5/0487044b7c096f1b48f0d7ad416472c02e0e4bf6919541b111efd3cae690/pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027", size = 5973336, upload-time = "2025-07-01T09:15:21.237Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a8/2d/524f9318f6cbfcc79fbc004801ea6b607ec3f843977652fdee4857a7568b/pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77", size = 6642699, upload-time = "2025-07-01T09:15:23.186Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6f/d2/a9a4f280c6aefedce1e8f615baaa5474e0701d86dd6f1dede66726462bbd/pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874", size = 6083789, upload-time = "2025-07-01T09:15:25.1Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fe/54/86b0cd9dbb683a9d5e960b66c7379e821a19be4ac5810e2e5a715c09a0c0/pillow-11.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a", size = 6720386, upload-time = "2025-07-01T09:15:27.378Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e7/95/88efcaf384c3588e24259c4203b909cbe3e3c2d887af9e938c2022c9dd48/pillow-11.3.0-cp314-cp314-win32.whl", hash = "sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214", size = 6370911, upload-time = "2025-07-01T09:15:29.294Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2e/cc/934e5820850ec5eb107e7b1a72dd278140731c669f396110ebc326f2a503/pillow-11.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635", size = 7117383, upload-time = "2025-07-01T09:15:31.128Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/e9/9c0a616a71da2a5d163aa37405e8aced9a906d574b4a214bede134e731bc/pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6", size = 2511385, upload-time = "2025-07-01T09:15:33.328Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1a/33/c88376898aff369658b225262cd4f2659b13e8178e7534df9e6e1fa289f6/pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae", size = 5281129, upload-time = "2025-07-01T09:15:35.194Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1f/70/d376247fb36f1844b42910911c83a02d5544ebd2a8bad9efcc0f707ea774/pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653", size = 4689580, upload-time = "2025-07-01T09:15:37.114Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/eb/1c/537e930496149fbac69efd2fc4329035bbe2e5475b4165439e3be9cb183b/pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6", size = 5902860, upload-time = "2025-07-03T13:10:50.248Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bd/57/80f53264954dcefeebcf9dae6e3eb1daea1b488f0be8b8fef12f79a3eb10/pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36", size = 7670694, upload-time = "2025-07-03T13:10:56.432Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/70/ff/4727d3b71a8578b4587d9c276e90efad2d6fe0335fd76742a6da08132e8c/pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b", size = 6005888, upload-time = "2025-07-01T09:15:39.436Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/05/ae/716592277934f85d3be51d7256f3636672d7b1abfafdc42cf3f8cbd4b4c8/pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477", size = 6670330, upload-time = "2025-07-01T09:15:41.269Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e7/bb/7fe6cddcc8827b01b1a9766f5fdeb7418680744f9082035bdbabecf1d57f/pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50", size = 6114089, upload-time = "2025-07-01T09:15:43.13Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8b/f5/06bfaa444c8e80f1a8e4bff98da9c83b37b5be3b1deaa43d27a0db37ef84/pillow-11.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b", size = 6748206, upload-time = "2025-07-01T09:15:44.937Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f0/77/bc6f92a3e8e6e46c0ca78abfffec0037845800ea38c73483760362804c41/pillow-11.3.0-cp314-cp314t-win32.whl", hash = "sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12", size = 6377370, upload-time = "2025-07-01T09:15:46.673Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4a/82/3a721f7d69dca802befb8af08b7c79ebcab461007ce1c18bd91a5d5896f9/pillow-11.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db", size = 7121500, upload-time = "2025-07-01T09:15:48.512Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/89/c7/5572fa4a3f45740eaab6ae86fcdf7195b55beac1371ac8c619d880cfe948/pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa", size = 2512835, upload-time = "2025-07-01T09:15:50.399Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ruff"
|
||||
version = "0.12.12"
|
||||
|
||||
Reference in New Issue
Block a user