Enhance template matching in VideoEditor by applying motion tracking offsets
This commit updates the template matching logic in VideoEditor to apply motion tracking offsets to the current frame before performing template matching. The new _apply_motion_tracking_offset method builds an offset frame by shifting the content according to the base tracking position, which improves tracking accuracy. The previous crop-region logic is removed, simplifying the code and ensuring that template matching operates on the adjusted frame.
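For reference, the heart of the new helper is an integer, zero-padded shift of the frame contents. Below is a minimal standalone sketch of that region arithmetic; the function name and the demo array are illustrative only and are not part of VideoEditor.

import numpy as np

def shift_frame(frame, offset_x, offset_y):
    """Shift frame content by an integer pixel offset, zero-filling uncovered pixels.

    Mirrors the source/destination region arithmetic used by
    _apply_motion_tracking_offset in the diff below.
    """
    h, w = frame.shape[:2]
    shifted = np.zeros_like(frame)
    sx, sy = int(offset_x), int(offset_y)
    if sx == 0 and sy == 0:
        return frame.copy()

    # Source region: the part of the original frame still visible after the shift.
    src_x1, src_y1 = max(0, -sx), max(0, -sy)
    src_x2, src_y2 = min(w, w - sx), min(h, h - sy)
    # Destination region: where that content lands in the shifted frame.
    dst_x1, dst_y1 = max(0, sx), max(0, sy)
    dst_x2, dst_y2 = min(w, w + sx), min(h, h + sy)

    if src_x2 > src_x1 and src_y2 > src_y1:
        shifted[dst_y1:dst_y2, dst_x1:dst_x2] = frame[src_y1:src_y2, src_x1:src_x2]
    return shifted

# Example: a 4x4 frame shifted by (+1, +1) moves content down and to the right by one pixel.
demo = np.arange(16, dtype=np.uint8).reshape(4, 4)
print(shift_frame(demo, 1, 1))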
@@ -1683,26 +1683,16 @@ class VideoEditor:
         template_offset = None
         if self.template_matching_enabled and self.tracking_template is not None:
             if self.current_display_frame is not None:
-                # Use only the cropped region for much faster template matching
-                if self.crop_rect:
-                    crop_x, crop_y, crop_w, crop_h = self.crop_rect
-                    # Extract only the cropped region from raw frame
-                    cropped_frame = self.current_display_frame[crop_y:crop_y+crop_h, crop_x:crop_x+crop_w]
-                    if cropped_frame is not None and cropped_frame.size > 0:
-                        # Track template in cropped frame (much faster!)
-                        result = self.track_template(cropped_frame)
-                        if result:
-                            center_x, center_y, confidence = result
-                            # Map from cropped frame coordinates to raw frame coordinates
-                            # Add crop offset back
-                            raw_x = center_x + crop_x
-                            raw_y = center_y + crop_y
-                            template_offset = (raw_x, raw_y)
-                else:
-                    # No crop - use full frame
-                    result = self.track_template(self.current_display_frame)
+                # Apply motion tracking offset to the frame before template matching
+                # This ensures template matching works on the offset frame
+                offset_frame = self._apply_motion_tracking_offset(self.current_display_frame, base_pos)
+                if offset_frame is not None:
+                    result = self.track_template(offset_frame)
+                    if result:
+                        center_x, center_y, confidence = result
+                        print(f"DEBUG: Template match found at ({center_x}, {center_y}) with confidence {confidence:.2f}")
+
+                        # Template matching returns coordinates in offset frame space
+                        template_offset = (center_x, center_y)

         # Calculate offset from feature tracking if enabled
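The track_template helper called above is not part of this diff. For context, a helper of that shape is typically a thin wrapper around OpenCV template matching; the sketch below is hypothetical (standalone function, explicit template argument, and the 0.5 confidence threshold are assumptions, not taken from the repository).

import cv2

def track_template(frame, template):
    """Hypothetical track_template-style helper: normalized cross-correlation via
    OpenCV, returning the match center and confidence, or None below a threshold."""
    result = cv2.matchTemplate(frame, template, cv2.TM_CCOEFF_NORMED)
    _, max_val, _, max_loc = cv2.minMaxLoc(result)
    if max_val < 0.5:  # confidence threshold is an assumption
        return None
    th, tw = template.shape[:2]
    center_x = max_loc[0] + tw // 2
    center_y = max_loc[1] + th // 2
    return (center_x, center_y, max_val)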
@@ -1791,30 +1781,62 @@ class VideoEditor:
                 return (x1 + t * (x2 - x1), y1 + t * (y2 - y1))
         return None
 
+    def _apply_motion_tracking_offset(self, frame, base_pos):
+        """Apply motion tracking offset to frame for template matching"""
+        if base_pos is None:
+            return frame
+
+        try:
+            # Get the motion tracking offset
+            offset_x, offset_y = base_pos
+
+            # Create offset frame by shifting the content
+            h, w = frame.shape[:2]
+            offset_frame = np.zeros_like(frame)
+
+            # Calculate the shift
+            shift_x = int(offset_x)
+            shift_y = int(offset_y)
+
+            # Apply the offset
+            if shift_x != 0 or shift_y != 0:
+                # Calculate source and destination regions
+                src_x1 = max(0, -shift_x)
+                src_y1 = max(0, -shift_y)
+                src_x2 = min(w, w - shift_x)
+                src_y2 = min(h, h - shift_y)
+
+                dst_x1 = max(0, shift_x)
+                dst_y1 = max(0, shift_y)
+                dst_x2 = min(w, w + shift_x)
+                dst_y2 = min(h, h + shift_y)
+
+                if src_x2 > src_x1 and src_y2 > src_y1 and dst_x2 > dst_x1 and dst_y2 > dst_y1:
+                    offset_frame[dst_y1:dst_y2, dst_x1:dst_x2] = frame[src_y1:src_y2, src_x1:src_x2]
+                else:
+                    offset_frame = frame.copy()
+            else:
+                offset_frame = frame.copy()
+
+            return offset_frame
+
+        except Exception as e:
+            print(f"Error applying motion tracking offset: {e}")
+            return frame
+
     def _get_template_matching_position(self, frame_number):
         """Get template matching position and confidence for a frame"""
         if not self.template_matching_enabled or self.tracking_template is None:
             return None
 
         if self.current_display_frame is not None:
-            # Use only the cropped region for much faster template matching
-            if self.crop_rect:
-                crop_x, crop_y, crop_w, crop_h = self.crop_rect
-                # Extract only the cropped region from raw frame
-                cropped_frame = self.current_display_frame[crop_y:crop_y+crop_h, crop_x:crop_x+crop_w]
-                if cropped_frame is not None and cropped_frame.size > 0:
-                    # Track template in cropped frame (much faster!)
-                    result = self.track_template(cropped_frame)
-                    if result:
-                        center_x, center_y, confidence = result
-                        # Map from cropped frame coordinates to raw frame coordinates
-                        # Add crop offset back
-                        raw_x = center_x + crop_x
-                        raw_y = center_y + crop_y
-                        return (raw_x, raw_y, confidence)
-            else:
-                # No crop - use full frame
-                result = self.track_template(self.current_display_frame)
+            # Get base position for motion tracking offset
+            base_pos = self._get_manual_tracking_position(frame_number)
+
+            # Apply motion tracking offset to the frame before template matching
+            offset_frame = self._apply_motion_tracking_offset(self.current_display_frame, base_pos)
+            if offset_frame is not None:
+                result = self.track_template(offset_frame)
+                if result:
+                    center_x, center_y, confidence = result
+                    return (center_x, center_y, confidence)
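The base_pos passed to the new helper comes from _get_manual_tracking_position, whose linear interpolation return is visible as a context line at the top of this hunk. A hypothetical sketch of that kind of keyframe lookup follows; the standalone function and the dictionary layout are assumptions, only the interpolation formula is taken from the diff.

def get_manual_tracking_position(tracking_points, frame_number):
    """Hypothetical keyframe interpolation: tracking_points maps frame_number -> (x, y)."""
    frames = sorted(tracking_points)
    if not frames:
        return None
    # Clamp to the first/last keyframe outside the tracked range.
    if frame_number <= frames[0]:
        return tracking_points[frames[0]]
    if frame_number >= frames[-1]:
        return tracking_points[frames[-1]]
    for f1, f2 in zip(frames, frames[1:]):
        if f1 <= frame_number <= f2:
            t = (frame_number - f1) / (f2 - f1)
            x1, y1 = tracking_points[f1]
            x2, y2 = tracking_points[f2]
            # Same linear interpolation as the context line at the top of the hunk
            return (x1 + t * (x2 - x1), y1 + t * (y2 - y1))
    return None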