From fb01aed6d5ee03663414501b69a3394ca3491373 Mon Sep 17 00:00:00 2001
From: peaklabs-dev <122374094+peaklabs-dev@users.noreply.github.com>
Date: Mon, 13 Jan 2025 16:37:36 +0100
Subject: [PATCH] feat: new global s3 and local backup deletion function

---
 app/Jobs/DatabaseBackupJob.php  |  11 +-
 bootstrap/helpers/databases.php | 245 +++++++++++++++++++++++++++++++-
 2 files changed, 245 insertions(+), 11 deletions(-)

diff --git a/app/Jobs/DatabaseBackupJob.php b/app/Jobs/DatabaseBackupJob.php
index 6730dceb7..b9be4aaa6 100644
--- a/app/Jobs/DatabaseBackupJob.php
+++ b/app/Jobs/DatabaseBackupJob.php
@@ -459,14 +459,9 @@ class DatabaseBackupJob implements ShouldBeEncrypted, ShouldQueue
 
     private function remove_old_backups(): void
     {
-        if ($this->backup->number_of_backups_locally === 0) {
-            $deletable = $this->backup->executions()->where('status', 'success');
-        } else {
-            $deletable = $this->backup->executions()->where('status', 'success')->skip($this->backup->number_of_backups_locally - 1);
-        }
-        foreach ($deletable->get() as $execution) {
-            delete_backup_locally($execution->filename, $this->server);
-            $execution->delete();
+        deleteOldBackupsLocally($this->backup);
+        if ($this->backup->save_s3) {
+            deleteOldBackupsFromS3($this->backup);
         }
     }
 
diff --git a/bootstrap/helpers/databases.php b/bootstrap/helpers/databases.php
index e12910f82..5c0c944ce 100644
--- a/bootstrap/helpers/databases.php
+++ b/bootstrap/helpers/databases.php
@@ -1,6 +1,7 @@
 <?php
 
+use App\Models\S3Storage;
 use App\Models\Server;
@@ ... @@
+function deleteBackupsS3($filenames, Server $server, S3Storage $s3): void
+{
+    $disk = Storage::build([
+        'driver' => 's3',
+        'key' => $s3->key,
+        'secret' => $s3->secret,
+        'region' => $s3->region,
+        'bucket' => $s3->bucket,
+        'endpoint' => $s3->endpoint,
+        'use_path_style_endpoint' => true,
+    ]);
+
+    // Delete files in bulk
+    $disk->delete($filenames);
+}
+
+function deleteEmptyBackupFolder($folderPath, Server $server): void
+{
+    // Properly escape the folder path for shell commands
+    $escapedPath = escapeshellarg($folderPath);
+    $escapedParentPath = escapeshellarg(dirname($folderPath));
+
+    // Check if the current folder is empty
+    $checkEmpty = instant_remote_process(["[ -d $escapedPath ] && [ -z \"$(ls -A $escapedPath)\" ] && echo 'empty' || echo 'not empty'"], $server, throwError: false);
+
+    if (trim($checkEmpty) === 'empty') {
+        // Remove the empty folder
+        instant_remote_process(["rmdir $escapedPath"], $server, throwError: false);
+
+        // Check if the parent folder exists and is empty
+        $checkParentEmpty = instant_remote_process([
+            "[ -d $escapedParentPath ] && [ -z \"$(ls -A $escapedParentPath)\" ] && echo 'empty' || echo 'not empty'",
+        ], $server, throwError: false);
+
+        if (trim($checkParentEmpty) === 'empty') {
+            // Remove the empty parent folder
+            instant_remote_process(["rmdir $escapedParentPath"], $server, throwError: false);
+        }
+    }
+}
+
+function deleteOldBackupsLocally($backup)
+{
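+    // Retention policy: keep the newest `database_backup_retention_amount_locally`
+    // successful executions and/or drop executions older than
+    // `database_backup_retention_days_locally`; both limits may apply at once.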
+    if (! $backup || ! $backup->executions) {
+        return;
+    }
+
+    $successfulBackups = $backup->executions()
+        ->where('status', 'success')
+        ->orderBy('created_at', 'desc')
+        ->get();
+
+    if ($successfulBackups->isEmpty()) {
+        return;
+    }
+
+    // Get retention limits
+    $retentionAmount = $backup->database_backup_retention_amount_locally;
+    $retentionDays = $backup->database_backup_retention_days_locally;
+
+    if ($retentionAmount === 0 && $retentionDays === 0) {
+        return;
+    }
+
+    $backupsToDelete = collect();
+
+    // Select backups that exceed the retention amount
+    if ($retentionAmount > 0) {
+        $backupsToDelete = $backupsToDelete->merge(
+            $successfulBackups->skip($retentionAmount)
+        );
+    }
+
+    // Select backups older than the retention window, measured from the newest successful backup
+    if ($retentionDays > 0) {
+        $oldestAllowedDate = $successfulBackups->first()->created_at->clone()->utc()->subDays($retentionDays);
+        $oldBackups = $successfulBackups->filter(function ($execution) use ($oldestAllowedDate) {
+            return $execution->created_at->utc() < $oldestAllowedDate;
+        });
+        $backupsToDelete = $backupsToDelete->merge($oldBackups);
+    }
+
+    // De-duplicate backups matched by more than one retention rule
+    $backupsToDelete = $backupsToDelete->unique('id');
+
+    // Keep track of folders to check
+    $foldersToCheck = collect();
+
+    // Process deletions in chunks of 10 so files can be removed in bulk
+    $backupsToDelete->chunk(10)->each(function ($chunk) use ($backup, &$foldersToCheck) {
+        $executionIds = [];
+        $filesToDelete = [];
+
+        foreach ($chunk as $execution) {
+            if ($execution->filename) {
+                $filesToDelete[] = $execution->filename;
+                $executionIds[] = $execution->id;
+                // Add the folder path to check later
+                $foldersToCheck->push(dirname($execution->filename));
+            }
+        }
+
+        if (! empty($filesToDelete)) {
+            deleteBackupsLocally($filesToDelete, $backup->server);
+
+            // Bulk delete executions from the database
+            if (! empty($executionIds)) {
+                $backup->executions()->whereIn('id', $executionIds)->delete();
+            }
+        }
+    });
+
+    // Check and clean up empty folders
+    $foldersToCheck->unique()->each(function ($folder) use ($backup) {
+        deleteEmptyBackupFolder($folder, $backup->server);
+    });
+}
+
+function deleteOldBackupsFromS3($backup)
+{
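+    // Retention policy: `database_backup_retention_amount_s3`,
+    // `database_backup_retention_days_s3`, and the total-storage cap
+    // `database_backup_retention_max_storage_s3` can each mark executions
+    // for deletion; matching S3 objects are then removed in bulk.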
+    if (! $backup || ! $backup->executions || ! $backup->s3) {
+        return;
+    }
+
+    $successfulBackups = $backup->executions()
+        ->where('status', 'success')
+        ->orderBy('created_at', 'desc')
+        ->get();
+
+    if ($successfulBackups->isEmpty()) {
+        return;
+    }
+
+    // Get retention limits
+    $retentionAmount = $backup->database_backup_retention_amount_s3;
+    $retentionDays = $backup->database_backup_retention_days_s3;
+    $maxStorageGB = $backup->database_backup_retention_max_storage_s3;
+
+    if ($retentionAmount === 0 && $retentionDays === 0 && $maxStorageGB === 0) {
+        return;
+    }
+
+    $backupsToDelete = collect();
+
+    // Select backups that exceed the retention amount
+    if ($retentionAmount > 0) {
+        $backupsToDelete = $backupsToDelete->merge(
+            $successfulBackups->skip($retentionAmount)
+        );
+    }
+
+    // Select backups older than the retention window, measured from the newest successful backup
+    if ($retentionDays > 0) {
+        $oldestAllowedDate = $successfulBackups->first()->created_at->clone()->utc()->subDays($retentionDays);
+        $oldBackups = $successfulBackups->filter(function ($execution) use ($oldestAllowedDate) {
+            return $execution->created_at->utc() < $oldestAllowedDate;
+        });
+        $backupsToDelete = $backupsToDelete->merge($oldBackups);
+    }
+
+    // Select backups that push the total size over the storage limit
+    if ($maxStorageGB > 0) {
+        $maxStorageBytes = $maxStorageGB * 1024 * 1024 * 1024; // Convert GB to bytes
+        $totalSize = 0;
+        $backupsOverLimit = collect();
+
+        foreach ($successfulBackups as $execution) {
+            $totalSize += (int) $execution->size;
+
+            // If we're over the limit, add this and all older backups to the delete list
+            if ($totalSize > $maxStorageBytes) {
+                $backupsOverLimit = $successfulBackups->filter(function ($b) use ($execution) {
+                    return $b->created_at->utc() <= $execution->created_at->utc();
+                });
+                break;
+            }
+        }
+
+        $backupsToDelete = $backupsToDelete->merge($backupsOverLimit);
+    }
+
+    // De-duplicate backups matched by more than one retention rule
+    $backupsToDelete = $backupsToDelete->unique('id');
+
+    // Keep track of folders to check
+    $foldersToCheck = collect();
+
+    // Process deletions in chunks of 10 so objects can be removed in bulk
+    $backupsToDelete->chunk(10)->each(function ($chunk) use ($backup, &$foldersToCheck) {
+        $executionIds = [];
+        $filesToDelete = [];
+
+        foreach ($chunk as $execution) {
+            if ($execution->filename) {
+                $filesToDelete[] = $execution->filename;
+                $executionIds[] = $execution->id;
+                // Add the folder path to check later
+                $foldersToCheck->push(dirname($execution->filename));
+            }
+        }
+
+        if (! empty($filesToDelete)) {
+            deleteBackupsS3($filesToDelete, $backup->server, $backup->s3);
+
+            // Update executions to mark the S3 backup as deleted
+            if (! empty($executionIds)) {
+                $backup->executions()
+                    ->whereIn('id', $executionIds)
+                    ->update(['s3_backup_deleted_at' => now()]);
+            }
+        }
+    });
+
+    // Check and clean up empty folders
+    $foldersToCheck->unique()->each(function ($folder) use ($backup) {
+        deleteEmptyBackupFolder($folder, $backup->server);
+    });
+}
 
 function isPublicPortAlreadyUsed(Server $server, int $port, ?string $id = null): bool
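
A minimal standalone sketch of the retention selection the two helpers above implement, using plain arrays instead of Eloquent collections. The backup list, dates, sizes, and limits below are illustrative assumptions, not part of the patch:

<?php

// Successful backups sorted newest-first, as the helpers query them.
$backups = [
    ['id' => 1, 'created_at' => new DateTimeImmutable('2025-01-13'), 'size' => 3_000_000_000],
    ['id' => 2, 'created_at' => new DateTimeImmutable('2025-01-06'), 'size' => 3_000_000_000],
    ['id' => 3, 'created_at' => new DateTimeImmutable('2024-12-20'), 'size' => 3_000_000_000],
];

$retentionAmount = 2;  // keep at most the 2 newest backups
$retentionDays = 10;   // drop backups older than 10 days (relative to the newest)
$maxStorageGB = 5;     // total-storage cap, as in the S3 helper

$toDelete = [];

// Amount rule: everything after the N newest is deletable.
foreach (array_slice($backups, $retentionAmount) as $b) {
    $toDelete[$b['id']] = $b;
}

// Age rule: older than the newest backup minus $retentionDays.
$cutoff = $backups[0]['created_at']->modify("-{$retentionDays} days");
foreach ($backups as $b) {
    if ($b['created_at'] < $cutoff) {
        $toDelete[$b['id']] = $b;
    }
}

// Storage rule: once the running total exceeds the cap, that backup
// and everything older than it become deletable.
$maxStorageBytes = $maxStorageGB * 1024 * 1024 * 1024;
$totalSize = 0;
foreach ($backups as $i => $b) {
    $totalSize += $b['size'];
    if ($totalSize > $maxStorageBytes) {
        foreach (array_slice($backups, $i) as $old) {
            $toDelete[$old['id']] = $old;
        }
        break;
    }
}

// Keying by id de-duplicates, like ->unique('id') in the patch.
print_r(array_keys($toDelete)); // [3, 2]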