feat: new global s3 and local backup deletion function
@@ -459,14 +459,9 @@ class DatabaseBackupJob implements ShouldBeEncrypted, ShouldQueue
     private function remove_old_backups(): void
     {
-        if ($this->backup->number_of_backups_locally === 0) {
-            $deletable = $this->backup->executions()->where('status', 'success');
-        } else {
-            $deletable = $this->backup->executions()->where('status', 'success')->skip($this->backup->number_of_backups_locally - 1);
-        }
-        foreach ($deletable->get() as $execution) {
-            delete_backup_locally($execution->filename, $this->server);
-            $execution->delete();
-        }
+        deleteOldBackupsLocally($this->backup);
+        if ($this->backup->save_s3) {
+            deleteOldBackupsFromS3($this->backup);
+        }
     }
 
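The retention logic that previously lived inline in `DatabaseBackupJob` (keep the newest `number_of_backups_locally` successful executions, delete the rest) now delegates to two global helpers defined later in this diff. A minimal usage sketch, assuming `$backup` is the scheduled-backup model carrying the retention fields referenced below:

```php
// Hypothetical call site; the names are taken from this diff.
deleteOldBackupsLocally($backup);        // prune local files by count and age

if ($backup->save_s3) {                  // S3 copies are pruned separately,
    deleteOldBackupsFromS3($backup);     // by count, age, and total size
}
```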
@@ -1,6 +1,7 @@
 <?php
 
 use App\Models\EnvironmentVariable;
+use App\Models\S3Storage;
 use App\Models\Server;
 use App\Models\StandaloneClickhouse;
 use App\Models\StandaloneDocker;
@@ -11,6 +12,7 @@ use App\Models\StandaloneMongodb;
 use App\Models\StandaloneMysql;
 use App\Models\StandalonePostgresql;
 use App\Models\StandaloneRedis;
+use Illuminate\Support\Facades\Storage;
 use Visus\Cuid2\Cuid2;
 
 function generate_database_name(string $type): string
@@ -194,12 +196,249 @@ function create_standalone_clickhouse($environment_id, $destination_uuid, ?array
     return $database;
 }
 
-function delete_backup_locally(?string $filename, Server $server): void
-{
-    if (empty($filename)) {
-        return;
-    }
-    instant_remote_process(["rm -f \"{$filename}\""], $server, throwError: false);
-}
+function deleteBackupsLocally(string|array|null $filenames, Server $server): void
+{
+    if (empty($filenames)) {
+        return;
+    }
+
+    if (is_string($filenames)) {
+        $filenames = [$filenames];
+    }
+
+    $quotedFiles = array_map(function ($file) {
+        return "\"$file\"";
+    }, $filenames);
+
+    instant_remote_process(['rm -f '.implode(' ', $quotedFiles)], $server, throwError: false);
+}
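Unlike the old `delete_backup_locally`, which removed a single file per call, `deleteBackupsLocally` accepts a string or an array and issues one bulk `rm -f` over the remote connection. An illustrative call with made-up paths:

```php
// A single string and an array are both accepted; the array form
// collapses to one quoted shell command.
deleteBackupsLocally('/backups/db/dump-1.gz', $server);
deleteBackupsLocally(['/backups/db/dump-1.gz', '/backups/db/dump-2.gz'], $server);
// The second call executes roughly:
//   rm -f "/backups/db/dump-1.gz" "/backups/db/dump-2.gz"
```

Note that filenames are wrapped in plain double quotes rather than passed through `escapeshellarg()`, so a path containing `"` or `$` would still break the composed command.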
+
+function deleteBackupsS3(string|array|null $filenames, Server $server, S3Storage $s3): void
+{
+    if (empty($filenames) || ! $s3) {
+        return;
+    }
+
+    if (is_string($filenames)) {
+        $filenames = [$filenames];
+    }
+
+    // Initialize S3 client using Laravel's Storage facade
+    $disk = Storage::build([
+        'driver' => 's3',
+        'key' => $s3->key,
+        'secret' => $s3->secret,
+        'region' => $s3->region,
+        'bucket' => $s3->bucket,
+        'endpoint' => $s3->endpoint,
+        'use_path_style_endpoint' => true,
+    ]);
+
+    // Delete files in bulk
+    $disk->delete($filenames);
+}
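`Storage::build()` constructs a one-off S3 disk from the `S3Storage` model at call time, so no disk has to be preconfigured in `config/filesystems.php`, and Laravel's `delete()` accepts an array to remove all listed objects at once. Two reviewer observations: the `! $s3` guard is unreachable because the parameter is type-hinted non-nullable, and `$server` is unused here since deletion goes through the S3 API rather than over SSH. A hedged usage sketch:

```php
// Assuming $backup->s3 is the S3Storage model referenced in this diff,
// and the object keys are hypothetical:
deleteBackupsS3(['db/dump-1.gz', 'db/dump-2.gz'], $server, $backup->s3);
```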
+
+function deleteEmptyBackupFolder($folderPath, Server $server): void
+{
+    // Properly escape the folder path for shell commands
+    $escapedPath = escapeshellarg($folderPath);
+    $escapedParentPath = escapeshellarg(dirname($folderPath));
+
+    // Check if current folder is empty
+    $checkEmpty = instant_remote_process(["[ -d $escapedPath ] && [ -z \"$(ls -A $escapedPath)\" ] && echo 'empty' || echo 'not empty'"], $server, throwError: false);
+
+    if (trim($checkEmpty) === 'empty') {
+        // Remove the empty folder
+        instant_remote_process(["rmdir $escapedPath"], $server, throwError: false);
+
+        // Check if parent folder exists and is empty
+        $checkParentEmpty = instant_remote_process([
+            "[ -d $escapedParentPath ] && [ -z \"$(ls -A $escapedParentPath)\" ] && echo 'empty' || echo 'not empty'",
+        ], $server, throwError: false);
+
+        if (trim($checkParentEmpty) === 'empty') {
+            // Remove the empty parent folder
+            instant_remote_process(["rmdir $escapedParentPath"], $server, throwError: false);
+        }
+    }
+}
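The emptiness probe relies on `ls -A` printing nothing for an empty directory, and `rmdir` (which only removes empty directories) combined with `throwError: false` keeps the cleanup safe if a new backup lands in between; at most one parent level is pruned. For a hypothetical path the first remote command expands as follows:

```php
// For $folderPath = '/backups/db-1' (made-up path), the probe is:
//   [ -d '/backups/db-1' ] && [ -z "$(ls -A '/backups/db-1')" ] \
//     && echo 'empty' || echo 'not empty'
deleteEmptyBackupFolder('/backups/db-1', $server);
```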
+
+function deleteOldBackupsLocally($backup)
+{
+    if (! $backup || ! $backup->executions) {
+        return;
+    }
+
+    $successfulBackups = $backup->executions()
+        ->where('status', 'success')
+        ->orderBy('created_at', 'desc')
+        ->get();
+
+    if ($successfulBackups->isEmpty()) {
+        return;
+    }
+
+    // Get retention limits
+    $retentionAmount = $backup->database_backup_retention_amount_locally;
+    $retentionDays = $backup->database_backup_retention_days_locally;
+
+    if ($retentionAmount === 0 && $retentionDays === 0) {
+        return;
+    }
+
+    $backupsToDelete = collect();
+
+    // Process backups based on retention amount
+    if ($retentionAmount > 0) {
+        $backupsToDelete = $backupsToDelete->merge(
+            $successfulBackups->skip($retentionAmount)
+        );
+    }
+
+    // Process backups based on retention days
+    if ($retentionDays > 0) {
+        $oldestAllowedDate = $successfulBackups->first()->created_at->clone()->utc()->subDays($retentionDays);
+        $oldBackups = $successfulBackups->filter(function ($execution) use ($oldestAllowedDate) {
+            return $execution->created_at->utc() < $oldestAllowedDate;
+        });
+        $backupsToDelete = $backupsToDelete->merge($oldBackups);
+    }
+
+    // Get unique backups to delete and chunk them for parallel processing
+    $backupsToDelete = $backupsToDelete->unique('id');
+
+    // Keep track of folders to check
+    $foldersToCheck = collect();
+
+    // Process deletions in parallel chunks
+    $backupsToDelete->chunk(10)->each(function ($chunk) use ($backup, &$foldersToCheck) {
+        $executionIds = [];
+        $filesToDelete = [];
+
+        foreach ($chunk as $execution) {
+            if ($execution->filename) {
+                $filesToDelete[] = $execution->filename;
+                $executionIds[] = $execution->id;
+                // Add the folder path to check later
+                $foldersToCheck->push(dirname($execution->filename));
+            }
+        }
+
+        if (! empty($filesToDelete)) {
+            deleteBackupsLocally($filesToDelete, $backup->server);
+
+            // Bulk delete executions from database
+            if (! empty($executionIds)) {
+                $backup->executions()->whereIn('id', $executionIds)->delete();
+            }
+        }
+    });
+
+    // Check and clean up empty folders
+    $foldersToCheck->unique()->each(function ($folder) use ($backup) {
+        deleteEmptyBackupFolder($folder, $backup->server);
+    });
+}
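Local retention combines two independent rules, each disabled by a value of `0`: keep at most `database_backup_retention_amount_locally` of the newest successful executions, and drop anything more than `database_backup_retention_days_locally` days older than the most recent backup (the cutoff is anchored to the newest execution's `created_at`, not to `now()`). A worked example under assumed settings:

```php
// Assume 10 successful daily executions, numbered 1 (newest) to 10 (oldest),
// with amount = 7 and days = 5 (hypothetical values):
//   count rule: skip(7) marks executions 8-10
//   age rule:   anything > 5 days older than execution 1 marks 7-10
//   union:      executions 7-10 are deleted, 1-6 remain
deleteOldBackupsLocally($backup);
```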
+
+function deleteOldBackupsFromS3($backup)
+{
+    if (! $backup || ! $backup->executions || ! $backup->s3) {
+        return;
+    }
+
+    $successfulBackups = $backup->executions()
+        ->where('status', 'success')
+        ->orderBy('created_at', 'desc')
+        ->get();
+
+    if ($successfulBackups->isEmpty()) {
+        return;
+    }
+
+    // Get retention limits
+    $retentionAmount = $backup->database_backup_retention_amount_s3;
+    $retentionDays = $backup->database_backup_retention_days_s3;
+    $maxStorageGB = $backup->database_backup_retention_max_storage_s3;
+
+    if ($retentionAmount === 0 && $retentionDays === 0 && $maxStorageGB === 0) {
+        return;
+    }
+
+    $backupsToDelete = collect();
+
+    // Process backups based on retention amount
+    if ($retentionAmount > 0) {
+        $backupsToDelete = $backupsToDelete->merge(
+            $successfulBackups->skip($retentionAmount)
+        );
+    }
+
+    // Process backups based on retention days
+    if ($retentionDays > 0) {
+        $oldestAllowedDate = $successfulBackups->first()->created_at->clone()->utc()->subDays($retentionDays);
+        $oldBackups = $successfulBackups->filter(function ($execution) use ($oldestAllowedDate) {
+            return $execution->created_at->utc() < $oldestAllowedDate;
+        });
+        $backupsToDelete = $backupsToDelete->merge($oldBackups);
+    }
+
+    // Process backups based on total storage limit
+    if ($maxStorageGB > 0) {
+        $maxStorageBytes = $maxStorageGB * 1024 * 1024 * 1024; // Convert GB to bytes
+        $totalSize = 0;
+        $backupsOverLimit = collect();
+
+        foreach ($successfulBackups as $backup) {
+            $totalSize += (int) $backup->size;
+
+            // If we're over the limit, add this and all older backups to delete list
+            if ($totalSize > $maxStorageBytes) {
+                $backupsOverLimit = $successfulBackups->filter(function ($b) use ($backup) {
+                    return $b->created_at->utc() <= $backup->created_at->utc();
+                });
+                break;
+            }
+        }
+
+        $backupsToDelete = $backupsToDelete->merge($backupsOverLimit);
+    }
+
+    // Get unique backups to delete and chunk them for parallel processing
+    $backupsToDelete = $backupsToDelete->unique('id');
+
+    // Keep track of folders to check
+    $foldersToCheck = collect();
+
+    // Process deletions in parallel chunks
+    $backupsToDelete->chunk(10)->each(function ($chunk) use ($backup, &$foldersToCheck) {
+        $executionIds = [];
+        $filesToDelete = [];
+
+        foreach ($chunk as $execution) {
+            if ($execution->filename) {
+                $filesToDelete[] = $execution->filename;
+                $executionIds[] = $execution->id;
+                // Add the folder path to check later
+                $foldersToCheck->push(dirname($execution->filename));
+            }
+        }
+
+        if (! empty($filesToDelete)) {
+            deleteBackupsS3($filesToDelete, $backup->server, $backup->s3);
+
+            // Update executions to mark S3 backup as deleted
+            if (! empty($executionIds)) {
+                $backup->executions()
+                    ->whereIn('id', $executionIds)
+                    ->update(['s3_backup_deleted_at' => now()]);
+            }
+        }
+    });
+
+    // Check and clean up empty folders
+    $foldersToCheck->unique()->each(function ($folder) use ($backup) {
+        deleteEmptyBackupFolder($folder, $backup->server);
+    });
+}
 
 function isPublicPortAlreadyUsed(Server $server, int $port, ?string $id = null): bool
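One likely bug worth flagging in `deleteOldBackupsFromS3`: the storage-limit loop iterates with `foreach ($successfulBackups as $backup)`, shadowing the `$backup` parameter. Once that branch runs, the later `$backup->executions()`, `$backup->server`, and `$backup->s3` calls operate on the last-visited execution rather than the scheduled-backup model. A minimal fix sketch, assuming the loop variable is simply renamed:

```php
// Rename the loop variable so the $backup parameter is not clobbered.
foreach ($successfulBackups as $execution) {
    $totalSize += (int) $execution->size;

    // Once over the limit, mark this and every older execution for deletion.
    if ($totalSize > $maxStorageBytes) {
        $backupsOverLimit = $successfulBackups->filter(function ($b) use ($execution) {
            return $b->created_at->utc() <= $execution->created_at->utc();
        });
        break;
    }
}
```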