diff --git a/bootstrap/helpers/databases.php b/bootstrap/helpers/databases.php
index 5c0c944ce..d41ae5582 100644
--- a/bootstrap/helpers/databases.php
+++ b/bootstrap/helpers/databases.php
@@ -17,17 +17,12 @@ use Visus\Cuid2\Cuid2;
 
 function generate_database_name(string $type): string
 {
-    $cuid = new Cuid2;
-
-    return $type.'-database-'.$cuid;
+    return $type.'-database-'.(new Cuid2);
 }
 
 function create_standalone_postgresql($environmentId, $destinationUuid, ?array $otherData = null, string $databaseImage = 'postgres:16-alpine'): StandalonePostgresql
 {
-    $destination = StandaloneDocker::where('uuid', $destinationUuid)->first();
-    if (! $destination) {
-        throw new Exception('Destination not found');
-    }
+    $destination = StandaloneDocker::where('uuid', $destinationUuid)->firstOrFail();
     $database = new StandalonePostgresql;
     $database->name = generate_database_name('postgresql');
     $database->image = $databaseImage;
@@ -45,10 +40,7 @@ function create_standalone_postgresql($environmentId, $destinationUuid, ?array $
 
 function create_standalone_redis($environment_id, $destination_uuid, ?array $otherData = null): StandaloneRedis
 {
-    $destination = StandaloneDocker::where('uuid', $destination_uuid)->first();
-    if (! $destination) {
-        throw new Exception('Destination not found');
-    }
+    $destination = StandaloneDocker::where('uuid', $destination_uuid)->firstOrFail();
     $database = new StandaloneRedis;
     $database->name = generate_database_name('redis');
     $redis_password = \Illuminate\Support\Str::password(length: 64, symbols: false);
@@ -79,10 +71,7 @@ function create_standalone_redis($environment_id, $destination_uuid, ?array $oth
 
 function create_standalone_mongodb($environment_id, $destination_uuid, ?array $otherData = null): StandaloneMongodb
 {
-    $destination = StandaloneDocker::where('uuid', $destination_uuid)->first();
-    if (! $destination) {
-        throw new Exception('Destination not found');
-    }
+    $destination = StandaloneDocker::where('uuid', $destination_uuid)->firstOrFail();
     $database = new StandaloneMongodb;
     $database->name = generate_database_name('mongodb');
     $database->mongo_initdb_root_password = \Illuminate\Support\Str::password(length: 64, symbols: false);
@@ -96,12 +85,10 @@ function create_standalone_mongodb($environment_id, $destination_uuid, ?array $o
 
     return $database;
 }
+
 function create_standalone_mysql($environment_id, $destination_uuid, ?array $otherData = null): StandaloneMysql
 {
-    $destination = StandaloneDocker::where('uuid', $destination_uuid)->first();
-    if (! $destination) {
-        throw new Exception('Destination not found');
-    }
+    $destination = StandaloneDocker::where('uuid', $destination_uuid)->firstOrFail();
     $database = new StandaloneMysql;
     $database->name = generate_database_name('mysql');
     $database->mysql_root_password = \Illuminate\Support\Str::password(length: 64, symbols: false);
@@ -116,12 +103,10 @@ function create_standalone_mysql($environment_id, $destination_uuid, ?array $oth
 
     return $database;
 }
+
 function create_standalone_mariadb($environment_id, $destination_uuid, ?array $otherData = null): StandaloneMariadb
 {
-    $destination = StandaloneDocker::where('uuid', $destination_uuid)->first();
-    if (! $destination) {
-        throw new Exception('Destination not found');
-    }
+    $destination = StandaloneDocker::where('uuid', $destination_uuid)->firstOrFail();
     $database = new StandaloneMariadb;
     $database->name = generate_database_name('mariadb');
     $database->mariadb_root_password = \Illuminate\Support\Str::password(length: 64, symbols: false);
@@ -129,7 +114,6 @@ function create_standalone_mariadb($environment_id, $destination_uuid, ?array $o
     $database->environment_id = $environment_id;
     $database->destination_id = $destination->id;
     $database->destination_type = $destination->getMorphClass();
-
     if ($otherData) {
         $database->fill($otherData);
     }
@@ -137,12 +121,10 @@ function create_standalone_mariadb($environment_id, $destination_uuid, ?array $o
 
     return $database;
 }
+
 function create_standalone_keydb($environment_id, $destination_uuid, ?array $otherData = null): StandaloneKeydb
 {
-    $destination = StandaloneDocker::where('uuid', $destination_uuid)->first();
-    if (! $destination) {
-        throw new Exception('Destination not found');
-    }
+    $destination = StandaloneDocker::where('uuid', $destination_uuid)->firstOrFail();
     $database = new StandaloneKeydb;
     $database->name = generate_database_name('keydb');
     $database->keydb_password = \Illuminate\Support\Str::password(length: 64, symbols: false);
@@ -159,10 +141,7 @@ function create_standalone_keydb($environment_id, $destination_uuid, ?array $oth
 
 function create_standalone_dragonfly($environment_id, $destination_uuid, ?array $otherData = null): StandaloneDragonfly
 {
-    $destination = StandaloneDocker::where('uuid', $destination_uuid)->first();
-    if (! $destination) {
-        throw new Exception('Destination not found');
-    }
+    $destination = StandaloneDocker::where('uuid', $destination_uuid)->firstOrFail();
     $database = new StandaloneDragonfly;
     $database->name = generate_database_name('dragonfly');
     $database->dragonfly_password = \Illuminate\Support\Str::password(length: 64, symbols: false);
@@ -176,12 +155,10 @@ function create_standalone_dragonfly($environment_id, $destination_uuid, ?array
 
     return $database;
 }
+
 function create_standalone_clickhouse($environment_id, $destination_uuid, ?array $otherData = null): StandaloneClickhouse
 {
-    $destination = StandaloneDocker::where('uuid', $destination_uuid)->first();
-    if (! $destination) {
-        throw new Exception('Destination not found');
-    }
+    $destination = StandaloneDocker::where('uuid', $destination_uuid)->firstOrFail();
     $database = new StandaloneClickhouse;
     $database->name = generate_database_name('clickhouse');
     $database->clickhouse_admin_password = \Illuminate\Support\Str::password(length: 64, symbols: false);
@@ -201,29 +178,22 @@ function deleteBackupsLocally(string|array|null $filenames, Server $server): voi
     if (empty($filenames)) {
         return;
     }
-
     if (is_string($filenames)) {
         $filenames = [$filenames];
     }
-
-    $quotedFiles = array_map(function ($file) {
-        return "\"$file\"";
-    }, $filenames);
-
+    $quotedFiles = array_map(fn ($file) => "\"$file\"", $filenames);
     instant_remote_process(['rm -f '.implode(' ', $quotedFiles)], $server, throwError: false);
 }
 
-function deleteBackupsS3(string|array|null $filenames, Server $server, S3Storage $s3): void
+function deleteBackupsS3(string|array|null $filenames, S3Storage $s3): void
 {
     if (empty($filenames) || ! $s3) {
         return;
     }
-
     if (is_string($filenames)) {
         $filenames = [$filenames];
     }
 
-    // Initialize S3 client using Laravel's Storage facade
     $disk = Storage::build([
         'driver' => 's3',
         'key' => $s3->key,
@@ -232,38 +202,30 @@ function deleteBackupsS3(string|array|null $filenames, Server $server, S3Storage
         'bucket' => $s3->bucket,
         'endpoint' => $s3->endpoint,
         'use_path_style_endpoint' => true,
+        'bucket_endpoint' => $s3->isHetzner() || $s3->isDigitalOcean(),
+        'aws_url' => $s3->awsUrl(),
     ]);
 
-    // Delete files in bulk
     $disk->delete($filenames);
 }
 
 function deleteEmptyBackupFolder($folderPath, Server $server): void
 {
-    // Properly escape the folder path for shell commands
     $escapedPath = escapeshellarg($folderPath);
     $escapedParentPath = escapeshellarg(dirname($folderPath));
 
-    // Check if current folder is empty
     $checkEmpty = instant_remote_process(["[ -d $escapedPath ] && [ -z \"$(ls -A $escapedPath)\" ] && echo 'empty' || echo 'not empty'"], $server, throwError: false);
 
     if (trim($checkEmpty) === 'empty') {
-        // Remove the empty folder
         instant_remote_process(["rmdir $escapedPath"], $server, throwError: false);
-
-        // Check if parent folder exists and is empty
-        $checkParentEmpty = instant_remote_process([
-            "[ -d $escapedParentPath ] && [ -z \"$(ls -A $escapedParentPath)\" ] && echo 'empty' || echo 'not empty'",
-        ], $server, throwError: false);
-
+        $checkParentEmpty = instant_remote_process(["[ -d $escapedParentPath ] && [ -z \"$(ls -A $escapedParentPath)\" ] && echo 'empty' || echo 'not empty'"], $server, throwError: false);
         if (trim($checkParentEmpty) === 'empty') {
-            // Remove the empty parent folder
             instant_remote_process(["rmdir $escapedParentPath"], $server, throwError: false);
         }
     }
 }
 
-function deleteOldBackupsLocally($backup)
+function deleteOldBackupsLocally($backup): void
 {
     if (! $backup || ! $backup->executions) {
         return;
@@ -278,7 +240,6 @@ function deleteOldBackupsLocally($backup)
         return;
     }
 
-    // Get retention limits
     $retentionAmount = $backup->database_backup_retention_amount_locally;
     $retentionDays = $backup->database_backup_retention_days_locally;
 
@@ -288,29 +249,19 @@
 
     $backupsToDelete = collect();
 
-    // Process backups based on retention amount
     if ($retentionAmount > 0) {
-        $backupsToDelete = $backupsToDelete->merge(
-            $successfulBackups->skip($retentionAmount)
-        );
+        $backupsToDelete = $backupsToDelete->merge($successfulBackups->skip($retentionAmount));
     }
 
-    // Process backups based on retention days
     if ($retentionDays > 0) {
         $oldestAllowedDate = $successfulBackups->first()->created_at->clone()->utc()->subDays($retentionDays);
-        $oldBackups = $successfulBackups->filter(function ($execution) use ($oldestAllowedDate) {
-            return $execution->created_at->utc() < $oldestAllowedDate;
-        });
+        $oldBackups = $successfulBackups->filter(fn ($execution) => $execution->created_at->utc() < $oldestAllowedDate);
         $backupsToDelete = $backupsToDelete->merge($oldBackups);
     }
 
-    // Get unique backups to delete and chunk them for parallel processing
     $backupsToDelete = $backupsToDelete->unique('id');
-
-    // Keep track of folders to check
     $foldersToCheck = collect();
 
-    // Process deletions in parallel chunks
     $backupsToDelete->chunk(10)->each(function ($chunk) use ($backup, &$foldersToCheck) {
         $executionIds = [];
         $filesToDelete = [];
@@ -319,28 +270,22 @@ function deleteOldBackupsLocally($backup)
             if ($execution->filename) {
                 $filesToDelete[] = $execution->filename;
                 $executionIds[] = $execution->id;
-                // Add the folder path to check later
                 $foldersToCheck->push(dirname($execution->filename));
             }
         }
 
         if (! empty($filesToDelete)) {
             deleteBackupsLocally($filesToDelete, $backup->server);
-
-            // Bulk delete executions from database
             if (! empty($executionIds)) {
                 $backup->executions()->whereIn('id', $executionIds)->delete();
             }
         }
     });
 
-    // Check and clean up empty folders
-    $foldersToCheck->unique()->each(function ($folder) use ($backup) {
-        deleteEmptyBackupFolder($folder, $backup->server);
-    });
+    $foldersToCheck->unique()->each(fn ($folder) => deleteEmptyBackupFolder($folder, $backup->server));
 }
 
-function deleteOldBackupsFromS3($backup)
+function deleteOldBackupsFromS3($backup): void
 {
     if (! $backup || ! $backup->executions || ! $backup->s3) {
         return;
@@ -355,7 +300,6 @@ function deleteOldBackupsFromS3($backup)
         return;
     }
 
-    // Get retention limits
     $retentionAmount = $backup->database_backup_retention_amount_s3;
     $retentionDays = $backup->database_backup_retention_days_s3;
     $maxStorageGB = $backup->database_backup_retention_max_storage_s3;
@@ -366,36 +310,25 @@
 
     $backupsToDelete = collect();
 
-    // Process backups based on retention amount
     if ($retentionAmount > 0) {
-        $backupsToDelete = $backupsToDelete->merge(
-            $successfulBackups->skip($retentionAmount)
-        );
+        $backupsToDelete = $backupsToDelete->merge($successfulBackups->skip($retentionAmount));
     }
 
-    // Process backups based on retention days
     if ($retentionDays > 0) {
         $oldestAllowedDate = $successfulBackups->first()->created_at->clone()->utc()->subDays($retentionDays);
-        $oldBackups = $successfulBackups->filter(function ($execution) use ($oldestAllowedDate) {
-            return $execution->created_at->utc() < $oldestAllowedDate;
-        });
+        $oldBackups = $successfulBackups->filter(fn ($execution) => $execution->created_at->utc() < $oldestAllowedDate);
         $backupsToDelete = $backupsToDelete->merge($oldBackups);
     }
 
-    // Process backups based on total storage limit
     if ($maxStorageGB > 0) {
-        $maxStorageBytes = $maxStorageGB * 1024 * 1024 * 1024; // Convert GB to bytes
+        $maxStorageBytes = $maxStorageGB * 1024 * 1024 * 1024;
         $totalSize = 0;
         $backupsOverLimit = collect();
 
         foreach ($successfulBackups as $backup) {
             $totalSize += (int) $backup->size;
-
-            // If we're over the limit, add this and all older backups to delete list
             if ($totalSize > $maxStorageBytes) {
-                $backupsOverLimit = $successfulBackups->filter(function ($b) use ($backup) {
-                    return $b->created_at->utc() <= $backup->created_at->utc();
-                });
+                $backupsOverLimit = $successfulBackups->filter(fn ($b) => $b->created_at->utc() <= $backup->created_at->utc());
                 break;
             }
         }
@@ -403,13 +336,9 @@
         $backupsToDelete = $backupsToDelete->merge($backupsOverLimit);
     }
 
-    // Get unique backups to delete and chunk them for parallel processing
     $backupsToDelete = $backupsToDelete->unique('id');
-
-    // Keep track of folders to check
     $foldersToCheck = collect();
 
-    // Process deletions in parallel chunks
     $backupsToDelete->chunk(10)->each(function ($chunk) use ($backup, &$foldersToCheck) {
         $executionIds = [];
         $filesToDelete = [];
@@ -418,15 +347,12 @@
             if ($execution->filename) {
                 $filesToDelete[] = $execution->filename;
                 $executionIds[] = $execution->id;
-                // Add the folder path to check later
                 $foldersToCheck->push(dirname($execution->filename));
             }
         }
 
         if (! empty($filesToDelete)) {
-            deleteBackupsS3($filesToDelete, $backup->server, $backup->s3);
-
-            // Update executions to mark S3 backup as deleted
+            deleteBackupsS3($filesToDelete, $backup->s3);
             if (! empty($executionIds)) {
                 $backup->executions()
                     ->whereIn('id', $executionIds)
@@ -435,10 +361,7 @@
         }
     });
 
-    // Check and clean up empty folders
-    $foldersToCheck->unique()->each(function ($folder) use ($backup) {
-        deleteEmptyBackupFolder($folder, $backup->server);
-    });
+    $foldersToCheck->unique()->each(fn ($folder) => deleteEmptyBackupFolder($folder, $backup->server));
 }
 
 function isPublicPortAlreadyUsed(Server $server, int $port, ?string $id = null): bool