From dd345d5ac7325636bb4880cf37fbfc42309a8295 Mon Sep 17 00:00:00 2001
From: Andras Bacsai <5845193+andrasbacsai@users.noreply.github.com>
Date: Fri, 28 Mar 2025 22:45:09 +0100
Subject: [PATCH 1/5] chore(versions): update version numbers for coolify and nightly

---
 config/constants.php        | 2 +-
 other/nightly/versions.json | 4 ++--
 versions.json               | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/config/constants.php b/config/constants.php
index c3f177f67..c674fe3c7 100644
--- a/config/constants.php
+++ b/config/constants.php
@@ -2,7 +2,7 @@
 
 return [
     'coolify' => [
-        'version' => '4.0.0-beta.401',
+        'version' => '4.0.0-beta.402',
         'helper_version' => '1.0.7',
         'realtime_version' => '1.0.6',
         'self_hosted' => env('SELF_HOSTED', true),
diff --git a/other/nightly/versions.json b/other/nightly/versions.json
index 03c56756d..1ad2d31cf 100644
--- a/other/nightly/versions.json
+++ b/other/nightly/versions.json
@@ -1,10 +1,10 @@
 {
     "coolify": {
         "v4": {
-            "version": "4.0.0-beta.401"
+            "version": "4.0.0-beta.402"
         },
         "nightly": {
-            "version": "4.0.0-beta.402"
+            "version": "4.0.0-beta.403"
         },
         "helper": {
             "version": "1.0.7"
diff --git a/versions.json b/versions.json
index 03c56756d..1ad2d31cf 100644
--- a/versions.json
+++ b/versions.json
@@ -1,10 +1,10 @@
 {
     "coolify": {
         "v4": {
-            "version": "4.0.0-beta.401"
+            "version": "4.0.0-beta.402"
         },
         "nightly": {
-            "version": "4.0.0-beta.402"
+            "version": "4.0.0-beta.403"
         },
         "helper": {
             "version": "1.0.7"

From 34e1587068001f6cbbbc30e8d319287f9a103782 Mon Sep 17 00:00:00 2001
From: Andras Bacsai <5845193+andrasbacsai@users.noreply.github.com>
Date: Fri, 28 Mar 2025 22:45:12 +0100
Subject: [PATCH 2/5] refactor(database-backup): remove existing Docker container before backup upload

---
 app/Jobs/DatabaseBackupJob.php | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/app/Jobs/DatabaseBackupJob.php b/app/Jobs/DatabaseBackupJob.php
index 6070ad16a..3276711c5 100644
--- a/app/Jobs/DatabaseBackupJob.php
+++ b/app/Jobs/DatabaseBackupJob.php
@@ -484,6 +484,11 @@ class DatabaseBackupJob implements ShouldBeEncrypted, ShouldQueue
 
         $fullImageName = $this->getFullImageName();
 
+        $containerExists = instant_remote_process(["docker ps -a -q -f name=backup-of-{$this->backup->uuid}"], $this->server, false);
+        if (filled($containerExists)) {
+            instant_remote_process(["docker rm -f backup-of-{$this->backup->uuid}"], $this->server, false);
+        }
+
         if (isDev()) {
             if ($this->database->name === 'coolify-db') {
                 $backup_location_from = '/var/lib/docker/volumes/coolify_dev_backups_data/_data/coolify/coolify-db-'.$this->server->ip.$this->backup_file;

From b376d6df2a58b8057cfb78b2e5bf8d3ab4af01ab Mon Sep 17 00:00:00 2001
From: Andras Bacsai <5845193+andrasbacsai@users.noreply.github.com>
Date: Sat, 29 Mar 2025 22:16:12 +0100
Subject: [PATCH 3/5] reverting: encrypting mount and fs_path

---
 app/Models/LocalFileVolume.php                |  4 +-
 bootstrap/helpers/shared.php                  | 72 +++++++-------
 ...00_revert_some_local_volume_encryption.php | 96 +++++++++++++++++++
 3 files changed, 137 insertions(+), 35 deletions(-)
 create mode 100644 database/migrations/2025_03_29_204400_revert_some_local_volume_encryption.php

diff --git a/app/Models/LocalFileVolume.php b/app/Models/LocalFileVolume.php
index 1a5430c5b..c56cd7694 100644
--- a/app/Models/LocalFileVolume.php
+++ b/app/Models/LocalFileVolume.php
@@ -9,8 +9,8 @@ use Illuminate\Database\Eloquent\Factories\HasFactory;
 class LocalFileVolume extends BaseModel
 {
     protected $casts = [
-        'fs_path' => 'encrypted',
-        'mount_path' => 'encrypted',
+        // 'fs_path' => 'encrypted',
+        // 'mount_path' => 'encrypted',
         'content' => 'encrypted',
         'is_directory' => 'boolean',
     ];
diff --git a/bootstrap/helpers/shared.php b/bootstrap/helpers/shared.php
index 218ca1b37..a020e7558 100644
--- a/bootstrap/helpers/shared.php
+++ b/bootstrap/helpers/shared.php
@@ -1363,15 +1363,21 @@ function parseServiceVolumes($serviceVolumes, $resource, $topLevelVolumes, $pull
                 $source = $source."-pr-$pull_request_id";
             }
             if (! $resource?->settings?->is_preserve_repository_enabled || $foundConfig?->is_based_on_git) {
-                $volume = LocalFileVolume::wherePlainMountPath($target)->first() ?? new LocalFileVolume;
-                $volume->fill([
-                    'fs_path' => $source,
-                    'mount_path' => $target,
-                    'content' => $content,
-                    'is_directory' => $isDirectory,
-                    'resource_id' => $resource->id,
-                    'resource_type' => get_class($resource),
-                ])->save();
+                LocalFileVolume::updateOrCreate(
+                    [
+                        'mount_path' => $target,
+                        'resource_id' => $resource->id,
+                        'resource_type' => get_class($resource),
+                    ],
+                    [
+                        'fs_path' => $source,
+                        'mount_path' => $target,
+                        'content' => $content,
+                        'is_directory' => $isDirectory,
+                        'resource_id' => $resource->id,
+                        'resource_type' => get_class($resource),
+                    ]
+                );
             }
         } elseif ($type->value() === 'volume') {
             if ($topLevelVolumes->has($source->value())) {
@@ -1670,27 +1676,21 @@ function parseDockerComposeFile(Service|Application $resource, bool $isNew = fal
                             return $volume;
                         }
 
-                        $existingVolume = LocalFileVolume::wherePlainMountPath($target)->first();
-
-                        if ($existingVolume) {
-                            $existingVolume->update([
+                        LocalFileVolume::updateOrCreate(
+                            [
+                                'mount_path' => $target,
+                                'resource_id' => $savedService->id,
+                                'resource_type' => get_class($savedService),
+                            ],
+                            [
                                 'fs_path' => $source,
                                 'mount_path' => $target,
                                 'content' => $content,
                                 'is_directory' => $isDirectory,
                                 'resource_id' => $savedService->id,
                                 'resource_type' => get_class($savedService),
-                            ]);
-                        } else {
-                            LocalFileVolume::create([
-                                'fs_path' => $source,
-                                'mount_path' => $target,
-                                'content' => $content,
-                                'is_directory' => $isDirectory,
-                                'resource_id' => $savedService->id,
-                                'resource_type' => get_class($savedService),
-                            ]);
-                        }
+                            ]
+                        );
                     } elseif ($type->value() === 'volume') {
                         if ($topLevelVolumes->has($source->value())) {
                             $v = $topLevelVolumes->get($source->value());
@@ -3328,15 +3328,21 @@ function newParser(Application|Service $resource, int $pull_request_id = 0, ?int
                     if ($isApplication && $isPullRequest) {
                         $source = $source."-pr-$pullRequestId";
                     }
-                    $volume = LocalFileVolume::wherePlainMountPath($target)->first() ?? new LocalFileVolume;
-                    $volume->fill([
-                        'fs_path' => $source,
-                        'mount_path' => $target,
-                        'content' => $content,
-                        'is_directory' => $isDirectory,
-                        'resource_id' => $originalResource->id,
-                        'resource_type' => get_class($originalResource),
-                    ])->save();
+                    LocalFileVolume::updateOrCreate(
+                        [
+                            'mount_path' => $target,
+                            'resource_id' => $originalResource->id,
+                            'resource_type' => get_class($originalResource),
+                        ],
+                        [
+                            'fs_path' => $source,
+                            'mount_path' => $target,
+                            'content' => $content,
+                            'is_directory' => $isDirectory,
+                            'resource_id' => $originalResource->id,
+                            'resource_type' => get_class($originalResource),
+                        ]
+                    );
                     if (isDev()) {
                         if ((int) $resource->compose_parsing_version >= 4) {
                             if ($isApplication) {
diff --git a/database/migrations/2025_03_29_204400_revert_some_local_volume_encryption.php b/database/migrations/2025_03_29_204400_revert_some_local_volume_encryption.php
new file mode 100644
index 000000000..683f1be3d
--- /dev/null
+++ b/database/migrations/2025_03_29_204400_revert_some_local_volume_encryption.php
@@ -0,0 +1,96 @@
+<?php
+
+use Illuminate\Database\Migrations\Migration;
+use Illuminate\Support\Facades\Crypt;
+use Illuminate\Support\Facades\DB;
+use Illuminate\Support\Facades\Log;
+
+return new class extends Migration
+{
+    /**
+     * Run the migrations.
+     */
+    public function up(): void
+    {
+        if (DB::table('local_file_volumes')->exists()) {
+            DB::table('local_file_volumes')
+                ->orderBy('id')
+                ->chunk(100, function ($volumes) {
+                    foreach ($volumes as $volume) {
+                        DB::beginTransaction();
+
+                        try {
+                            $fs_path = $volume->fs_path;
+                            $mount_path = $volume->mount_path;
+                            try {
+                                if ($fs_path) {
+                                    $fs_path = Crypt::decryptString($fs_path);
+                                }
+                            } catch (\Exception $e) {
+                            }
+
+                            try {
+                                if ($mount_path) {
+                                    $mount_path = Crypt::decryptString($mount_path);
+                                }
+                            } catch (\Exception $e) {
+                            }
+
+                            DB::table('local_file_volumes')->where('id', $volume->id)->update([
+                                'fs_path' => $fs_path,
+                                'mount_path' => $mount_path,
+                            ]);
+                            echo "Updated volume {$volume->id}\n";
+                        } catch (\Exception $e) {
+                            echo "Error encrypting local file volume fields: {$e->getMessage()}\n";
+                            Log::error('Error encrypting local file volume fields: '.$e->getMessage());
+                        }
+                        DB::commit();
+                    }
+                });
+        }
+    }
+
+    /**
+     * Reverse the migrations.
+     */
+    public function down(): void
+    {
+        if (DB::table('local_file_volumes')->exists()) {
+            DB::table('local_file_volumes')
+                ->orderBy('id')
+                ->chunk(100, function ($volumes) {
+                    foreach ($volumes as $volume) {
+                        DB::beginTransaction();
+                        try {
+                            $fs_path = $volume->fs_path;
+                            $mount_path = $volume->mount_path;
+                            try {
+                                if ($fs_path) {
+                                    $fs_path = Crypt::encrypt($fs_path);
+                                }
+                            } catch (\Exception $e) {
+                            }
+
+                            try {
+                                if ($mount_path) {
+                                    $mount_path = Crypt::encrypt($mount_path);
+                                }
+                            } catch (\Exception $e) {
+                            }
+
+                            DB::table('local_file_volumes')->where('id', $volume->id)->update([
+                                'fs_path' => $fs_path,
+                                'mount_path' => $mount_path,
+                            ]);
+                            echo "Updated volume {$volume->id}\n";
+                        } catch (\Exception $e) {
+                            echo "Error decrypting local file volume fields: {$e->getMessage()}\n";
+                            Log::error('Error decrypting local file volume fields: '.$e->getMessage());
+                        }
+                        DB::commit();
+                    }
+                });
+        }
+    }
+};

From a8018ad2c4928bab68a3e4a2361eb5657f3cc49e Mon Sep 17 00:00:00 2001
From: Andras Bacsai <5845193+andrasbacsai@users.noreply.github.com>
Date: Sun, 30 Mar 2025 18:04:09 +0200
Subject: [PATCH 4/5] refactor(database): improve decryption and deduplication of local file volumes

---
 ...00_revert_some_local_volume_encryption.php | 60 ++++++++++++++++---
 1 file changed, 52 insertions(+), 8 deletions(-)

diff --git a/database/migrations/2025_03_29_204400_revert_some_local_volume_encryption.php b/database/migrations/2025_03_29_204400_revert_some_local_volume_encryption.php
index 683f1be3d..13419e82f 100644
--- a/database/migrations/2025_03_29_204400_revert_some_local_volume_encryption.php
+++ b/database/migrations/2025_03_29_204400_revert_some_local_volume_encryption.php
@@ -13,15 +13,17 @@ return new class extends Migration
     public function up(): void
     {
         if (DB::table('local_file_volumes')->exists()) {
+            // First, get all volumes and decrypt their values
+            $decryptedVolumes = collect();
+
             DB::table('local_file_volumes')
                 ->orderBy('id')
-                ->chunk(100, function ($volumes) {
+                ->chunk(100, function ($volumes) use (&$decryptedVolumes) {
                     foreach ($volumes as $volume) {
-                        DB::beginTransaction();
-
                         try {
                             $fs_path = $volume->fs_path;
                             $mount_path = $volume->mount_path;
+
                             try {
                                 if ($fs_path) {
                                     $fs_path = Crypt::decryptString($fs_path);
@@ -36,18 +38,60 @@ return new class extends Migration
                             } catch (\Exception $e) {
                             }
 
-                            DB::table('local_file_volumes')->where('id', $volume->id)->update([
+                            $decryptedVolumes->push([
+                                'id' => $volume->id,
                                 'fs_path' => $fs_path,
                                 'mount_path' => $mount_path,
+                                'resource_id' => $volume->resource_id,
+                                'resource_type' => $volume->resource_type,
                             ]);
-                            echo "Updated volume {$volume->id}\n";
+
                         } catch (\Exception $e) {
-                            echo "Error encrypting local file volume fields: {$e->getMessage()}\n";
-                            Log::error('Error encrypting local file volume fields: '.$e->getMessage());
+                            echo "Error decrypting volume {$volume->id}: {$e->getMessage()}\n";
+                            Log::error("Error decrypting volume {$volume->id}: ".$e->getMessage());
                         }
-                        DB::commit();
                     }
                 });
+
+            // Group by the unique constraint fields and keep only the first occurrence
+            $uniqueVolumes = $decryptedVolumes->groupBy(function ($volume) {
+                return $volume['mount_path'].'|'.$volume['resource_id'].'|'.$volume['resource_type'];
+            })->map(function ($group) {
+                return $group->first();
+            });
+
+            // Get IDs to delete (all except the ones we're keeping)
+            $idsToKeep = $uniqueVolumes->pluck('id')->toArray();
+            $idsToDelete = $decryptedVolumes->pluck('id')->diff($idsToKeep)->toArray();
+
+            // Delete duplicate records
+            if (! empty($idsToDelete)) {
+                // Show details of volumes being deleted
+                $volumesToDelete = $decryptedVolumes->whereIn('id', $idsToDelete);
+                echo "\nVolumes to be deleted:\n";
+                foreach ($volumesToDelete as $volume) {
+                    echo "ID: {$volume['id']}, Mount Path: {$volume['mount_path']}, Resource ID: {$volume['resource_id']}, Resource Type: {$volume['resource_type']}\n";
+                    echo "FS Path: {$volume['fs_path']}\n";
+                    echo "-------------------\n";
+                }
+
+                DB::table('local_file_volumes')->whereIn('id', $idsToDelete)->delete();
+                echo 'Deleted '.count($idsToDelete)." duplicate volume(s)\n";
+            }
+
+            // Update the remaining records with decrypted values
+            foreach ($uniqueVolumes as $volume) {
+                try {
+                    DB::table('local_file_volumes')->where('id', $volume['id'])->update([
+                        'fs_path' => $volume['fs_path'],
+                        'mount_path' => $volume['mount_path'],
+                    ]);
+                    echo "Updated volume {$volume['id']}\n";
+                } catch (\Exception $e) {
+                    echo "Error updating volume {$volume['id']}: {$e->getMessage()}\n";
+                    Log::error("Error updating volume {$volume['id']}: ".$e->getMessage());
+                }
+            }
         }
     }
 

From c7591fde15fbef7ef6ce238ff563dbc782fbfc8f Mon Sep 17 00:00:00 2001
From: Andras Bacsai <5845193+andrasbacsai@users.noreply.github.com>
Date: Sun, 30 Mar 2025 20:07:56 +0200
Subject: [PATCH 5/5] refactor(database): remove debug output from volume update process

---
 .../2025_03_29_204400_revert_some_local_volume_encryption.php | 1 -
 1 file changed, 1 deletion(-)

diff --git a/database/migrations/2025_03_29_204400_revert_some_local_volume_encryption.php b/database/migrations/2025_03_29_204400_revert_some_local_volume_encryption.php
index 13419e82f..fe3e51318 100644
--- a/database/migrations/2025_03_29_204400_revert_some_local_volume_encryption.php
+++ b/database/migrations/2025_03_29_204400_revert_some_local_volume_encryption.php
@@ -86,7 +86,6 @@ return new class extends Migration
                         'fs_path' => $volume['fs_path'],
                         'mount_path' => $volume['mount_path'],
                     ]);
-                    echo "Updated volume {$volume['id']}\n";
                 } catch (\Exception $e) {
                     echo "Error updating volume {$volume['id']}: {$e->getMessage()}\n";
                     Log::error("Error updating volume {$volume['id']}: ".$e->getMessage());