fix(docker): enhance container status aggregation to include restarting and exited states
@@ -96,7 +96,11 @@ class GetContainersStatus
                 }
                 $containerStatus = data_get($container, 'State.Status');
                 $containerHealth = data_get($container, 'State.Health.Status', 'unhealthy');
-                $containerStatus = "$containerStatus ($containerHealth)";
+                if ($containerStatus === 'restarting') {
+                    $containerStatus = "restarting ($containerHealth)";
+                } else {
+                    $containerStatus = "$containerStatus ($containerHealth)";
+                }
                 $labels = Arr::undot(format_docker_labels_to_json($labels));
                 $applicationId = data_get($labels, 'coolify.applicationId');
                 if ($applicationId) {
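The hunk above only changes the per-container label. A minimal standalone sketch of that formatting (plain PHP, not the Coolify helper code; the default health of 'unhealthy' mirrors the data_get fallback above):

    <?php
    // Hypothetical helper mirroring the branch above: restarting containers are
    // labelled explicitly, everything else keeps the "<status> (<health>)" shape.
    function containerLabel(string $status, string $health = 'unhealthy'): string
    {
        return $status === 'restarting'
            ? "restarting ($health)"
            : "$status ($health)";
    }

    echo containerLabel('restarting'), PHP_EOL;          // restarting (unhealthy)
    echo containerLabel('running', 'healthy'), PHP_EOL;  // running (healthy)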
@@ -386,19 +390,33 @@ class GetContainersStatus
             return null;
         }
 
-        // Aggregate status: if any container is running, app is running
         $hasRunning = false;
+        $hasRestarting = false;
         $hasUnhealthy = false;
+        $hasExited = false;
 
         foreach ($relevantStatuses as $status) {
-            if (str($status)->contains('running')) {
+            if (str($status)->contains('restarting')) {
+                $hasRestarting = true;
+            } elseif (str($status)->contains('running')) {
                 $hasRunning = true;
                 if (str($status)->contains('unhealthy')) {
                     $hasUnhealthy = true;
                 }
+            } elseif (str($status)->contains('exited')) {
+                $hasExited = true;
+                $hasUnhealthy = true;
             }
         }
 
+        if ($hasRestarting) {
+            return 'degraded (unhealthy)';
+        }
+
+        if ($hasRunning && $hasExited) {
+            return 'degraded (unhealthy)';
+        }
+
         if ($hasRunning) {
            return $hasUnhealthy ? 'running (unhealthy)' : 'running (healthy)';
         }
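A minimal standalone sketch of the aggregation precedence introduced above, assuming plain status strings such as 'running (healthy)' and using str_contains in place of Laravel's str() helper; the final fallback is an assumption, since the hunk ends before it:

    <?php
    // Simplified, hypothetical re-implementation of the precedence above:
    // any restarting container, or a running/exited mix, marks the app degraded;
    // otherwise running is healthy unless some status contains "unhealthy".
    function aggregateStatuses(array $statuses): string
    {
        $hasRunning = $hasRestarting = $hasUnhealthy = $hasExited = false;

        foreach ($statuses as $status) {
            if (str_contains($status, 'restarting')) {
                $hasRestarting = true;
            } elseif (str_contains($status, 'running')) {
                $hasRunning = true;
                if (str_contains($status, 'unhealthy')) {
                    $hasUnhealthy = true;
                }
            } elseif (str_contains($status, 'exited')) {
                $hasExited = true;
                $hasUnhealthy = true;
            }
        }

        if ($hasRestarting || ($hasRunning && $hasExited)) {
            return 'degraded (unhealthy)';
        }
        if ($hasRunning) {
            return $hasUnhealthy ? 'running (unhealthy)' : 'running (healthy)';
        }

        return 'exited (unhealthy)'; // assumed fallback; the real method continues past the hunk shown
    }

    // One healthy container plus one exited container is now reported as
    // degraded instead of running.
    echo aggregateStatuses(['running (healthy)', 'exited (unhealthy)']), PHP_EOL; // degraded (unhealthy)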
@@ -26,22 +26,22 @@ class ComplexStatusCheck
                     continue;
                 }
             }
-            $container = instant_remote_process(["docker container inspect $(docker container ls -q --filter 'label=coolify.applicationId={$application->id}' --filter 'label=coolify.pullRequestId=0') --format '{{json .}}'"], $server, false);
-            $container = format_docker_command_output_to_json($container);
-            if ($container->count() === 1) {
-                $container = $container->first();
-                $containerStatus = data_get($container, 'State.Status');
-                $containerHealth = data_get($container, 'State.Health.Status', 'unhealthy');
+            $containers = instant_remote_process(["docker container inspect $(docker container ls -q --filter 'label=coolify.applicationId={$application->id}' --filter 'label=coolify.pullRequestId=0') --format '{{json .}}'"], $server, false);
+            $containers = format_docker_command_output_to_json($containers);
+
+            if ($containers->count() > 0) {
+                $statusToSet = $this->aggregateContainerStatuses($application, $containers);
+
                 if ($is_main_server) {
                     $statusFromDb = $application->status;
-                    if ($statusFromDb !== $containerStatus) {
-                        $application->update(['status' => "$containerStatus:$containerHealth"]);
+                    if ($statusFromDb !== $statusToSet) {
+                        $application->update(['status' => $statusToSet]);
                     }
                 } else {
                     $additional_server = $application->additional_servers()->wherePivot('server_id', $server->id);
                     $statusFromDb = $additional_server->first()->pivot->status;
-                    if ($statusFromDb !== $containerStatus) {
-                        $additional_server->updateExistingPivot($server->id, ['status' => "$containerStatus:$containerHealth"]);
+                    if ($statusFromDb !== $statusToSet) {
+                        $additional_server->updateExistingPivot($server->id, ['status' => $statusToSet]);
                     }
                 }
             } else {
@@ -57,4 +57,78 @@ class ComplexStatusCheck
             }
         }
     }
+
+    private function aggregateContainerStatuses($application, $containers)
+    {
+        $dockerComposeRaw = data_get($application, 'docker_compose_raw');
+        $excludedContainers = collect();
+
+        if ($dockerComposeRaw) {
+            try {
+                $dockerCompose = \Symfony\Component\Yaml\Yaml::parse($dockerComposeRaw);
+                $services = data_get($dockerCompose, 'services', []);
+
+                foreach ($services as $serviceName => $serviceConfig) {
+                    $excludeFromHc = data_get($serviceConfig, 'exclude_from_hc', false);
+                    $restartPolicy = data_get($serviceConfig, 'restart', 'always');
+
+                    if ($excludeFromHc || $restartPolicy === 'no') {
+                        $excludedContainers->push($serviceName);
+                    }
+                }
+            } catch (\Exception $e) {
+                // If we can't parse, treat all containers as included
+            }
+        }
+
+        $hasRunning = false;
+        $hasRestarting = false;
+        $hasUnhealthy = false;
+        $hasExited = false;
+        $relevantContainerCount = 0;
+
+        foreach ($containers as $container) {
+            $labels = data_get($container, 'Config.Labels', []);
+            $serviceName = data_get($labels, 'com.docker.compose.service');
+
+            if ($serviceName && $excludedContainers->contains($serviceName)) {
+                continue;
+            }
+
+            $relevantContainerCount++;
+            $containerStatus = data_get($container, 'State.Status');
+            $containerHealth = data_get($container, 'State.Health.Status', 'unhealthy');
+
+            if ($containerStatus === 'restarting') {
+                $hasRestarting = true;
+                $hasUnhealthy = true;
+            } elseif ($containerStatus === 'running') {
+                $hasRunning = true;
+                if ($containerHealth === 'unhealthy') {
+                    $hasUnhealthy = true;
+                }
+            } elseif ($containerStatus === 'exited') {
+                $hasExited = true;
+                $hasUnhealthy = true;
+            }
+        }
+
+        if ($relevantContainerCount === 0) {
+            return 'running:healthy';
+        }
+
+        if ($hasRestarting) {
+            return 'degraded:unhealthy';
+        }
+
+        if ($hasRunning && $hasExited) {
+            return 'degraded:unhealthy';
+        }
+
+        if ($hasRunning) {
+            return $hasUnhealthy ? 'running:unhealthy' : 'running:healthy';
+        }
+
+        return 'exited:unhealthy';
+    }
 }
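A rough, self-contained sketch of what the new aggregateContainerStatuses method does, using plain arrays in place of the docker inspect output, Laravel collections, data_get, and the YAML parser; all field names and array shapes below are illustrative, not the real payload:

    <?php
    // Hypothetical simplification: services flagged exclude_from_hc or with
    // restart policy "no" are skipped, the rest are aggregated as above.
    function aggregateSketch(array $composeServices, array $containers): string
    {
        $excluded = [];
        foreach ($composeServices as $name => $config) {
            if (($config['exclude_from_hc'] ?? false) || ($config['restart'] ?? 'always') === 'no') {
                $excluded[] = $name;
            }
        }

        $hasRunning = $hasRestarting = $hasUnhealthy = $hasExited = false;
        $relevant = 0;

        foreach ($containers as $container) {
            if (in_array($container['service'] ?? null, $excluded, true)) {
                continue;
            }
            $relevant++;
            $status = $container['status'] ?? null;
            $health = $container['health'] ?? 'unhealthy';

            if ($status === 'restarting') {
                $hasRestarting = $hasUnhealthy = true;
            } elseif ($status === 'running') {
                $hasRunning = true;
                $hasUnhealthy = $hasUnhealthy || $health === 'unhealthy';
            } elseif ($status === 'exited') {
                $hasExited = $hasUnhealthy = true;
            }
        }

        if ($relevant === 0) {
            return 'running:healthy'; // nothing left to check
        }
        if ($hasRestarting || ($hasRunning && $hasExited)) {
            return 'degraded:unhealthy';
        }
        if ($hasRunning) {
            return $hasUnhealthy ? 'running:unhealthy' : 'running:healthy';
        }

        return 'exited:unhealthy';
    }

    // A one-off container with restart "no" that has exited no longer drags the
    // status down; the web service alone decides it.
    echo aggregateSketch(
        ['web' => [], 'migrate' => ['restart' => 'no']],
        [
            ['service' => 'web', 'status' => 'running', 'health' => 'healthy'],
            ['service' => 'migrate', 'status' => 'exited'],
        ]
    ), PHP_EOL; // running:healthy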
@@ -8360,7 +8360,10 @@
|
|||||||
"is_preview": {
|
"is_preview": {
|
||||||
"type": "boolean"
|
"type": "boolean"
|
||||||
},
|
},
|
||||||
"is_buildtime_only": {
|
"is_runtime": {
|
||||||
|
"type": "boolean"
|
||||||
|
},
|
||||||
|
"is_buildtime": {
|
||||||
"type": "boolean"
|
"type": "boolean"
|
||||||
},
|
},
|
||||||
"is_shared": {
|
"is_shared": {
|
||||||
|
@@ -5411,7 +5411,9 @@ components:
                 type: boolean
               is_preview:
                 type: boolean
-              is_buildtime_only:
+              is_runtime:
+                type: boolean
+              is_buildtime:
                 type: boolean
               is_shared:
                 type: boolean