pr feedback

pull/767/head
Frostebite 2025-12-29 17:14:31 +00:00
parent 775395d4d3
commit 6d42b8f6f2
5 changed files with 32 additions and 5 deletions

View File

@ -205,6 +205,11 @@ jobs:
# Remove all stopped containers and unused images
docker exec k3d-unity-builder-agent-0 sh -c "docker container prune -f 2>/dev/null || true" || true
docker exec k3d-unity-builder-agent-0 sh -c "docker image prune -af 2>/dev/null || true" || true
# Remove all unused images (including those with tags) to free more space
docker exec k3d-unity-builder-agent-0 sh -c "docker images --format '{{.ID}}' | xargs -r docker rmi -f 2>/dev/null || true" || true
# Clean up k3s containerd data
docker exec k3d-unity-builder-agent-0 sh -c "rm -rf /var/lib/rancher/k3s/agent/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/*/fs 2>/dev/null || true" || true
docker exec k3d-unity-builder-agent-0 sh -c "find /var/lib/rancher/k3s -type f -name '*.log' -delete 2>/dev/null || true" || true
# Clean up host docker
docker system prune -af --volumes || true
sleep 5
@ -416,6 +421,11 @@ jobs:
# Remove all stopped containers and unused images
docker exec k3d-unity-builder-agent-0 sh -c "docker container prune -f 2>/dev/null || true" || true
docker exec k3d-unity-builder-agent-0 sh -c "docker image prune -af 2>/dev/null || true" || true
# Remove all unused images (including those with tags) to free more space
docker exec k3d-unity-builder-agent-0 sh -c "docker images --format '{{.ID}}' | xargs -r docker rmi -f 2>/dev/null || true" || true
# Clean up k3s containerd data
docker exec k3d-unity-builder-agent-0 sh -c "rm -rf /var/lib/rancher/k3s/agent/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/*/fs 2>/dev/null || true" || true
docker exec k3d-unity-builder-agent-0 sh -c "find /var/lib/rancher/k3s -type f -name '*.log' -delete 2>/dev/null || true" || true
# Clean up host docker
docker system prune -af --volumes || true
sleep 5

5
dist/index.js vendored
View File

@ -4019,10 +4019,11 @@ class KubernetesJobSpecFactory {
};
}
// Set ephemeral-storage request to a reasonable value to prevent evictions
// For tests, use smaller request (1Gi) since k3d nodes have limited disk space (~2.8GB available)
// For tests, use much smaller request (512Mi) since k3d nodes have very limited disk space (~2.8GB available, often 96%+ used)
// For production, use 2Gi to allow for larger builds
// The node needs some free space headroom, so requesting too much causes evictions
const ephemeralStorageRequest = process.env['cloudRunnerTests'] === 'true' ? '1Gi' : '2Gi';
// With node at 96% usage, we need to be very conservative with requests
const ephemeralStorageRequest = process.env['cloudRunnerTests'] === 'true' ? '512Mi' : '2Gi';
job.spec.template.spec.containers[0].resources.requests[`ephemeral-storage`] = ephemeralStorageRequest;
return job;
}

2
dist/index.js.map vendored

File diff suppressed because one or more lines are too long

View File

@ -159,10 +159,11 @@ class KubernetesJobSpecFactory {
}
// Set ephemeral-storage request to a reasonable value to prevent evictions
// For tests, use smaller request (1Gi) since k3d nodes have limited disk space (~2.8GB available)
// For tests, use much smaller request (512Mi) since k3d nodes have very limited disk space (~2.8GB available, often 96%+ used)
// For production, use 2Gi to allow for larger builds
// The node needs some free space headroom, so requesting too much causes evictions
const ephemeralStorageRequest = process.env['cloudRunnerTests'] === 'true' ? '1Gi' : '2Gi';
// With node at 96% usage, we need to be very conservative with requests
const ephemeralStorageRequest = process.env['cloudRunnerTests'] === 'true' ? '512Mi' : '2Gi';
job.spec.template.spec.containers[0].resources.requests[`ephemeral-storage`] = ephemeralStorageRequest;
return job;

View File

@ -50,14 +50,29 @@ describe('Cloud Runner Kubernetes', () => {
const fallbackLogsUnavailableMessage =
'Pod logs unavailable - pod may have been terminated before logs could be collected.';
const incompleteLogsMessage =
'Pod logs incomplete - "Collected Logs" marker not found. Pod may have been terminated before post-build completed.';
// If we hit the aggressive fallback path and couldn't retrieve any logs from the pod,
// don't assert on specific Unity log contents; just assert that we got the fallback message.
// This makes the test resilient to cluster-level evictions / PreStop hook failures while still
// ensuring Cloud Runner surfaces a useful message in BuildResults.
// However, if we got logs but they're incomplete (missing "Collected Logs"), the test should fail
// as this indicates the build didn't complete successfully (pod was evicted/killed).
if (results.includes(fallbackLogsUnavailableMessage)) {
// Complete failure - no logs at all (acceptable for eviction scenarios)
expect(results).toContain(fallbackLogsUnavailableMessage);
CloudRunnerLogger.log('Test passed with fallback message (pod was evicted before any logs were written)');
} else if (results.includes(incompleteLogsMessage)) {
// Incomplete logs - we got some output but it is missing "Collected Logs" (build didn't complete)
// This should fail the test as the build didn't succeed
throw new Error(
`Build did not complete successfully: ${incompleteLogsMessage}\n` +
`This indicates the pod was evicted or killed before post-build completed.\n` +
`Build results:\n${results.substring(0, 500)}`,
);
} else {
// Normal case - logs are complete
expect(results).toContain('Collected Logs');
expect(results).toContain(libraryString);
expect(results).toContain(buildSucceededString);