pr feedback - remove ephemeral-storage request for tests

parent f4d28fa6d2
commit 355551c72e
@@ -167,10 +167,15 @@ jobs:
 docker system prune -af --volumes || true
 # Clean up disk space on k3d node to prevent ephemeral-storage evictions and disk pressure
 echo "Cleaning up disk space on k3d node..."
-docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
-# Also clean up old logs and temporary files that might be taking up space
-docker exec k3d-unity-builder-agent-0 sh -c "find /var/log -type f -name '*.log' -mtime +1 -delete 2>/dev/null || true" || true
-docker exec k3d-unity-builder-agent-0 sh -c "find /tmp -type f -mtime +1 -delete 2>/dev/null || true" || true
+# Use containerd/crictl commands (docker not available in k3d nodes)
+docker exec k3d-unity-builder-agent-0 sh -c "crictl rmi --prune 2>/dev/null || true" || true
+docker exec k3d-unity-builder-agent-0 sh -c "crictl rmp --all 2>/dev/null || true" || true
+# Clean up containerd snapshots and images more aggressively
+docker exec k3d-unity-builder-agent-0 sh -c "find /var/lib/rancher/k3s/agent/containerd -type d -name 'snapshots' -exec rm -rf {}/* 2>/dev/null \; || true" || true
+# Clean up old logs and temporary files
+docker exec k3d-unity-builder-agent-0 sh -c "find /var/log -type f -name '*.log' -delete 2>/dev/null || true" || true
+docker exec k3d-unity-builder-agent-0 sh -c "find /tmp -type f -delete 2>/dev/null || true" || true
+docker exec k3d-unity-builder-agent-0 sh -c "find /var/lib/rancher/k3s -type f -name '*.log' -delete 2>/dev/null || true" || true
 docker exec k3d-unity-builder-agent-0 sh -c "df -h" 2>/dev/null || true
 # Wait for disk pressure taints to clear before proceeding
 echo "Checking for disk pressure taints..."
@@ -4019,12 +4019,16 @@ class KubernetesJobSpecFactory {
 };
 }
 // Set ephemeral-storage request to a reasonable value to prevent evictions
-// For tests, use much smaller request (512Mi) since k3d nodes have very limited disk space (~2.8GB available, often 96%+ used)
+// For tests, don't set a request (or use minimal 128Mi) since k3d nodes have very limited disk space
+// Kubernetes will use whatever is available without a request, which is better for constrained environments
 // For production, use 2Gi to allow for larger builds
 // The node needs some free space headroom, so requesting too much causes evictions
-// With node at 96% usage, we need to be very conservative with requests
-const ephemeralStorageRequest = process.env['cloudRunnerTests'] === 'true' ? '512Mi' : '2Gi';
-job.spec.template.spec.containers[0].resources.requests[`ephemeral-storage`] = ephemeralStorageRequest;
+// With node at 96% usage and only ~2.7GB free, we can't request much without triggering evictions
+if (process.env['cloudRunnerTests'] !== 'true') {
+  // Only set ephemeral-storage request for production builds
+  job.spec.template.spec.containers[0].resources.requests[`ephemeral-storage`] = '2Gi';
+}
+// For tests, don't set ephemeral-storage request - let Kubernetes use available space
 return job;
 }
 }
File diff suppressed because one or more lines are too long
@@ -159,12 +159,16 @@ class KubernetesJobSpecFactory {
 }

 // Set ephemeral-storage request to a reasonable value to prevent evictions
-// For tests, use much smaller request (512Mi) since k3d nodes have very limited disk space (~2.8GB available, often 96%+ used)
+// For tests, don't set a request (or use minimal 128Mi) since k3d nodes have very limited disk space
+// Kubernetes will use whatever is available without a request, which is better for constrained environments
 // For production, use 2Gi to allow for larger builds
 // The node needs some free space headroom, so requesting too much causes evictions
-// With node at 96% usage, we need to be very conservative with requests
-const ephemeralStorageRequest = process.env['cloudRunnerTests'] === 'true' ? '512Mi' : '2Gi';
-job.spec.template.spec.containers[0].resources.requests[`ephemeral-storage`] = ephemeralStorageRequest;
+// With node at 96% usage and only ~2.7GB free, we can't request much without triggering evictions
+if (process.env['cloudRunnerTests'] !== 'true') {
+  // Only set ephemeral-storage request for production builds
+  job.spec.template.spec.containers[0].resources.requests[`ephemeral-storage`] = '2Gi';
+}
+// For tests, don't set ephemeral-storage request - let Kubernetes use available space

 return job;
 }
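For context, here is a minimal standalone TypeScript sketch (not part of the commit) of how the conditional ephemeral-storage logic behaves once assembled, assuming the container spec shape shown in the diff above; the JobSpec type and the applyEphemeralStorageRequest name are illustrative only, not names from the repository.

// Minimal sketch, assuming the job spec shape used in the diff above.
// The JobSpec type and function name are illustrative, not from the repo.
type JobSpec = {
  spec: {
    template: {
      spec: {
        containers: Array<{ resources: { requests: Record<string, string> } }>;
      };
    };
  };
};

function applyEphemeralStorageRequest(job: JobSpec): JobSpec {
  // Production builds request 2Gi of ephemeral storage up front.
  // Test runs on the space-constrained k3d node set no request at all,
  // so the scheduler and kubelet work with whatever space is actually free.
  if (process.env['cloudRunnerTests'] !== 'true') {
    job.spec.template.spec.containers[0].resources.requests['ephemeral-storage'] = '2Gi';
  }
  return job;
}

With cloudRunnerTests set to 'true', the requests map is simply left untouched, which matches the behaviour the comments in the hunk describe.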