fix
parent 9aa24e21f1
commit b2cb6ebb19
@@ -330,6 +330,32 @@ jobs:
           containerMemory: '512'
           GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
           GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
+      - name: Clean up after cloud-runner-image test
+        if: always()
+        run: |
+          echo "Cleaning up after cloud-runner-image test..."
+          kubectl delete jobs --all --ignore-not-found=true -n default || true
+          kubectl get pods -n default -o name 2>/dev/null | grep -E "(unity-builder-job-|helper-pod-)" | while read pod; do
+            kubectl delete "$pod" --ignore-not-found=true || true
+          done || true
+          kubectl get pvc -n default -o name 2>/dev/null | grep "unity-builder-pvc-" | while read pvc; do
+            kubectl delete "$pvc" --ignore-not-found=true || true
+          done || true
+          kubectl get secrets -n default -o name 2>/dev/null | grep "build-credentials-" | while read secret; do
+            kubectl delete "$secret" --ignore-not-found=true || true
+          done || true
+          # Aggressive cleanup in k3d nodes, but preserve Unity images
+          K3D_NODE_CONTAINERS="${K3D_NODE_CONTAINERS:-k3d-unity-builder-agent-0 k3d-unity-builder-server-0}"
+          for NODE in $K3D_NODE_CONTAINERS; do
+            # Remove stopped containers
+            docker exec "$NODE" sh -c "crictl rm --all 2>/dev/null || true" || true
+            # Remove non-Unity images only (preserve unityci/editor images to avoid re-pulling 3.9GB)
+            docker exec "$NODE" sh -c "for img in \$(crictl images -q 2>/dev/null); do repo=\$(crictl inspecti \$img --format '{{.repo}}' 2>/dev/null || echo ''); if echo \"\$repo\" | grep -qvE 'unityci/editor|unity'; then crictl rmi \$img 2>/dev/null || true; fi; done" || true
+            # Clean up unused layers
+            docker exec "$NODE" sh -c "crictl rmi --prune 2>/dev/null || true" || true
+          done || true
+          rm -rf ./cloud-runner-cache/* || true
+          docker system prune -f || true
       - name: Run cloud-runner-kubernetes test
         timeout-minutes: 30
         run: yarn run test "cloud-runner-kubernetes" --detectOpenHandles --forceExit --runInBand
@@ -356,6 +382,32 @@ jobs:
           AWS_EC2_METADATA_DISABLED: 'true'
           GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
           GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
+      - name: Clean up after cloud-runner-kubernetes test
+        if: always()
+        run: |
+          echo "Cleaning up after cloud-runner-kubernetes test..."
+          kubectl delete jobs --all --ignore-not-found=true -n default || true
+          kubectl get pods -n default -o name 2>/dev/null | grep -E "(unity-builder-job-|helper-pod-)" | while read pod; do
+            kubectl delete "$pod" --ignore-not-found=true || true
+          done || true
+          kubectl get pvc -n default -o name 2>/dev/null | grep "unity-builder-pvc-" | while read pvc; do
+            kubectl delete "$pvc" --ignore-not-found=true || true
+          done || true
+          kubectl get secrets -n default -o name 2>/dev/null | grep "build-credentials-" | while read secret; do
+            kubectl delete "$secret" --ignore-not-found=true || true
+          done || true
+          # Aggressive cleanup in k3d nodes, but preserve Unity images
+          K3D_NODE_CONTAINERS="${K3D_NODE_CONTAINERS:-k3d-unity-builder-agent-0 k3d-unity-builder-server-0}"
+          for NODE in $K3D_NODE_CONTAINERS; do
+            # Remove stopped containers
+            docker exec "$NODE" sh -c "crictl rm --all 2>/dev/null || true" || true
+            # Remove non-Unity images only (preserve unityci/editor images to avoid re-pulling 3.9GB)
+            docker exec "$NODE" sh -c "for img in \$(crictl images -q 2>/dev/null); do repo=\$(crictl inspecti \$img --format '{{.repo}}' 2>/dev/null || echo ''); if echo \"\$repo\" | grep -qvE 'unityci/editor|unity'; then crictl rmi \$img 2>/dev/null || true; fi; done" || true
+            # Clean up unused layers
+            docker exec "$NODE" sh -c "crictl rmi --prune 2>/dev/null || true" || true
+          done || true
+          rm -rf ./cloud-runner-cache/* || true
+          docker system prune -f || true
       - name: Run cloud-runner-s3-steps test (K8s)
         timeout-minutes: 30
         run: yarn run test "cloud-runner-s3-steps" --detectOpenHandles --forceExit --runInBand
@@ -382,6 +434,32 @@ jobs:
           AWS_EC2_METADATA_DISABLED: 'true'
           GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
           GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
+      - name: Clean up after cloud-runner-s3-steps test
+        if: always()
+        run: |
+          echo "Cleaning up after cloud-runner-s3-steps test..."
+          kubectl delete jobs --all --ignore-not-found=true -n default || true
+          kubectl get pods -n default -o name 2>/dev/null | grep -E "(unity-builder-job-|helper-pod-)" | while read pod; do
+            kubectl delete "$pod" --ignore-not-found=true || true
+          done || true
+          kubectl get pvc -n default -o name 2>/dev/null | grep "unity-builder-pvc-" | while read pvc; do
+            kubectl delete "$pvc" --ignore-not-found=true || true
+          done || true
+          kubectl get secrets -n default -o name 2>/dev/null | grep "build-credentials-" | while read secret; do
+            kubectl delete "$secret" --ignore-not-found=true || true
+          done || true
+          # Aggressive cleanup in k3d nodes, but preserve Unity images
+          K3D_NODE_CONTAINERS="${K3D_NODE_CONTAINERS:-k3d-unity-builder-agent-0 k3d-unity-builder-server-0}"
+          for NODE in $K3D_NODE_CONTAINERS; do
+            # Remove stopped containers
+            docker exec "$NODE" sh -c "crictl rm --all 2>/dev/null || true" || true
+            # Remove non-Unity images only (preserve unityci/editor images to avoid re-pulling 3.9GB)
+            docker exec "$NODE" sh -c "for img in \$(crictl images -q 2>/dev/null); do repo=\$(crictl inspecti \$img --format '{{.repo}}' 2>/dev/null || echo ''); if echo \"\$repo\" | grep -qvE 'unityci/editor|unity'; then crictl rmi \$img 2>/dev/null || true; fi; done" || true
+            # Clean up unused layers
+            docker exec "$NODE" sh -c "crictl rmi --prune 2>/dev/null || true" || true
+          done || true
+          rm -rf ./cloud-runner-cache/* || true
+          docker system prune -f || true
       - name: Run cloud-runner-end2end-caching test (K8s)
         timeout-minutes: 60
         run: yarn run test "cloud-runner-end2end-caching" --detectOpenHandles --forceExit --runInBand
@@ -408,6 +486,32 @@ jobs:
           AWS_EC2_METADATA_DISABLED: 'true'
           GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
           GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
+      - name: Clean up after cloud-runner-end2end-caching test
+        if: always()
+        run: |
+          echo "Cleaning up after cloud-runner-end2end-caching test..."
+          kubectl delete jobs --all --ignore-not-found=true -n default || true
+          kubectl get pods -n default -o name 2>/dev/null | grep -E "(unity-builder-job-|helper-pod-)" | while read pod; do
+            kubectl delete "$pod" --ignore-not-found=true || true
+          done || true
+          kubectl get pvc -n default -o name 2>/dev/null | grep "unity-builder-pvc-" | while read pvc; do
+            kubectl delete "$pvc" --ignore-not-found=true || true
+          done || true
+          kubectl get secrets -n default -o name 2>/dev/null | grep "build-credentials-" | while read secret; do
+            kubectl delete "$secret" --ignore-not-found=true || true
+          done || true
+          # Aggressive cleanup in k3d nodes, but preserve Unity images
+          K3D_NODE_CONTAINERS="${K3D_NODE_CONTAINERS:-k3d-unity-builder-agent-0 k3d-unity-builder-server-0}"
+          for NODE in $K3D_NODE_CONTAINERS; do
+            # Remove stopped containers
+            docker exec "$NODE" sh -c "crictl rm --all 2>/dev/null || true" || true
+            # Remove non-Unity images only (preserve unityci/editor images to avoid re-pulling 3.9GB)
+            docker exec "$NODE" sh -c "for img in \$(crictl images -q 2>/dev/null); do repo=\$(crictl inspecti \$img --format '{{.repo}}' 2>/dev/null || echo ''); if echo \"\$repo\" | grep -qvE 'unityci/editor|unity'; then crictl rmi \$img 2>/dev/null || true; fi; done" || true
+            # Clean up unused layers
+            docker exec "$NODE" sh -c "crictl rmi --prune 2>/dev/null || true" || true
+          done || true
+          rm -rf ./cloud-runner-cache/* || true
+          docker system prune -f || true
       - name: Clean up disk space before end2end-retaining test
         run: |
           echo "Cleaning up disk space before end2end-retaining test..."
@@ -3757,28 +3757,29 @@ class Kubernetes {
         try {
             cloud_runner_logger_1.default.log('Cleaning up old images in k3d node before pulling new image...');
            const { CloudRunnerSystem } = await Promise.resolve().then(() => __importStar(__nccwpck_require__(4197)));
-            // Extract image name without tag for matching
-            const imageName = image.split(':')[0];
-            const imageTag = image.split(':')[1] || 'latest';
-            // More targeted cleanup: remove stopped containers only
-            // IMPORTANT: Do NOT remove images - preserve Unity image to avoid re-pulling the 3.9GB image
-            // Strategy: Only remove containers, never touch images (safest approach)
-            const cleanupCommands = [
+            // Aggressive cleanup: remove stopped containers and non-Unity images
+            // IMPORTANT: Preserve Unity images (unityci/editor) to avoid re-pulling the 3.9GB image
+            const K3D_NODE_CONTAINERS = ['k3d-unity-builder-agent-0', 'k3d-unity-builder-server-0'];
+            const cleanupCommands = [];
+            for (const NODE of K3D_NODE_CONTAINERS) {
                 // Remove all stopped containers (this frees runtime space but keeps images)
-                'docker exec k3d-unity-builder-agent-0 sh -c "crictl rm --all 2>/dev/null || true" || true',
-                'docker exec k3d-unity-builder-server-0 sh -c "crictl rm --all 2>/dev/null || true" || true',
-                // DO NOT remove images - preserve everything including Unity image
-                // Removing images risks removing the Unity image which causes "no space left" errors
-            ];
+                cleanupCommands.push(`docker exec ${NODE} sh -c "crictl rm --all 2>/dev/null || true" || true`);
+                // Remove non-Unity images only (preserve unityci/editor images to avoid re-pulling 3.9GB)
+                // This is safe because we explicitly exclude Unity images from deletion
+                cleanupCommands.push(`docker exec ${NODE} sh -c "for img in \$(crictl images -q 2>/dev/null); do repo=\$(crictl inspecti \$img --format '{{.repo}}' 2>/dev/null || echo ''); if echo \"\$repo\" | grep -qvE 'unityci/editor|unity'; then crictl rmi \$img 2>/dev/null || true; fi; done" || true`);
+                // Clean up unused layers (prune should preserve referenced images)
+                cleanupCommands.push(`docker exec ${NODE} sh -c "crictl rmi --prune 2>/dev/null || true" || true`);
+            }
            for (const cmd of cleanupCommands) {
                try {
                    await CloudRunnerSystem.Run(cmd, true, true);
                }
                catch (cmdError) {
-                    // Ignore individual command failures
+                    // Ignore individual command failures - cleanup is best effort
                    cloud_runner_logger_1.default.log(`Cleanup command failed (non-fatal): ${cmdError}`);
                }
            }
+            cloud_runner_logger_1.default.log('Cleanup completed (containers and non-Unity images removed, Unity images preserved)');
        }
        catch (cleanupError) {
            cloud_runner_logger_1.default.logWarning(`Failed to cleanup images before job creation: ${cleanupError}`);
File diff suppressed because one or more lines are too long
@@ -163,29 +163,34 @@ class Kubernetes implements ProviderInterface {
       CloudRunnerLogger.log('Cleaning up old images in k3d node before pulling new image...');
       const { CloudRunnerSystem } = await import('../../services/core/cloud-runner-system');

-      // Extract image name without tag for matching
-      const imageName = image.split(':')[0];
-      const imageTag = image.split(':')[1] || 'latest';
+      // Aggressive cleanup: remove stopped containers and non-Unity images
+      // IMPORTANT: Preserve Unity images (unityci/editor) to avoid re-pulling the 3.9GB image
+      const K3D_NODE_CONTAINERS = ['k3d-unity-builder-agent-0', 'k3d-unity-builder-server-0'];
+      const cleanupCommands: string[] = [];

-      // More targeted cleanup: remove stopped containers only
-      // IMPORTANT: Do NOT remove images - preserve Unity image to avoid re-pulling the 3.9GB image
-      // Strategy: Only remove containers, never touch images (safest approach)
-      const cleanupCommands = [
+      for (const NODE of K3D_NODE_CONTAINERS) {
         // Remove all stopped containers (this frees runtime space but keeps images)
-        'docker exec k3d-unity-builder-agent-0 sh -c "crictl rm --all 2>/dev/null || true" || true',
-        'docker exec k3d-unity-builder-server-0 sh -c "crictl rm --all 2>/dev/null || true" || true',
-        // DO NOT remove images - preserve everything including Unity image
-        // Removing images risks removing the Unity image which causes "no space left" errors
-      ];
+        cleanupCommands.push(
+          `docker exec ${NODE} sh -c "crictl rm --all 2>/dev/null || true" || true`,
+        );
+        // Remove non-Unity images only (preserve unityci/editor images to avoid re-pulling 3.9GB)
+        // This is safe because we explicitly exclude Unity images from deletion
+        cleanupCommands.push(
+          `docker exec ${NODE} sh -c "for img in \$(crictl images -q 2>/dev/null); do repo=\$(crictl inspecti \$img --format '{{.repo}}' 2>/dev/null || echo ''); if echo \"\$repo\" | grep -qvE 'unityci/editor|unity'; then crictl rmi \$img 2>/dev/null || true; fi; done" || true`,
+        );
+        // Clean up unused layers (prune should preserve referenced images)
+        cleanupCommands.push(`docker exec ${NODE} sh -c "crictl rmi --prune 2>/dev/null || true" || true`);
+      }

       for (const cmd of cleanupCommands) {
         try {
           await CloudRunnerSystem.Run(cmd, true, true);
         } catch (cmdError) {
-          // Ignore individual command failures
+          // Ignore individual command failures - cleanup is best effort
           CloudRunnerLogger.log(`Cleanup command failed (non-fatal): ${cmdError}`);
         }
       }
+      CloudRunnerLogger.log('Cleanup completed (containers and non-Unity images removed, Unity images preserved)');
     } catch (cleanupError) {
       CloudRunnerLogger.logWarning(`Failed to cleanup images before job creation: ${cleanupError}`);
       // Continue anyway - image might already be cached
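Both hunks above (the bundled dist output and the TypeScript source) apply the same pattern: build a list of per-node `docker exec ... crictl ...` commands and then run each one best-effort, so a single cleanup failure can never fail the build. Below is a minimal sketch of that pattern, assuming Node's `execSync` as a stand-in for the project's `CloudRunnerSystem.Run` helper; the node names mirror the ones hard-coded in the commit, and the Unity-image-filtering one-liner is omitted here (see the diff above for the exact command).

```typescript
// Sketch only: mirrors the cleanup pattern from the hunks above, not the project's actual module.
// Assumption: execSync stands in for CloudRunnerSystem.Run.
import { execSync } from 'node:child_process';

const K3D_NODE_CONTAINERS = ['k3d-unity-builder-agent-0', 'k3d-unity-builder-server-0'];

const cleanupCommands: string[] = [];
for (const node of K3D_NODE_CONTAINERS) {
  // Remove stopped containers inside the k3d node (frees runtime space, keeps images).
  cleanupCommands.push(`docker exec ${node} sh -c "crictl rm --all 2>/dev/null || true" || true`);
  // The real change also deletes images whose repository is not unityci/editor at this point,
  // using the crictl one-liner shown in the diff above (omitted from this sketch).
  // Prune unused image layers; referenced images, including the Unity editor image, stay cached.
  cleanupCommands.push(`docker exec ${node} sh -c "crictl rmi --prune 2>/dev/null || true" || true`);
}

// Best-effort execution: log and skip any individual failure, matching the
// try/catch loop around CloudRunnerSystem.Run in the provider code.
for (const cmd of cleanupCommands) {
  try {
    execSync(cmd, { stdio: 'inherit' });
  } catch (error) {
    console.log(`Cleanup command failed (non-fatal): ${error}`);
  }
}
```

The workflow steps added earlier in this commit run the same commands as a plain shell loop, so the behaviour is the same whether cleanup happens in the CI step or in the provider before a job is created.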
@@ -48,7 +48,7 @@ commands: echo "test"`;
     const getCustomStepsFromFiles = ContainerHookService.GetContainerHooksFromFiles(`before`);
     CloudRunnerLogger.log(JSON.stringify(getCustomStepsFromFiles, undefined, 4));
   });
-  if (CloudRunnerOptions.cloudRunnerDebug && CloudRunnerOptions.providerStrategy !== `k8s`) {
+  if (CloudRunnerOptions.cloudRunnerDebug) {
     it('Should be 1 before and 1 after hook', async () => {
       const overrides = {
         versioning: 'None',