# unity-builder/.github/workflows/cloud-runner-integrity.yml

name: cloud-runner-integrity
on:
  workflow_call:
    inputs:
      runGithubIntegrationTests:
        description: 'Run GitHub Checks integration tests'
        required: false
        default: 'false'
        type: string
permissions:
  contents: read
  checks: write
  statuses: write
env:
  # Commented out: using LocalStack tests instead of real AWS
  # AWS_REGION: eu-west-2
  # AWS_DEFAULT_REGION: eu-west-2
  AWS_STACK_NAME: game-ci-team-pipelines # Still needed for LocalStack S3 bucket creation
  CLOUD_RUNNER_BRANCH: ${{ github.ref }}
  DEBUG: true
  PROJECT_PATH: test-project
  USE_IL2CPP: false
jobs:
  cloud-runner-tests:
    name: Cloud Runner Integrity Tests
    runs-on: ubuntu-latest
    env:
      # Both node containers created by "k3d cluster create unity-builder --agents 1";
      # the pre-pull and cleanup scripts below iterate over this list.
      K3D_NODE_CONTAINERS: 'k3d-unity-builder-agent-0 k3d-unity-builder-server-0'
    steps:
      # ==========================================
      # SETUP SECTION
      # ==========================================
      - uses: actions/checkout@v4
        with:
          lfs: false
      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'yarn'
      - name: Set up kubectl
        uses: azure/setup-kubectl@v4
        with:
          version: 'v1.31.0'
      - name: Install k3d
        run: |
          curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
          k3d version | cat
      - name: Initial disk space cleanup
        run: |
          echo "Initial disk space cleanup..."
          echo "Current disk usage:"
          df -h
          # Clean up any leftover k3d clusters/images from previous runs
          k3d cluster delete unity-builder || true
          k3d image delete --all || true
          # Stop any existing LocalStack container
          docker stop localstack-main 2>/dev/null || true
          docker rm localstack-main 2>/dev/null || true
          # Clean up Docker images, containers, and volumes on the host
          docker system prune -af --volumes || true
          docker image prune -af || true
          docker volume prune -f || true
          echo "Disk usage after cleanup:"
          df -h
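      # GitHub-hosted Ubuntu runners ship with limited free disk (typically under ~20 GB
      # on the workspace volume) while the Unity editor image alone is ~3.9 GB, which is
      # why this job prunes so aggressively between steps. A quick way to see what Docker
      # could reclaim at any point (spot-check only, not run by CI):
      #
      #   docker system df       # per-category usage with a RECLAIMABLE column
      #   docker system df -v    # per-image and per-volume breakdown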
      - name: Start LocalStack (S3) as managed Docker container
        run: |
          echo "Starting LocalStack as managed Docker container..."
          # Start LocalStack with a fixed name so later steps can reference it
          # Note: using the default DATA_DIR to avoid tmpfs mount conflicts
          docker run -d \
            --name localstack-main \
            --network bridge \
            -p 4566:4566 \
            -e SERVICES=s3,cloudformation,ecs,kinesis,cloudwatch,logs \
            -e DEBUG=0 \
            localstack/localstack:latest || true
          # Wait for LocalStack to be ready - check both the health endpoint and the S3 service
          echo "Waiting for LocalStack to be ready..."
          MAX_ATTEMPTS=60
          READY=false
          for i in $(seq 1 $MAX_ATTEMPTS); do
            # Check that the container is running
            if ! docker ps | grep -q localstack-main; then
              echo "LocalStack container not running (attempt $i/$MAX_ATTEMPTS)"
              sleep 2
              continue
            fi
            # Check the health endpoint - it must return valid JSON
            HEALTH=$(curl -s http://localhost:4566/_localstack/health 2>/dev/null || echo "")
            if [ -z "$HEALTH" ] || ! echo "$HEALTH" | grep -q "services"; then
              echo "LocalStack health endpoint not ready (attempt $i/$MAX_ATTEMPTS)"
              sleep 2
              continue
            fi
            # Verify the S3 service appears in the health response
            if echo "$HEALTH" | grep -q '"s3"'; then
              echo "LocalStack is ready with S3 service (attempt $i/$MAX_ATTEMPTS)"
              echo "Health check response:"
              echo "$HEALTH" | head -10
              READY=true
              break
            fi
            echo "Waiting for LocalStack S3 service... ($i/$MAX_ATTEMPTS)"
            sleep 2
          done
          if [ "$READY" != "true" ]; then
            echo "ERROR: LocalStack did not become ready after $MAX_ATTEMPTS attempts"
            echo "Container status:"
            docker ps -a | grep localstack || echo "No LocalStack container found"
            echo "Container logs:"
            docker logs localstack-main --tail 100 || true
            exit 1
          fi
          # Final verification
          echo "Final LocalStack verification..."
          docker ps | grep localstack || echo "WARNING: No LocalStack container found"
          curl -s http://localhost:4566/_localstack/health | head -10 || echo "WARNING: LocalStack health check failed"
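      # The readiness loop keys off LocalStack's health endpoint. A minimal manual probe
      # looks like this (the exact JSON shape and service states vary by LocalStack version):
      #
      #   curl -s http://localhost:4566/_localstack/health
      #   # => {"services": {"s3": "available", "cloudformation": "available", ...}, ...}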
      - name: Install AWS CLI tools
        run: |
          # Install the AWS CLI if not already available
          if ! command -v aws > /dev/null 2>&1; then
            pip install awscli || true
          fi
          # Install awscli-local for convenience (optional)
          pip install awscli-local || true
          aws --version || echo "AWS CLI not available"
          awslocal --version || echo "awslocal not available, will use aws CLI with --endpoint-url"
      - name: Create S3 bucket for tests (host LocalStack)
        run: |
          # Verify LocalStack is still accessible before creating the bucket
          echo "Verifying LocalStack connectivity..."
          for i in {1..10}; do
            if curl -s http://localhost:4566/_localstack/health > /dev/null 2>&1; then
              echo "LocalStack is accessible"
              break
            fi
            echo "Waiting for LocalStack... ($i/10)"
            sleep 1
          done
          # Use awslocal if available, otherwise use the aws CLI with --endpoint-url
          # Retry bucket creation in case LocalStack needs a moment
          MAX_RETRIES=5
          RETRY_COUNT=0
          BUCKET_CREATED=false
          while [ $RETRY_COUNT -lt $MAX_RETRIES ] && [ "$BUCKET_CREATED" != "true" ]; do
            RETRY_COUNT=$((RETRY_COUNT + 1))
            echo "Attempting to create S3 bucket (attempt $RETRY_COUNT/$MAX_RETRIES)..."
            if command -v awslocal > /dev/null 2>&1; then
              if awslocal s3 mb s3://$AWS_STACK_NAME 2>&1; then
                echo "Bucket created successfully with awslocal"
                awslocal s3 ls
                BUCKET_CREATED=true
              else
                echo "Bucket creation failed with awslocal, will retry..."
                sleep 2
              fi
            elif command -v aws > /dev/null 2>&1; then
              # The plain aws CLI needs (dummy) credentials and a region even against LocalStack
              if AWS_ACCESS_KEY_ID=test AWS_SECRET_ACCESS_KEY=test AWS_DEFAULT_REGION=us-east-1 aws --endpoint-url=http://localhost:4566 s3 mb s3://$AWS_STACK_NAME 2>&1; then
                echo "Bucket created successfully with aws CLI"
                AWS_ACCESS_KEY_ID=test AWS_SECRET_ACCESS_KEY=test AWS_DEFAULT_REGION=us-east-1 aws --endpoint-url=http://localhost:4566 s3 ls || true
                BUCKET_CREATED=true
              else
                echo "Bucket creation failed with aws CLI, will retry..."
                sleep 2
              fi
            else
              echo "Neither awslocal nor aws CLI available"
              exit 1
            fi
          done
          if [ "$BUCKET_CREATED" != "true" ]; then
            echo "ERROR: Failed to create S3 bucket after $MAX_RETRIES attempts"
            echo "LocalStack container status:"
            docker ps | grep localstack || echo "LocalStack container not running"
            echo "LocalStack logs:"
            docker logs localstack-main --tail 50 || true
            exit 1
          fi
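      # awslocal is a thin wrapper that injects --endpoint-url and dummy credentials, so
      # the two invocations below are equivalent ways of talking to LocalStack:
      #
      #   awslocal s3 ls s3://game-ci-team-pipelines
      #   AWS_ACCESS_KEY_ID=test AWS_SECRET_ACCESS_KEY=test \
      #     aws --endpoint-url=http://localhost:4566 s3 ls s3://game-ci-team-pipelines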
      - run: yarn install --frozen-lockfile
      # ==========================================
      # K8S TESTS SECTION
      # ==========================================
      - name: Clean up disk space before K8s tests
        run: |
          echo "Cleaning up disk space before K8s tests..."
          rm -rf ./cloud-runner-cache/* || true
          sudo apt-get clean || true
          docker system prune -f || true
          df -h
      - name: Create k3s cluster (k3d)
        timeout-minutes: 5
        run: |
          # Create the cluster - host.k3d.internal lets pods reach host services (LocalStack)
          k3d cluster create unity-builder \
            --agents 1 \
            --wait
          kubectl config current-context | cat
      - name: Verify cluster readiness and LocalStack connectivity
        timeout-minutes: 5
        run: |
          for i in {1..60}; do
            if kubectl get nodes 2>/dev/null | grep -q Ready; then
              echo "Cluster is ready"
              break
            fi
            echo "Waiting for cluster... ($i/60)"
            sleep 5
          done
          kubectl get nodes
          kubectl get storageclass
          # Show node resources
          kubectl describe nodes | grep -A 5 "Allocated resources" || true
          # Test LocalStack connectivity from the k3d cluster
          echo "Testing LocalStack connectivity from k3d cluster..."
          echo "From host (should work):"
          curl -s --max-time 5 http://localhost:4566/_localstack/health | head -5 || echo "Host connectivity failed"
          echo "From k3d cluster via host.k3d.internal:"
          kubectl run test-localstack --image=curlimages/curl --rm -i --restart=Never --pod-running-timeout=1m -- \
            curl -v --max-time 5 http://host.k3d.internal:4566/_localstack/health 2>&1 | head -20 || \
            echo "Cluster connectivity test failed - LocalStack may not be accessible from k3d"
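      # k3d registers host.k3d.internal inside the cluster so pods can reach services
      # bound on the host machine; from a pod, the LocalStack container above is
      # http://host.k3d.internal:4566, not http://localhost:4566. An equivalent one-off
      # debug pod (interactive variant of the automated check above):
      #
      #   kubectl run debug --image=curlimages/curl --rm -it --restart=Never -- \
      #     curl -s http://host.k3d.internal:4566/_localstack/health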
      - name: Pre-pull Unity image into k3d cluster
        timeout-minutes: 15
        run: |
          # Pre-pull the Unity image into the k3d cluster before running tests.
          # This caches it in each node's containerd so it won't be pulled during test execution.
          UNITY_IMAGE="unityci/editor:ubuntu-2021.3.45f1-base-3"
          # Check disk space before pulling
          echo "Checking disk space before pre-pulling Unity image..."
          K3D_NODE_CONTAINERS="${K3D_NODE_CONTAINERS:-k3d-unity-builder-agent-0 k3d-unity-builder-server-0}"
          for NODE in $K3D_NODE_CONTAINERS; do
            echo "Disk space in $NODE:"
            docker exec "$NODE" sh -c "df -h /var/lib/rancher/k3s 2>/dev/null || df -h / 2>/dev/null || true" || true
          done
          # Clean up before pulling to ensure we have space
          echo "Cleaning up before pre-pulling image..."
          for NODE in $K3D_NODE_CONTAINERS; do
            docker exec "$NODE" sh -c "crictl rm --all 2>/dev/null || true" || true
            # Only remove non-Unity images, keeping any already-cached Unity image
            docker exec "$NODE" sh -c "crictl images 2>/dev/null | tail -n +2 | grep -vE 'unityci/editor|unity' | awk '{print \$3}' | while read -r img; do crictl rmi \"\$img\" 2>/dev/null || true; done" || true
          done || true
          # Explicitly pull the image on BOTH nodes so it is cached wherever pods might be scheduled.
          # This prevents "no space left" errors when pods land on a node without the cached image.
          echo "Pulling Unity image directly on each node to ensure it's cached..."
          for NODE in $K3D_NODE_CONTAINERS; do
            echo "Checking if image already exists on $NODE..."
            IMAGE_EXISTS=$(docker exec "$NODE" sh -c "crictl images | grep -q unityci/editor && echo 'yes' || echo 'no'" || echo "no")
            if [ "$IMAGE_EXISTS" = "yes" ]; then
              echo "Unity image already cached on $NODE, skipping pull"
            else
              echo "Pulling Unity image on $NODE (this may take several minutes for a 3.9 GB image)..."
              # Use crictl pull directly in the node's containerd so the image lands in the
              # node's local storage; timeout prevents hanging indefinitely (10 minutes max)
              if timeout 600 docker exec "$NODE" sh -c "crictl pull $UNITY_IMAGE 2>&1"; then
                echo "Successfully pulled image on $NODE"
                # Verify it's cached
                docker exec "$NODE" sh -c "crictl images | grep unityci/editor || echo 'Warning: Image not found after pull'" || true
              else
                PULL_EXIT_CODE=$?
                if [ $PULL_EXIT_CODE -eq 124 ]; then
                  echo "Warning: Image pull on $NODE timed out after 10 minutes. Checking if partially cached..."
                else
                  echo "Warning: Image pull on $NODE failed (exit code: $PULL_EXIT_CODE). Checking if partially cached..."
                fi
                docker exec "$NODE" sh -c "crictl images | grep unityci/editor || echo 'Image not found on $NODE'" || true
                echo "Note: pods scheduled on $NODE will try to pull the image at runtime, which may fail if disk space is insufficient."
              fi
            fi
          done
          # Verify the image is cached on every node
          echo "Checking if Unity image is cached..."
          for NODE in $K3D_NODE_CONTAINERS; do
            docker exec "$NODE" sh -c "crictl images | grep unityci/editor || echo 'Image not found in $NODE'" || true
          done
          echo "Image pre-pull completed. Image should be cached in the k3d nodes."
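      # crictl inside each k3d node talks to that node's containerd image store, which is
      # separate from the host Docker daemon's store. To spot-check what a node has cached
      # (node names assume the cluster created above):
      #
      #   docker exec k3d-unity-builder-agent-0 crictl images
      #   docker exec k3d-unity-builder-agent-0 crictl imagefsinfo   # image filesystem usage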
      - name: Clean up K8s test resources before tests
        run: |
          echo "Cleaning up K8s test resources..."
          kubectl delete jobs --all --ignore-not-found=true -n default || true
          kubectl get pods -n default -o name 2>/dev/null | grep -E "(unity-builder-job-|helper-pod-)" | while read pod; do
            kubectl delete "$pod" --ignore-not-found=true || true
          done || true
          kubectl get pvc -n default -o name 2>/dev/null | grep "unity-builder-pvc-" | while read pvc; do
            kubectl delete "$pvc" --ignore-not-found=true || true
          done || true
          for i in {1..30}; do
            PVC_COUNT=$(kubectl get pvc -n default 2>/dev/null | grep "unity-builder-pvc-" | wc -l || echo "0")
            if [ "$PVC_COUNT" -eq 0 ]; then
              echo "All PVCs deleted"
              break
            fi
            echo "Waiting for PVCs to be deleted... ($i/30) - Found $PVC_COUNT PVCs"
            sleep 1
          done
          kubectl get pv 2>/dev/null | grep -E "(Released|Failed)" | awk '{print $1}' | while read pv; do
            if [ -n "$pv" ] && [ "$pv" != "NAME" ]; then
              kubectl delete pv "$pv" --ignore-not-found=true || true
            fi
          done || true
          kubectl get secrets -n default -o name 2>/dev/null | grep "build-credentials-" | while read secret; do
            kubectl delete "$secret" --ignore-not-found=true || true
          done || true
          sleep 3
          docker system prune -f || true
      - name: Run cloud-runner-image test (K8s)
        timeout-minutes: 10
        run: yarn run test "cloud-runner-image" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: k8s
          KUBE_VOLUME_SIZE: 2Gi
          containerCpu: '512'
          containerMemory: '512'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
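      # All test steps below share this invocation shape: the quoted name is forwarded to
      # the test runner as a test-file filter, --runInBand serialises the tests so builds
      # don't compete for disk and CPU, and --detectOpenHandles/--forceExit keep lingering
      # async handles from hanging the job. Assuming the package.json "test" script is a
      # plain jest wrapper, the step above is roughly equivalent to:
      #
      #   npx jest cloud-runner-image --detectOpenHandles --forceExit --runInBand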
      - name: Clean up after cloud-runner-image test
        if: always()
        run: |
          echo "Cleaning up after cloud-runner-image test..."
          kubectl delete jobs --all --ignore-not-found=true -n default || true
          kubectl get pods -n default -o name 2>/dev/null | grep -E "(unity-builder-job-|helper-pod-)" | while read pod; do
            kubectl delete "$pod" --ignore-not-found=true || true
          done || true
          kubectl get pvc -n default -o name 2>/dev/null | grep "unity-builder-pvc-" | while read pvc; do
            kubectl delete "$pvc" --ignore-not-found=true || true
          done || true
          kubectl get secrets -n default -o name 2>/dev/null | grep "build-credentials-" | while read secret; do
            kubectl delete "$secret" --ignore-not-found=true || true
          done || true
          # Aggressive cleanup in k3d nodes, but preserve Unity images
          K3D_NODE_CONTAINERS="${K3D_NODE_CONTAINERS:-k3d-unity-builder-agent-0 k3d-unity-builder-server-0}"
          for NODE in $K3D_NODE_CONTAINERS; do
            # Remove stopped containers
            docker exec "$NODE" sh -c "crictl rm --all 2>/dev/null || true" || true
            # Remove non-Unity images only (preserve unityci/editor images to avoid re-pulling 3.9 GB)
            docker exec "$NODE" sh -c "crictl images 2>/dev/null | tail -n +2 | grep -vE 'unityci/editor|unity' | awk '{print \$3}' | while read -r img; do crictl rmi \"\$img\" 2>/dev/null || true; done" || true
            # Clean up unused layers
            docker exec "$NODE" sh -c "crictl rmi --prune 2>/dev/null || true" || true
          done || true
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
      - name: Run cloud-runner-kubernetes test
        timeout-minutes: 30
        run: yarn run test "cloud-runner-kubernetes" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneLinux64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: k8s
          KUBE_VOLUME_SIZE: 2Gi
          containerCpu: '1000'
          containerMemory: '1024'
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_S3_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
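      # The endpoint is deliberately supplied in several spellings: the test harness and
      # SDK pick up the plain AWS_* variables, while GitHub Actions exposes action inputs
      # as INPUT_* environment variables (so INPUT_AWSS3ENDPOINT presumably feeds the
      # action's awsS3Endpoint input). AWS_S3_FORCE_PATH_STYLE matters because LocalStack
      # serves every bucket on a single host:port, which only works with path-style requests:
      #
      #   path-style (works):          http://localhost:4566/game-ci-team-pipelines/<key>
      #   virtual-hosted (needs DNS):  http://game-ci-team-pipelines.localhost:4566/<key>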
      - name: Clean up after cloud-runner-kubernetes test
        if: always()
        run: |
          echo "Cleaning up after cloud-runner-kubernetes test..."
          kubectl delete jobs --all --ignore-not-found=true -n default || true
          kubectl get pods -n default -o name 2>/dev/null | grep -E "(unity-builder-job-|helper-pod-)" | while read pod; do
            kubectl delete "$pod" --ignore-not-found=true || true
          done || true
          kubectl get pvc -n default -o name 2>/dev/null | grep "unity-builder-pvc-" | while read pvc; do
            kubectl delete "$pvc" --ignore-not-found=true || true
          done || true
          kubectl get secrets -n default -o name 2>/dev/null | grep "build-credentials-" | while read secret; do
            kubectl delete "$secret" --ignore-not-found=true || true
          done || true
          # Aggressive cleanup in k3d nodes, but preserve Unity images
          K3D_NODE_CONTAINERS="${K3D_NODE_CONTAINERS:-k3d-unity-builder-agent-0 k3d-unity-builder-server-0}"
          for NODE in $K3D_NODE_CONTAINERS; do
            # Remove stopped containers
            docker exec "$NODE" sh -c "crictl rm --all 2>/dev/null || true" || true
            # Remove non-Unity images only (preserve unityci/editor images to avoid re-pulling 3.9 GB)
            docker exec "$NODE" sh -c "crictl images 2>/dev/null | tail -n +2 | grep -vE 'unityci/editor|unity' | awk '{print \$3}' | while read -r img; do crictl rmi \"\$img\" 2>/dev/null || true; done" || true
            # Clean up unused layers
            docker exec "$NODE" sh -c "crictl rmi --prune 2>/dev/null || true" || true
          done || true
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
      - name: Run cloud-runner-s3-steps test (K8s)
        timeout-minutes: 30
        run: yarn run test "cloud-runner-s3-steps" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneLinux64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: k8s
          KUBE_VOLUME_SIZE: 2Gi
          containerCpu: '1000'
          containerMemory: '1024'
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_S3_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up after cloud-runner-s3-steps test
        if: always()
        run: |
          echo "Cleaning up after cloud-runner-s3-steps test..."
          kubectl delete jobs --all --ignore-not-found=true -n default || true
          kubectl get pods -n default -o name 2>/dev/null | grep -E "(unity-builder-job-|helper-pod-)" | while read pod; do
            kubectl delete "$pod" --ignore-not-found=true || true
          done || true
          kubectl get pvc -n default -o name 2>/dev/null | grep "unity-builder-pvc-" | while read pvc; do
            kubectl delete "$pvc" --ignore-not-found=true || true
          done || true
          kubectl get secrets -n default -o name 2>/dev/null | grep "build-credentials-" | while read secret; do
            kubectl delete "$secret" --ignore-not-found=true || true
          done || true
          # Aggressive cleanup in k3d nodes, but preserve Unity images
          K3D_NODE_CONTAINERS="${K3D_NODE_CONTAINERS:-k3d-unity-builder-agent-0 k3d-unity-builder-server-0}"
          for NODE in $K3D_NODE_CONTAINERS; do
            # Remove stopped containers
            docker exec "$NODE" sh -c "crictl rm --all 2>/dev/null || true" || true
            # Remove non-Unity images only (preserve unityci/editor images to avoid re-pulling 3.9 GB)
            docker exec "$NODE" sh -c "crictl images 2>/dev/null | tail -n +2 | grep -vE 'unityci/editor|unity' | awk '{print \$3}' | while read -r img; do crictl rmi \"\$img\" 2>/dev/null || true; done" || true
            # Clean up unused layers
            docker exec "$NODE" sh -c "crictl rmi --prune 2>/dev/null || true" || true
          done || true
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
      - name: Run cloud-runner-end2end-caching test (K8s)
        timeout-minutes: 60
        run: yarn run test "cloud-runner-end2end-caching" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneLinux64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: k8s
          KUBE_VOLUME_SIZE: 2Gi
          containerCpu: '1000'
          containerMemory: '1024'
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_S3_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up after cloud-runner-end2end-caching test
        if: always()
        run: |
          echo "Cleaning up after cloud-runner-end2end-caching test..."
          kubectl delete jobs --all --ignore-not-found=true -n default || true
          kubectl get pods -n default -o name 2>/dev/null | grep -E "(unity-builder-job-|helper-pod-)" | while read pod; do
            kubectl delete "$pod" --ignore-not-found=true || true
          done || true
          kubectl get pvc -n default -o name 2>/dev/null | grep "unity-builder-pvc-" | while read pvc; do
            kubectl delete "$pvc" --ignore-not-found=true || true
          done || true
          kubectl get secrets -n default -o name 2>/dev/null | grep "build-credentials-" | while read secret; do
            kubectl delete "$secret" --ignore-not-found=true || true
          done || true
          # Aggressive cleanup in k3d nodes, but preserve Unity images
          K3D_NODE_CONTAINERS="${K3D_NODE_CONTAINERS:-k3d-unity-builder-agent-0 k3d-unity-builder-server-0}"
          for NODE in $K3D_NODE_CONTAINERS; do
            # Remove stopped containers
            docker exec "$NODE" sh -c "crictl rm --all 2>/dev/null || true" || true
            # Remove non-Unity images only (preserve unityci/editor images to avoid re-pulling 3.9 GB)
            docker exec "$NODE" sh -c "crictl images 2>/dev/null | tail -n +2 | grep -vE 'unityci/editor|unity' | awk '{print \$3}' | while read -r img; do crictl rmi \"\$img\" 2>/dev/null || true; done" || true
            # Clean up unused layers
            docker exec "$NODE" sh -c "crictl rmi --prune 2>/dev/null || true" || true
          done || true
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
      - name: Clean up disk space before end2end-retaining test
        run: |
          echo "Cleaning up disk space before end2end-retaining test..."
          kubectl delete jobs --all --ignore-not-found=true -n default || true
          kubectl get pods -n default -o name 2>/dev/null | grep -E "(unity-builder-job-|helper-pod-)" | while read pod; do
            kubectl delete "$pod" --ignore-not-found=true || true
          done || true
          # Aggressive cleanup in k3d nodes, but preserve Unity images
          K3D_NODE_CONTAINERS="${K3D_NODE_CONTAINERS:-k3d-unity-builder-agent-0 k3d-unity-builder-server-0}"
          for NODE in $K3D_NODE_CONTAINERS; do
            # Remove stopped containers
            docker exec "$NODE" sh -c "crictl rm --all 2>/dev/null || true" || true
            # Remove non-Unity images only (preserve unityci/editor images to avoid re-pulling 3.9 GB)
            docker exec "$NODE" sh -c "crictl images 2>/dev/null | tail -n +2 | grep -vE 'unityci/editor|unity' | awk '{print \$3}' | while read -r img; do crictl rmi \"\$img\" 2>/dev/null || true; done" || true
            # Clean up unused layers
            docker exec "$NODE" sh -c "crictl rmi --prune 2>/dev/null || true" || true
          done || true
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          echo "Disk usage before end2end-retaining test:"
          df -h
      - name: Run cloud-runner-end2end-retaining test (K8s)
        timeout-minutes: 60
        run: yarn run test "cloud-runner-end2end-retaining" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: k8s
          KUBE_VOLUME_SIZE: 2Gi
          containerCpu: '512'
          containerMemory: '512'
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_S3_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up K8s resources and disk space
        run: |
          echo "Cleaning up K8s resources after K8s tests..."
          kubectl delete jobs --all --ignore-not-found=true -n default || true
          kubectl get pods -n default -o name 2>/dev/null | grep -E "(unity-builder-job-|helper-pod-)" | while read pod; do
            kubectl delete "$pod" --ignore-not-found=true || true
          done || true
          kubectl get pvc -n default -o name 2>/dev/null | grep "unity-builder-pvc-" | while read pvc; do
            kubectl delete "$pvc" --ignore-not-found=true || true
          done || true
          for i in {1..30}; do
            PVC_COUNT=$(kubectl get pvc -n default 2>/dev/null | grep "unity-builder-pvc-" | wc -l || echo "0")
            if [ "$PVC_COUNT" -eq 0 ]; then
              echo "All PVCs deleted"
              break
            fi
            sleep 1
          done
          kubectl get pv 2>/dev/null | grep -E "(Released|Failed)" | awk '{print $1}' | while read pv; do
            if [ -n "$pv" ] && [ "$pv" != "NAME" ]; then
              kubectl delete pv "$pv" --ignore-not-found=true || true
            fi
          done || true
          kubectl get secrets -n default -o name 2>/dev/null | grep "build-credentials-" | while read secret; do
            kubectl delete "$secret" --ignore-not-found=true || true
          done || true
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -af --volumes || true
          # Aggressive cleanup in k3d nodes to free ephemeral storage, but preserve Unity images
          K3D_NODE_CONTAINERS="${K3D_NODE_CONTAINERS:-k3d-unity-builder-agent-0 k3d-unity-builder-server-0}"
          for NODE in $K3D_NODE_CONTAINERS; do
            echo "Cleaning up $NODE (preserving Unity images)..."
            # Remove all stopped containers
            docker exec "$NODE" sh -c "crictl rm --all 2>/dev/null || true" || true
            # Remove non-Unity images only (preserve unityci/editor images to avoid re-pulling 3.9 GB)
            docker exec "$NODE" sh -c "crictl images 2>/dev/null | tail -n +2 | grep -vE 'unityci/editor|unity' | awk '{print \$3}' | while read -r img; do crictl rmi \"\$img\" 2>/dev/null || true; done" || true
            # Clean up unused layers (prune preserves referenced images)
            docker exec "$NODE" sh -c "crictl rmi --prune 2>/dev/null || true" || true
            # Check disk space
            docker exec "$NODE" sh -c "df -h /var/lib/rancher/k3s 2>/dev/null || df -h / 2>/dev/null || true" || true
          done
          echo "Disk usage after K8s cleanup:"
          df -h
      - name: Delete k3d cluster
        run: |
          echo "Deleting k3d cluster to free disk space..."
          k3d cluster delete unity-builder || true
          k3d image delete --all || true
          docker system prune -af --volumes || true
          echo "Disk usage after k3d deletion:"
          df -h
      # ==========================================
      # AWS/LOCALSTACK PROVIDER TESTS SECTION
      # ==========================================
      - name: Clean up disk space before AWS/LocalStack provider tests
        run: |
          echo "Cleaning up disk space before AWS/LocalStack provider tests..."
          rm -rf ./cloud-runner-cache/* || true
          sudo apt-get clean || true
          docker system prune -af --volumes || true
          echo "Disk usage:"
          df -h
      - name: Run cloud-runner-image test (AWS provider)
        timeout-minutes: 10
        run: yarn run test "cloud-runner-image" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
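      # For the aws provider strategy the override is given twice: AWS_ENDPOINT_URL is the
      # variable newer AWS CLIs/SDKs honour natively for endpoint configuration, while
      # AWS_ENDPOINT is presumably read by the provider code itself. LocalStack accepts any
      # credential pair, so the dummy test/test keys suffice. The same override in a
      # one-off CLI call looks like:
      #
      #   AWS_ENDPOINT_URL=http://localhost:4566 AWS_ACCESS_KEY_ID=test AWS_SECRET_ACCESS_KEY=test \
      #     aws cloudformation list-stacks --region eu-west-2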
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-environment test
        timeout-minutes: 30
        run: yarn run test "cloud-runner-environment" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-s3-steps test (AWS provider)
        timeout-minutes: 30
        run: yarn run test "cloud-runner-s3-steps" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-hooks test (AWS provider)
        timeout-minutes: 30
        run: yarn run test "cloud-runner-hooks" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-end2end-caching test (AWS provider)
        timeout-minutes: 60
        run: yarn run test "cloud-runner-end2end-caching" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-end2end-retaining test (AWS provider)
        timeout-minutes: 60
        run: yarn run test "cloud-runner-end2end-retaining" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-caching test (AWS provider)
        timeout-minutes: 60
        run: yarn run test "cloud-runner-caching" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-locking-core test (AWS provider)
        timeout-minutes: 60
        run: yarn run test "cloud-runner-locking-core" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-locking-get-locked test (AWS provider)
        timeout-minutes: 60
        run: yarn run test "cloud-runner-locking-get-locked" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-end2end-locking test (AWS provider)
        timeout-minutes: 60
        run: yarn run test "cloud-runner-end2end-locking" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space after AWS/LocalStack provider tests
        run: |
          echo "Cleaning up disk space after AWS/LocalStack provider tests..."
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -af --volumes || true
          echo "Disk usage:"
          df -h
      # ==========================================
      # LOCAL DOCKER TESTS SECTION
      # ==========================================
      - name: Clean up disk space before local-docker tests
        run: |
          echo "Cleaning up disk space before local-docker tests..."
          rm -rf ./cloud-runner-cache/* || true
          sudo apt-get clean || true
          docker system prune -af --volumes || true
          echo "Disk usage:"
          df -h
      - name: Run cloud-runner-image test (local-docker)
        timeout-minutes: 10
        run: yarn run test "cloud-runner-image" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-hooks test (local-docker)
        timeout-minutes: 30
        run: yarn run test "cloud-runner-hooks" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-local-persistence test
        timeout-minutes: 30
        run: yarn run test "cloud-runner-local-persistence" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-locking-core test (local-docker with S3)
        timeout-minutes: 30
        run: yarn run test "cloud-runner-locking-core" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          AWS_STACK_NAME: game-ci-team-pipelines
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          AWS_S3_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-locking-get-locked test (local-docker with S3)
        timeout-minutes: 30
        run: yarn run test "cloud-runner-locking-get-locked" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          AWS_STACK_NAME: game-ci-team-pipelines
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          AWS_S3_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-caching test (local-docker)
        timeout-minutes: 30
        run: yarn run test "cloud-runner-caching" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-github-checks test (local-docker)
        timeout-minutes: 30
        run: yarn run test "cloud-runner-github-checks" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
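      # This test drives the GitHub Checks API, which is why the workflow declares
      # "checks: write" and "statuses: write" up top. A caller of this reusable workflow
      # would opt in roughly like this (hypothetical caller snippet; adjust the repo ref):
      #
      #   jobs:
      #     integrity:
      #       uses: game-ci/unity-builder/.github/workflows/cloud-runner-integrity.yml@main
      #       with:
      #         runGithubIntegrationTests: 'true'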
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-s3-steps test (local-docker with S3)
        timeout-minutes: 30
        run: yarn run test "cloud-runner-s3-steps" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneLinux64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          AWS_STACK_NAME: game-ci-team-pipelines
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          AWS_S3_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-end2end-caching test (local-docker with S3)
        timeout-minutes: 60
        run: yarn run test "cloud-runner-end2end-caching" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneLinux64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          AWS_STACK_NAME: game-ci-team-pipelines
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          AWS_S3_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Final disk space cleanup
        run: |
          echo "Final disk space cleanup..."
          rm -rf ./cloud-runner-cache/* || true
          docker stop localstack-main 2>/dev/null || true
          docker rm localstack-main 2>/dev/null || true
          docker system prune -af --volumes || true
          echo "Final disk usage:"
          df -h