name: cloud-runner-integrity

on:
  workflow_call:
    inputs:
      runGithubIntegrationTests:
        description: 'Run GitHub Checks integration tests'
        required: false
        default: 'false'
        type: string
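# Note: with workflow_call as the only trigger, this workflow never runs on its
# own; it is intended to be invoked with `uses:` from another workflow.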
permissions:
  contents: read
  checks: write
  statuses: write

env:
  # Commented out: Using LocalStack tests instead of real AWS
  # AWS_REGION: eu-west-2
  # AWS_DEFAULT_REGION: eu-west-2
  AWS_STACK_NAME: game-ci-team-pipelines # Still needed for LocalStack S3 bucket creation
  CLOUD_RUNNER_BRANCH: ${{ github.ref }}
  DEBUG: true
  PROJECT_PATH: test-project
  USE_IL2CPP: false
jobs:
  k8s:
    name: Cloud Runner Tests (K8s)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          lfs: false
      # Set up Kubernetes (k3s via k3d)
      - name: Set up kubectl
        uses: azure/setup-kubectl@v4
        with:
          version: 'v1.34.1'
      - name: Install k3d
        run: |
          curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
          k3d version | cat
      - name: Start LocalStack (S3)
        uses: localstack/setup-localstack@v0.2.4
        with:
          install-awslocal: true
      - name: Verify LocalStack is running
        run: |
          echo "Checking LocalStack status..."
          curl -s http://localhost:4566/_localstack/health | head -10 || echo "LocalStack health check failed"
          # Check if LocalStack container is running
          docker ps | grep localstack || echo "No LocalStack container found"
          # Show LocalStack container network info
          docker ps --format "{{.Names}}" | grep -i localstack | head -1 | xargs -I {} docker inspect {} --format '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' || echo "Could not get LocalStack IP"
      - name: Create S3 bucket for tests (host LocalStack)
        run: |
          awslocal s3 mb s3://$AWS_STACK_NAME || true
          awslocal s3 ls
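      # awslocal (installed by setup-localstack above) is a thin wrapper around the
      # AWS CLI that is pre-pointed at the LocalStack edge endpoint, so the dummy
      # credentials used below are sufficient.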
      - name: Create k3s cluster (k3d)
        timeout-minutes: 5
        run: |
          # Clean up any existing cluster and free disk space before creating a new one
          k3d cluster delete unity-builder || true
          docker system prune -af --volumes || true
          # Create cluster - host.k3d.internal will allow pods to access host services
          # No port mapping needed - LocalStack is on host, accessible via host.k3d.internal:4566
          k3d cluster create unity-builder --agents 1 --wait
          kubectl config current-context | cat
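      # k3d injects host.k3d.internal into the cluster's DNS, which is what lets the
      # build pods reach the LocalStack instance listening on the runner host.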
      - name: Verify cluster readiness and LocalStack connectivity
        timeout-minutes: 2
        run: |
          for i in {1..60}; do
            if kubectl get nodes 2>/dev/null | grep -q Ready; then
              echo "Cluster is ready"
              break
            fi
            echo "Waiting for cluster... ($i/60)"
            sleep 5
          done
          kubectl get nodes
          kubectl get storageclass
          # Show node resources
          kubectl describe nodes | grep -A 5 "Allocated resources" || true
          # Test LocalStack connectivity from k3d cluster
          echo "Testing LocalStack connectivity from k3d cluster..."
          echo "From host (should work):"
          curl -s --max-time 5 http://localhost:4566/_localstack/health | head -5 || echo "Host connectivity failed"
          echo "From k3d cluster via host.k3d.internal:"
          kubectl run test-localstack --image=curlimages/curl --rm -i --restart=Never --timeout=10s -- \
            curl -v --max-time 5 http://host.k3d.internal:4566/_localstack/health 2>&1 | head -20 || \
            echo "Cluster connectivity test - if this fails, LocalStack may not be accessible from k3d"
          # Clean up disk space on the k3d node to prevent evictions and disk pressure
          echo "Cleaning up disk space on k3d nodes..."
          docker exec k3d-unity-builder-agent-0 sh -c "df -h && docker system prune -af --volumes || true" || true
          docker system prune -af --volumes || true
          # Wait for disk pressure taints to clear (with timeout)
          echo "Checking for disk pressure taints on nodes..."
          for i in {1..30}; do
            if kubectl describe nodes | grep -q "node.kubernetes.io/disk-pressure"; then
              echo "Disk pressure detected, waiting for it to clear... ($i/30)"
              docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
              docker system prune -af --volumes || true
              sleep 2
            else
              echo "No disk pressure taints found"
              break
            fi
          done
          kubectl describe nodes | grep -i taint || echo "No taints found"
      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'yarn'
      - name: Clean up disk space before tests
        run: |
          # Clean up any leftover cache files from previous runs
          rm -rf ./cloud-runner-cache/* || true
          # Clean up system caches and temporary files
          sudo apt-get clean || true
          docker system prune -f || true
          # Show available disk space
          df -h
      - run: yarn install --frozen-lockfile
      - name: Clean up K8s test resources
        run: |
          # Clean up K8s resources before each test (only test resources, not system pods)
          echo "Cleaning up K8s test resources..."
          # Only clean up resources in the default namespace and resources matching our test patterns
          kubectl delete jobs --all --ignore-not-found=true -n default || true
          # Delete completed/failed pods in the default namespace (not system pods)
          kubectl get pods -n default -o name 2>/dev/null | grep -E "(unity-builder-job-|helper-pod-)" | while read pod; do
            kubectl delete "$pod" --ignore-not-found=true || true
          done || true
          # Only delete PVCs that match our naming pattern (unity-builder-pvc-*)
          # Wait for PVC deletion to complete to ensure underlying PVs are released
          kubectl get pvc -n default -o name 2>/dev/null | grep "unity-builder-pvc-" | while read pvc; do
            kubectl delete "$pvc" --ignore-not-found=true || true
          done || true
          # Wait for PVCs to be fully deleted (up to 30 seconds)
          echo "Waiting for PVCs to be deleted..."
          for i in {1..30}; do
            PVC_COUNT=$(kubectl get pvc -n default 2>/dev/null | grep "unity-builder-pvc-" | wc -l || echo "0")
            if [ "$PVC_COUNT" -eq 0 ]; then
              echo "All PVCs deleted"
              break
            fi
            echo "Waiting for PVCs to be deleted... ($i/30) - Found $PVC_COUNT PVCs"
            sleep 1
          done
          # Clean up PersistentVolumes that are in Released state (orphaned from deleted PVCs)
          # This is important for local-path storage in k3d where PVs might not auto-delete
          echo "Cleaning up orphaned PersistentVolumes..."
          kubectl get pv 2>/dev/null | grep -E "(Released|Failed)" | awk '{print $1}' | while read pv; do
            if [ -n "$pv" ] && [ "$pv" != "NAME" ]; then
              echo "Deleting orphaned PV: $pv"
              kubectl delete pv "$pv" --ignore-not-found=true || true
            fi
          done || true
          # Only delete secrets that match our naming pattern (build-credentials-*)
          kubectl get secrets -n default -o name 2>/dev/null | grep "build-credentials-" | while read secret; do
            kubectl delete "$secret" --ignore-not-found=true || true
          done || true
          sleep 3
          # Clean up disk space - aggressive cleanup to prevent evictions
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -af --volumes || true
          # Clean up disk space on the k3d node to prevent ephemeral-storage evictions and disk pressure
          echo "Cleaning up disk space on k3d node..."
          docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
          # Also clean up old logs and temporary files that might be taking up space
          docker exec k3d-unity-builder-agent-0 sh -c "find /var/log -type f -name '*.log' -mtime +1 -delete 2>/dev/null || true" || true
          docker exec k3d-unity-builder-agent-0 sh -c "find /tmp -type f -mtime +1 -delete 2>/dev/null || true" || true
          docker exec k3d-unity-builder-agent-0 sh -c "df -h" 2>/dev/null || true
          # Wait for disk pressure taints to clear before proceeding
          echo "Checking for disk pressure taints..."
          for i in {1..20}; do
            if kubectl describe nodes 2>/dev/null | grep -q "node.kubernetes.io/disk-pressure"; then
              echo "Disk pressure detected, cleaning up and waiting... ($i/20)"
              docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
              docker system prune -af --volumes || true
              sleep 3
            else
              echo "No disk pressure taints found, proceeding with test"
              break
            fi
          done
      - name: Ensure disk pressure cleared before test
        timeout-minutes: 3
        run: |
          echo "Ensuring disk pressure is cleared before test..."
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -af --volumes || true
          docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
          # Wait for disk pressure taints to clear (with aggressive cleanup)
          # Limit to 10 attempts to avoid a timeout - if cleanup doesn't work, just remove the taint
          PREVIOUS_DISK_USAGE=100
          for i in {1..10}; do
            HAS_DISK_PRESSURE=$(kubectl describe nodes 2>/dev/null | grep -q "node.kubernetes.io/disk-pressure" && echo "true" || echo "false")
            if [ "$HAS_DISK_PRESSURE" = "true" ]; then
              echo "Disk pressure detected, cleaning up aggressively... ($i/10)"
              # Check actual disk usage on the node
              DISK_USAGE=$(docker exec k3d-unity-builder-agent-0 sh -c "df -h / 2>/dev/null | tail -1 | awk '{print \$5}' | sed 's/%//'" || echo "unknown")
              echo "Current disk usage on k3d node: ${DISK_USAGE}%"

              # Use k3s/containerd commands instead of docker (docker is not available inside k3d nodes)
              # Clean up k3s containerd snapshots and images
              docker exec k3d-unity-builder-agent-0 sh -c "crictl rmi --prune 2>/dev/null || true" || true
              docker exec k3d-unity-builder-agent-0 sh -c "crictl rmp --all 2>/dev/null || true" || true
              # Clean up old containerd snapshots
              docker exec k3d-unity-builder-agent-0 sh -c "find /var/lib/rancher/k3s/agent/containerd -type d -name 'snapshots' -exec rm -rf {}/* 2>/dev/null \; || true" || true
              # Clean up k3s logs and temp files
              docker exec k3d-unity-builder-agent-0 sh -c "find /var/lib/rancher/k3s -type f -name '*.log' -delete 2>/dev/null || true" || true
              docker exec k3d-unity-builder-agent-0 sh -c "find /tmp -type f -mtime +0 -delete 2>/dev/null || true" || true
              docker exec k3d-unity-builder-agent-0 sh -c "find /var/log -type f -name '*.log' -mtime +0 -delete 2>/dev/null || true" || true
              # Clean up host docker
              docker system prune -af --volumes || true

              # Check if disk usage improved
              NEW_DISK_USAGE=$(docker exec k3d-unity-builder-agent-0 sh -c "df -h / 2>/dev/null | tail -1 | awk '{print \$5}' | sed 's/%//'" || echo "unknown")
              if [ "$NEW_DISK_USAGE" != "unknown" ] && [ "$PREVIOUS_DISK_USAGE" != "unknown" ]; then
                if [ "$NEW_DISK_USAGE" -ge "$PREVIOUS_DISK_USAGE" ] && [ "$i" -ge 3 ]; then
                  echo "Disk usage not improving (${PREVIOUS_DISK_USAGE}% -> ${NEW_DISK_USAGE}%), breaking cleanup loop and removing taint manually"
                  break
                fi
                PREVIOUS_DISK_USAGE=$NEW_DISK_USAGE
              fi
              sleep 3
            else
              echo "No disk pressure taints found, proceeding with test"
              kubectl describe nodes | grep -i taint || echo "No taints found"
              break
            fi
          done
          # If the disk pressure taint is still present after cleanup, manually remove it (CI only)
          if kubectl describe nodes 2>/dev/null | grep -q "node.kubernetes.io/disk-pressure"; then
            echo "WARNING: Disk pressure taint still present after cleanup. Manually removing taint for CI..."
            NODE_NAMES=$(kubectl get nodes -o name 2>/dev/null | sed 's/node\///' || echo "")
            for node in $NODE_NAMES; do
              kubectl taint nodes "$node" node.kubernetes.io/disk-pressure- 2>/dev/null || true
            done
            echo "Taint removed. Checking nodes..."
            kubectl describe nodes | grep -i taint || echo "No taints found"
          fi
          # Wait for the disk pressure condition to clear (not just the taint); inspect the
          # condition's status, since the "DiskPressure" type string exists on every node
          echo "Waiting for disk pressure condition to clear on nodes..."
          for i in {1..20}; do
            HAS_DISK_PRESSURE_CONDITION=$(kubectl get nodes -o jsonpath='{.items[*].status.conditions[?(@.type=="DiskPressure")].status}' 2>/dev/null | grep -q True && echo "true" || echo "false")
            if [ "$HAS_DISK_PRESSURE_CONDITION" = "true" ]; then
              echo "Disk pressure condition still present, waiting... ($i/20)"
              sleep 2
            else
              echo "Disk pressure condition cleared, proceeding with test"
              break
            fi
          done
          # Final check - if the condition still exists, remove the taint and wait a bit more
          if kubectl get nodes -o jsonpath='{.items[*].status.conditions[?(@.type=="DiskPressure")].status}' 2>/dev/null | grep -q True; then
            echo "WARNING: Disk pressure condition still exists. Removing taint and waiting 10 seconds..."
            NODE_NAMES=$(kubectl get nodes -o name 2>/dev/null | sed 's/node\///' || echo "")
            for node in $NODE_NAMES; do
              kubectl taint nodes "$node" node.kubernetes.io/disk-pressure- 2>/dev/null || true
            done
            sleep 10
          fi
      - name: Run cloud-runner-image test (validate image creation)
        timeout-minutes: 10
        run: yarn run test "cloud-runner-image" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: k8s
          KUBE_VOLUME_SIZE: 5Gi
          containerCpu: '1000'
          containerMemory: '1024'
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_S3_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
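      # The "test"/"test" credentials are LocalStack's conventional dummy values, and
      # AWS_S3_FORCE_PATH_STYLE is set because LocalStack serves buckets at path-style
      # URLs (http://localhost:4566/<bucket>) rather than via virtual-hosted bucket DNS.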
      - name: Clean up K8s test resources
        run: |
          kubectl delete jobs --all --ignore-not-found=true -n default || true
          kubectl get pods -n default -o name 2>/dev/null | grep -E "(unity-builder-job-|helper-pod-)" | while read pod; do
            kubectl delete "$pod" --ignore-not-found=true || true
          done || true
          # Only delete PVCs that match our naming pattern (unity-builder-pvc-*)
          # Wait for PVC deletion to complete to ensure underlying PVs are released
          kubectl get pvc -n default -o name 2>/dev/null | grep "unity-builder-pvc-" | while read pvc; do
            kubectl delete "$pvc" --ignore-not-found=true || true
          done || true
          # Wait for PVCs to be fully deleted (up to 30 seconds)
          echo "Waiting for PVCs to be deleted..."
          for i in {1..30}; do
            PVC_COUNT=$(kubectl get pvc -n default 2>/dev/null | grep "unity-builder-pvc-" | wc -l || echo "0")
            if [ "$PVC_COUNT" -eq 0 ]; then
              echo "All PVCs deleted"
              break
            fi
            echo "Waiting for PVCs to be deleted... ($i/30) - Found $PVC_COUNT PVCs"
            sleep 1
          done
          # Clean up PersistentVolumes that are in Released state (orphaned from deleted PVCs)
          # This is important for local-path storage in k3d where PVs might not auto-delete
          echo "Cleaning up orphaned PersistentVolumes..."
          kubectl get pv 2>/dev/null | grep -E "(Released|Failed)" | awk '{print $1}' | while read pv; do
            if [ -n "$pv" ] && [ "$pv" != "NAME" ]; then
              echo "Deleting orphaned PV: $pv"
              kubectl delete pv "$pv" --ignore-not-found=true || true
            fi
          done || true
          kubectl get secrets -n default -o name 2>/dev/null | grep "build-credentials-" | while read secret; do
            kubectl delete "$secret" --ignore-not-found=true || true
          done || true
          sleep 3
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -af --volumes || true
          # Clean up disk space on k3d node
          docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
          # Wait for disk pressure to clear
          for i in {1..15}; do
            if kubectl describe nodes 2>/dev/null | grep -q "node.kubernetes.io/disk-pressure"; then
              echo "Disk pressure detected, cleaning up... ($i/15)"
              docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
              docker system prune -af --volumes || true
              sleep 2
            else
              break
            fi
          done
      - name: Ensure disk pressure cleared before test
        timeout-minutes: 2
        run: |
          echo "Ensuring disk pressure is cleared before test..."
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -af --volumes || true
          docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
          for i in {1..30}; do
            if kubectl describe nodes 2>/dev/null | grep -q "node.kubernetes.io/disk-pressure"; then
              echo "Disk pressure detected, cleaning up aggressively... ($i/30)"
              docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
              docker system prune -af --volumes || true
              docker exec k3d-unity-builder-agent-0 sh -c "docker images -q | xargs -r docker rmi -f 2>/dev/null || true" || true
              sleep 3
            else
              echo "No disk pressure taints found, proceeding with test"
              break
            fi
          done
      - name: Run cloud-runner-kubernetes test (simple K8s build validation)
        timeout-minutes: 30
        run: yarn run test "cloud-runner-kubernetes" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: k8s
          KUBE_VOLUME_SIZE: 5Gi
          ENABLE_K8S_E2E: 'true'
          containerCpu: '1000'
          containerMemory: '1024'
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_S3_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up K8s test resources
        run: |
          kubectl delete jobs --all --ignore-not-found=true -n default || true
          kubectl get pods -n default -o name 2>/dev/null | grep -E "(unity-builder-job-|helper-pod-)" | while read pod; do
            kubectl delete "$pod" --ignore-not-found=true || true
          done || true
          # Only delete PVCs that match our naming pattern (unity-builder-pvc-*)
          # Wait for PVC deletion to complete to ensure underlying PVs are released
          kubectl get pvc -n default -o name 2>/dev/null | grep "unity-builder-pvc-" | while read pvc; do
            kubectl delete "$pvc" --ignore-not-found=true || true
          done || true
          # Wait for PVCs to be fully deleted (up to 30 seconds)
          echo "Waiting for PVCs to be deleted..."
          for i in {1..30}; do
            PVC_COUNT=$(kubectl get pvc -n default 2>/dev/null | grep "unity-builder-pvc-" | wc -l || echo "0")
            if [ "$PVC_COUNT" -eq 0 ]; then
              echo "All PVCs deleted"
              break
            fi
            echo "Waiting for PVCs to be deleted... ($i/30) - Found $PVC_COUNT PVCs"
            sleep 1
          done
          # Clean up PersistentVolumes that are in Released state (orphaned from deleted PVCs)
          # This is important for local-path storage in k3d where PVs might not auto-delete
          echo "Cleaning up orphaned PersistentVolumes..."
          kubectl get pv 2>/dev/null | grep -E "(Released|Failed)" | awk '{print $1}' | while read pv; do
            if [ -n "$pv" ] && [ "$pv" != "NAME" ]; then
              echo "Deleting orphaned PV: $pv"
              kubectl delete pv "$pv" --ignore-not-found=true || true
            fi
          done || true
          kubectl get secrets -n default -o name 2>/dev/null | grep "build-credentials-" | while read secret; do
            kubectl delete "$secret" --ignore-not-found=true || true
          done || true
          sleep 3
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -af --volumes || true
          # Clean up disk space on k3d node
          docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
          # Wait for disk pressure to clear
          for i in {1..15}; do
            if kubectl describe nodes 2>/dev/null | grep -q "node.kubernetes.io/disk-pressure"; then
              echo "Disk pressure detected, cleaning up... ($i/15)"
              docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
              docker system prune -af --volumes || true
              sleep 2
            else
              break
            fi
          done
      - name: Ensure disk pressure cleared before test
        timeout-minutes: 3
        run: |
          echo "Ensuring disk pressure is cleared before test..."
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -af --volumes || true
          docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
          # Wait for disk pressure taints to clear (with aggressive cleanup)
          # Limit to 10 attempts to avoid a timeout - if cleanup doesn't work, just remove the taint
          PREVIOUS_DISK_USAGE=100
          for i in {1..10}; do
            HAS_DISK_PRESSURE=$(kubectl describe nodes 2>/dev/null | grep -q "node.kubernetes.io/disk-pressure" && echo "true" || echo "false")
            if [ "$HAS_DISK_PRESSURE" = "true" ]; then
              echo "Disk pressure detected, cleaning up aggressively... ($i/10)"
              # Check actual disk usage on the node
              DISK_USAGE=$(docker exec k3d-unity-builder-agent-0 sh -c "df -h / 2>/dev/null | tail -1 | awk '{print \$5}' | sed 's/%//'" || echo "unknown")
              echo "Current disk usage on k3d node: ${DISK_USAGE}%"

              # Use k3s/containerd commands instead of docker (docker is not available inside k3d nodes)
              # Clean up k3s containerd snapshots and images
              docker exec k3d-unity-builder-agent-0 sh -c "crictl rmi --prune 2>/dev/null || true" || true
              docker exec k3d-unity-builder-agent-0 sh -c "crictl rmp --all 2>/dev/null || true" || true
              # Clean up old containerd snapshots
              docker exec k3d-unity-builder-agent-0 sh -c "find /var/lib/rancher/k3s/agent/containerd -type d -name 'snapshots' -exec rm -rf {}/* 2>/dev/null \; || true" || true
              # Clean up k3s logs and temp files
              docker exec k3d-unity-builder-agent-0 sh -c "find /var/lib/rancher/k3s -type f -name '*.log' -delete 2>/dev/null || true" || true
              docker exec k3d-unity-builder-agent-0 sh -c "find /tmp -type f -mtime +0 -delete 2>/dev/null || true" || true
              docker exec k3d-unity-builder-agent-0 sh -c "find /var/log -type f -name '*.log' -mtime +0 -delete 2>/dev/null || true" || true
              # Clean up host docker
              docker system prune -af --volumes || true

              # Check if disk usage improved
              NEW_DISK_USAGE=$(docker exec k3d-unity-builder-agent-0 sh -c "df -h / 2>/dev/null | tail -1 | awk '{print \$5}' | sed 's/%//'" || echo "unknown")
              if [ "$NEW_DISK_USAGE" != "unknown" ] && [ "$PREVIOUS_DISK_USAGE" != "unknown" ]; then
                if [ "$NEW_DISK_USAGE" -ge "$PREVIOUS_DISK_USAGE" ] && [ "$i" -ge 3 ]; then
                  echo "Disk usage not improving (${PREVIOUS_DISK_USAGE}% -> ${NEW_DISK_USAGE}%), breaking cleanup loop and removing taint manually"
                  break
                fi
                PREVIOUS_DISK_USAGE=$NEW_DISK_USAGE
              fi
              sleep 3
            else
              echo "No disk pressure taints found, proceeding with test"
              kubectl describe nodes | grep -i taint || echo "No taints found"
              break
            fi
          done
          # If the disk pressure taint is still present after cleanup, manually remove it (CI only)
          if kubectl describe nodes 2>/dev/null | grep -q "node.kubernetes.io/disk-pressure"; then
            echo "WARNING: Disk pressure taint still present after cleanup. Manually removing taint for CI..."
            NODE_NAMES=$(kubectl get nodes -o name 2>/dev/null | sed 's/node\///' || echo "")
            for node in $NODE_NAMES; do
              kubectl taint nodes "$node" node.kubernetes.io/disk-pressure- 2>/dev/null || true
            done
            echo "Taint removed. Checking nodes..."
            kubectl describe nodes | grep -i taint || echo "No taints found"
          fi
          # Wait for the disk pressure condition to clear (not just the taint); inspect the
          # condition's status, since the "DiskPressure" type string exists on every node
          echo "Waiting for disk pressure condition to clear on nodes..."
          for i in {1..20}; do
            HAS_DISK_PRESSURE_CONDITION=$(kubectl get nodes -o jsonpath='{.items[*].status.conditions[?(@.type=="DiskPressure")].status}' 2>/dev/null | grep -q True && echo "true" || echo "false")
            if [ "$HAS_DISK_PRESSURE_CONDITION" = "true" ]; then
              echo "Disk pressure condition still present, waiting... ($i/20)"
              sleep 2
            else
              echo "Disk pressure condition cleared, proceeding with test"
              break
            fi
          done
          # Final check - if the condition still exists, remove the taint and wait a bit more
          if kubectl get nodes -o jsonpath='{.items[*].status.conditions[?(@.type=="DiskPressure")].status}' 2>/dev/null | grep -q True; then
            echo "WARNING: Disk pressure condition still exists. Removing taint and waiting 10 seconds..."
            NODE_NAMES=$(kubectl get nodes -o name 2>/dev/null | sed 's/node\///' || echo "")
            for node in $NODE_NAMES; do
              kubectl taint nodes "$node" node.kubernetes.io/disk-pressure- 2>/dev/null || true
            done
            sleep 10
          fi
      - name: Run cloud-runner-s3-steps test (validate S3 operations with K8s)
        timeout-minutes: 30
        run: yarn run test "cloud-runner-s3-steps" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: k8s
          AWS_STACK_NAME: game-ci-team-pipelines
          containerCpu: '1000'
          containerMemory: '1024'
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_S3_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
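      # The INPUT_AWSS3ENDPOINT / INPUT_AWSENDPOINT variables mirror how GitHub Actions
      # surfaces action inputs at runtime (INPUT_<NAME>), so the LocalStack endpoint
      # override is visible whether the code reads the input or the plain variable.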
      - name: Clean up K8s test resources
        run: |
          kubectl delete jobs --all --ignore-not-found=true -n default || true
          kubectl get pods -n default -o name 2>/dev/null | grep -E "(unity-builder-job-|helper-pod-)" | while read pod; do
            kubectl delete "$pod" --ignore-not-found=true || true
          done || true
          # Only delete PVCs that match our naming pattern (unity-builder-pvc-*)
          # Wait for PVC deletion to complete to ensure underlying PVs are released
          kubectl get pvc -n default -o name 2>/dev/null | grep "unity-builder-pvc-" | while read pvc; do
            kubectl delete "$pvc" --ignore-not-found=true || true
          done || true
          # Wait for PVCs to be fully deleted (up to 30 seconds)
          echo "Waiting for PVCs to be deleted..."
          for i in {1..30}; do
            PVC_COUNT=$(kubectl get pvc -n default 2>/dev/null | grep "unity-builder-pvc-" | wc -l || echo "0")
            if [ "$PVC_COUNT" -eq 0 ]; then
              echo "All PVCs deleted"
              break
            fi
            echo "Waiting for PVCs to be deleted... ($i/30) - Found $PVC_COUNT PVCs"
            sleep 1
          done
          # Clean up PersistentVolumes that are in Released state (orphaned from deleted PVCs)
          # This is important for local-path storage in k3d where PVs might not auto-delete
          echo "Cleaning up orphaned PersistentVolumes..."
          kubectl get pv 2>/dev/null | grep -E "(Released|Failed)" | awk '{print $1}' | while read pv; do
            if [ -n "$pv" ] && [ "$pv" != "NAME" ]; then
              echo "Deleting orphaned PV: $pv"
              kubectl delete pv "$pv" --ignore-not-found=true || true
            fi
          done || true
          kubectl get secrets -n default -o name 2>/dev/null | grep "build-credentials-" | while read secret; do
            kubectl delete "$secret" --ignore-not-found=true || true
          done || true
          sleep 3
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -af --volumes || true
          # Clean up disk space on k3d node
          docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
          # Wait for disk pressure to clear
          for i in {1..15}; do
            if kubectl describe nodes 2>/dev/null | grep -q "node.kubernetes.io/disk-pressure"; then
              echo "Disk pressure detected, cleaning up... ($i/15)"
              docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
              docker system prune -af --volumes || true
              sleep 2
            else
              break
            fi
          done
      - name: Ensure disk pressure cleared before test
        timeout-minutes: 2
        run: |
          echo "Ensuring disk pressure is cleared before test..."
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -af --volumes || true
          docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
          for i in {1..30}; do
            if kubectl describe nodes 2>/dev/null | grep -q "node.kubernetes.io/disk-pressure"; then
              echo "Disk pressure detected, cleaning up aggressively... ($i/30)"
              docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
              docker system prune -af --volumes || true
              docker exec k3d-unity-builder-agent-0 sh -c "docker images -q | xargs -r docker rmi -f 2>/dev/null || true" || true
              sleep 3
            else
              echo "No disk pressure taints found, proceeding with test"
              break
            fi
          done
      - name: Run cloud-runner-end2end-caching test
        timeout-minutes: 60
        run: yarn run test "cloud-runner-end2end-caching" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: k8s
          KUBE_VOLUME_SIZE: 5Gi
          # Set resource requests for tests - increased memory to prevent OOM kills
          containerCpu: '1000'
          containerMemory: '1024'
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_S3_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up K8s test resources
        run: |
          kubectl delete jobs --all --ignore-not-found=true -n default || true
          kubectl get pods -n default -o name 2>/dev/null | grep -E "(unity-builder-job-|helper-pod-)" | while read pod; do
            kubectl delete "$pod" --ignore-not-found=true || true
          done || true
          # Only delete PVCs that match our naming pattern (unity-builder-pvc-*)
          # Wait for PVC deletion to complete to ensure underlying PVs are released
          kubectl get pvc -n default -o name 2>/dev/null | grep "unity-builder-pvc-" | while read pvc; do
            kubectl delete "$pvc" --ignore-not-found=true || true
          done || true
          # Wait for PVCs to be fully deleted (up to 30 seconds)
          echo "Waiting for PVCs to be deleted..."
          for i in {1..30}; do
            PVC_COUNT=$(kubectl get pvc -n default 2>/dev/null | grep "unity-builder-pvc-" | wc -l || echo "0")
            if [ "$PVC_COUNT" -eq 0 ]; then
              echo "All PVCs deleted"
              break
            fi
            echo "Waiting for PVCs to be deleted... ($i/30) - Found $PVC_COUNT PVCs"
            sleep 1
          done
          # Clean up PersistentVolumes that are in Released state (orphaned from deleted PVCs)
          # This is important for local-path storage in k3d where PVs might not auto-delete
          echo "Cleaning up orphaned PersistentVolumes..."
          kubectl get pv 2>/dev/null | grep -E "(Released|Failed)" | awk '{print $1}' | while read pv; do
            if [ -n "$pv" ] && [ "$pv" != "NAME" ]; then
              echo "Deleting orphaned PV: $pv"
              kubectl delete pv "$pv" --ignore-not-found=true || true
            fi
          done || true
          kubectl get secrets -n default -o name 2>/dev/null | grep "build-credentials-" | while read secret; do
            kubectl delete "$secret" --ignore-not-found=true || true
          done || true
          sleep 3
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -af --volumes || true
          # Clean up disk space on k3d node
          docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
          # Wait for disk pressure to clear
          for i in {1..15}; do
            if kubectl describe nodes 2>/dev/null | grep -q "node.kubernetes.io/disk-pressure"; then
              echo "Disk pressure detected, cleaning up... ($i/15)"
              docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
              docker system prune -af --volumes || true
              sleep 2
            else
              break
            fi
          done
      - name: Ensure disk pressure cleared before test
        timeout-minutes: 2
        run: |
          echo "Ensuring disk pressure is cleared before test..."
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -af --volumes || true
          docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
          for i in {1..30}; do
            if kubectl describe nodes 2>/dev/null | grep -q "node.kubernetes.io/disk-pressure"; then
              echo "Disk pressure detected, cleaning up aggressively... ($i/30)"
              docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
              docker system prune -af --volumes || true
              docker exec k3d-unity-builder-agent-0 sh -c "docker images -q | xargs -r docker rmi -f 2>/dev/null || true" || true
              sleep 3
            else
              echo "No disk pressure taints found, proceeding with test"
              break
            fi
          done
      - name: Run cloud-runner-end2end-retaining test
        timeout-minutes: 60
        run: yarn run test "cloud-runner-end2end-retaining" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: k8s
          KUBE_VOLUME_SIZE: 5Gi
          containerCpu: '512'
          containerMemory: '512'
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_S3_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up K8s test resources
        run: |
          kubectl delete jobs --all --ignore-not-found=true -n default || true
          kubectl get pods -n default -o name 2>/dev/null | grep -E "(unity-builder-job-|helper-pod-)" | while read pod; do
            kubectl delete "$pod" --ignore-not-found=true || true
          done || true
          # Only delete PVCs that match our naming pattern (unity-builder-pvc-*)
          # Wait for PVC deletion to complete to ensure underlying PVs are released
          kubectl get pvc -n default -o name 2>/dev/null | grep "unity-builder-pvc-" | while read pvc; do
            kubectl delete "$pvc" --ignore-not-found=true || true
          done || true
          # Wait for PVCs to be fully deleted (up to 30 seconds)
          echo "Waiting for PVCs to be deleted..."
          for i in {1..30}; do
            PVC_COUNT=$(kubectl get pvc -n default 2>/dev/null | grep "unity-builder-pvc-" | wc -l || echo "0")
            if [ "$PVC_COUNT" -eq 0 ]; then
              echo "All PVCs deleted"
              break
            fi
            echo "Waiting for PVCs to be deleted... ($i/30) - Found $PVC_COUNT PVCs"
            sleep 1
          done
          # Clean up PersistentVolumes that are in Released state (orphaned from deleted PVCs)
          # This is important for local-path storage in k3d where PVs might not auto-delete
          echo "Cleaning up orphaned PersistentVolumes..."
          kubectl get pv 2>/dev/null | grep -E "(Released|Failed)" | awk '{print $1}' | while read pv; do
            if [ -n "$pv" ] && [ "$pv" != "NAME" ]; then
              echo "Deleting orphaned PV: $pv"
              kubectl delete pv "$pv" --ignore-not-found=true || true
            fi
          done || true
          kubectl get secrets -n default -o name 2>/dev/null | grep "build-credentials-" | while read secret; do
            kubectl delete "$secret" --ignore-not-found=true || true
          done || true
          sleep 3
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -af --volumes || true
          # Clean up disk space on k3d node
          docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
          # Wait for disk pressure to clear
          for i in {1..15}; do
            if kubectl describe nodes 2>/dev/null | grep -q "node.kubernetes.io/disk-pressure"; then
              echo "Disk pressure detected, cleaning up... ($i/15)"
              docker exec k3d-unity-builder-agent-0 sh -c "docker system prune -af --volumes 2>/dev/null || true" || true
              docker system prune -af --volumes || true
              sleep 2
            else
              break
            fi
          done

  localstack:
    name: Cloud Runner Tests (LocalStack)
    runs-on: ubuntu-latest
    services:
      localstack:
        image: localstack/localstack
        ports:
          - 4566:4566
        env:
          SERVICES: cloudformation,ecs,kinesis,cloudwatch,s3,logs
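        # Port 4566 is LocalStack's single edge endpoint; every service named in
        # SERVICES is multiplexed through it.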
    steps:
      - uses: actions/checkout@v4
        with:
          lfs: false
      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'yarn'
      - name: Clean up disk space before tests
        run: |
          # Clean up any leftover cache files from previous runs
          rm -rf ./cloud-runner-cache/* || true
          # Clean up system caches and temporary files
          sudo apt-get clean || true
          docker system prune -f || true
          # Show available disk space
          df -h
      - run: yarn install --frozen-lockfile
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-end2end-locking test
        timeout-minutes: 60
        run: yarn run test "cloud-runner-end2end-locking" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
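      # Note: these tests run with PROVIDER_STRATEGY "aws", but AWS_ENDPOINT /
      # AWS_ENDPOINT_URL point the SDK at the LocalStack service container, so no
      # real AWS resources are created.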
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-image test (validate image creation)
        timeout-minutes: 10
        run: yarn run test "cloud-runner-image" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          AWS_S3_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-environment test (validate environment variables)
        timeout-minutes: 30
        run: yarn run test "cloud-runner-environment" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          AWS_S3_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-s3-steps test (validate S3 operations)
        timeout-minutes: 30
        run: yarn run test "cloud-runner-s3-steps" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_STACK_NAME: game-ci-team-pipelines
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          AWS_S3_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-hooks test (validate hooks functionality)
        timeout-minutes: 30
        run: yarn run test "cloud-runner-hooks" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          AWS_S3_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-end2end-caching test
        timeout-minutes: 60
        run: yarn run test "cloud-runner-end2end-caching" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          AWS_S3_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-end2end-retaining test
        timeout-minutes: 60
        run: yarn run test "cloud-runner-end2end-retaining" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-caching test
        timeout-minutes: 60
        run: yarn run test "cloud-runner-caching" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-environment test
        timeout-minutes: 60
        run: yarn run test "cloud-runner-environment" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-image test
        timeout-minutes: 60
        run: yarn run test "cloud-runner-image" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-hooks test
        timeout-minutes: 60
        run: yarn run test "cloud-runner-hooks" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-local-persistence test
        timeout-minutes: 60
        run: yarn run test "cloud-runner-local-persistence" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-locking-core test
        timeout-minutes: 60
        run: yarn run test "cloud-runner-locking-core" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-locking-get-locked test
        timeout-minutes: 60
        run: yarn run test "cloud-runner-locking-get-locked" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          KUBE_STORAGE_CLASS: local-path
          PROVIDER_STRATEGY: aws
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}

  localDocker:
    name: Cloud Runner Tests (Local Docker with LocalStack S3)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          lfs: false
      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'yarn'
      - name: Clean up disk space before tests
        run: |
          # Clean up any leftover cache files from previous runs
          rm -rf ./cloud-runner-cache/* || true
          # Clean up system caches and temporary files
          sudo apt-get clean || true
          docker system prune -f || true
          # Show available disk space
          df -h
      - name: Start LocalStack (S3)
        uses: localstack/setup-localstack@v0.2.4
        with:
          install-awslocal: true
      - name: Verify LocalStack is running
        run: |
          echo "Checking LocalStack status..."
          curl -s http://localhost:4566/_localstack/health | head -10 || echo "LocalStack health check failed"
          # Check if LocalStack container is running
          docker ps | grep localstack || echo "No LocalStack container found"
      - name: Create S3 bucket for tests
        run: |
          awslocal s3 mb s3://$AWS_STACK_NAME || true
          awslocal s3 ls
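      # In this job the provider is local-docker: builds run in containers on the
      # runner's own Docker daemon, and LocalStack is only wired in (via the AWS_*
      # variables) for the steps that exercise S3-backed caching and locking.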
      - run: yarn install --frozen-lockfile
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-image test
        timeout-minutes: 10
        run: yarn run test "cloud-runner-image" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-hooks test
        timeout-minutes: 30
        run: yarn run test "cloud-runner-hooks" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-local-persistence test
        timeout-minutes: 30
        run: yarn run test "cloud-runner-local-persistence" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-locking-core test
        timeout-minutes: 30
        run: yarn run test "cloud-runner-locking-core" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          AWS_STACK_NAME: game-ci-team-pipelines
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          AWS_S3_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-locking-get-locked test
        timeout-minutes: 30
        run: yarn run test "cloud-runner-locking-get-locked" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          AWS_STACK_NAME: game-ci-team-pipelines
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT_URL: http://localhost:4566
          AWS_S3_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-caching test
        timeout-minutes: 30
        run: yarn run test "cloud-runner-caching" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-github-checks test
        timeout-minutes: 30
        run: yarn run test "cloud-runner-github-checks" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-s3-steps test (LocalStack S3 with local-docker)
        timeout-minutes: 30
        run: yarn run test "cloud-runner-s3-steps" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          AWS_STACK_NAME: game-ci-team-pipelines
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_S3_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-end2end-caching test (LocalStack S3 with local-docker)
        timeout-minutes: 60
        run: yarn run test "cloud-runner-end2end-caching" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          AWS_STACK_NAME: game-ci-team-pipelines
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_S3_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}

  # Commented out: Using LocalStack tests instead of real AWS
  # aws:
  #   name: Cloud Runner Tests (AWS)
  #   runs-on: ubuntu-latest
  #   needs: [k8s, localstack]
  #   strategy:
  #     fail-fast: false
  #     matrix:
  #       test:
  #         - 'cloud-runner-end2end-caching'
  #         - 'cloud-runner-end2end-retaining'
  #         - 'cloud-runner-hooks'
  #   steps:
  #     - uses: actions/checkout@v4
  #       with:
  #         lfs: false
  #     - name: Configure AWS Credentials
  #       uses: aws-actions/configure-aws-credentials@v1
  #       with:
  #         aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
  #         aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
  #         aws-region: ${{ env.AWS_REGION }}
  #     - uses: actions/setup-node@v4
  #       with:
  #         node-version: 20
  #         cache: 'yarn'
  #     - run: yarn install --frozen-lockfile
  #     - run: yarn run test "${{ matrix.test }}" --detectOpenHandles --forceExit --runInBand
  #       timeout-minutes: 60
  #       env:
  #         UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
  #         UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
  #         UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
  #         PROJECT_PATH: test-project
  #         TARGET_PLATFORM: StandaloneWindows64
  #         cloudRunnerTests: true
  #         versioning: None
  #         PROVIDER_STRATEGY: aws
  #         AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
  #         AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
  #         GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
  #         GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}