pr feedback

parent 8824ea4f18
commit 35c6d45981
@@ -728,6 +728,230 @@ jobs:
          AWS_ENDPOINT_URL: http://localhost:4566
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
  localDocker:
    name: Cloud Runner Tests (Local Docker with LocalStack S3)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          lfs: false
      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'yarn'
      - name: Clean up disk space before tests
        run: |
          # Clean up any leftover cache files from previous runs
          rm -rf ./cloud-runner-cache/* || true
          # Clean up system caches and temporary files
          sudo apt-get clean || true
          docker system prune -f || true
          # Show available disk space
          df -h
      - name: Start LocalStack (S3)
        uses: localstack/setup-localstack@v0.2.4
        with:
          install-awslocal: true
      - name: Verify LocalStack is running
        run: |
          echo "Checking LocalStack status..."
          curl -s http://localhost:4566/_localstack/health | head -10 || echo "LocalStack health check failed"
          # Check if LocalStack container is running
          docker ps | grep localstack || echo "No LocalStack container found"
      - name: Create S3 bucket for tests
        run: |
          awslocal s3 mb s3://$AWS_STACK_NAME || true
          awslocal s3 ls
      - run: yarn install --frozen-lockfile
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-image test
        timeout-minutes: 10
        run: yarn run test "cloud-runner-image" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-hooks test
        timeout-minutes: 30
        run: yarn run test "cloud-runner-hooks" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-local-persistence test
        timeout-minutes: 30
        run: yarn run test "cloud-runner-local-persistence" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-locking-core test
        timeout-minutes: 30
        run: yarn run test "cloud-runner-locking-core" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-locking-get-locked test
        timeout-minutes: 30
        run: yarn run test "cloud-runner-locking-get-locked" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-caching test
        timeout-minutes: 30
        run: yarn run test "cloud-runner-caching" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-github-checks test
        timeout-minutes: 30
        run: yarn run test "cloud-runner-github-checks" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-s3-steps test (LocalStack S3 with local-docker)
        timeout-minutes: 30
        run: yarn run test "cloud-runner-s3-steps" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          AWS_STACK_NAME: game-ci-team-pipelines
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_S3_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
      - name: Clean up disk space
        run: |
          rm -rf ./cloud-runner-cache/* || true
          docker system prune -f || true
          df -h
      - name: Run cloud-runner-end2end-caching test (LocalStack S3 with local-docker)
        timeout-minutes: 60
        run: yarn run test "cloud-runner-end2end-caching" --detectOpenHandles --forceExit --runInBand
        env:
          UNITY_EMAIL: ${{ secrets.UNITY_EMAIL }}
          UNITY_PASSWORD: ${{ secrets.UNITY_PASSWORD }}
          UNITY_SERIAL: ${{ secrets.UNITY_SERIAL }}
          PROJECT_PATH: test-project
          TARGET_PLATFORM: StandaloneWindows64
          cloudRunnerTests: true
          versioning: None
          PROVIDER_STRATEGY: local-docker
          AWS_STACK_NAME: game-ci-team-pipelines
          AWS_ACCESS_KEY_ID: test
          AWS_SECRET_ACCESS_KEY: test
          AWS_S3_ENDPOINT: http://localhost:4566
          AWS_ENDPOINT: http://localhost:4566
          INPUT_AWSS3ENDPOINT: http://localhost:4566
          INPUT_AWSENDPOINT: http://localhost:4566
          AWS_S3_FORCE_PATH_STYLE: 'true'
          AWS_EC2_METADATA_DISABLED: 'true'
          GIT_PRIVATE_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GIT_PRIVATE_TOKEN }}
  # Commented out: Using LocalStack tests instead of real AWS
  # aws:
  #   name: Cloud Runner Tests (AWS)
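
Note on the LocalStack wiring above: the s3-steps and end2end-caching runs point every S3-related variable at LocalStack's edge port (4566), with dummy `test` credentials, path-style addressing, and EC2 metadata lookups disabled. As a rough sketch of how a client could consume those variables — a hypothetical helper assuming @aws-sdk/client-s3, not the action's actual wiring:

import { S3Client, ListBucketsCommand } from '@aws-sdk/client-s3';

// Hypothetical helper: build an S3 client from the same env vars the workflow sets.
function createS3ClientFromEnv(): S3Client {
  const endpoint = process.env.AWS_S3_ENDPOINT ?? process.env.AWS_ENDPOINT; // e.g. http://localhost:4566
  return new S3Client({
    region: process.env.AWS_REGION ?? 'us-east-1',
    ...(endpoint ? { endpoint } : {}),
    // LocalStack serves buckets path-style (http://localhost:4566/<bucket>)
    forcePathStyle: process.env.AWS_S3_FORCE_PATH_STYLE === 'true',
    credentials: {
      accessKeyId: process.env.AWS_ACCESS_KEY_ID ?? 'test',
      secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY ?? 'test',
    },
  });
}

// Usage: confirm the bucket created by `awslocal s3 mb` is visible.
createS3ClientFromEnv()
  .send(new ListBucketsCommand({}))
  .then((response) => console.log(response.Buckets?.map((bucket) => bucket.Name)));

LocalStack accepts any static credentials, so test/test is enough; forcePathStyle matters because LocalStack serves buckets under the endpoint path rather than as virtual-host subdomains.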

@@ -4413,8 +4413,6 @@ class KubernetesTaskRunner {
        await new Promise((resolve) => setTimeout(resolve, 3000));
        cloud_runner_logger_1.default.log(`Streaming logs from pod: ${podName} container: ${containerName} namespace: ${namespace} ${cloud_runner_1.default.buildParameters.kubeVolumeSize}/${cloud_runner_1.default.buildParameters.containerCpu}/${cloud_runner_1.default.buildParameters.containerMemory}`);
        const isRunning = await kubernetes_pods_1.default.IsPodRunning(podName, namespace, kubeClient);
        let extraFlags = ``;
        extraFlags += isRunning ? ` -f -c ${containerName} -n ${namespace}` : ` --previous -n ${namespace}`;
        const callback = (outputChunk) => {
            output += outputChunk;
            // split output chunk and handle per line

@@ -4423,7 +4421,9 @@
            }
        };
        try {
            await cloud_runner_system_1.CloudRunnerSystem.Run(`kubectl logs ${podName}${extraFlags}`, false, true, callback);
            // Always specify container name explicitly to avoid containerd:// errors
            // Use -f for running pods, --previous for terminated pods
            await cloud_runner_system_1.CloudRunnerSystem.Run(`kubectl logs ${podName} -c ${containerName} -n ${namespace}${isRunning ? ' -f' : ' --previous'}`, false, true, callback);
        }
        catch (error) {
            await new Promise((resolve) => setTimeout(resolve, 3000));

@@ -5976,17 +5976,37 @@ class RemoteClientLogger {
        if (cloud_runner_options_1.default.providerStrategy !== 'k8s') {
            return;
        }
        cloud_runner_logger_1.default.log(`Collected Logs`);
        const collectedLogsMessage = `Collected Logs`;
        // For K8s, write to stdout so kubectl logs can capture it
        // This is critical because kubectl logs reads from stdout/stderr, not from GitHub Actions logs
        if (cloud_runner_options_1.default.providerStrategy === 'k8s') {
            process.stdout.write(`${collectedLogsMessage}\n`, 'utf8');
            process.stderr.write(`${collectedLogsMessage}\n`, 'utf8');
        }
        // Also log via CloudRunnerLogger for GitHub Actions
        cloud_runner_logger_1.default.log(collectedLogsMessage);
        // check for log file not existing
        if (!node_fs_1.default.existsSync(RemoteClientLogger.LogFilePath)) {
            cloud_runner_logger_1.default.log(`Log file does not exist`);
            const logFileMissingMessage = `Log file does not exist`;
            if (cloud_runner_options_1.default.providerStrategy === 'k8s') {
                process.stdout.write(`${logFileMissingMessage}\n`, 'utf8');
            }
            cloud_runner_logger_1.default.log(logFileMissingMessage);
            // check if CloudRunner.isCloudRunnerEnvironment is true, log
            if (!cloud_runner_1.default.isCloudRunnerEnvironment) {
                cloud_runner_logger_1.default.log(`Cloud Runner is not running in a cloud environment, not collecting logs`);
                const notCloudEnvMessage = `Cloud Runner is not running in a cloud environment, not collecting logs`;
                if (cloud_runner_options_1.default.providerStrategy === 'k8s') {
                    process.stdout.write(`${notCloudEnvMessage}\n`, 'utf8');
                }
                cloud_runner_logger_1.default.log(notCloudEnvMessage);
            }
            return;
        }
        cloud_runner_logger_1.default.log(`Log file exist`);
        const logFileExistsMessage = `Log file exist`;
        if (cloud_runner_options_1.default.providerStrategy === 'k8s') {
            process.stdout.write(`${logFileExistsMessage}\n`, 'utf8');
        }
        cloud_runner_logger_1.default.log(logFileExistsMessage);
        await new Promise((resolve) => setTimeout(resolve, 1));
        // let hashedLogs = fs.readFileSync(RemoteClientLogger.LogFilePath).toString();
        //

@@ -7533,23 +7553,25 @@ echo "CACHE_KEY=$CACHE_KEY"`;
if [ ! -f "/data/cache/$CACHE_KEY/build/build-$BUILD_GUID.tar" ] && [ ! -f "/data/cache/$CACHE_KEY/build/build-$BUILD_GUID.tar.lz4" ]; then
  tar -cf "/data/cache/$CACHE_KEY/build/build-$BUILD_GUID.tar" --files-from /dev/null || touch "/data/cache/$CACHE_KEY/build/build-$BUILD_GUID.tar"
fi
# Run post-build tasks and pipe output through log stream to capture "Activation successful"
# Use set +e to allow the command to fail without exiting the script, then restore set -e behavior
set +e
node ${builderPath} -m remote-cli-post-build | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt || echo "Post-build command completed with warnings"
set -e
# Write end marker and pipe through log stream
# Use set +e to prevent failure if builder path doesn't exist (builder might have been cleaned up)
# Keep set +e for the rest of the script to prevent exit on error
# Run post-build tasks and capture output
# Note: Post-build may clean up the builder directory, so we write output directly to log file
# Use set +e to allow the command to fail without exiting the script
set +e
# Run post-build and write output to both stdout (for K8s kubectl logs) and log file
# For local-docker, stdout is captured by the log stream mechanism
if [ -f "${builderPath}" ]; then
  echo "end of cloud runner job" | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt || echo "end of cloud runner job" >> /home/job-log.txt
  echo "---${cloud_runner_1.default.buildParameters.logId}" | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt || echo "---${cloud_runner_1.default.buildParameters.logId}" >> /home/job-log.txt
  # Use tee to write to both stdout and log file, ensuring output is captured
  # For K8s, kubectl logs reads from stdout, so we need stdout
  # For local-docker, the log file is read directly
  node ${builderPath} -m remote-cli-post-build 2>&1 | tee -a /home/job-log.txt || echo "Post-build command completed with warnings" | tee -a /home/job-log.txt
else
  # Builder path doesn't exist, write directly to log file
  echo "end of cloud runner job" >> /home/job-log.txt
  echo "---${cloud_runner_1.default.buildParameters.logId}" >> /home/job-log.txt
  # Builder doesn't exist, skip post-build (shouldn't happen, but handle gracefully)
  echo "Builder path not found, skipping post-build" | tee -a /home/job-log.txt
fi
# Write end markers directly to log file (builder might be cleaned up by post-build)
# Also write to stdout for K8s kubectl logs
echo "end of cloud runner job" | tee -a /home/job-log.txt
echo "---${cloud_runner_1.default.buildParameters.logId}" | tee -a /home/job-log.txt
# Don't restore set -e - keep set +e to prevent script from exiting on error
# This ensures the script completes successfully even if some operations fail
# Mirror cache back into workspace for test assertions

@@ -7568,11 +7590,18 @@ echo "CACHE_KEY=$CACHE_KEY"`;
chmod -R +x "/entrypoint.sh"
chmod -R +x "/steps"
{ echo "game ci start"; echo "game ci start" >> /home/job-log.txt; echo "CACHE_KEY=$CACHE_KEY"; echo "$CACHE_KEY"; if [ -n "$LOCKED_WORKSPACE" ]; then echo "Retained Workspace: true"; fi; if [ -n "$LOCKED_WORKSPACE" ] && [ -d "$GITHUB_WORKSPACE/.git" ]; then echo "Retained Workspace Already Exists!"; fi; /entrypoint.sh; } | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt
# Run post-build and ensure output is captured in logs
node ${builderPath} -m remote-cli-post-build | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt || true
# Write end marker through log stream to ensure it's captured in BuildResults
echo "end of cloud runner job" | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt
echo "---${cloud_runner_1.default.buildParameters.logId}" | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt`;
# Run post-build and capture output to both stdout (for kubectl logs) and log file
# Note: Post-build may clean up the builder directory, so write output directly
set +e
if [ -f "${builderPath}" ]; then
  # Use tee to write to both stdout and log file for K8s kubectl logs
  node ${builderPath} -m remote-cli-post-build 2>&1 | tee -a /home/job-log.txt || echo "Post-build command completed with warnings" | tee -a /home/job-log.txt
else
  echo "Builder path not found, skipping post-build" | tee -a /home/job-log.txt
fi
# Write end markers to both stdout and log file (builder might be cleaned up by post-build)
echo "end of cloud runner job" | tee -a /home/job-log.txt
echo "---${cloud_runner_1.default.buildParameters.logId}" | tee -a /home/job-log.txt`;
        }
        // prettier-ignore
        return `

@@ -29,8 +29,6 @@ class KubernetesTaskRunner {
      `Streaming logs from pod: ${podName} container: ${containerName} namespace: ${namespace} ${CloudRunner.buildParameters.kubeVolumeSize}/${CloudRunner.buildParameters.containerCpu}/${CloudRunner.buildParameters.containerMemory}`,
    );
    const isRunning = await KubernetesPods.IsPodRunning(podName, namespace, kubeClient);
    let extraFlags = ``;
    extraFlags += isRunning ? ` -f -c ${containerName} -n ${namespace}` : ` --previous -n ${namespace}`;

    const callback = (outputChunk: string) => {
      output += outputChunk;

@@ -46,7 +44,14 @@
      }
    };
    try {
      await CloudRunnerSystem.Run(`kubectl logs ${podName}${extraFlags}`, false, true, callback);
      // Always specify container name explicitly to avoid containerd:// errors
      // Use -f for running pods, --previous for terminated pods
      await CloudRunnerSystem.Run(
        `kubectl logs ${podName} -c ${containerName} -n ${namespace}${isRunning ? ' -f' : ' --previous'}`,
        false,
        true,
        callback,
      );
    } catch (error: any) {
      await new Promise((resolve) => setTimeout(resolve, 3000));
      const continueStreaming = await KubernetesPods.IsPodRunning(podName, namespace, kubeClient);
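
The replacement call above distills to: always pass -c <container> so kubectl never has to resolve an ambiguous containerd:// reference on multi-container pods, and pick -f (follow) for a running pod versus --previous for a terminated one. As a standalone helper (illustrative only; no such function exists in this change):

// Illustrative helper mirroring the command the new code builds inline.
function buildKubectlLogsCommand(podName: string, containerName: string, namespace: string, isRunning: boolean): string {
  // -f streams a live pod; --previous reads the logs of the last terminated container.
  return `kubectl logs ${podName} -c ${containerName} -n ${namespace}${isRunning ? ' -f' : ' --previous'}`;
}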

@@ -48,20 +48,42 @@ export class RemoteClientLogger {
    if (CloudRunnerOptions.providerStrategy !== 'k8s') {
      return;
    }
    CloudRunnerLogger.log(`Collected Logs`);
    const collectedLogsMessage = `Collected Logs`;

    // For K8s, write to stdout so kubectl logs can capture it
    // This is critical because kubectl logs reads from stdout/stderr, not from GitHub Actions logs
    if (CloudRunnerOptions.providerStrategy === 'k8s') {
      process.stdout.write(`${collectedLogsMessage}\n`, 'utf8');
      process.stderr.write(`${collectedLogsMessage}\n`, 'utf8');
    }

    // Also log via CloudRunnerLogger for GitHub Actions
    CloudRunnerLogger.log(collectedLogsMessage);

    // check for log file not existing
    if (!fs.existsSync(RemoteClientLogger.LogFilePath)) {
      CloudRunnerLogger.log(`Log file does not exist`);
      const logFileMissingMessage = `Log file does not exist`;
      if (CloudRunnerOptions.providerStrategy === 'k8s') {
        process.stdout.write(`${logFileMissingMessage}\n`, 'utf8');
      }
      CloudRunnerLogger.log(logFileMissingMessage);

      // check if CloudRunner.isCloudRunnerEnvironment is true, log
      if (!CloudRunner.isCloudRunnerEnvironment) {
        CloudRunnerLogger.log(`Cloud Runner is not running in a cloud environment, not collecting logs`);
        const notCloudEnvMessage = `Cloud Runner is not running in a cloud environment, not collecting logs`;
        if (CloudRunnerOptions.providerStrategy === 'k8s') {
          process.stdout.write(`${notCloudEnvMessage}\n`, 'utf8');
        }
        CloudRunnerLogger.log(notCloudEnvMessage);
      }

      return;
    }
    CloudRunnerLogger.log(`Log file exist`);
    const logFileExistsMessage = `Log file exist`;
    if (CloudRunnerOptions.providerStrategy === 'k8s') {
      process.stdout.write(`${logFileExistsMessage}\n`, 'utf8');
    }
    CloudRunnerLogger.log(logFileExistsMessage);
    await new Promise((resolve) => setTimeout(resolve, 1));

    // let hashedLogs = fs.readFileSync(RemoteClientLogger.LogFilePath).toString();
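
The same three-step pattern repeats for every message in this method: write to stdout (and sometimes stderr) when running under k8s so kubectl logs can see it, then log through CloudRunnerLogger for the GitHub Actions side. A sketch of how it could be folded into one helper, assuming the file's existing imports (the helper name is invented):

// Hypothetical helper: emit a message to kubectl-visible streams under k8s,
// and always to CloudRunnerLogger for the GitHub Actions log.
function logForK8sAndActions(message: string, alsoStderr: boolean = false): void {
  if (CloudRunnerOptions.providerStrategy === 'k8s') {
    process.stdout.write(`${message}\n`, 'utf8');
    if (alsoStderr) {
      process.stderr.write(`${message}\n`, 'utf8');
    }
  }
  CloudRunnerLogger.log(message);
}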

@@ -177,23 +177,25 @@ echo "CACHE_KEY=$CACHE_KEY"`;
if [ ! -f "/data/cache/$CACHE_KEY/build/build-$BUILD_GUID.tar" ] && [ ! -f "/data/cache/$CACHE_KEY/build/build-$BUILD_GUID.tar.lz4" ]; then
  tar -cf "/data/cache/$CACHE_KEY/build/build-$BUILD_GUID.tar" --files-from /dev/null || touch "/data/cache/$CACHE_KEY/build/build-$BUILD_GUID.tar"
fi
# Run post-build tasks and pipe output through log stream to capture "Activation successful"
# Use set +e to allow the command to fail without exiting the script, then restore set -e behavior
set +e
node ${builderPath} -m remote-cli-post-build | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt || echo "Post-build command completed with warnings"
set -e
# Write end marker and pipe through log stream
# Use set +e to prevent failure if builder path doesn't exist (builder might have been cleaned up)
# Keep set +e for the rest of the script to prevent exit on error
# Run post-build tasks and capture output
# Note: Post-build may clean up the builder directory, so we write output directly to log file
# Use set +e to allow the command to fail without exiting the script
set +e
# Run post-build and write output to both stdout (for K8s kubectl logs) and log file
# For local-docker, stdout is captured by the log stream mechanism
if [ -f "${builderPath}" ]; then
  echo "end of cloud runner job" | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt || echo "end of cloud runner job" >> /home/job-log.txt
  echo "---${CloudRunner.buildParameters.logId}" | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt || echo "---${CloudRunner.buildParameters.logId}" >> /home/job-log.txt
  # Use tee to write to both stdout and log file, ensuring output is captured
  # For K8s, kubectl logs reads from stdout, so we need stdout
  # For local-docker, the log file is read directly
  node ${builderPath} -m remote-cli-post-build 2>&1 | tee -a /home/job-log.txt || echo "Post-build command completed with warnings" | tee -a /home/job-log.txt
else
  # Builder path doesn't exist, write directly to log file
  echo "end of cloud runner job" >> /home/job-log.txt
  echo "---${CloudRunner.buildParameters.logId}" >> /home/job-log.txt
  # Builder doesn't exist, skip post-build (shouldn't happen, but handle gracefully)
  echo "Builder path not found, skipping post-build" | tee -a /home/job-log.txt
fi
# Write end markers directly to log file (builder might be cleaned up by post-build)
# Also write to stdout for K8s kubectl logs
echo "end of cloud runner job" | tee -a /home/job-log.txt
echo "---${CloudRunner.buildParameters.logId}" | tee -a /home/job-log.txt
# Don't restore set -e - keep set +e to prevent script from exiting on error
# This ensures the script completes successfully even if some operations fail
# Mirror cache back into workspace for test assertions
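
The tee -a idiom is the heart of this change: every line lands both on the container's stdout, which kubectl logs reads under k8s, and in /home/job-log.txt, which the local-docker provider reads directly, while set +e (deliberately left unrestored) keeps a failing post-build from aborting the script before the end markers are written. A minimal sketch of the same block as a standalone generator — hypothetical, since the real code inlines this in a much larger template string:

// Hypothetical generator for the post-build tail of the job script.
// Every line is tee'd so both log consumers (kubectl logs and the log file) see it.
function postBuildShellBlock(builderPath: string, logId: string): string {
  return `
set +e
if [ -f "${builderPath}" ]; then
  node ${builderPath} -m remote-cli-post-build 2>&1 | tee -a /home/job-log.txt || echo "Post-build command completed with warnings" | tee -a /home/job-log.txt
else
  echo "Builder path not found, skipping post-build" | tee -a /home/job-log.txt
fi
echo "end of cloud runner job" | tee -a /home/job-log.txt
echo "---${logId}" | tee -a /home/job-log.txt`;
}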

@@ -213,11 +215,18 @@ echo "CACHE_KEY=$CACHE_KEY"`;
chmod -R +x "/entrypoint.sh"
chmod -R +x "/steps"
{ echo "game ci start"; echo "game ci start" >> /home/job-log.txt; echo "CACHE_KEY=$CACHE_KEY"; echo "$CACHE_KEY"; if [ -n "$LOCKED_WORKSPACE" ]; then echo "Retained Workspace: true"; fi; if [ -n "$LOCKED_WORKSPACE" ] && [ -d "$GITHUB_WORKSPACE/.git" ]; then echo "Retained Workspace Already Exists!"; fi; /entrypoint.sh; } | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt
# Run post-build and ensure output is captured in logs
node ${builderPath} -m remote-cli-post-build | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt || true
# Write end marker through log stream to ensure it's captured in BuildResults
echo "end of cloud runner job" | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt
echo "---${CloudRunner.buildParameters.logId}" | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt`;
# Run post-build and capture output to both stdout (for kubectl logs) and log file
# Note: Post-build may clean up the builder directory, so write output directly
set +e
if [ -f "${builderPath}" ]; then
  # Use tee to write to both stdout and log file for K8s kubectl logs
  node ${builderPath} -m remote-cli-post-build 2>&1 | tee -a /home/job-log.txt || echo "Post-build command completed with warnings" | tee -a /home/job-log.txt
else
  echo "Builder path not found, skipping post-build" | tee -a /home/job-log.txt
fi
# Write end markers to both stdout and log file (builder might be cleaned up by post-build)
echo "end of cloud runner job" | tee -a /home/job-log.txt
echo "---${CloudRunner.buildParameters.logId}" | tee -a /home/job-log.txt`;
  }

  // prettier-ignore
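
The older path piped everything through node ${builderPath} -m remote-cli-log-stream, whose implementation is not part of this diff; conceptually it forwards piped stdin into the job log while echoing it. A purely illustrative stand-in for that role (not the real CLI code):

import * as fs from 'node:fs';
import * as readline from 'node:readline';

// Purely illustrative stand-in: echo each stdin line to stdout and append it to
// the job log file, so both the live stream and the file-based reader see it.
const rl = readline.createInterface({ input: process.stdin });
rl.on('line', (line: string) => {
  process.stdout.write(`${line}\n`);
  fs.appendFileSync('/home/job-log.txt', `${line}\n`);
});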