pr feedback

pull/767/head
Frostebite 2025-12-10 20:52:50 +00:00
parent b4fb0c00ce
commit 80db790938
4 changed files with 39 additions and 15 deletions

dist/index.js vendored (26 changes)

@@ -5645,17 +5645,27 @@ class RemoteClient {
// For all providers, we write to stdout so it gets piped through the log stream
// The log stream will capture it and add it to BuildResults
const successMessage = `Activation successful`;
// Write directly to log file first to ensure it's captured even if pipe fails
// This is critical for all providers, especially K8s where timing matters
try {
const logFilePath = cloud_runner_1.default.isCloudRunnerEnvironment
? `/home/job-log.txt`
: node_path_1.default.join(process.cwd(), 'temp', 'job-log.txt');
if (node_fs_1.default.existsSync(node_path_1.default.dirname(logFilePath))) {
node_fs_1.default.appendFileSync(logFilePath, `${successMessage}\n`);
}
}
catch (error) {
// If direct file write fails, continue with other methods
}
// Write to stdout so it gets piped through remote-cli-log-stream when invoked via pipe
// This ensures the message is captured in BuildResults for all providers
// Use synchronous write and ensure newline is included for proper flushing
process.stdout.write(`${successMessage}\n`, 'utf8');
// For K8s, also write directly to stdout (not through pipe) to ensure kubectl logs captures it
// This is critical because kubectl logs reads from stdout, and the pipe might not process
// the message in time before the container exits
// For K8s, also write to stderr as a backup since kubectl logs reads from both stdout and stderr
// This ensures the message is captured even if stdout pipe has issues
if (cloud_runner_options_1.default.providerStrategy === 'k8s') {
// Write directly to stdout so kubectl logs can capture it immediately
// This bypasses the pipe to ensure the message is captured even if the pipe fails
process.stdout.write(`${successMessage}\n`, 'utf8');
process.stderr.write(`${successMessage}\n`, 'utf8');
}
// Ensure stdout is flushed before process exits (critical for K8s where process might exit quickly)
// For non-TTY streams, we need to explicitly ensure the write completes
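Taken together, this hunk layers three delivery channels for the success marker: durable file append first, then the piped stdout stream, then stderr as a K8s backup. A minimal sketch of that pattern in TypeScript, with the CloudRunner environment flag and provider strategy stubbed through environment variables (assumptions for illustration, not the project's API):

import fs from 'node:fs';
import path from 'node:path';

// Hypothetical stand-ins for CloudRunner.isCloudRunnerEnvironment and
// CloudRunnerOptions.providerStrategy.
const isCloudRunnerEnvironment = process.env.CLOUD_RUNNER_ENV === 'true';
const providerStrategy = process.env.PROVIDER_STRATEGY ?? 'local';

function emitSuccessMessage(message: string): void {
  // 1. Durable first: append to the job log file so the marker survives a broken pipe.
  try {
    const logFilePath = isCloudRunnerEnvironment
      ? '/home/job-log.txt'
      : path.join(process.cwd(), 'temp', 'job-log.txt');
    if (fs.existsSync(path.dirname(logFilePath))) {
      fs.appendFileSync(logFilePath, `${message}\n`);
    }
  } catch {
    // Fall through to the stream writes below.
  }
  // 2. stdout, so the marker flows through the piped log stream into BuildResults.
  process.stdout.write(`${message}\n`, 'utf8');
  // 3. On K8s, mirror to stderr: kubectl logs merges both streams, so the marker
  //    survives even if the stdout pipe is torn down early.
  if (providerStrategy === 'k8s') {
    process.stderr.write(`${message}\n`, 'utf8');
  }
}

emitSuccessMessage('Activation successful');

The ordering is the point of the design: the file append is the only channel that survives a torn-down pipe, so it runs before either best-effort stream write.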
@@ -7530,6 +7540,7 @@ echo "CACHE_KEY=$CACHE_KEY"`;
set -e
# Write end marker and pipe through log stream
# Use set +e to prevent failure if builder path doesn't exist (builder might have been cleaned up)
# Keep set +e for the rest of the script to prevent exit on error
set +e
if [ -f "${builderPath}" ]; then
echo "end of cloud runner job" | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt || echo "end of cloud runner job" >> /home/job-log.txt
@@ -7539,7 +7550,8 @@ echo "CACHE_KEY=$CACHE_KEY"`;
echo "end of cloud runner job" >> /home/job-log.txt
echo "---${cloud_runner_1.default.buildParameters.logId}" >> /home/job-log.txt
fi
set -e
# Don't restore set -e; keep set +e to prevent the script from exiting on error
# This ensures the script completes successfully even if some operations fail
# Mirror cache back into workspace for test assertions
mkdir -p "$GITHUB_WORKSPACE/cloud-runner-cache/cache/$CACHE_KEY/Library"
mkdir -p "$GITHUB_WORKSPACE/cloud-runner-cache/cache/$CACHE_KEY/build"

dist/index.js.map vendored (2 changes)

File diff suppressed because one or more lines are too long

@@ -169,18 +169,28 @@ export class RemoteClient {
// The log stream will capture it and add it to BuildResults
const successMessage = `Activation successful`;
// Write directly to log file first to ensure it's captured even if pipe fails
// This is critical for all providers, especially K8s where timing matters
try {
const logFilePath = CloudRunner.isCloudRunnerEnvironment
? `/home/job-log.txt`
: path.join(process.cwd(), 'temp', 'job-log.txt');
if (fs.existsSync(path.dirname(logFilePath))) {
fs.appendFileSync(logFilePath, `${successMessage}\n`);
}
} catch (error) {
// If direct file write fails, continue with other methods
}
// Write to stdout so it gets piped through remote-cli-log-stream when invoked via pipe
// This ensures the message is captured in BuildResults for all providers
// Use synchronous write and ensure newline is included for proper flushing
process.stdout.write(`${successMessage}\n`, 'utf8');
// For K8s, also write directly to stdout (not through pipe) to ensure kubectl logs captures it
// This is critical because kubectl logs reads from stdout, and the pipe might not process
// the message in time before the container exits
// For K8s, also write to stderr as a backup since kubectl logs reads from both stdout and stderr
// This ensures the message is captured even if stdout pipe has issues
if (CloudRunnerOptions.providerStrategy === 'k8s') {
// Write directly to stdout so kubectl logs can capture it immediately
// This bypasses the pipe to ensure the message is captured even if the pipe fails
process.stdout.write(`${successMessage}\n`, 'utf8');
process.stderr.write(`${successMessage}\n`, 'utf8');
}
// Ensure stdout is flushed before process exits (critical for K8s where process might exit quickly)
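The flush concern in that last comment can be made concrete: on non-TTY stdout, write() buffers, and a fast exit can drop the tail of the log. One way to wait for the write to complete, sketched with a hypothetical helper (not the project's API):

// Resolves once the chunk has been handed off or the buffer has drained.
// Resolving twice (callback plus 'drain') is harmless for a Promise.
function writeAndFlush(message: string): Promise<void> {
  return new Promise((resolve) => {
    const flushed = process.stdout.write(`${message}\n`, 'utf8', () => resolve());
    if (!flushed) {
      process.stdout.once('drain', () => resolve());
    }
  });
}

async function main(): Promise<void> {
  await writeAndFlush('Activation successful');
}
void main();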

@@ -184,6 +184,7 @@ echo "CACHE_KEY=$CACHE_KEY"`;
set -e
# Write end marker and pipe through log stream
# Use set +e to prevent failure if builder path doesn't exist (builder might have been cleaned up)
# Keep set +e for the rest of the script to prevent exit on error
set +e
if [ -f "${builderPath}" ]; then
echo "end of cloud runner job" | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt || echo "end of cloud runner job" >> /home/job-log.txt
@@ -193,7 +194,8 @@ echo "CACHE_KEY=$CACHE_KEY"`;
echo "end of cloud runner job" >> /home/job-log.txt
echo "---${CloudRunner.buildParameters.logId}" >> /home/job-log.txt
fi
set -e
# Don't restore set -e; keep set +e to prevent the script from exiting on error
# This ensures the script completes successfully even if some operations fail
# Mirror cache back into workspace for test assertions
mkdir -p "$GITHUB_WORKSPACE/cloud-runner-cache/cache/$CACHE_KEY/Library"
mkdir -p "$GITHUB_WORKSPACE/cloud-runner-cache/cache/$CACHE_KEY/build"