pr feedback

pull/767/head
Frostebite 2025-12-27 15:44:43 +00:00
parent eee8b4cbd1
commit 71f48ceff4
4 changed files with 40 additions and 15 deletions

dist/index.js (vendored): 25 changes

@@ -4599,29 +4599,40 @@ class KubernetesTaskRunner {
// This ensures all log messages are included in BuildResults for test assertions
// If output is empty, we need to be more aggressive about getting logs
const needsFallback = output.trim().length === 0;
+const missingCollectedLogs = !output.includes('Collected Logs');
if (needsFallback) {
cloud_runner_logger_1.default.log('Output is empty, attempting aggressive log collection fallback...');
// Give the pod a moment to finish writing logs before we try to read them
await new Promise((resolve) => setTimeout(resolve, 5000));
}
-// Always try fallback if output is empty, or if pod is terminated (to capture post-build messages)
+// Always try fallback if output is empty, if pod is terminated, or if "Collected Logs" is missing
+// The "Collected Logs" check ensures we try to get post-build messages even if we have some output
try {
const isPodStillRunning = await kubernetes_pods_1.default.IsPodRunning(podName, namespace, kubeClient);
-const shouldTryFallback = !isPodStillRunning || needsFallback;
+const shouldTryFallback = !isPodStillRunning || needsFallback || missingCollectedLogs;
if (shouldTryFallback) {
-cloud_runner_logger_1.default.log(`Pod is ${isPodStillRunning ? 'running' : 'terminated'} and output is ${needsFallback ? 'empty' : 'not empty'}, reading log file as fallback...`);
+const reason = needsFallback
+? 'output is empty'
+: missingCollectedLogs
+? 'Collected Logs missing from output'
+: 'pod is terminated';
+cloud_runner_logger_1.default.log(`Pod is ${isPodStillRunning ? 'running' : 'terminated'} and ${reason}, reading log file as fallback...`);
try {
// Try to read the log file from the pod
// For killed pods (OOM), kubectl exec might not work, so we try multiple approaches
// First try --previous flag for terminated containers, then try without it
let logFileContent = '';
// Try multiple approaches to get the log file
-// Order matters: try terminated container first, then current, then kubectl logs as last resort
+// Order matters: try terminated container first, then current, then PVC, then kubectl logs as last resort
+// For K8s, the PVC is mounted at /data, so try reading from there too
const attempts = [
// For terminated pods, try --previous first
`kubectl exec ${podName} -c ${containerName} -n ${namespace} --previous -- cat /home/job-log.txt 2>/dev/null || echo ""`,
// Try current container
`kubectl exec ${podName} -c ${containerName} -n ${namespace} -- cat /home/job-log.txt 2>/dev/null || echo ""`,
+// Try reading from PVC (/data) in case log was copied there
+`kubectl exec ${podName} -c ${containerName} -n ${namespace} --previous -- cat /data/job-log.txt 2>/dev/null || echo ""`,
+`kubectl exec ${podName} -c ${containerName} -n ${namespace} -- cat /data/job-log.txt 2>/dev/null || echo ""`,
// Try kubectl logs as fallback (might capture stdout even if exec fails)
`kubectl logs ${podName} -c ${containerName} -n ${namespace} --previous 2>/dev/null || echo ""`,
`kubectl logs ${podName} -c ${containerName} -n ${namespace} 2>/dev/null || echo ""`,
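
The hunk above is cut off before the loop that consumes the attempts list. As a rough sketch of how an ordered fallback chain like this is typically drained, the helper below runs each command in turn and keeps the first non-empty result; the function name and the use of execSync are assumptions for illustration, not code from this commit.

import { execSync } from 'child_process';

// Sketch only (not this repo's implementation): run each fallback command in order
// and keep the first non-empty result. Each attempt above already swallows its own
// failure with `|| echo ""`, so an empty string simply means "try the next approach".
function firstNonEmptyLog(attempts: string[]): string {
  for (const attempt of attempts) {
    try {
      const result = execSync(attempt, { encoding: 'utf8' }).trim();
      if (result.length > 0) return result;
    } catch {
      // execSync can still throw (for example if no shell can be spawned); treat that like an empty result.
    }
  }
  return '';
}
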
@@ -8038,7 +8049,8 @@ echo "CACHE_KEY=$CACHE_KEY"`;
fi
# Write "Collected Logs" message for K8s (needed for test assertions)
# Write to both stdout and log file to ensure it's captured even if kubectl has issues
echo "Collected Logs" | tee -a /home/job-log.txt
# Also write to PVC (/data) as backup in case pod is OOM-killed and ephemeral filesystem is lost
echo "Collected Logs" | tee -a /home/job-log.txt /data/job-log.txt 2>/dev/null || echo "Collected Logs" | tee -a /home/job-log.txt
# Write end markers directly to log file (builder might be cleaned up by post-build)
# Also write to stdout for K8s kubectl logs
echo "end of cloud runner job" | tee -a /home/job-log.txt
@@ -8072,7 +8084,8 @@ echo "CACHE_KEY=$CACHE_KEY"`;
fi
# Write "Collected Logs" message for K8s (needed for test assertions)
# Write to both stdout and log file to ensure it's captured even if kubectl has issues
echo "Collected Logs" | tee -a /home/job-log.txt
# Also write to PVC (/data) as backup in case pod is OOM-killed and ephemeral filesystem is lost
echo "Collected Logs" | tee -a /home/job-log.txt /data/job-log.txt 2>/dev/null || echo "Collected Logs" | tee -a /home/job-log.txt
# Write end markers to both stdout and log file (builder might be cleaned up by post-build)
echo "end of cloud runner job" | tee -a /home/job-log.txt
echo "---${cloud_runner_1.default.buildParameters.logId}" | tee -a /home/job-log.txt`;

dist/index.js.map (vendored): 2 changes

File diff suppressed because one or more lines are too long


@@ -216,22 +216,28 @@ class KubernetesTaskRunner {
// This ensures all log messages are included in BuildResults for test assertions
// If output is empty, we need to be more aggressive about getting logs
const needsFallback = output.trim().length === 0;
+const missingCollectedLogs = !output.includes('Collected Logs');
if (needsFallback) {
CloudRunnerLogger.log('Output is empty, attempting aggressive log collection fallback...');
// Give the pod a moment to finish writing logs before we try to read them
await new Promise((resolve) => setTimeout(resolve, 5000));
}
-// Always try fallback if output is empty, or if pod is terminated (to capture post-build messages)
+// Always try fallback if output is empty, if pod is terminated, or if "Collected Logs" is missing
+// The "Collected Logs" check ensures we try to get post-build messages even if we have some output
try {
const isPodStillRunning = await KubernetesPods.IsPodRunning(podName, namespace, kubeClient);
-const shouldTryFallback = !isPodStillRunning || needsFallback;
+const shouldTryFallback = !isPodStillRunning || needsFallback || missingCollectedLogs;
if (shouldTryFallback) {
+const reason = needsFallback
+? 'output is empty'
+: missingCollectedLogs
+? 'Collected Logs missing from output'
+: 'pod is terminated';
CloudRunnerLogger.log(
-`Pod is ${isPodStillRunning ? 'running' : 'terminated'} and output is ${
-needsFallback ? 'empty' : 'not empty'
-}, reading log file as fallback...`,
+`Pod is ${isPodStillRunning ? 'running' : 'terminated'} and ${reason}, reading log file as fallback...`,
);
try {
// Try to read the log file from the pod
@@ -240,12 +246,16 @@ class KubernetesTaskRunner {
let logFileContent = '';
// Try multiple approaches to get the log file
-// Order matters: try terminated container first, then current, then kubectl logs as last resort
+// Order matters: try terminated container first, then current, then PVC, then kubectl logs as last resort
+// For K8s, the PVC is mounted at /data, so try reading from there too
const attempts = [
// For terminated pods, try --previous first
`kubectl exec ${podName} -c ${containerName} -n ${namespace} --previous -- cat /home/job-log.txt 2>/dev/null || echo ""`,
// Try current container
`kubectl exec ${podName} -c ${containerName} -n ${namespace} -- cat /home/job-log.txt 2>/dev/null || echo ""`,
+// Try reading from PVC (/data) in case log was copied there
+`kubectl exec ${podName} -c ${containerName} -n ${namespace} --previous -- cat /data/job-log.txt 2>/dev/null || echo ""`,
+`kubectl exec ${podName} -c ${containerName} -n ${namespace} -- cat /data/job-log.txt 2>/dev/null || echo ""`,
// Try kubectl logs as fallback (might capture stdout even if exec fails)
`kubectl logs ${podName} -c ${containerName} -n ${namespace} --previous 2>/dev/null || echo ""`,
`kubectl logs ${podName} -c ${containerName} -n ${namespace} 2>/dev/null || echo ""`,
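
Since the ternary chain above is easy to misread, the same precedence is spelled out below as a small standalone function: an empty output wins over a missing "Collected Logs" marker, which in turn wins over plain pod termination. The helper name is hypothetical; this is a sketch of the logic in this hunk, not an API from the project.

// Hypothetical helper sketching the fallback decision introduced in this hunk.
// Returns the reason string when a log-file fallback read should be attempted, otherwise undefined.
function fallbackReason(
  isPodStillRunning: boolean,
  needsFallback: boolean,
  missingCollectedLogs: boolean,
): string | undefined {
  if (!isPodStillRunning || needsFallback || missingCollectedLogs) {
    if (needsFallback) return 'output is empty';
    if (missingCollectedLogs) return 'Collected Logs missing from output';
    return 'pod is terminated';
  }
  return undefined;
}

// Example: pod still running, some output captured, but no "Collected Logs" marker yet:
// fallbackReason(true, false, true) === 'Collected Logs missing from output'
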


@@ -194,7 +194,8 @@ echo "CACHE_KEY=$CACHE_KEY"`;
fi
# Write "Collected Logs" message for K8s (needed for test assertions)
# Write to both stdout and log file to ensure it's captured even if kubectl has issues
echo "Collected Logs" | tee -a /home/job-log.txt
# Also write to PVC (/data) as backup in case pod is OOM-killed and ephemeral filesystem is lost
echo "Collected Logs" | tee -a /home/job-log.txt /data/job-log.txt 2>/dev/null || echo "Collected Logs" | tee -a /home/job-log.txt
# Write end markers directly to log file (builder might be cleaned up by post-build)
# Also write to stdout for K8s kubectl logs
echo "end of cloud runner job" | tee -a /home/job-log.txt
@@ -229,7 +230,8 @@ echo "CACHE_KEY=$CACHE_KEY"`;
fi
# Write "Collected Logs" message for K8s (needed for test assertions)
# Write to both stdout and log file to ensure it's captured even if kubectl has issues
echo "Collected Logs" | tee -a /home/job-log.txt
# Also write to PVC (/data) as backup in case pod is OOM-killed and ephemeral filesystem is lost
echo "Collected Logs" | tee -a /home/job-log.txt /data/job-log.txt 2>/dev/null || echo "Collected Logs" | tee -a /home/job-log.txt
# Write end markers to both stdout and log file (builder might be cleaned up by post-build)
echo "end of cloud runner job" | tee -a /home/job-log.txt
echo "---${CloudRunner.buildParameters.logId}" | tee -a /home/job-log.txt`;