pr feedback
parent f61478ba77
commit 192cb2e14e
@@ -4414,9 +4414,7 @@ class KubernetesTaskRunner {
 cloud_runner_logger_1.default.log(`Streaming logs from pod: ${podName} container: ${containerName} namespace: ${namespace} ${cloud_runner_1.default.buildParameters.kubeVolumeSize}/${cloud_runner_1.default.buildParameters.containerCpu}/${cloud_runner_1.default.buildParameters.containerMemory}`);
 const isRunning = await kubernetes_pods_1.default.IsPodRunning(podName, namespace, kubeClient);
 let extraFlags = ``;
-extraFlags += isRunning
-    ? ` -f -c ${containerName} -n ${namespace}`
-    : ` --previous -n ${namespace}`;
+extraFlags += isRunning ? ` -f -c ${containerName} -n ${namespace}` : ` --previous -n ${namespace}`;
 const callback = (outputChunk) => {
     output += outputChunk;
     // split output chunk and handle per line
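The only change in this hunk is stylistic: the ternary that picks the kubectl log flags is collapsed onto a single line. As a minimal sketch of what each branch produces, with hypothetical pod/container/namespace values (the base `kubectl logs ${podName}` invocation is assumed from the surrounding method and is not shown in this hunk):

// Illustrative only: prints the two commands the ternary can produce.
// podName/containerName/namespace are hypothetical example values, not taken from the diff.
const podName = 'unity-builder-example';
const containerName = 'main';
const namespace = 'cloud-runner';
for (const isRunning of [true, false]) {
  const extraFlags = isRunning
    ? ` -f -c ${containerName} -n ${namespace}` // running pod: follow the live container's logs
    : ` --previous -n ${namespace}`; // finished pod: read the previous container's logs
  console.log(`kubectl logs ${podName}${extraFlags}`);
}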
@@ -4436,10 +4434,29 @@ class KubernetesTaskRunner {
     cloud_runner_logger_1.default.log(`Previous container not found, trying current container logs...`);
     try {
         await cloud_runner_system_1.CloudRunnerSystem.Run(`kubectl logs ${podName} -c ${containerName} -n ${namespace}`, false, true, callback);
+        // If we successfully got logs, check for end of transmission
+        if (follow_log_stream_service_1.FollowLogStreamService.DidReceiveEndOfTransmission) {
+            cloud_runner_logger_1.default.log('end of log stream');
+            break;
+        }
+        // If we got logs but no end marker, continue trying (might be more logs)
+        if (retriesAfterFinish < KubernetesTaskRunner.maxRetry) {
+            retriesAfterFinish++;
+            continue;
+        }
+        // If we've exhausted retries, break
+        break;
     }
     catch (fallbackError) {
         cloud_runner_logger_1.default.log(`Fallback log fetch also failed: ${fallbackError}`);
-        // If both fail, continue - we'll get what we can from pod status
+        // If both fail, continue retrying if we haven't exhausted retries
+        if (retriesAfterFinish < KubernetesTaskRunner.maxRetry) {
+            retriesAfterFinish++;
+            continue;
+        }
+        // Only break if we've exhausted all retries
+        cloud_runner_logger_1.default.logWarning(`Could not fetch any container logs after ${KubernetesTaskRunner.maxRetry} retries`);
+        break;
     }
 }
 if (continueStreaming) {
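The success path added above checks FollowLogStreamService.DidReceiveEndOfTransmission after the fallback fetch rather than assuming the fallback delivered a complete log. The flag itself is set elsewhere; FollowLogStreamService is not part of this diff, so the sketch below is only a rough illustration of the pattern, and the sentinel string is purely hypothetical:

// Purely illustrative: a static flag flipped by the log callback when a sentinel line arrives.
// The real sentinel and parsing belong to FollowLogStreamService and are not shown in this diff.
class EndOfTransmissionTracker {
  public static DidReceiveEndOfTransmission = false;

  public static handleChunk(outputChunk: string): void {
    for (const line of outputChunk.split('\n')) {
      if (line.includes('end of transmission')) {
        // hypothetical sentinel
        EndOfTransmissionTracker.DidReceiveEndOfTransmission = true;
      }
    }
  }
}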
@@ -4449,13 +4466,14 @@ class KubernetesTaskRunner {
         retriesAfterFinish++;
         continue;
     }
-    // Don't throw if we're just missing previous container logs - this is non-fatal
-    if (error?.message?.includes('previous terminated container')) {
-        cloud_runner_logger_1.default.logWarning(`Could not fetch previous container logs, but continuing...`);
-        break;
-    }
-    throw error;
+    // If we've exhausted retries and it's not a previous container issue, throw
+    if (!error?.message?.includes('previous terminated container')) {
+        throw error;
+    }
+    // For previous container errors, we've already tried fallback, so just break
+    cloud_runner_logger_1.default.logWarning(`Could not fetch previous container logs after retries, but continuing with available logs`);
+    break;
 }
 if (follow_log_stream_service_1.FollowLogStreamService.DidReceiveEndOfTransmission) {
     cloud_runner_logger_1.default.log('end of log stream');
     break;
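Taken together, the two hunks above restructure the catch path of the log-streaming loop. Because the logic is spread across several added blocks, here is a condensed, self-contained TypeScript sketch of the resulting control flow. It is an illustration only: the kubectl invocations are stubbed out as injected functions, the `isRunning` pre-check and the logging calls are omitted, and `streamWithFallbackSketch` is not a function that exists in the repository.

// Condensed sketch of the retry/fallback flow introduced above (illustration, not repository code).
async function streamWithFallbackSketch(
  streamLogs: () => Promise<void>, // stands in for: kubectl logs ${podName}${extraFlags}
  fallbackLogs: () => Promise<void>, // stands in for: kubectl logs ${podName} -c ${containerName} -n ${namespace}
  isPodRunning: () => Promise<boolean>,
  endOfTransmission: () => boolean,
  maxRetry: number,
): Promise<void> {
  let retriesAfterFinish = 0;
  while (true) {
    try {
      await streamLogs();
    } catch (error: any) {
      const continueStreaming = await isPodRunning();
      // Pod is gone and `--previous` had nothing to show: fall back to the current container's logs.
      if (!continueStreaming && error?.message?.includes('previous terminated container')) {
        try {
          await fallbackLogs();
          if (endOfTransmission()) break; // fallback delivered the full log
          if (retriesAfterFinish < maxRetry) {
            retriesAfterFinish++;
            continue; // logs may still be arriving
          }
          break; // retries exhausted, keep what we have
        } catch {
          if (retriesAfterFinish < maxRetry) {
            retriesAfterFinish++;
            continue; // both fetches failed, try again
          }
          break; // give up, but do not fail the run over missing logs
        }
      }
      if (continueStreaming) continue; // pod still running, resume streaming
      if (retriesAfterFinish < maxRetry) {
        retriesAfterFinish++;
        continue;
      }
      // Retries exhausted: only "previous terminated container" errors are treated as non-fatal.
      if (!error?.message?.includes('previous terminated container')) throw error;
      break;
    }
    if (endOfTransmission()) break;
  }
}

The behavioral changes visible in the diff: a successful fallback fetch still waits for the end-of-transmission marker, retries in both the success and failure branches are bounded by maxRetry, and after retries are exhausted only errors unrelated to a missing previous container are rethrown.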
File diff suppressed because one or more lines are too long
@@ -30,9 +30,7 @@ class KubernetesTaskRunner {
 );
 const isRunning = await KubernetesPods.IsPodRunning(podName, namespace, kubeClient);
 let extraFlags = ``;
-extraFlags += isRunning
-  ? ` -f -c ${containerName} -n ${namespace}`
-  : ` --previous -n ${namespace}`;
+extraFlags += isRunning ? ` -f -c ${containerName} -n ${namespace}` : ` --previous -n ${namespace}`;

 const callback = (outputChunk: string) => {
   output += outputChunk;
@@ -53,31 +51,62 @@ class KubernetesTaskRunner {
   await new Promise((resolve) => setTimeout(resolve, 3000));
   const continueStreaming = await KubernetesPods.IsPodRunning(podName, namespace, kubeClient);
   CloudRunnerLogger.log(`K8s logging error ${error} ${continueStreaming}`);

   // If pod is not running and we tried --previous but it failed, try without --previous
   if (!isRunning && !continueStreaming && error?.message?.includes('previous terminated container')) {
     CloudRunnerLogger.log(`Previous container not found, trying current container logs...`);
     try {
-      await CloudRunnerSystem.Run(`kubectl logs ${podName} -c ${containerName} -n ${namespace}`, false, true, callback);
+      await CloudRunnerSystem.Run(
+        `kubectl logs ${podName} -c ${containerName} -n ${namespace}`,
+        false,
+        true,
+        callback,
+      );
+      // If we successfully got logs, check for end of transmission
+      if (FollowLogStreamService.DidReceiveEndOfTransmission) {
+        CloudRunnerLogger.log('end of log stream');
+        break;
+      }
+      // If we got logs but no end marker, continue trying (might be more logs)
+      if (retriesAfterFinish < KubernetesTaskRunner.maxRetry) {
+        retriesAfterFinish++;
+        continue;
+      }
+      // If we've exhausted retries, break
+      break;
     } catch (fallbackError: any) {
       CloudRunnerLogger.log(`Fallback log fetch also failed: ${fallbackError}`);
-      // If both fail, continue - we'll get what we can from pod status
+      // If both fail, continue retrying if we haven't exhausted retries
+      if (retriesAfterFinish < KubernetesTaskRunner.maxRetry) {
+        retriesAfterFinish++;
+        continue;
+      }
+      // Only break if we've exhausted all retries
+      CloudRunnerLogger.logWarning(
+        `Could not fetch any container logs after ${KubernetesTaskRunner.maxRetry} retries`,
+      );
+      break;
     }
   }

   if (continueStreaming) {
     continue;
   }
   if (retriesAfterFinish < KubernetesTaskRunner.maxRetry) {
     retriesAfterFinish++;

     continue;
   }
-  // Don't throw if we're just missing previous container logs - this is non-fatal
-  if (error?.message?.includes('previous terminated container')) {
-    CloudRunnerLogger.logWarning(`Could not fetch previous container logs, but continuing...`);
-    break;
-  }
-  throw error;
+  // If we've exhausted retries and it's not a previous container issue, throw
+  if (!error?.message?.includes('previous terminated container')) {
+    throw error;
+  }
+  // For previous container errors, we've already tried fallback, so just break
+  CloudRunnerLogger.logWarning(
+    `Could not fetch previous container logs after retries, but continuing with available logs`,
+  );
+  break;
 }
 if (FollowLogStreamService.DidReceiveEndOfTransmission) {
   CloudRunnerLogger.log('end of log stream');
   break;
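The TypeScript hunks above are the source for the compiled changes earlier in the diff. One detail worth calling out: once retries are exhausted, the error is classified purely by its message. A minimal sketch of that check, using a hypothetical helper name and made-up error strings for illustration:

// Hypothetical helper, not part of the PR: it only mirrors the final check in the catch block.
function isFatalAfterRetries(error: { message?: string } | undefined): boolean {
  // "previous terminated container" errors were already handled by the fallback path,
  // so they end the loop quietly; anything else is rethrown to the caller.
  return !error?.message?.includes('previous terminated container');
}

// Example (made-up error strings): a missing previous container is non-fatal, other errors are not.
console.log(isFatalAfterRetries(new Error('previous terminated container "main" in pod not found'))); // false
console.log(isFatalAfterRetries(new Error('pods "unity-builder-example" not found'))); // true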