pr feedback

pull/767/head
Frostebite 2025-12-11 23:26:35 +00:00
parent 08ce820c87
commit d12244db60
5 changed files with 243 additions and 13 deletions

dist/index.js vendored

@@ -1669,11 +1669,26 @@ const client_s3_1 = __nccwpck_require__(19250);
const __1 = __nccwpck_require__(41359);
const cloud_runner_options_1 = __importDefault(__nccwpck_require__(66965));
class AwsClientFactory {
static getCredentials() {
// Explicitly provide credentials from environment variables for LocalStack compatibility
// LocalStack accepts any credentials, but the AWS SDK needs them to be explicitly set
const accessKeyId = process.env.AWS_ACCESS_KEY_ID;
const secretAccessKey = process.env.AWS_SECRET_ACCESS_KEY;
if (accessKeyId && secretAccessKey) {
return {
accessKeyId,
secretAccessKey,
};
}
// Return undefined to let AWS SDK use default credential chain
return undefined;
}
static getCloudFormation() {
if (!this.cloudFormation) {
this.cloudFormation = new client_cloudformation_1.CloudFormation({
region: __1.Input.region,
endpoint: cloud_runner_options_1.default.awsCloudFormationEndpoint,
credentials: AwsClientFactory.getCredentials(),
});
}
return this.cloudFormation;
@@ -1683,6 +1698,7 @@ class AwsClientFactory {
this.ecs = new client_ecs_1.ECS({
region: __1.Input.region,
endpoint: cloud_runner_options_1.default.awsEcsEndpoint,
credentials: AwsClientFactory.getCredentials(),
});
}
return this.ecs;
@@ -1692,6 +1708,7 @@ class AwsClientFactory {
this.kinesis = new client_kinesis_1.Kinesis({
region: __1.Input.region,
endpoint: cloud_runner_options_1.default.awsKinesisEndpoint,
credentials: AwsClientFactory.getCredentials(),
});
}
return this.kinesis;
@@ -1701,6 +1718,7 @@ class AwsClientFactory {
this.cloudWatchLogs = new client_cloudwatch_logs_1.CloudWatchLogs({
region: __1.Input.region,
endpoint: cloud_runner_options_1.default.awsCloudWatchLogsEndpoint,
credentials: AwsClientFactory.getCredentials(),
});
}
return this.cloudWatchLogs;
@@ -1711,6 +1729,7 @@ class AwsClientFactory {
region: __1.Input.region,
endpoint: cloud_runner_options_1.default.awsS3Endpoint,
forcePathStyle: true,
credentials: AwsClientFactory.getCredentials(),
});
}
return this.s3;
@@ -4433,27 +4452,98 @@ class KubernetesTaskRunner {
let shouldReadLogs = true;
let shouldCleanup = true;
let retriesAfterFinish = 0;
let kubectlLogsFailedCount = 0;
const maxKubectlLogsFailures = 3;
// eslint-disable-next-line no-constant-condition
while (true) {
await new Promise((resolve) => setTimeout(resolve, 3000));
cloud_runner_logger_1.default.log(`Streaming logs from pod: ${podName} container: ${containerName} namespace: ${namespace} ${cloud_runner_1.default.buildParameters.kubeVolumeSize}/${cloud_runner_1.default.buildParameters.containerCpu}/${cloud_runner_1.default.buildParameters.containerMemory}`);
const isRunning = await kubernetes_pods_1.default.IsPodRunning(podName, namespace, kubeClient);
const callback = (outputChunk) => {
// Filter out kubectl error messages about being unable to retrieve container logs
// These errors pollute the output and don't contain useful information
const lowerChunk = outputChunk.toLowerCase();
if (lowerChunk.includes('unable to retrieve container logs')) {
cloud_runner_logger_1.default.log(`Filtered kubectl error: ${outputChunk.trim()}`);
return;
}
output += outputChunk;
// split output chunk and handle per line
for (const chunk of outputChunk.split(`\n`)) {
({ shouldReadLogs, shouldCleanup, output } = follow_log_stream_service_1.FollowLogStreamService.handleIteration(chunk, shouldReadLogs, shouldCleanup, output));
// Skip empty chunks and kubectl error messages (case-insensitive)
const lowerChunk = chunk.toLowerCase();
if (chunk.trim() && !lowerChunk.includes('unable to retrieve container logs')) {
({ shouldReadLogs, shouldCleanup, output } = follow_log_stream_service_1.FollowLogStreamService.handleIteration(chunk, shouldReadLogs, shouldCleanup, output));
}
}
};
try {
// Always specify container name explicitly to avoid containerd:// errors
// Use -f for running pods, --previous for terminated pods
await cloud_runner_system_1.CloudRunnerSystem.Run(`kubectl logs ${podName} -c ${containerName} -n ${namespace}${isRunning ? ' -f' : ' --previous'}`, false, true, callback);
// Reset failure count on success
kubectlLogsFailedCount = 0;
}
catch (error) {
kubectlLogsFailedCount++;
await new Promise((resolve) => setTimeout(resolve, 3000));
const continueStreaming = await kubernetes_pods_1.default.IsPodRunning(podName, namespace, kubeClient);
cloud_runner_logger_1.default.log(`K8s logging error ${error} ${continueStreaming}`);
// Filter out kubectl error messages from the error output
const errorMessage = error?.message || error?.toString() || '';
const isKubectlLogsError = errorMessage.includes('unable to retrieve container logs for containerd://') ||
errorMessage.toLowerCase().includes('unable to retrieve container logs');
if (isKubectlLogsError) {
cloud_runner_logger_1.default.log(`Kubectl unable to retrieve logs, attempt ${kubectlLogsFailedCount}/${maxKubectlLogsFailures}`);
// If kubectl logs has failed multiple times, try reading the log file directly from the pod
// This works even if the pod is terminated, as long as it hasn't been deleted
if (kubectlLogsFailedCount >= maxKubectlLogsFailures && !isRunning && !continueStreaming) {
cloud_runner_logger_1.default.log(`Attempting to read log file directly from pod as fallback...`);
try {
// Try to read the log file from the pod
// Use kubectl exec for running pods, or try to access via PVC if pod is terminated
let logFileContent = '';
if (isRunning) {
// Pod is still running, try exec
logFileContent = await cloud_runner_system_1.CloudRunnerSystem.Run(`kubectl exec ${podName} -c ${containerName} -n ${namespace} -- cat /home/job-log.txt 2>/dev/null || echo ""`, true, true);
}
else {
// Pod is terminated, try to create a temporary pod to read from the PVC
// First, check if we can still access the pod's filesystem
cloud_runner_logger_1.default.log(`Pod is terminated, attempting to read log file via temporary pod...`);
// For terminated pods, we might not be able to exec, so we'll skip this fallback
// and rely on the log file being written to the PVC (if mounted)
cloud_runner_logger_1.default.logWarning(`Cannot read log file from terminated pod via exec`);
}
if (logFileContent && logFileContent.trim()) {
cloud_runner_logger_1.default.log(`Successfully read log file from pod (${logFileContent.length} chars)`);
// Process the log file content line by line
for (const line of logFileContent.split(`\n`)) {
const lowerLine = line.toLowerCase();
if (line.trim() && !lowerLine.includes('unable to retrieve container logs')) {
({ shouldReadLogs, shouldCleanup, output } = follow_log_stream_service_1.FollowLogStreamService.handleIteration(line, shouldReadLogs, shouldCleanup, output));
}
}
// Check if we got the end of transmission marker
if (follow_log_stream_service_1.FollowLogStreamService.DidReceiveEndOfTransmission) {
cloud_runner_logger_1.default.log('end of log stream (from log file)');
break;
}
}
else {
cloud_runner_logger_1.default.logWarning(`Log file read returned empty content, continuing with available logs`);
// If we can't read the log file, break out of the loop to return whatever logs we have
// This prevents infinite retries when kubectl logs consistently fails
break;
}
}
catch (execError) {
cloud_runner_logger_1.default.logWarning(`Failed to read log file from pod: ${execError}`);
// If we've exhausted all options, break to return whatever logs we have
break;
}
}
}
// If pod is not running and we tried --previous but it failed, try without --previous
if (!isRunning && !continueStreaming && error?.message?.includes('previous terminated container')) {
cloud_runner_logger_1.default.log(`Previous container not found, trying current container logs...`);
@@ -4504,7 +4594,19 @@ class KubernetesTaskRunner {
break;
}
}
return output;
// Filter out kubectl error messages from the final output
// These errors can be added via stderr even when kubectl fails
// We filter them out so they don't pollute the BuildResults
const lines = output.split('\n');
const filteredLines = lines.filter((line) => !line.toLowerCase().includes('unable to retrieve container logs'));
const filteredOutput = filteredLines.join('\n');
// Log if we filtered out significant content
const originalLineCount = lines.length;
const filteredLineCount = filteredLines.length;
if (originalLineCount > filteredLineCount) {
cloud_runner_logger_1.default.log(`Filtered out ${originalLineCount - filteredLineCount} kubectl error message(s) from output`);
}
return filteredOutput;
}
static async watchUntilPodRunning(kubeClient, podName, namespace) {
let waitComplete = false;
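
The bundled changes above mirror the source edits shown further down: the AWS clients now receive explicit credentials, and kubectl's "unable to retrieve container logs" noise is filtered out of the streamed output. As a hedged illustration of the filtering idea only, the sketch below isolates that check into a standalone predicate; the helper names are illustrative and not part of this commit.

```typescript
// Minimal sketch of the log-noise filter used above; helper names are illustrative.
const KUBECTL_NOISE = 'unable to retrieve container logs';

// True when a line is one of kubectl's "unable to retrieve container logs" errors.
function isKubectlLogNoise(line: string): boolean {
  return line.toLowerCase().includes(KUBECTL_NOISE);
}

// Split a raw output chunk into lines, dropping empty lines and kubectl noise.
function cleanChunk(chunk: string): string[] {
  return chunk.split('\n').filter((line) => line.trim() !== '' && !isKubectlLogNoise(line));
}

// Example: kubectl can interleave the error with real build output.
const sample = 'Build step 3\nunable to retrieve container logs for containerd://abc\nBuild step 4';
console.log(cleanChunk(sample)); // -> ['Build step 3', 'Build step 4']
```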

dist/index.js.map vendored

File diff suppressed because one or more lines are too long

@@ -13,11 +13,29 @@ export class AwsClientFactory {
private static cloudWatchLogs: CloudWatchLogs;
private static s3: S3;
private static getCredentials() {
// Explicitly provide credentials from environment variables for LocalStack compatibility
// LocalStack accepts any credentials, but the AWS SDK needs them to be explicitly set
const accessKeyId = process.env.AWS_ACCESS_KEY_ID;
const secretAccessKey = process.env.AWS_SECRET_ACCESS_KEY;
if (accessKeyId && secretAccessKey) {
return {
accessKeyId,
secretAccessKey,
};
}
// Return undefined to let AWS SDK use default credential chain
return undefined;
}
static getCloudFormation(): CloudFormation {
if (!this.cloudFormation) {
this.cloudFormation = new CloudFormation({
region: Input.region,
endpoint: CloudRunnerOptions.awsCloudFormationEndpoint,
credentials: AwsClientFactory.getCredentials(),
});
}
@@ -29,6 +47,7 @@ export class AwsClientFactory {
this.ecs = new ECS({
region: Input.region,
endpoint: CloudRunnerOptions.awsEcsEndpoint,
credentials: AwsClientFactory.getCredentials(),
});
}
@@ -40,6 +59,7 @@ export class AwsClientFactory {
this.kinesis = new Kinesis({
region: Input.region,
endpoint: CloudRunnerOptions.awsKinesisEndpoint,
credentials: AwsClientFactory.getCredentials(),
});
}
@@ -51,6 +71,7 @@ export class AwsClientFactory {
this.cloudWatchLogs = new CloudWatchLogs({
region: Input.region,
endpoint: CloudRunnerOptions.awsCloudWatchLogsEndpoint,
credentials: AwsClientFactory.getCredentials(),
});
}
@@ -63,6 +84,7 @@ export class AwsClientFactory {
region: Input.region,
endpoint: CloudRunnerOptions.awsS3Endpoint,
forcePathStyle: true,
credentials: AwsClientFactory.getCredentials(),
});
}
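
To show how the new getCredentials() fallback plugs into an SDK client, here is a minimal self-contained sketch. The AWS_S3_ENDPOINT variable and the choice of an S3 client pointed at LocalStack are assumptions for illustration; only the credential logic itself comes from the diff.

```typescript
import { S3 } from '@aws-sdk/client-s3';

// Mirror of the factory's fallback: pass explicit credentials when both env
// variables are present (LocalStack accepts any values), otherwise return
// undefined so the SDK falls back to its default credential provider chain.
function getCredentials(): { accessKeyId: string; secretAccessKey: string } | undefined {
  const accessKeyId = process.env.AWS_ACCESS_KEY_ID;
  const secretAccessKey = process.env.AWS_SECRET_ACCESS_KEY;

  return accessKeyId && secretAccessKey ? { accessKeyId, secretAccessKey } : undefined;
}

// Illustrative client; AWS_S3_ENDPOINT (e.g. http://localhost:4566 for
// LocalStack) is an assumed variable, not one read by the action itself.
const s3 = new S3({
  region: process.env.AWS_REGION ?? 'us-east-1',
  endpoint: process.env.AWS_S3_ENDPOINT,
  forcePathStyle: true,
  credentials: getCredentials(),
});

console.log(typeof s3.listBuckets); // 'function'
```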


@@ -22,6 +22,8 @@ class KubernetesTaskRunner {
let shouldReadLogs = true;
let shouldCleanup = true;
let retriesAfterFinish = 0;
let kubectlLogsFailedCount = 0;
const maxKubectlLogsFailures = 3;
// eslint-disable-next-line no-constant-condition
while (true) {
await new Promise((resolve) => setTimeout(resolve, 3000));
@@ -31,16 +33,28 @@ class KubernetesTaskRunner {
const isRunning = await KubernetesPods.IsPodRunning(podName, namespace, kubeClient);
const callback = (outputChunk: string) => {
// Filter out kubectl error messages about being unable to retrieve container logs
// These errors pollute the output and don't contain useful information
const lowerChunk = outputChunk.toLowerCase();
if (lowerChunk.includes('unable to retrieve container logs')) {
CloudRunnerLogger.log(`Filtered kubectl error: ${outputChunk.trim()}`);
return;
}
output += outputChunk;
// split output chunk and handle per line
for (const chunk of outputChunk.split(`\n`)) {
({ shouldReadLogs, shouldCleanup, output } = FollowLogStreamService.handleIteration(
chunk,
shouldReadLogs,
shouldCleanup,
output,
));
// Skip empty chunks and kubectl error messages (case-insensitive)
const lowerChunk = chunk.toLowerCase();
if (chunk.trim() && !lowerChunk.includes('unable to retrieve container logs')) {
({ shouldReadLogs, shouldCleanup, output } = FollowLogStreamService.handleIteration(
chunk,
shouldReadLogs,
shouldCleanup,
output,
));
}
}
};
try {
@@ -52,11 +66,81 @@ class KubernetesTaskRunner {
true,
callback,
);
// Reset failure count on success
kubectlLogsFailedCount = 0;
} catch (error: any) {
kubectlLogsFailedCount++;
await new Promise((resolve) => setTimeout(resolve, 3000));
const continueStreaming = await KubernetesPods.IsPodRunning(podName, namespace, kubeClient);
CloudRunnerLogger.log(`K8s logging error ${error} ${continueStreaming}`);
// Filter out kubectl error messages from the error output
const errorMessage = error?.message || error?.toString() || '';
const isKubectlLogsError = errorMessage.includes('unable to retrieve container logs for containerd://') ||
errorMessage.toLowerCase().includes('unable to retrieve container logs');
if (isKubectlLogsError) {
CloudRunnerLogger.log(`Kubectl unable to retrieve logs, attempt ${kubectlLogsFailedCount}/${maxKubectlLogsFailures}`);
// If kubectl logs has failed multiple times, try reading the log file directly from the pod
// This works even if the pod is terminated, as long as it hasn't been deleted
if (kubectlLogsFailedCount >= maxKubectlLogsFailures && !isRunning && !continueStreaming) {
CloudRunnerLogger.log(`Attempting to read log file directly from pod as fallback...`);
try {
// Try to read the log file from the pod
// Use kubectl exec for running pods, or try to access via PVC if pod is terminated
let logFileContent = '';
if (isRunning) {
// Pod is still running, try exec
logFileContent = await CloudRunnerSystem.Run(
`kubectl exec ${podName} -c ${containerName} -n ${namespace} -- cat /home/job-log.txt 2>/dev/null || echo ""`,
true,
true,
);
} else {
// Pod is terminated, try to create a temporary pod to read from the PVC
// First, check if we can still access the pod's filesystem
CloudRunnerLogger.log(`Pod is terminated, attempting to read log file via temporary pod...`);
// For terminated pods, we might not be able to exec, so we'll skip this fallback
// and rely on the log file being written to the PVC (if mounted)
CloudRunnerLogger.logWarning(`Cannot read log file from terminated pod via exec`);
}
if (logFileContent && logFileContent.trim()) {
CloudRunnerLogger.log(`Successfully read log file from pod (${logFileContent.length} chars)`);
// Process the log file content line by line
for (const line of logFileContent.split(`\n`)) {
const lowerLine = line.toLowerCase();
if (line.trim() && !lowerLine.includes('unable to retrieve container logs')) {
({ shouldReadLogs, shouldCleanup, output } = FollowLogStreamService.handleIteration(
line,
shouldReadLogs,
shouldCleanup,
output,
));
}
}
// Check if we got the end of transmission marker
if (FollowLogStreamService.DidReceiveEndOfTransmission) {
CloudRunnerLogger.log('end of log stream (from log file)');
break;
}
} else {
CloudRunnerLogger.logWarning(`Log file read returned empty content, continuing with available logs`);
// If we can't read the log file, break out of the loop to return whatever logs we have
// This prevents infinite retries when kubectl logs consistently fails
break;
}
} catch (execError: any) {
CloudRunnerLogger.logWarning(`Failed to read log file from pod: ${execError}`);
// If we've exhausted all options, break to return whatever logs we have
break;
}
}
}
// If pod is not running and we tried --previous but it failed, try without --previous
if (!isRunning && !continueStreaming && error?.message?.includes('previous terminated container')) {
CloudRunnerLogger.log(`Previous container not found, trying current container logs...`);
@@ -124,7 +208,25 @@ class KubernetesTaskRunner {
}
}
return output;
// Filter out kubectl error messages from the final output
// These errors can be added via stderr even when kubectl fails
// We filter them out so they don't pollute the BuildResults
const lines = output.split('\n');
const filteredLines = lines.filter(
(line) => !line.toLowerCase().includes('unable to retrieve container logs'),
);
const filteredOutput = filteredLines.join('\n');
// Log if we filtered out significant content
const originalLineCount = lines.length;
const filteredLineCount = filteredLines.length;
if (originalLineCount > filteredLineCount) {
CloudRunnerLogger.log(
`Filtered out ${originalLineCount - filteredLineCount} kubectl error message(s) from output`,
);
}
return filteredOutput;
}
static async watchUntilPodRunning(kubeClient: CoreV1Api, podName: string, namespace: string) {
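
Reduced to its shape, the retry logic above counts consecutive `kubectl logs` failures and, after three of them, falls back to reading the log file the job writes inside the container. The sketch below is a simplified, synchronous rendering of that flow; runShell stands in for CloudRunnerSystem.Run, and the /home/job-log.txt path is taken from the diff. The real change additionally checks whether the pod is still running and filters noise lines, which this sketch omits.

```typescript
import { execSync } from 'child_process';

// Stand-in for CloudRunnerSystem.Run; assumed for this sketch only.
const runShell = (cmd: string): string => execSync(cmd, { encoding: 'utf8' });

const MAX_LOG_FAILURES = 3;

// Try streaming logs the normal way; after repeated failures, fall back to
// reading the log file the job writes inside the container.
function fetchJobLogs(podName: string, containerName: string, namespace: string): string {
  for (let failures = 0; ; ) {
    try {
      // Primary path: kubectl logs with an explicit container name, which
      // avoids the ambiguous containerd:// errors the diff filters out.
      return runShell(`kubectl logs ${podName} -c ${containerName} -n ${namespace}`);
    } catch {
      failures += 1;
      if (failures < MAX_LOG_FAILURES) continue;
      // Fallback: only works while the pod can still be exec'd into.
      return runShell(
        `kubectl exec ${podName} -c ${containerName} -n ${namespace} -- cat /home/job-log.txt`,
      );
    }
  }
}

// Usage (requires kubectl access to the cluster):
// console.log(fetchJobLogs('my-pod', 'main', 'default'));
```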


@@ -103,14 +103,18 @@ commands: echo "test"`;
CloudRunnerLogger.log(`run 2 succeeded`);
const buildContainsBuildSucceeded = results2.includes('Build succeeded');
const buildContainsPreBuildHookRunMessage = results2.includes('before-build hook test!');
const buildContainsPreBuildHookRunMessage = results2.includes('before-build hook test!!');
const buildContainsPostBuildHookRunMessage = results2.includes('after-build hook test!');
const buildContainsPreBuildStepMessage = results2.includes('before-build step test!');
const buildContainsPostBuildStepMessage = results2.includes('after-build step test!');
// Skip "Build succeeded" check for local-docker when using ubuntu image (Unity doesn't run)
if (CloudRunnerOptions.providerStrategy !== 'local' && CloudRunnerOptions.providerStrategy !== 'local-docker') {
// Skip "Build succeeded" check for local-docker and aws when using ubuntu image (Unity doesn't run)
if (
CloudRunnerOptions.providerStrategy !== 'local' &&
CloudRunnerOptions.providerStrategy !== 'local-docker' &&
CloudRunnerOptions.providerStrategy !== 'aws'
) {
expect(buildContainsBuildSucceeded).toBeTruthy();
}
expect(buildContainsPreBuildHookRunMessage).toBeTruthy();
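
The test now also skips the Unity "Build succeeded" assertion for the aws provider strategy, alongside local and local-docker, because the ubuntu test image never runs Unity there. A minimal sketch of that pattern, assuming Jest's global expect and an illustrative helper name:

```typescript
// Providers where the ubuntu image is used and Unity itself never runs,
// taken from the condition above; the helper name is illustrative.
const providersWithoutUnityBuild = new Set(['local', 'local-docker', 'aws']);

function expectBuildSucceededUnlessSkipped(results: string, providerStrategy: string): void {
  if (providersWithoutUnityBuild.has(providerStrategy)) {
    return; // Unity does not run, so the success marker never appears in the logs
  }
  expect(results.includes('Build succeeded')).toBeTruthy();
}

// Usage inside the test:
// expectBuildSucceededUnlessSkipped(results2, CloudRunnerOptions.providerStrategy);
```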