pr feedback

pull/767/head
Frostebite 2025-12-06 23:00:43 +00:00
parent c61c9f8373
commit a99defafbc
18 changed files with 190 additions and 152 deletions

125
dist/index.js vendored
View File

@ -3378,7 +3378,7 @@ class TaskService {
Bucket: cloud_runner_1.default.buildParameters.awsStackName,
};
const results = await s3.send(new client_s3_1.ListObjectsV2Command(listRequest));
return (results.Contents || []).map((obj) => ({ Key: obj.Key || '' }));
return (results.Contents || []).map((object) => ({ Key: object.Key || '' }));
}
}
exports.TaskService = TaskService;
@ -3825,7 +3825,7 @@ const command_hook_service_1 = __nccwpck_require__(96159);
const cloud_runner_1 = __importDefault(__nccwpck_require__(79144));
class KubernetesJobSpecFactory {
static getJobSpec(command, image, mountdir, workingDirectory, environment, secrets, buildGuid, buildParameters, secretName, pvcName, jobName, k8s, containerName, ip = '') {
const endpointEnvNames = new Set([
const endpointEnvironmentNames = new Set([
'AWS_S3_ENDPOINT',
'AWS_ENDPOINT',
'AWS_CLOUD_FORMATION_ENDPOINT',
@ -3838,7 +3838,7 @@ class KubernetesJobSpecFactory {
const adjustedEnvironment = environment.map((x) => {
let value = x.value;
if (typeof value === 'string' &&
endpointEnvNames.has(x.name) &&
endpointEnvironmentNames.has(x.name) &&
(value.startsWith('http://localhost') || value.startsWith('http://127.0.0.1'))) {
// Replace localhost with host.k3d.internal so pods can access host services
// This simulates accessing external services (like real AWS S3)
@ -3979,17 +3979,16 @@ class KubernetesPods {
type: x.type || '',
}));
const errorDetails = [];
errorDetails.push(`Pod: ${podName}`);
errorDetails.push(`Phase: ${phase}`);
errorDetails.push(`Pod: ${podName}`, `Phase: ${phase}`);
if (conditions.length > 0) {
errorDetails.push(`Conditions: ${JSON.stringify(conditions.map((c) => ({ type: c.type, status: c.status, reason: c.reason, message: c.message })), undefined, 2)}`);
}
let containerExitCode;
let containerSucceeded = false;
if (containerStatuses.length > 0) {
containerStatuses.forEach((cs, idx) => {
for (const [index, cs] of containerStatuses.entries()) {
if (cs.state?.waiting) {
errorDetails.push(`Container ${idx} (${cs.name}) waiting: ${cs.state.waiting.reason} - ${cs.state.waiting.message || ''}`);
errorDetails.push(`Container ${index} (${cs.name}) waiting: ${cs.state.waiting.reason} - ${cs.state.waiting.message || ''}`);
}
if (cs.state?.terminated) {
const exitCode = cs.state.terminated.exitCode;
@ -3997,9 +3996,9 @@ class KubernetesPods {
if (exitCode === 0) {
containerSucceeded = true;
}
errorDetails.push(`Container ${idx} (${cs.name}) terminated: ${cs.state.terminated.reason} - ${cs.state.terminated.message || ''} (exit code: ${exitCode})`);
errorDetails.push(`Container ${index} (${cs.name}) terminated: ${cs.state.terminated.reason} - ${cs.state.terminated.message || ''} (exit code: ${exitCode})`);
}
});
}
}
if (events.length > 0) {
errorDetails.push(`Recent events: ${JSON.stringify(events.slice(-5), undefined, 2)}`);
@ -4027,7 +4026,7 @@ class KubernetesPods {
if (wasKilled && hasPreStopHookFailure && (containerExitCode === undefined || !containerSucceeded)) {
cloud_runner_logger_1.default.log(`Pod ${podName} was killed with PreStopHook failure. Waiting for container status to determine if container succeeded...`);
// Wait a bit for container status to become available (up to 30 seconds)
for (let i = 0; i < 6; i++) {
for (let index = 0; index < 6; index++) {
await new Promise((resolve) => setTimeout(resolve, 5000));
try {
const updatedPod = (await kubeClient.listNamespacedPod(namespace)).body.items.find((x) => podName === x.metadata?.name);
@ -5306,10 +5305,10 @@ class Caching {
// Parse disk usage percentage (e.g., "72G 72G 196M 100%")
const usageMatch = diskCheckOutput.match(/(\d+)%/);
if (usageMatch) {
diskUsagePercent = parseInt(usageMatch[1], 10);
diskUsagePercent = Number.parseInt(usageMatch[1], 10);
}
}
catch (error) {
catch {
// Ignore disk check errors
}
// If disk usage is high (>90%), proactively clean up old cache files
@ -5335,7 +5334,7 @@ class Caching {
try {
await cloud_runner_system_1.CloudRunnerSystem.Run(`rm -f ${cacheArtifactName}.tar${compressionSuffix} 2>/dev/null || true`);
}
catch (error) {
catch {
// Ignore cleanup errors
}
try {
@ -5529,6 +5528,10 @@ class RemoteClient {
const logFile = cli_1.Cli.options['logFile'];
process.stdin.resume();
process.stdin.setEncoding('utf8');
// For K8s, ensure stdout is unbuffered so messages are captured immediately
if (cloud_runner_options_1.default.providerStrategy === 'k8s') {
process.stdout.setDefaultEncoding('utf8');
}
let lingeringLine = '';
process.stdin.on('data', (chunk) => {
const lines = chunk.toString().split('\n');
@ -5538,7 +5541,8 @@ class RemoteClient {
// For K8s, write to both log file and stdout so kubectl logs can capture it
if (cloud_runner_options_1.default.providerStrategy === 'k8s') {
node_fs_1.default.appendFileSync(logFile, element);
// Write to stdout so kubectl logs can capture it
// Write to stdout so kubectl logs can capture it - ensure newline is included
// Stdout flushes automatically on newline, so no explicit flush needed
process.stdout.write(`${element}\n`);
cloud_runner_logger_1.default.log(element);
}
@ -5551,6 +5555,7 @@ class RemoteClient {
if (cloud_runner_options_1.default.providerStrategy === 'k8s') {
if (lingeringLine) {
node_fs_1.default.appendFileSync(logFile, lingeringLine);
// Stdout flushes automatically on newline
process.stdout.write(`${lingeringLine}\n`);
}
cloud_runner_logger_1.default.log(lingeringLine);
@ -5569,7 +5574,13 @@ class RemoteClient {
try {
const libraryFolderHost = cloud_runner_folders_1.CloudRunnerFolders.libraryFolderAbsolute;
if (node_fs_1.default.existsSync(libraryFolderHost)) {
const libraryEntries = await node_fs_1.default.promises.readdir(libraryFolderHost).catch(() => []);
let libraryEntries = [];
try {
libraryEntries = await node_fs_1.default.promises.readdir(libraryFolderHost);
}
catch {
libraryEntries = [];
}
if (libraryEntries.length > 0) {
await caching_1.Caching.PushToCache(cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(`${cloud_runner_folders_1.CloudRunnerFolders.cacheFolderForCacheKeyFull}/Library`), cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(cloud_runner_folders_1.CloudRunnerFolders.libraryFolderAbsolute), `lib-${cloud_runner_1.default.buildParameters.buildGuid}`);
}
@ -5588,7 +5599,13 @@ class RemoteClient {
try {
const buildFolderHost = cloud_runner_folders_1.CloudRunnerFolders.projectBuildFolderAbsolute;
if (node_fs_1.default.existsSync(buildFolderHost)) {
const buildEntries = await node_fs_1.default.promises.readdir(buildFolderHost).catch(() => []);
let buildEntries = [];
try {
buildEntries = await node_fs_1.default.promises.readdir(buildFolderHost);
}
catch {
buildEntries = [];
}
if (buildEntries.length > 0) {
await caching_1.Caching.PushToCache(cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(`${cloud_runner_folders_1.CloudRunnerFolders.cacheFolderForCacheKeyFull}/build`), cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(cloud_runner_folders_1.CloudRunnerFolders.projectBuildFolderAbsolute), `build-${cloud_runner_1.default.buildParameters.buildGuid}`);
}
@ -5619,6 +5636,7 @@ class RemoteClient {
// For K8s, kubectl logs reads from stdout/stderr, so we must write to stdout
const successMessage = `Activation successful`;
// Write to stdout first so kubectl logs can capture it
// Stdout flushes automatically on newline
process.stdout.write(`${successMessage}\n`);
// Also log via CloudRunnerLogger for GitHub Actions
cloud_runner_logger_1.default.log(successMessage);
@ -5706,22 +5724,22 @@ class RemoteClient {
try {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git checkout ${targetSha}`);
}
catch (_error) {
catch {
try {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git fetch origin ${targetSha} || true`);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git checkout ${targetSha}`);
}
catch (_error2) {
catch (error) {
remote_client_logger_1.RemoteClientLogger.logWarning(`Falling back to branch checkout; SHA not found: ${targetSha}`);
try {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git checkout ${targetBranch}`);
}
catch (_error3) {
catch {
if ((targetBranch || '').startsWith('pull/')) {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git checkout origin/${targetBranch}`);
}
else {
throw _error2;
throw error;
}
}
}
@ -5771,7 +5789,7 @@ class RemoteClient {
remote_client_logger_1.RemoteClientLogger.log(`Pulled LFS files without explicit token configuration`);
return;
}
catch (_error) {
catch {
/* no-op: best-effort git lfs pull without tokens may fail */
void 0;
}
@ -5835,17 +5853,17 @@ class RemoteClient {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git reset --hard "${sha}"`);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git checkout ${sha}`);
}
catch (_error) {
catch {
remote_client_logger_1.RemoteClientLogger.logWarning(`Retained workspace: SHA not found, falling back to branch ${branch}`);
try {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git checkout ${branch}`);
}
catch (_error2) {
catch (error) {
if ((branch || '').startsWith('pull/')) {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git checkout origin/${branch}`);
}
else {
throw _error2;
throw error;
}
}
}
@ -6278,11 +6296,11 @@ class SharedWorkspaceLocking {
}
catch {
const region = input_1.default.region || process.env.AWS_REGION || process.env.AWS_DEFAULT_REGION || 'us-east-1';
const createParams = { Bucket: bucket };
const createParameters = { Bucket: bucket };
if (region && region !== 'us-east-1') {
createParams.CreateBucketConfiguration = { LocationConstraint: region };
createParameters.CreateBucketConfiguration = { LocationConstraint: region };
}
await SharedWorkspaceLocking.s3.send(new client_s3_1.CreateBucketCommand(createParams));
await SharedWorkspaceLocking.s3.send(new client_s3_1.CreateBucketCommand(createParameters));
}
}
static async listObjects(prefix, bucket = SharedWorkspaceLocking.bucket) {
@ -6453,12 +6471,9 @@ class SharedWorkspaceLocking {
const timestamp = Date.now();
const key = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${timestamp}_${workspace}_workspace`;
await SharedWorkspaceLocking.ensureBucketExists();
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(`touch ${SharedWorkspaceLocking.bucket}/${key}`);
}
else {
await SharedWorkspaceLocking.s3.send(new client_s3_1.PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: new Uint8Array(0) }));
}
await (SharedWorkspaceLocking.useRclone
? SharedWorkspaceLocking.rclone(`touch ${SharedWorkspaceLocking.bucket}/${key}`)
: SharedWorkspaceLocking.s3.send(new client_s3_1.PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: new Uint8Array(0) })));
const workspaces = await SharedWorkspaceLocking.GetAllWorkspaces(buildParametersContext);
cloud_runner_logger_1.default.log(`All workspaces ${workspaces}`);
if (!(await SharedWorkspaceLocking.IsWorkspaceBelowMax(workspace, buildParametersContext))) {
@ -6473,23 +6488,17 @@ class SharedWorkspaceLocking {
const ending = existingWorkspace ? workspace : `${workspace}_workspace`;
const key = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${Date.now()}_${runId}_${ending}_lock`;
await SharedWorkspaceLocking.ensureBucketExists();
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(`touch ${SharedWorkspaceLocking.bucket}/${key}`);
}
else {
await SharedWorkspaceLocking.s3.send(new client_s3_1.PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: new Uint8Array(0) }));
}
await (SharedWorkspaceLocking.useRclone
? SharedWorkspaceLocking.rclone(`touch ${SharedWorkspaceLocking.bucket}/${key}`)
: SharedWorkspaceLocking.s3.send(new client_s3_1.PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: new Uint8Array(0) })));
const hasLock = await SharedWorkspaceLocking.HasWorkspaceLock(workspace, runId, buildParametersContext);
if (hasLock) {
cloud_runner_1.default.lockedWorkspace = workspace;
}
else {
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${key}`);
}
else {
await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key }));
}
await (SharedWorkspaceLocking.useRclone
? SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${key}`)
: SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key })));
}
return hasLock;
}
@ -6501,15 +6510,12 @@ class SharedWorkspaceLocking {
cloud_runner_logger_1.default.log(`Deleting lock ${workspace}/${file}`);
cloud_runner_logger_1.default.log(`rm ${SharedWorkspaceLocking.workspaceRoot}${buildParametersContext.cacheKey}/${file}`);
if (file) {
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`);
}
else {
await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({
await (SharedWorkspaceLocking.useRclone
? SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`)
: SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({
Bucket: SharedWorkspaceLocking.bucket,
Key: `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`,
}));
}
})));
}
return !(await SharedWorkspaceLocking.HasWorkspaceLock(workspace, runId, buildParametersContext));
}
@ -6517,12 +6523,9 @@ class SharedWorkspaceLocking {
const prefix = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/`;
const files = await SharedWorkspaceLocking.listObjects(prefix);
for (const file of files.filter((x) => x.includes(`_${workspace}_`))) {
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${prefix}${file}`);
}
else {
await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: `${prefix}${file}` }));
}
await (SharedWorkspaceLocking.useRclone
? SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${prefix}${file}`)
: SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: `${prefix}${file}` })));
}
}
static async ReadLines(command) {
@ -6610,7 +6613,7 @@ class TaskParameterSerializer {
return TaskParameterSerializer.serializeFromType(cloud_runner_options_1.default);
}
static serializeAwsEnvironmentVariables() {
const awsEnvVars = [
const awsEnvironmentVariables = [
'AWS_ACCESS_KEY_ID',
'AWS_SECRET_ACCESS_KEY',
'AWS_DEFAULT_REGION',
@ -6622,7 +6625,7 @@ class TaskParameterSerializer {
'AWS_KINESIS_ENDPOINT',
'AWS_CLOUD_WATCH_LOGS_ENDPOINT',
];
return awsEnvVars
return awsEnvironmentVariables
.filter((key) => process.env[key] !== undefined)
.map((key) => ({
name: key,
@ -7398,7 +7401,7 @@ class BuildAutomationWorkflow {
BRANCH="${cloud_runner_1.default.buildParameters.cloudRunnerBranch}"
REPO="${cloud_runner_folders_1.CloudRunnerFolders.unityBuilderRepoUrl}"
DEST="${cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(cloud_runner_folders_1.CloudRunnerFolders.builderPathAbsolute)}"
if [ -n "$(git ls-remote --heads \"$REPO\" \"$BRANCH\" 2>/dev/null)" ]; then
if [ -n "$(git ls-remote --heads "$REPO" "$BRANCH" 2>/dev/null)" ]; then
git clone -q -b "$BRANCH" "$REPO" "$DEST"
else
echo "Remote branch $BRANCH not found in $REPO; falling back to a known branch"

2
dist/index.js.map vendored

File diff suppressed because one or more lines are too long

View File

@ -209,6 +209,7 @@ class AWSTaskRunner {
const sleepMs = baseBackoffMs + jitterMs;
CloudRunnerLogger.log(`AWS throttled GetRecords, backing off ${sleepMs}ms (1000 + jitter ${jitterMs})`);
await new Promise((r) => setTimeout(r, sleepMs));
return { iterator, shouldReadLogs, output, shouldCleanup };
}
throw error;

View File

@ -204,6 +204,7 @@ export class TaskService {
const objects = await (
SharedWorkspaceLocking as unknown as { listObjects(prefix: string): Promise<string[]> }
).listObjects('');
return objects.map((x: string) => ({ Key: x }));
}
const s3 = AwsClientFactory.getS3();
@ -213,6 +214,6 @@ export class TaskService {
const results = await s3.send(new ListObjectsV2Command(listRequest));
return (results.Contents || []).map((obj) => ({ Key: obj.Key || '' }));
return (results.Contents || []).map((object) => ({ Key: object.Key || '' }));
}
}

View File

@ -22,7 +22,7 @@ class KubernetesJobSpecFactory {
containerName: string,
ip: string = '',
) {
const endpointEnvNames = new Set([
const endpointEnvironmentNames = new Set([
'AWS_S3_ENDPOINT',
'AWS_ENDPOINT',
'AWS_CLOUD_FORMATION_ENDPOINT',
@ -36,7 +36,7 @@ class KubernetesJobSpecFactory {
let value = x.value;
if (
typeof value === 'string' &&
endpointEnvNames.has(x.name) &&
endpointEnvironmentNames.has(x.name) &&
(value.startsWith('http://localhost') || value.startsWith('http://127.0.0.1'))
) {
// Replace localhost with host.k3d.internal so pods can access host services
@ -45,6 +45,7 @@ class KubernetesJobSpecFactory {
.replace('http://localhost', 'http://host.k3d.internal')
.replace('http://127.0.0.1', 'http://host.k3d.internal');
}
return { name: x.name, value } as CloudRunnerEnvironmentVariable;
});

View File

@ -19,8 +19,7 @@ class KubernetesPods {
}));
const errorDetails: string[] = [];
errorDetails.push(`Pod: ${podName}`);
errorDetails.push(`Phase: ${phase}`);
errorDetails.push(`Pod: ${podName}`, `Phase: ${phase}`);
if (conditions.length > 0) {
errorDetails.push(
@ -36,10 +35,10 @@ class KubernetesPods {
let containerSucceeded = false;
if (containerStatuses.length > 0) {
containerStatuses.forEach((cs, idx) => {
for (const [index, cs] of containerStatuses.entries()) {
if (cs.state?.waiting) {
errorDetails.push(
`Container ${idx} (${cs.name}) waiting: ${cs.state.waiting.reason} - ${cs.state.waiting.message || ''}`,
`Container ${index} (${cs.name}) waiting: ${cs.state.waiting.reason} - ${cs.state.waiting.message || ''}`,
);
}
if (cs.state?.terminated) {
@ -49,12 +48,12 @@ class KubernetesPods {
containerSucceeded = true;
}
errorDetails.push(
`Container ${idx} (${cs.name}) terminated: ${cs.state.terminated.reason} - ${
`Container ${index} (${cs.name}) terminated: ${cs.state.terminated.reason} - ${
cs.state.terminated.message || ''
} (exit code: ${exitCode})`,
);
}
});
}
}
if (events.length > 0) {
@ -80,6 +79,7 @@ class KubernetesPods {
);
}
CloudRunnerLogger.log(`Pod details: ${errorDetails.join('\n')}`);
// Don't throw error - container succeeded, PreStopHook failure is non-critical
return false; // Pod is not running, but we don't treat it as a failure
}
@ -90,8 +90,9 @@ class KubernetesPods {
CloudRunnerLogger.log(
`Pod ${podName} was killed with PreStopHook failure. Waiting for container status to determine if container succeeded...`,
);
// Wait a bit for container status to become available (up to 30 seconds)
for (let i = 0; i < 6; i++) {
for (let index = 0; index < 6; index++) {
await new Promise((resolve) => setTimeout(resolve, 5000));
try {
const updatedPod = (await kubeClient.listNamespacedPod(namespace)).body.items.find(
@ -105,6 +106,7 @@ class KubernetesPods {
CloudRunnerLogger.logWarning(
`Pod ${podName} container succeeded (exit code 0) after waiting. PreStopHook failure is non-fatal.`,
);
return false; // Pod is not running, but container succeeded
} else {
CloudRunnerLogger.log(
@ -121,12 +123,14 @@ class KubernetesPods {
CloudRunnerLogger.log(`Error while waiting for container status: ${waitError}`);
}
}
// If we still don't have container status after waiting, but only PreStopHook failed,
// be lenient - the container might have succeeded but status wasn't updated
if (containerExitCode === undefined && hasPreStopHookFailure && !hasExceededGracePeriod) {
CloudRunnerLogger.logWarning(
`Pod ${podName} container status not available after waiting, but only PreStopHook failed (no ExceededGracePeriod). Assuming container may have succeeded.`,
);
return false; // Be lenient - PreStopHook failure alone is not fatal
}
CloudRunnerLogger.log(
@ -139,6 +143,7 @@ class KubernetesPods {
CloudRunnerLogger.logWarning(
`Pod ${podName} has PreStopHook failure but no container failure detected. Treating as non-fatal.`,
);
return false; // PreStopHook failure alone is not fatal if container status is unclear
}
@ -149,8 +154,10 @@ class KubernetesPods {
CloudRunnerLogger.logWarning(
`Pod ${podName} was killed (exit code 137 - likely OOM or resource limit) with PreStopHook/grace period issues. This may be a resource constraint issue rather than a build failure.`,
);
// Still log the details but don't fail the test - the build might have succeeded before being killed
CloudRunnerLogger.log(`Pod details: ${errorDetails.join('\n')}`);
return false; // Don't treat system kills as test failures if only PreStopHook issues
}

View File

@ -62,25 +62,30 @@ class KubernetesTaskRunner {
true,
callback,
);
// If we successfully got logs, check for end of transmission
if (FollowLogStreamService.DidReceiveEndOfTransmission) {
CloudRunnerLogger.log('end of log stream');
break;
}
// If we got logs but no end marker, continue trying (might be more logs)
if (retriesAfterFinish < KubernetesTaskRunner.maxRetry) {
retriesAfterFinish++;
continue;
}
// If we've exhausted retries, break
break;
} catch (fallbackError: any) {
CloudRunnerLogger.log(`Fallback log fetch also failed: ${fallbackError}`);
// If both fail, continue retrying if we haven't exhausted retries
if (retriesAfterFinish < KubernetesTaskRunner.maxRetry) {
retriesAfterFinish++;
continue;
}
// Only break if we've exhausted all retries
CloudRunnerLogger.logWarning(
`Could not fetch any container logs after ${KubernetesTaskRunner.maxRetry} retries`,
@ -101,6 +106,7 @@ class KubernetesTaskRunner {
if (!error?.message?.includes('previous terminated container')) {
throw error;
}
// For previous container errors, we've already tried fallback, so just break
CloudRunnerLogger.logWarning(
`Could not fetch previous container logs after retries, but continuing with available logs`,

View File

@ -74,8 +74,10 @@ class LocalCloudRunner implements ProviderInterface {
.split('\n')
.filter((x) => x.trim().length > 0)
.join(' ; ');
// Use shell-quote to properly escape the command string, preventing command injection
const bashWrapped = `bash -lc ${quote([inline])}`;
return await CloudRunnerSystem.Run(bashWrapped);
}

View File

@ -84,12 +84,13 @@ export class Caching {
try {
const diskCheckOutput = await CloudRunnerSystem.Run(`df . 2>/dev/null || df /data 2>/dev/null || true`);
CloudRunnerLogger.log(`Disk space before tar: ${diskCheckOutput}`);
// Parse disk usage percentage (e.g., "72G 72G 196M 100%")
const usageMatch = diskCheckOutput.match(/(\d+)%/);
if (usageMatch) {
diskUsagePercent = parseInt(usageMatch[1], 10);
diskUsagePercent = Number.parseInt(usageMatch[1], 10);
}
} catch (error) {
} catch {
// Ignore disk check errors
}
@ -103,6 +104,7 @@ export class Caching {
await CloudRunnerSystem.Run(
`find ${cacheParent} -name "*.tar*" -type f -mmin +360 -delete 2>/dev/null || true`,
);
// Also try to remove old cache directories
await CloudRunnerSystem.Run(`find ${cacheParent} -type d -empty -delete 2>/dev/null || true`);
CloudRunnerLogger.log(`Cleanup completed. Checking disk space again...`);
@ -117,7 +119,7 @@ export class Caching {
// Clean up any existing incomplete tar files
try {
await CloudRunnerSystem.Run(`rm -f ${cacheArtifactName}.tar${compressionSuffix} 2>/dev/null || true`);
} catch (error) {
} catch {
// Ignore cleanup errors
}
@ -130,6 +132,7 @@ export class Caching {
const errorMessage = error?.message || error?.toString() || '';
if (errorMessage.includes('No space left') || errorMessage.includes('Wrote only')) {
CloudRunnerLogger.log(`Disk space error detected. Attempting aggressive cleanup...`);
// Try to clean up old cache files more aggressively
try {
const cacheParent = path.dirname(cacheFolder);
@ -138,8 +141,10 @@ export class Caching {
await CloudRunnerSystem.Run(
`find ${cacheParent} -name "*.tar*" -type f -mmin +60 -delete 2>/dev/null || true`,
);
// Remove empty cache directories
await CloudRunnerSystem.Run(`find ${cacheParent} -type d -empty -delete 2>/dev/null || true`);
// Also try to clean up the entire cache folder if it's getting too large
const cacheRoot = path.resolve(cacheParent, '..');
if (await fileExists(cacheRoot)) {
@ -149,12 +154,14 @@ export class Caching {
);
}
CloudRunnerLogger.log(`Aggressive cleanup completed. Retrying tar operation...`);
// Retry the tar operation once after cleanup
let retrySucceeded = false;
try {
await CloudRunnerSystem.Run(
`tar -cf ${cacheArtifactName}.tar${compressionSuffix} "${path.basename(sourceFolder)}"`,
);
// If retry succeeds, mark it - we'll continue normally without throwing
retrySucceeded = true;
} catch (retryError: any) {
@ -164,10 +171,12 @@ export class Caching {
}`,
);
}
// If retry succeeded, don't throw the original error - let execution continue after catch block
if (!retrySucceeded) {
throw error;
}
// If we get here, retry succeeded - execution will continue after the catch block
} else {
throw new Error(
@ -189,6 +198,7 @@ export class Caching {
await CloudRunnerSystem.Run(`du ${cacheArtifactName}.tar${compressionSuffix}`);
assert(await fileExists(`${cacheArtifactName}.tar${compressionSuffix}`), 'cache archive exists');
assert(await fileExists(path.basename(sourceFolder)), 'source folder exists');
// Ensure the cache folder directory exists before moving the file
// (it might have been deleted by cleanup if it was empty)
if (!(await fileExists(cacheFolder))) {

View File

@ -31,7 +31,7 @@ export class RemoteClient {
const logFile = Cli.options!['logFile'];
process.stdin.resume();
process.stdin.setEncoding('utf8');
// For K8s, ensure stdout is unbuffered so messages are captured immediately
if (CloudRunnerOptions.providerStrategy === 'k8s') {
process.stdout.setDefaultEncoding('utf8');
@ -49,12 +49,10 @@ export class RemoteClient {
// For K8s, write to both log file and stdout so kubectl logs can capture it
if (CloudRunnerOptions.providerStrategy === 'k8s') {
fs.appendFileSync(logFile, element);
// Write to stdout so kubectl logs can capture it - ensure newline is included
// Stdout flushes automatically on newline, so no explicit flush needed
process.stdout.write(`${element}\n`);
// Force flush if possible
if (typeof process.stdout.flush === 'function') {
process.stdout.flush();
}
CloudRunnerLogger.log(element);
} else {
CloudRunnerLogger.log(element);
@ -66,10 +64,9 @@ export class RemoteClient {
if (CloudRunnerOptions.providerStrategy === 'k8s') {
if (lingeringLine) {
fs.appendFileSync(logFile, lingeringLine);
// Stdout flushes automatically on newline
process.stdout.write(`${lingeringLine}\n`);
if (typeof process.stdout.flush === 'function') {
process.stdout.flush();
}
}
CloudRunnerLogger.log(lingeringLine);
} else {
@ -81,6 +78,7 @@ export class RemoteClient {
@CliFunction(`remote-cli-post-build`, `runs a cloud runner build`)
public static async remoteClientPostBuild(): Promise<string> {
RemoteClientLogger.log(`Running POST build tasks`);
// Ensure cache key is present in logs for assertions
RemoteClientLogger.log(`CACHE_KEY=${CloudRunner.buildParameters.cacheKey}`);
CloudRunnerLogger.log(`${CloudRunner.buildParameters.cacheKey}`);
@ -89,7 +87,12 @@ export class RemoteClient {
try {
const libraryFolderHost = CloudRunnerFolders.libraryFolderAbsolute;
if (fs.existsSync(libraryFolderHost)) {
const libraryEntries = await fs.promises.readdir(libraryFolderHost).catch(() => [] as string[]);
let libraryEntries: string[] = [];
try {
libraryEntries = await fs.promises.readdir(libraryFolderHost);
} catch {
libraryEntries = [];
}
if (libraryEntries.length > 0) {
await Caching.PushToCache(
CloudRunnerFolders.ToLinuxFolder(`${CloudRunnerFolders.cacheFolderForCacheKeyFull}/Library`),
@ -110,7 +113,12 @@ export class RemoteClient {
try {
const buildFolderHost = CloudRunnerFolders.projectBuildFolderAbsolute;
if (fs.existsSync(buildFolderHost)) {
const buildEntries = await fs.promises.readdir(buildFolderHost).catch(() => [] as string[]);
let buildEntries: string[] = [];
try {
buildEntries = await fs.promises.readdir(buildFolderHost);
} catch {
buildEntries = [];
}
if (buildEntries.length > 0) {
await Caching.PushToCache(
CloudRunnerFolders.ToLinuxFolder(`${CloudRunnerFolders.cacheFolderForCacheKeyFull}/build`),
@ -145,14 +153,12 @@ export class RemoteClient {
// Ensure success marker is present in logs for tests
// For K8s, kubectl logs reads from stdout/stderr, so we must write to stdout
// Also ensure it's flushed immediately
const successMessage = `Activation successful`;
// Write to stdout first so kubectl logs can capture it
// Stdout flushes automatically on newline
process.stdout.write(`${successMessage}\n`);
// Force flush stdout to ensure it's captured
if (process.stdout.isTTY === false) {
process.stdout.write(''); // Trigger flush
}
// Also log via CloudRunnerLogger for GitHub Actions
CloudRunnerLogger.log(successMessage);
@ -262,6 +268,7 @@ export class RemoteClient {
await CloudRunnerSystem.Run(`git lfs install`);
assert(fs.existsSync(`.git`), 'git folder exists');
RemoteClientLogger.log(`${CloudRunner.buildParameters.branch}`);
// Ensure refs exist (tags and PR refs)
await CloudRunnerSystem.Run(`git fetch --all --tags || true`);
if ((CloudRunner.buildParameters.branch || '').startsWith('pull/')) {
@ -272,19 +279,19 @@ export class RemoteClient {
if (targetSha) {
try {
await CloudRunnerSystem.Run(`git checkout ${targetSha}`);
} catch (_error) {
} catch {
try {
await CloudRunnerSystem.Run(`git fetch origin ${targetSha} || true`);
await CloudRunnerSystem.Run(`git checkout ${targetSha}`);
} catch (_error2) {
} catch (error) {
RemoteClientLogger.logWarning(`Falling back to branch checkout; SHA not found: ${targetSha}`);
try {
await CloudRunnerSystem.Run(`git checkout ${targetBranch}`);
} catch (_error3) {
} catch {
if ((targetBranch || '').startsWith('pull/')) {
await CloudRunnerSystem.Run(`git checkout origin/${targetBranch}`);
} else {
throw _error2;
throw error;
}
}
}
@ -336,7 +343,7 @@ export class RemoteClient {
RemoteClientLogger.log(`Pulled LFS files without explicit token configuration`);
return;
} catch (_error) {
} catch {
/* no-op: best-effort git lfs pull without tokens may fail */
void 0;
}
@ -411,15 +418,15 @@ export class RemoteClient {
try {
await CloudRunnerSystem.Run(`git reset --hard "${sha}"`);
await CloudRunnerSystem.Run(`git checkout ${sha}`);
} catch (_error) {
} catch {
RemoteClientLogger.logWarning(`Retained workspace: SHA not found, falling back to branch ${branch}`);
try {
await CloudRunnerSystem.Run(`git checkout ${branch}`);
} catch (_error2) {
} catch (error) {
if ((branch || '').startsWith('pull/')) {
await CloudRunnerSystem.Run(`git checkout origin/${branch}`);
} else {
throw _error2;
throw error;
}
}
}

View File

@ -12,8 +12,8 @@ import {
} from '@aws-sdk/client-s3';
import { AwsClientFactory } from '../../providers/aws/aws-client-factory';
import { promisify } from 'node:util';
import { exec as execCb } from 'node:child_process';
const exec = promisify(execCb);
import { exec as execCallback } from 'node:child_process';
const exec = promisify(execCallback);
export class SharedWorkspaceLocking {
private static _s3: S3;
private static get s3(): S3 {
@ -21,6 +21,7 @@ export class SharedWorkspaceLocking {
// Use factory so LocalStack endpoint/path-style settings are honored
SharedWorkspaceLocking._s3 = AwsClientFactory.getS3();
}
return SharedWorkspaceLocking._s3;
}
private static get useRclone() {
@ -28,6 +29,7 @@ export class SharedWorkspaceLocking {
}
private static async rclone(command: string): Promise<string> {
  // Run the given subcommand through the rclone CLI and hand back its stdout as text.
  const result = await exec(`rclone ${command}`);
  return result.stdout.toString();
}
private static get bucket() {
@ -54,17 +56,18 @@ export class SharedWorkspaceLocking {
} catch {
await SharedWorkspaceLocking.rclone(`mkdir ${bucket}`);
}
return;
}
try {
await SharedWorkspaceLocking.s3.send(new HeadBucketCommand({ Bucket: bucket }));
} catch {
const region = Input.region || process.env.AWS_REGION || process.env.AWS_DEFAULT_REGION || 'us-east-1';
const createParams: any = { Bucket: bucket };
const createParameters: any = { Bucket: bucket };
if (region && region !== 'us-east-1') {
createParams.CreateBucketConfiguration = { LocationConstraint: region };
createParameters.CreateBucketConfiguration = { LocationConstraint: region };
}
await SharedWorkspaceLocking.s3.send(new CreateBucketCommand(createParams));
await SharedWorkspaceLocking.s3.send(new CreateBucketCommand(createParameters));
}
}
private static async listObjects(prefix: string, bucket = SharedWorkspaceLocking.bucket): Promise<string[]> {
@ -77,6 +80,7 @@ export class SharedWorkspaceLocking {
try {
const output = await SharedWorkspaceLocking.rclone(`lsjson ${path}`);
const json = JSON.parse(output) as { Name: string; IsDir: boolean }[];
return json.map((e) => (e.IsDir ? `${e.Name}/` : e.Name));
} catch {
return [];
@ -92,6 +96,7 @@ export class SharedWorkspaceLocking {
for (const c of result.Contents || []) {
if (c.Key && c.Key !== prefix) entries.push(c.Key.slice(prefix.length));
}
return entries;
}
public static async GetAllWorkspaces(buildParametersContext: BuildParameters): Promise<string[]> {
@ -296,13 +301,11 @@ export class SharedWorkspaceLocking {
const timestamp = Date.now();
const key = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${timestamp}_${workspace}_workspace`;
await SharedWorkspaceLocking.ensureBucketExists();
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(`touch ${SharedWorkspaceLocking.bucket}/${key}`);
} else {
await SharedWorkspaceLocking.s3.send(
new PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: new Uint8Array(0) }),
);
}
await (SharedWorkspaceLocking.useRclone
? SharedWorkspaceLocking.rclone(`touch ${SharedWorkspaceLocking.bucket}/${key}`)
: SharedWorkspaceLocking.s3.send(
new PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: new Uint8Array(0) }),
));
const workspaces = await SharedWorkspaceLocking.GetAllWorkspaces(buildParametersContext);
@ -328,26 +331,20 @@ export class SharedWorkspaceLocking {
buildParametersContext.cacheKey
}/${Date.now()}_${runId}_${ending}_lock`;
await SharedWorkspaceLocking.ensureBucketExists();
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(`touch ${SharedWorkspaceLocking.bucket}/${key}`);
} else {
await SharedWorkspaceLocking.s3.send(
new PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: new Uint8Array(0) }),
);
}
await (SharedWorkspaceLocking.useRclone
? SharedWorkspaceLocking.rclone(`touch ${SharedWorkspaceLocking.bucket}/${key}`)
: SharedWorkspaceLocking.s3.send(
new PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: new Uint8Array(0) }),
));
const hasLock = await SharedWorkspaceLocking.HasWorkspaceLock(workspace, runId, buildParametersContext);
if (hasLock) {
CloudRunner.lockedWorkspace = workspace;
} else {
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${key}`);
} else {
await SharedWorkspaceLocking.s3.send(
new DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key }),
);
}
await (SharedWorkspaceLocking.useRclone
? SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${key}`)
: SharedWorkspaceLocking.s3.send(new DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key })));
}
return hasLock;
@ -365,18 +362,16 @@ export class SharedWorkspaceLocking {
CloudRunnerLogger.log(`Deleting lock ${workspace}/${file}`);
CloudRunnerLogger.log(`rm ${SharedWorkspaceLocking.workspaceRoot}${buildParametersContext.cacheKey}/${file}`);
if (file) {
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(
`delete ${SharedWorkspaceLocking.bucket}/${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`,
);
} else {
await SharedWorkspaceLocking.s3.send(
new DeleteObjectCommand({
Bucket: SharedWorkspaceLocking.bucket,
Key: `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`,
}),
);
}
await (SharedWorkspaceLocking.useRclone
? SharedWorkspaceLocking.rclone(
`delete ${SharedWorkspaceLocking.bucket}/${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`,
)
: SharedWorkspaceLocking.s3.send(
new DeleteObjectCommand({
Bucket: SharedWorkspaceLocking.bucket,
Key: `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`,
}),
));
}
return !(await SharedWorkspaceLocking.HasWorkspaceLock(workspace, runId, buildParametersContext));
@ -386,13 +381,11 @@ export class SharedWorkspaceLocking {
const prefix = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/`;
const files = await SharedWorkspaceLocking.listObjects(prefix);
for (const file of files.filter((x) => x.includes(`_${workspace}_`))) {
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${prefix}${file}`);
} else {
await SharedWorkspaceLocking.s3.send(
new DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: `${prefix}${file}` }),
);
}
await (SharedWorkspaceLocking.useRclone
? SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${prefix}${file}`)
: SharedWorkspaceLocking.s3.send(
new DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: `${prefix}${file}` }),
));
}
}
@ -401,6 +394,7 @@ export class SharedWorkspaceLocking {
const withoutScheme = path.replace('s3://', '');
const [bucket, ...rest] = withoutScheme.split('/');
const prefix = rest.join('/');
return SharedWorkspaceLocking.listObjects(prefix, bucket);
}
}

View File

@ -33,6 +33,7 @@ export class TaskParameterSerializer {
...TaskParameterSerializer.serializeInput(),
...TaskParameterSerializer.serializeCloudRunnerOptions(),
...CommandHookService.getSecrets(CommandHookService.getHooks(buildParameters.commandHooks)),
// Include AWS environment variables for LocalStack compatibility
...TaskParameterSerializer.serializeAwsEnvironmentVariables(),
]
@ -94,7 +95,7 @@ export class TaskParameterSerializer {
}
private static serializeAwsEnvironmentVariables() {
const awsEnvVars = [
const awsEnvironmentVariables = [
'AWS_ACCESS_KEY_ID',
'AWS_SECRET_ACCESS_KEY',
'AWS_DEFAULT_REGION',
@ -107,7 +108,7 @@ export class TaskParameterSerializer {
'AWS_CLOUD_WATCH_LOGS_ENDPOINT',
];
return awsEnvVars
return awsEnvironmentVariables
.filter((key) => process.env[key] !== undefined)
.map((key) => ({
name: key,

View File

@ -334,6 +334,7 @@ export class ContainerHookService {
if (step.image === undefined) {
step.image = `ubuntu`;
}
// Ensure allowFailure defaults to false if not explicitly set
if (step.allowFailure === undefined) {
step.allowFailure = false;

View File

@ -66,9 +66,9 @@ describe('Cloud Runner pre-built rclone steps', () => {
cloudRunnerDebug: true,
} as unknown as OptionValues;
const buildParams = await CreateParameters(overrides);
const baseImage = new ImageTag(buildParams);
const results = await CloudRunner.run(buildParams, baseImage.toString());
const buildParameters = await CreateParameters(overrides);
const baseImage = new ImageTag(buildParameters);
const results = await CloudRunner.run(buildParameters, baseImage.toString());
CloudRunnerLogger.log(`rclone run succeeded`);
expect(results.BuildSucceeded).toBe(true);

View File

@ -96,6 +96,7 @@ describe('Cloud Runner Retain Workspace', () => {
CloudRunnerLogger.log(`Failed to cleanup workspace: ${error.message}`);
}
}
// Clean up cache files to prevent disk space issues
const cachePath = `./cloud-runner-cache`;
if (fs.existsSync(cachePath)) {

View File

@ -1,6 +1,9 @@
import { GitHubUrlInfo } from '../../providers/provider-url-parser';
import * as fs from 'fs';
// Import the mocked ProviderGitManager
import { ProviderGitManager } from '../../providers/provider-git-manager';
// Mock @actions/core to fix fs.promises compatibility issue
jest.mock('@actions/core', () => ({
info: jest.fn(),
@ -15,6 +18,7 @@ jest.mock('fs');
const mockExecAsync = jest.fn();
jest.mock('../../providers/provider-git-manager', () => {
const originalModule = jest.requireActual('../../providers/provider-git-manager');
return {
...originalModule,
ProviderGitManager: {
@ -27,9 +31,6 @@ jest.mock('../../providers/provider-git-manager', () => {
});
const mockFs = fs as jest.Mocked<typeof fs>;
// Import the mocked ProviderGitManager
import { ProviderGitManager } from '../../providers/provider-git-manager';
const mockProviderGitManager = ProviderGitManager as jest.Mocked<typeof ProviderGitManager>;
describe('ProviderGitManager', () => {

View File

@ -95,7 +95,7 @@ export class BuildAutomationWorkflow implements WorkflowInterface {
BRANCH="${CloudRunner.buildParameters.cloudRunnerBranch}"
REPO="${CloudRunnerFolders.unityBuilderRepoUrl}"
DEST="${CloudRunnerFolders.ToLinuxFolder(CloudRunnerFolders.builderPathAbsolute)}"
if [ -n "$(git ls-remote --heads \"$REPO\" \"$BRANCH\" 2>/dev/null)" ]; then
if [ -n "$(git ls-remote --heads "$REPO" "$BRANCH" 2>/dev/null)" ]; then
git clone -q -b "$BRANCH" "$REPO" "$DEST"
else
echo "Remote branch $BRANCH not found in $REPO; falling back to a known branch"
@ -185,6 +185,7 @@ echo "CACHE_KEY=$CACHE_KEY"`;
cp -a "/data/cache/$CACHE_KEY/build/." "$GITHUB_WORKSPACE/cloud-runner-cache/cache/$CACHE_KEY/build/" || true
echo "end of cloud runner job"`;
}
// prettier-ignore
return `
mkdir -p ${`${CloudRunnerFolders.ToLinuxFolder(CloudRunnerFolders.projectBuildFolderAbsolute)}/build`}

View File

@ -53,6 +53,7 @@ export class CustomWorkflow {
error?.message || error
}`,
);
// Continue to next step
} else {
CloudRunnerLogger.log(