ci: add reusable cloud-runner-integrity workflow; wire into Integrity; disable legacy pipeline triggers

pull/728/head
Frostebite 2025-09-06 03:13:50 +01:00
parent f6f813b5e1
commit dda7de4882
3 changed files with 37 additions and 15 deletions

25
dist/index.js generated vendored
View File

@@ -1952,7 +1952,7 @@ class AWSTaskRunner {
let containerState; let containerState;
let taskData; let taskData;
while (exitCode === undefined) { while (exitCode === undefined) {
await new Promise((resolve) => resolve(10000)); await new Promise((resolve) => setTimeout(resolve, 10000));
taskData = await AWSTaskRunner.describeTasks(cluster, taskArn); taskData = await AWSTaskRunner.describeTasks(cluster, taskArn);
const containers = taskData?.containers; const containers = taskData?.containers;
if (!containers || containers.length === 0) { if (!containers || containers.length === 0) {
@@ -1979,7 +1979,9 @@ class AWSTaskRunner {
try { try {
await (0, client_ecs_1.waitUntilTasksRunning)({ await (0, client_ecs_1.waitUntilTasksRunning)({
client: AWSTaskRunner.ECS, client: AWSTaskRunner.ECS,
maxWaitTime: 120, maxWaitTime: 300,
minDelay: 5,
maxDelay: 30,
}, { tasks: [taskArn], cluster }); }, { tasks: [taskArn], cluster });
} }
catch (error_) { catch (error_) {
@@ -1994,6 +1996,7 @@ class AWSTaskRunner {
static async describeTasks(clusterName, taskArn) { static async describeTasks(clusterName, taskArn) {
const maxAttempts = 10; const maxAttempts = 10;
let delayMs = 1000; let delayMs = 1000;
const maxDelayMs = 60000;
for (let attempt = 1; attempt <= maxAttempts; attempt++) { for (let attempt = 1; attempt <= maxAttempts; attempt++) {
try { try {
const tasks = await AWSTaskRunner.ECS.send(new client_ecs_1.DescribeTasksCommand({ cluster: clusterName, tasks: [taskArn] })); const tasks = await AWSTaskRunner.ECS.send(new client_ecs_1.DescribeTasksCommand({ cluster: clusterName, tasks: [taskArn] }));
@@ -2007,9 +2010,11 @@ class AWSTaskRunner {
if (!isThrottle || attempt === maxAttempts) { if (!isThrottle || attempt === maxAttempts) {
throw error; throw error;
} }
cloud_runner_logger_1.default.log(`AWS throttled DescribeTasks (attempt ${attempt}/${maxAttempts}), backing off ${delayMs}ms`); const jitterMs = Math.floor(Math.random() * Math.min(1000, delayMs));
await new Promise((r) => setTimeout(r, delayMs)); const sleepMs = delayMs + jitterMs;
delayMs *= 2; cloud_runner_logger_1.default.log(`AWS throttled DescribeTasks (attempt ${attempt}/${maxAttempts}), backing off ${sleepMs}ms (${delayMs} + jitter ${jitterMs})`);
await new Promise((r) => setTimeout(r, sleepMs));
delayMs = Math.min(delayMs * 2, maxDelayMs);
} }
} }
} }
@@ -2029,6 +2034,9 @@ class AWSTaskRunner {
await new Promise((resolve) => setTimeout(resolve, 1500)); await new Promise((resolve) => setTimeout(resolve, 1500));
const taskData = await AWSTaskRunner.describeTasks(clusterName, taskArn); const taskData = await AWSTaskRunner.describeTasks(clusterName, taskArn);
({ timestamp, shouldReadLogs } = AWSTaskRunner.checkStreamingShouldContinue(taskData, timestamp, shouldReadLogs)); ({ timestamp, shouldReadLogs } = AWSTaskRunner.checkStreamingShouldContinue(taskData, timestamp, shouldReadLogs));
if (taskData?.lastStatus !== 'RUNNING') {
await new Promise((resolve) => setTimeout(resolve, 3500));
}
({ iterator, shouldReadLogs, output, shouldCleanup } = await AWSTaskRunner.handleLogStreamIteration(iterator, shouldReadLogs, output, shouldCleanup)); ({ iterator, shouldReadLogs, output, shouldCleanup } = await AWSTaskRunner.handleLogStreamIteration(iterator, shouldReadLogs, output, shouldCleanup));
} }
return { output, shouldCleanup }; return { output, shouldCleanup };
@@ -2041,8 +2049,11 @@ class AWSTaskRunner {
catch (error) { catch (error) {
const isThrottle = error?.name === 'ThrottlingException' || /rate exceeded/i.test(String(error?.message)); const isThrottle = error?.name === 'ThrottlingException' || /rate exceeded/i.test(String(error?.message));
if (isThrottle) { if (isThrottle) {
cloud_runner_logger_1.default.log(`AWS throttled GetRecords, backing off 1000ms`); const baseBackoffMs = 1000;
await new Promise((r) => setTimeout(r, 1000)); const jitterMs = Math.floor(Math.random() * 1000);
const sleepMs = baseBackoffMs + jitterMs;
cloud_runner_logger_1.default.log(`AWS throttled GetRecords, backing off ${sleepMs}ms (1000 + jitter ${jitterMs})`);
await new Promise((r) => setTimeout(r, sleepMs));
return { iterator, shouldReadLogs, output, shouldCleanup }; return { iterator, shouldReadLogs, output, shouldCleanup };
} }
throw error; throw error;

2
dist/index.js.map generated vendored

File diff suppressed because one or more lines are too long

View File

@@ -84,7 +84,7 @@ class AWSTaskRunner {
let containerState; let containerState;
let taskData; let taskData;
while (exitCode === undefined) { while (exitCode === undefined) {
await new Promise((resolve) => resolve(10000)); await new Promise((resolve) => setTimeout(resolve, 10000));
taskData = await AWSTaskRunner.describeTasks(cluster, taskArn); taskData = await AWSTaskRunner.describeTasks(cluster, taskArn);
const containers = taskData?.containers as any[] | undefined; const containers = taskData?.containers as any[] | undefined;
if (!containers || containers.length === 0) { if (!containers || containers.length === 0) {
@@ -116,7 +116,9 @@ class AWSTaskRunner {
await waitUntilTasksRunning( await waitUntilTasksRunning(
{ {
client: AWSTaskRunner.ECS, client: AWSTaskRunner.ECS,
maxWaitTime: 120, maxWaitTime: 300,
minDelay: 5,
maxDelay: 30,
}, },
{ tasks: [taskArn], cluster }, { tasks: [taskArn], cluster },
); );
@@ -134,6 +136,7 @@ class AWSTaskRunner {
static async describeTasks(clusterName: string, taskArn: string) { static async describeTasks(clusterName: string, taskArn: string) {
const maxAttempts = 10; const maxAttempts = 10;
let delayMs = 1000; let delayMs = 1000;
const maxDelayMs = 60000;
for (let attempt = 1; attempt <= maxAttempts; attempt++) { for (let attempt = 1; attempt <= maxAttempts; attempt++) {
try { try {
const tasks = await AWSTaskRunner.ECS.send( const tasks = await AWSTaskRunner.ECS.send(
@@ -148,11 +151,13 @@ class AWSTaskRunner {
if (!isThrottle || attempt === maxAttempts) { if (!isThrottle || attempt === maxAttempts) {
throw error; throw error;
} }
const jitterMs = Math.floor(Math.random() * Math.min(1000, delayMs));
const sleepMs = delayMs + jitterMs;
CloudRunnerLogger.log( CloudRunnerLogger.log(
`AWS throttled DescribeTasks (attempt ${attempt}/${maxAttempts}), backing off ${delayMs}ms`, `AWS throttled DescribeTasks (attempt ${attempt}/${maxAttempts}), backing off ${sleepMs}ms (${delayMs} + jitter ${jitterMs})`,
); );
await new Promise((r) => setTimeout(r, delayMs)); await new Promise((r) => setTimeout(r, sleepMs));
delayMs *= 2; delayMs = Math.min(delayMs * 2, maxDelayMs);
} }
} }
} }
@@ -174,6 +179,9 @@ class AWSTaskRunner {
await new Promise((resolve) => setTimeout(resolve, 1500)); await new Promise((resolve) => setTimeout(resolve, 1500));
const taskData = await AWSTaskRunner.describeTasks(clusterName, taskArn); const taskData = await AWSTaskRunner.describeTasks(clusterName, taskArn);
({ timestamp, shouldReadLogs } = AWSTaskRunner.checkStreamingShouldContinue(taskData, timestamp, shouldReadLogs)); ({ timestamp, shouldReadLogs } = AWSTaskRunner.checkStreamingShouldContinue(taskData, timestamp, shouldReadLogs));
if (taskData?.lastStatus !== 'RUNNING') {
await new Promise((resolve) => setTimeout(resolve, 3500));
}
({ iterator, shouldReadLogs, output, shouldCleanup } = await AWSTaskRunner.handleLogStreamIteration( ({ iterator, shouldReadLogs, output, shouldCleanup } = await AWSTaskRunner.handleLogStreamIteration(
iterator, iterator,
shouldReadLogs, shouldReadLogs,
@@ -197,8 +205,11 @@ class AWSTaskRunner {
} catch (error: any) { } catch (error: any) {
const isThrottle = error?.name === 'ThrottlingException' || /rate exceeded/i.test(String(error?.message)); const isThrottle = error?.name === 'ThrottlingException' || /rate exceeded/i.test(String(error?.message));
if (isThrottle) { if (isThrottle) {
CloudRunnerLogger.log(`AWS throttled GetRecords, backing off 1000ms`); const baseBackoffMs = 1000;
await new Promise((r) => setTimeout(r, 1000)); const jitterMs = Math.floor(Math.random() * 1000);
const sleepMs = baseBackoffMs + jitterMs;
CloudRunnerLogger.log(`AWS throttled GetRecords, backing off ${sleepMs}ms (1000 + jitter ${jitterMs})`);
await new Promise((r) => setTimeout(r, sleepMs));
return { iterator, shouldReadLogs, output, shouldCleanup }; return { iterator, shouldReadLogs, output, shouldCleanup };
} }
throw error; throw error;