github ephemeral pipeline support
parent 9ac98b1f51
commit 3987a5687b
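
The change threads a new `shouldCleanup` flag through the AWS task runner: `runTask` and the log-streaming helpers (`streamLogsUntilTaskStops`, `handleLogStreamIteration`, `logRecords`) now return it alongside `output`, a log line containing `cloud runner stop watching job` clears it (while also setting a `cloud runner stop watching` output and stopping log streaming), and `AWSBuildEnvironment` only tears down its CloudFormation stacks when the flag is still true. A `library-found` output is also set to `false` when the Library cache is missing. The sketch below summarises that control flow; `readLine`, `cleanup`, and `runTaskAndMaybeCleanup` are illustrative stand-ins, not code from this commit.

```ts
// Minimal sketch of the new control flow (assumptions: readLine stands in for the
// CloudWatch -> Kinesis log streaming, cleanup for AWSBuildEnvironment.cleanupResources).
type StreamResult = { output: string; shouldCleanup: boolean };

async function streamLogsUntilTaskStops(readLine: () => Promise<string | undefined>): Promise<StreamResult> {
  let output = '';
  let shouldCleanup = true; // default: tear the stack down, as before this change
  for (let line = await readLine(); line !== undefined; line = await readLine()) {
    if (line.includes('cloud runner stop watching job')) {
      // The job asked the runner to stop watching; keep the task's
      // CloudFormation resources alive for the ephemeral pipeline.
      shouldCleanup = false;
      break;
    }
    output += line;
  }
  return { output, shouldCleanup };
}

async function runTaskAndMaybeCleanup(
  readLine: () => Promise<string | undefined>,
  cleanup: () => Promise<void>,
): Promise<string> {
  try {
    const { output, shouldCleanup } = await streamLogsUntilTaskStops(readLine);
    if (shouldCleanup) {
      await cleanup(); // previously ran unconditionally in a finally block
    }
    return output;
  } catch (error) {
    await cleanup(); // failures still tear the stack down
    throw error;
  }
}
```
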
@@ -1247,13 +1247,13 @@ class AWSTaskRunner {
             cloud_runner_logger_1.default.log('Cloud runner job is starting');
             yield AWSTaskRunner.waitUntilTaskRunning(ECS, taskArn, cluster);
             cloud_runner_logger_1.default.log(`Cloud runner job status is running ${(_p = (yield AWSTaskRunner.describeTasks(ECS, cluster, taskArn))) === null || _p === void 0 ? void 0 : _p.lastStatus}`);
-            const output = yield this.streamLogsUntilTaskStops(ECS, CF, taskDef, cluster, taskArn, streamName);
+            const { output, shouldCleanup } = yield this.streamLogsUntilTaskStops(ECS, CF, taskDef, cluster, taskArn, streamName);
             const taskData = yield AWSTaskRunner.describeTasks(ECS, cluster, taskArn);
             const exitCode = (_q = taskData.containers) === null || _q === void 0 ? void 0 : _q[0].exitCode;
             const wasSuccessful = exitCode === 0 || (exitCode === undefined && taskData.lastStatus === 'RUNNING');
             if (wasSuccessful) {
                 cloud_runner_logger_1.default.log(`Cloud runner job has finished successfully`);
-                return output;
+                return { output, shouldCleanup };
             }
             else {
                 if (taskData.stoppedReason === 'Essential container in task exited' && exitCode === 1) {
@@ -1305,18 +1305,19 @@ class AWSTaskRunner {
             const logBaseUrl = `https://${__1.Input.region}.console.aws.amazon.com/cloudwatch/home?region=${CF.config.region}#logsV2:log-groups/log-group/${taskDef.taskDefStackName}`;
             cloud_runner_logger_1.default.log(`You can also see the logs at AWS Cloud Watch: ${logBaseUrl}`);
             let shouldReadLogs = true;
+            let shouldCleanup = true;
             let timestamp = 0;
             let output = '';
             while (shouldReadLogs) {
                 yield new Promise((resolve) => setTimeout(resolve, 1500));
                 const taskData = yield AWSTaskRunner.describeTasks(ECS, clusterName, taskArn);
                 ({ timestamp, shouldReadLogs } = AWSTaskRunner.checkStreamingShouldContinue(taskData, timestamp, shouldReadLogs));
-                ({ iterator, shouldReadLogs, output } = yield AWSTaskRunner.handleLogStreamIteration(kinesis, iterator, shouldReadLogs, taskDef, output));
+                ({ iterator, shouldReadLogs, output, shouldCleanup } = yield AWSTaskRunner.handleLogStreamIteration(kinesis, iterator, shouldReadLogs, taskDef, output, shouldCleanup));
             }
-            return output;
+            return { output, shouldCleanup };
         });
     }
-    static handleLogStreamIteration(kinesis, iterator, shouldReadLogs, taskDef, output) {
+    static handleLogStreamIteration(kinesis, iterator, shouldReadLogs, taskDef, output, shouldCleanup) {
         return __awaiter(this, void 0, void 0, function* () {
             const records = yield kinesis
                 .getRecords({
@@ -1324,8 +1325,8 @@ class AWSTaskRunner {
             })
                 .promise();
             iterator = records.NextShardIterator || '';
-            ({ shouldReadLogs, output } = AWSTaskRunner.logRecords(records, iterator, taskDef, shouldReadLogs, output));
-            return { iterator, shouldReadLogs, output };
+            ({ shouldReadLogs, output, shouldCleanup } = AWSTaskRunner.logRecords(records, iterator, taskDef, shouldReadLogs, output, shouldCleanup));
+            return { iterator, shouldReadLogs, output, shouldCleanup };
         });
     }
     static checkStreamingShouldContinue(taskData, timestamp, shouldReadLogs) {
@@ -1345,7 +1346,7 @@ class AWSTaskRunner {
         }
         return { timestamp, shouldReadLogs };
     }
-    static logRecords(records, iterator, taskDef, shouldReadLogs, output) {
+    static logRecords(records, iterator, taskDef, shouldReadLogs, output, shouldCleanup) {
         if (records.Records.length > 0 && iterator) {
             for (let index = 0; index < records.Records.length; index++) {
                 const json = JSON.parse(zlib.gunzipSync(Buffer.from(records.Records[index].Data, 'base64')).toString('utf8'));
@@ -1358,6 +1359,7 @@ class AWSTaskRunner {
                     }
                     else if (message.includes('Rebuilding Library because the asset database could not be found!')) {
                         core.warning('LIBRARY NOT FOUND!');
+                        core.setOutput('library-found', 'false');
                     }
                     else if (message.includes('Build succeeded')) {
                         core.setOutput('build-result', 'success');
@@ -1366,6 +1368,11 @@ class AWSTaskRunner {
                         core.setOutput('build-result', 'failed');
                         core.error('BUILD FAILED!');
                     }
+                    else if (message.includes('cloud runner stop watching job')) {
+                        core.setOutput('cloud runner stop watching', 'true');
+                        shouldReadLogs = false;
+                        shouldCleanup = false;
+                    }
                     message = `[${cloud_runner_statics_1.CloudRunnerStatics.logPrefix}] ${message}`;
                     if (cloud_runner_1.default.buildParameters.cloudRunnerIntegrationTests) {
                         output += message;
@@ -1375,7 +1382,7 @@ class AWSTaskRunner {
                 }
             }
         }
-        return { shouldReadLogs, output };
+        return { shouldReadLogs, output, shouldCleanup };
     }
     static getLogStream(kinesis, kinesisStreamName) {
         return __awaiter(this, void 0, void 0, function* () {
@@ -1546,21 +1553,24 @@ class AWSBuildEnvironment {
             yield new aws_base_stack_1.AWSBaseStack(this.baseStackName).setupBaseStack(CF);
             const taskDef = yield new aws_job_stack_1.AWSJobStack(this.baseStackName).setupCloudFormations(CF, buildGuid, image, entrypoint, commands, mountdir, workingdir, secrets);
             let postRunTaskTimeMs;
-            let output = '';
             try {
                 const postSetupStacksTimeMs = Date.now();
                 cloud_runner_logger_1.default.log(`Setup job time: ${Math.floor((postSetupStacksTimeMs - startTimeMs) / 1000)}s`);
-                output = yield aws_task_runner_1.default.runTask(taskDef, ECS, CF, environment, buildGuid, commands);
+                const { output, shouldCleanup } = yield aws_task_runner_1.default.runTask(taskDef, ECS, CF, environment, buildGuid, commands);
                 postRunTaskTimeMs = Date.now();
                 cloud_runner_logger_1.default.log(`Run job time: ${Math.floor((postRunTaskTimeMs - postSetupStacksTimeMs) / 1000)}s`);
-            }
-            finally {
-                yield this.cleanupResources(CF, taskDef);
+                if (shouldCleanup) {
+                    yield this.cleanupResources(CF, taskDef);
+                }
                 const postCleanupTimeMs = Date.now();
                 if (postRunTaskTimeMs !== undefined)
                     cloud_runner_logger_1.default.log(`Cleanup job time: ${Math.floor((postCleanupTimeMs - postRunTaskTimeMs) / 1000)}s`);
+                return output;
+            }
+            catch (error) {
+                yield this.cleanupResources(CF, taskDef);
+                throw error;
             }
-            return output;
         });
     }
     cleanupResources(CF, taskDef) {
File diff suppressed because one or more lines are too long
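
The hunks above patch the compiled bundle; the hunks below make the same change in the TypeScript sources. For context on the matching that `logRecords` performs, each Kinesis record delivers a gzipped, base64-encoded CloudWatch Logs payload that is unpacked before individual messages are scanned, roughly as sketched here (the `logEvents`/`message` field names follow the CloudWatch Logs subscription format; the helper itself is illustrative and not part of this commit):

```ts
import * as zlib from 'zlib';

// Decode one Kinesis record from the CloudWatch Logs subscription, mirroring the
// zlib.gunzipSync(Buffer.from(..., 'base64')) chain visible in the diff above.
function decodeLogRecord(data: string): { message: string }[] {
  const json = JSON.parse(zlib.gunzipSync(Buffer.from(data, 'base64')).toString('utf8'));
  // Each payload batches several log events; logRecords scans every event message
  // for markers such as 'Build succeeded' or 'cloud runner stop watching job'.
  return json.logEvents ?? [];
}
```
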
@@ -58,13 +58,20 @@ class AWSTaskRunner {
     CloudRunnerLogger.log(
       `Cloud runner job status is running ${(await AWSTaskRunner.describeTasks(ECS, cluster, taskArn))?.lastStatus}`,
     );
-    const output = await this.streamLogsUntilTaskStops(ECS, CF, taskDef, cluster, taskArn, streamName);
+    const { output, shouldCleanup } = await this.streamLogsUntilTaskStops(
+      ECS,
+      CF,
+      taskDef,
+      cluster,
+      taskArn,
+      streamName,
+    );
     const taskData = await AWSTaskRunner.describeTasks(ECS, cluster, taskArn);
     const exitCode = taskData.containers?.[0].exitCode;
     const wasSuccessful = exitCode === 0 || (exitCode === undefined && taskData.lastStatus === 'RUNNING');
     if (wasSuccessful) {
       CloudRunnerLogger.log(`Cloud runner job has finished successfully`);
-      return output;
+      return { output, shouldCleanup };
     } else {
       if (taskData.stoppedReason === 'Essential container in task exited' && exitCode === 1) {
         throw new Error('Container exited with code 1');
@@ -121,21 +128,23 @@ class AWSTaskRunner {
     const logBaseUrl = `https://${Input.region}.console.aws.amazon.com/cloudwatch/home?region=${CF.config.region}#logsV2:log-groups/log-group/${taskDef.taskDefStackName}`;
     CloudRunnerLogger.log(`You can also see the logs at AWS Cloud Watch: ${logBaseUrl}`);
     let shouldReadLogs = true;
+    let shouldCleanup = true;
     let timestamp: number = 0;
     let output = '';
     while (shouldReadLogs) {
       await new Promise((resolve) => setTimeout(resolve, 1500));
       const taskData = await AWSTaskRunner.describeTasks(ECS, clusterName, taskArn);
       ({ timestamp, shouldReadLogs } = AWSTaskRunner.checkStreamingShouldContinue(taskData, timestamp, shouldReadLogs));
-      ({ iterator, shouldReadLogs, output } = await AWSTaskRunner.handleLogStreamIteration(
+      ({ iterator, shouldReadLogs, output, shouldCleanup } = await AWSTaskRunner.handleLogStreamIteration(
         kinesis,
         iterator,
         shouldReadLogs,
         taskDef,
         output,
+        shouldCleanup,
       ));
     }
-    return output;
+    return { output, shouldCleanup };
   }

   private static async handleLogStreamIteration(
@@ -144,6 +153,7 @@ class AWSTaskRunner {
     shouldReadLogs: boolean,
     taskDef: CloudRunnerAWSTaskDef,
     output: string,
+    shouldCleanup: boolean,
   ) {
     const records = await kinesis
       .getRecords({
@@ -151,8 +161,15 @@ class AWSTaskRunner {
       })
       .promise();
     iterator = records.NextShardIterator || '';
-    ({ shouldReadLogs, output } = AWSTaskRunner.logRecords(records, iterator, taskDef, shouldReadLogs, output));
-    return { iterator, shouldReadLogs, output };
+    ({ shouldReadLogs, output, shouldCleanup } = AWSTaskRunner.logRecords(
+      records,
+      iterator,
+      taskDef,
+      shouldReadLogs,
+      output,
+      shouldCleanup,
+    ));
+    return { iterator, shouldReadLogs, output, shouldCleanup };
   }

   private static checkStreamingShouldContinue(taskData: AWS.ECS.Task, timestamp: number, shouldReadLogs: boolean) {
@@ -179,6 +196,7 @@ class AWSTaskRunner {
     taskDef: CloudRunnerAWSTaskDef,
     shouldReadLogs: boolean,
     output: string,
+    shouldCleanup: boolean,
   ) {
     if (records.Records.length > 0 && iterator) {
       for (let index = 0; index < records.Records.length; index++) {
@@ -193,11 +211,16 @@ class AWSTaskRunner {
             shouldReadLogs = false;
           } else if (message.includes('Rebuilding Library because the asset database could not be found!')) {
             core.warning('LIBRARY NOT FOUND!');
+            core.setOutput('library-found', 'false');
           } else if (message.includes('Build succeeded')) {
             core.setOutput('build-result', 'success');
           } else if (message.includes('Build fail')) {
             core.setOutput('build-result', 'failed');
             core.error('BUILD FAILED!');
+          } else if (message.includes('cloud runner stop watching job')) {
+            core.setOutput('cloud runner stop watching', 'true');
+            shouldReadLogs = false;
+            shouldCleanup = false;
           }
           message = `[${CloudRunnerStatics.logPrefix}] ${message}`;
           if (CloudRunner.buildParameters.cloudRunnerIntegrationTests) {
@@ -208,7 +231,7 @@ class AWSTaskRunner {
         }
       }
     }
-    return { shouldReadLogs, output };
+    return { shouldReadLogs, output, shouldCleanup };
   }

   private static async getLogStream(kinesis: AWS.Kinesis, kinesisStreamName: string) {
@@ -66,20 +66,23 @@ class AWSBuildEnvironment implements ProviderInterface {
     );

     let postRunTaskTimeMs;
-    let output = '';
     try {
       const postSetupStacksTimeMs = Date.now();
       CloudRunnerLogger.log(`Setup job time: ${Math.floor((postSetupStacksTimeMs - startTimeMs) / 1000)}s`);
-      output = await AWSTaskRunner.runTask(taskDef, ECS, CF, environment, buildGuid, commands);
+      const { output, shouldCleanup } = await AWSTaskRunner.runTask(taskDef, ECS, CF, environment, buildGuid, commands);
       postRunTaskTimeMs = Date.now();
       CloudRunnerLogger.log(`Run job time: ${Math.floor((postRunTaskTimeMs - postSetupStacksTimeMs) / 1000)}s`);
-    } finally {
+      if (shouldCleanup) {
        await this.cleanupResources(CF, taskDef);
+      }
       const postCleanupTimeMs = Date.now();
       if (postRunTaskTimeMs !== undefined)
         CloudRunnerLogger.log(`Cleanup job time: ${Math.floor((postCleanupTimeMs - postRunTaskTimeMs) / 1000)}s`);
+      return output;
+    } catch (error) {
+      await this.cleanupResources(CF, taskDef);
+      throw error;
     }
-    return output;
   }

   async cleanupResources(CF: SDK.CloudFormation, taskDef: CloudRunnerAWSTaskDef) {
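
Nothing in this diff emits the new marker itself: any process inside the ECS task that prints a line containing `cloud runner stop watching job` triggers the branch added above, which sets the `cloud runner stop watching` output, stops log streaming, and skips `cleanupResources` so the stack stays up for the ephemeral pipeline. As an illustrative example (not code from this commit), a final job step could simply print the marker:

```ts
// Illustrative only: a job step opting out of teardown by printing the marker the
// runner now matches on; the real entrypoint and commands are configured by cloud runner.
console.log('cloud runner stop watching job');
```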