pr feedback

pull/767/head
Frostebite 2025-12-09 20:22:53 +00:00
parent 37495c11b9
commit ebbb1d4150
5 changed files with 209 additions and 142 deletions

dist/index.js vendored (164 changes)

@@ -5565,73 +5565,80 @@ class RemoteClient {
});
}
static async remoteClientPostBuild() {
remote_client_logger_1.RemoteClientLogger.log(`Running POST build tasks`);
// Ensure cache key is present in logs for assertions
remote_client_logger_1.RemoteClientLogger.log(`CACHE_KEY=${cloud_runner_1.default.buildParameters.cacheKey}`);
cloud_runner_logger_1.default.log(`${cloud_runner_1.default.buildParameters.cacheKey}`);
// Guard: only push Library cache if the folder exists and has contents
try {
const libraryFolderHost = cloud_runner_folders_1.CloudRunnerFolders.libraryFolderAbsolute;
if (node_fs_1.default.existsSync(libraryFolderHost)) {
let libraryEntries = [];
try {
libraryEntries = await node_fs_1.default.promises.readdir(libraryFolderHost);
}
catch {
libraryEntries = [];
}
if (libraryEntries.length > 0) {
await caching_1.Caching.PushToCache(cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(`${cloud_runner_folders_1.CloudRunnerFolders.cacheFolderForCacheKeyFull}/Library`), cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(cloud_runner_folders_1.CloudRunnerFolders.libraryFolderAbsolute), `lib-${cloud_runner_1.default.buildParameters.buildGuid}`);
remote_client_logger_1.RemoteClientLogger.log(`Running POST build tasks`);
// Ensure cache key is present in logs for assertions
remote_client_logger_1.RemoteClientLogger.log(`CACHE_KEY=${cloud_runner_1.default.buildParameters.cacheKey}`);
cloud_runner_logger_1.default.log(`${cloud_runner_1.default.buildParameters.cacheKey}`);
// Guard: only push Library cache if the folder exists and has contents
try {
const libraryFolderHost = cloud_runner_folders_1.CloudRunnerFolders.libraryFolderAbsolute;
if (node_fs_1.default.existsSync(libraryFolderHost)) {
let libraryEntries = [];
try {
libraryEntries = await node_fs_1.default.promises.readdir(libraryFolderHost);
}
catch {
libraryEntries = [];
}
if (libraryEntries.length > 0) {
await caching_1.Caching.PushToCache(cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(`${cloud_runner_folders_1.CloudRunnerFolders.cacheFolderForCacheKeyFull}/Library`), cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(cloud_runner_folders_1.CloudRunnerFolders.libraryFolderAbsolute), `lib-${cloud_runner_1.default.buildParameters.buildGuid}`);
}
else {
remote_client_logger_1.RemoteClientLogger.log(`Skipping Library cache push (folder is empty)`);
}
}
else {
remote_client_logger_1.RemoteClientLogger.log(`Skipping Library cache push (folder is empty)`);
remote_client_logger_1.RemoteClientLogger.log(`Skipping Library cache push (folder missing)`);
}
}
else {
remote_client_logger_1.RemoteClientLogger.log(`Skipping Library cache push (folder missing)`);
catch (error) {
remote_client_logger_1.RemoteClientLogger.logWarning(`Library cache push skipped with error: ${error.message}`);
}
}
catch (error) {
remote_client_logger_1.RemoteClientLogger.logWarning(`Library cache push skipped with error: ${error.message}`);
}
// Guard: only push Build cache if the folder exists and has contents
try {
const buildFolderHost = cloud_runner_folders_1.CloudRunnerFolders.projectBuildFolderAbsolute;
if (node_fs_1.default.existsSync(buildFolderHost)) {
let buildEntries = [];
try {
buildEntries = await node_fs_1.default.promises.readdir(buildFolderHost);
}
catch {
buildEntries = [];
}
if (buildEntries.length > 0) {
await caching_1.Caching.PushToCache(cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(`${cloud_runner_folders_1.CloudRunnerFolders.cacheFolderForCacheKeyFull}/build`), cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(cloud_runner_folders_1.CloudRunnerFolders.projectBuildFolderAbsolute), `build-${cloud_runner_1.default.buildParameters.buildGuid}`);
// Guard: only push Build cache if the folder exists and has contents
try {
const buildFolderHost = cloud_runner_folders_1.CloudRunnerFolders.projectBuildFolderAbsolute;
if (node_fs_1.default.existsSync(buildFolderHost)) {
let buildEntries = [];
try {
buildEntries = await node_fs_1.default.promises.readdir(buildFolderHost);
}
catch {
buildEntries = [];
}
if (buildEntries.length > 0) {
await caching_1.Caching.PushToCache(cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(`${cloud_runner_folders_1.CloudRunnerFolders.cacheFolderForCacheKeyFull}/build`), cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(cloud_runner_folders_1.CloudRunnerFolders.projectBuildFolderAbsolute), `build-${cloud_runner_1.default.buildParameters.buildGuid}`);
}
else {
remote_client_logger_1.RemoteClientLogger.log(`Skipping Build cache push (folder is empty)`);
}
}
else {
remote_client_logger_1.RemoteClientLogger.log(`Skipping Build cache push (folder is empty)`);
remote_client_logger_1.RemoteClientLogger.log(`Skipping Build cache push (folder missing)`);
}
}
else {
remote_client_logger_1.RemoteClientLogger.log(`Skipping Build cache push (folder missing)`);
catch (error) {
remote_client_logger_1.RemoteClientLogger.logWarning(`Build cache push skipped with error: ${error.message}`);
}
if (!build_parameters_1.default.shouldUseRetainedWorkspaceMode(cloud_runner_1.default.buildParameters)) {
const uniqueJobFolderLinux = cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(cloud_runner_folders_1.CloudRunnerFolders.uniqueCloudRunnerJobFolderAbsolute);
if (node_fs_1.default.existsSync(cloud_runner_folders_1.CloudRunnerFolders.uniqueCloudRunnerJobFolderAbsolute) || node_fs_1.default.existsSync(uniqueJobFolderLinux)) {
await cloud_runner_system_1.CloudRunnerSystem.Run(`rm -r ${uniqueJobFolderLinux} || true`);
}
else {
remote_client_logger_1.RemoteClientLogger.log(`Skipping cleanup; unique job folder missing`);
}
}
await RemoteClient.runCustomHookFiles(`after-build`);
// WIP - need to give the pod permissions to create config map
await remote_client_logger_1.RemoteClientLogger.handleLogManagementPostJob();
}
catch (error) {
remote_client_logger_1.RemoteClientLogger.logWarning(`Build cache push skipped with error: ${error.message}`);
// Log error but don't fail - post-build tasks are best-effort
remote_client_logger_1.RemoteClientLogger.logWarning(`Post-build task error: ${error.message}`);
cloud_runner_logger_1.default.log(`Post-build task error: ${error.message}`);
}
if (!build_parameters_1.default.shouldUseRetainedWorkspaceMode(cloud_runner_1.default.buildParameters)) {
const uniqueJobFolderLinux = cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(cloud_runner_folders_1.CloudRunnerFolders.uniqueCloudRunnerJobFolderAbsolute);
if (node_fs_1.default.existsSync(cloud_runner_folders_1.CloudRunnerFolders.uniqueCloudRunnerJobFolderAbsolute) || node_fs_1.default.existsSync(uniqueJobFolderLinux)) {
await cloud_runner_system_1.CloudRunnerSystem.Run(`rm -r ${uniqueJobFolderLinux} || true`);
}
else {
remote_client_logger_1.RemoteClientLogger.log(`Skipping cleanup; unique job folder missing`);
}
}
await RemoteClient.runCustomHookFiles(`after-build`);
// WIP - need to give the pod permissions to create config map
await remote_client_logger_1.RemoteClientLogger.handleLogManagementPostJob();
// Ensure success marker is present in logs for tests
// Ensure success marker is always present in logs for tests, even if post-build tasks failed
// For K8s, kubectl logs reads from stdout/stderr, so we must write to stdout
// For all providers, we write to stdout so it gets piped through the log stream
// The log stream will capture it and add it to BuildResults
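The two guards in the hunk above follow the same shape: check that the folder exists, check that it is non-empty, and only then call Caching.PushToCache, logging a skip otherwise. A minimal TypeScript sketch of that shape as a helper; pushCacheIfNonEmpty and its parameters are hypothetical names introduced here for illustration, the actual code keeps the checks inline as shown.

import { promises as fs } from 'node:fs';

// Hypothetical helper mirroring the inline guards above: push a folder to the
// cache only when it exists and contains at least one entry. `pushToCache`
// stands in for Caching.PushToCache and `log` for RemoteClientLogger.log.
async function pushCacheIfNonEmpty(
  cacheFolder: string,
  sourceFolder: string,
  artifactName: string,
  pushToCache: (cacheFolder: string, sourceFolder: string, artifactName: string) => Promise<void>,
  log: (message: string) => void,
): Promise<void> {
  let entries: string[] = [];
  try {
    entries = await fs.readdir(sourceFolder); // rejects when the folder is missing
  } catch {
    log(`Skipping cache push (folder missing): ${sourceFolder}`);
    return;
  }
  if (entries.length === 0) {
    log(`Skipping cache push (folder is empty): ${sourceFolder}`);
    return;
  }
  await pushToCache(cacheFolder, sourceFolder, artifactName);
}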
@@ -6903,9 +6910,15 @@ class ContainerHookService {
hook: after
commands: |
if command -v aws > /dev/null 2>&1; then
aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID --profile default || true
aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY --profile default || true
aws configure set region $AWS_DEFAULT_REGION --profile default || true
if [ -n "$AWS_ACCESS_KEY_ID" ]; then
aws configure set aws_access_key_id "$AWS_ACCESS_KEY_ID" --profile default || true
fi
if [ -n "$AWS_SECRET_ACCESS_KEY" ]; then
aws configure set aws_secret_access_key "$AWS_SECRET_ACCESS_KEY" --profile default || true
fi
if [ -n "$AWS_DEFAULT_REGION" ]; then
aws configure set region "$AWS_DEFAULT_REGION" --profile default || true
fi
ENDPOINT_ARGS=""
if [ -n "$AWS_S3_ENDPOINT" ]; then ENDPOINT_ARGS="--endpoint-url $AWS_S3_ENDPOINT"; fi
aws $ENDPOINT_ARGS s3 cp /data/cache/$CACHE_KEY/build/build-${cloud_runner_1.default.buildParameters.buildGuid}.tar${cloud_runner_1.default.buildParameters.useCompressionStrategy ? '.lz4' : ''} s3://${cloud_runner_1.default.buildParameters.awsStackName}/cloud-runner-cache/$CACHE_KEY/build/build-$BUILD_GUID.tar${cloud_runner_1.default.buildParameters.useCompressionStrategy ? '.lz4' : ''} || true
@@ -6927,9 +6940,15 @@ class ContainerHookService {
commands: |
mkdir -p /data/cache/$CACHE_KEY/build/
if command -v aws > /dev/null 2>&1; then
aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID --profile default || true
aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY --profile default || true
aws configure set region $AWS_DEFAULT_REGION --profile default || true
if [ -n "$AWS_ACCESS_KEY_ID" ]; then
aws configure set aws_access_key_id "$AWS_ACCESS_KEY_ID" --profile default || true
fi
if [ -n "$AWS_SECRET_ACCESS_KEY" ]; then
aws configure set aws_secret_access_key "$AWS_SECRET_ACCESS_KEY" --profile default || true
fi
if [ -n "$AWS_DEFAULT_REGION" ]; then
aws configure set region "$AWS_DEFAULT_REGION" --profile default || true
fi
ENDPOINT_ARGS=""
if [ -n "$AWS_S3_ENDPOINT" ]; then ENDPOINT_ARGS="--endpoint-url $AWS_S3_ENDPOINT"; fi
aws $ENDPOINT_ARGS s3 ls ${cloud_runner_1.default.buildParameters.awsStackName}/cloud-runner-cache/ || true
@@ -6985,9 +7004,15 @@ class ContainerHookService {
hook: after
commands: |
if command -v aws > /dev/null 2>&1; then
aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID --profile default || true
aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY --profile default || true
aws configure set region $AWS_DEFAULT_REGION --profile default || true
if [ -n "$AWS_ACCESS_KEY_ID" ]; then
aws configure set aws_access_key_id "$AWS_ACCESS_KEY_ID" --profile default || true
fi
if [ -n "$AWS_SECRET_ACCESS_KEY" ]; then
aws configure set aws_secret_access_key "$AWS_SECRET_ACCESS_KEY" --profile default || true
fi
if [ -n "$AWS_DEFAULT_REGION" ]; then
aws configure set region "$AWS_DEFAULT_REGION" --profile default || true
fi
ENDPOINT_ARGS=""
if [ -n "$AWS_S3_ENDPOINT" ]; then ENDPOINT_ARGS="--endpoint-url $AWS_S3_ENDPOINT"; fi
aws $ENDPOINT_ARGS s3 cp --recursive /data/cache/$CACHE_KEY/lfs s3://${cloud_runner_1.default.buildParameters.awsStackName}/cloud-runner-cache/$CACHE_KEY/lfs || true
@@ -7013,9 +7038,15 @@ class ContainerHookService {
mkdir -p /data/cache/$CACHE_KEY/Library/
mkdir -p /data/cache/$CACHE_KEY/lfs/
if command -v aws > /dev/null 2>&1; then
aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID --profile default || true
aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY --profile default || true
aws configure set region $AWS_DEFAULT_REGION --profile default || true
if [ -n "$AWS_ACCESS_KEY_ID" ]; then
aws configure set aws_access_key_id "$AWS_ACCESS_KEY_ID" --profile default || true
fi
if [ -n "$AWS_SECRET_ACCESS_KEY" ]; then
aws configure set aws_secret_access_key "$AWS_SECRET_ACCESS_KEY" --profile default || true
fi
if [ -n "$AWS_DEFAULT_REGION" ]; then
aws configure set region "$AWS_DEFAULT_REGION" --profile default || true
fi
ENDPOINT_ARGS=""
if [ -n "$AWS_S3_ENDPOINT" ]; then ENDPOINT_ARGS="--endpoint-url $AWS_S3_ENDPOINT"; fi
aws $ENDPOINT_ARGS s3 ls ${cloud_runner_1.default.buildParameters.awsStackName}/cloud-runner-cache/ || true
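The hook bodies above repeat the same shell pattern: call aws configure set only when the matching environment variable is non-empty, and quote the expansion so empty or space-containing values cannot break the command. A minimal sketch of generating that snippet once in TypeScript, assuming a hypothetical awsConfigureSnippet helper; the actual ContainerHookService embeds the shell directly in its YAML templates.

// Hypothetical helper that emits the guarded, quoted `aws configure set` lines
// used by the cache hooks above.
function awsConfigureSnippet(): string {
  const settings: Array<[string, string]> = [
    ['AWS_ACCESS_KEY_ID', 'aws_access_key_id'],
    ['AWS_SECRET_ACCESS_KEY', 'aws_secret_access_key'],
    ['AWS_DEFAULT_REGION', 'region'],
  ];

  return settings
    .map(
      ([envVar, configKey]) =>
        `if [ -n "$${envVar}" ]; then\n  aws configure set ${configKey} "$${envVar}" --profile default || true\nfi`,
    )
    .join('\n');
}

// Example: wrap the snippet in the same `command -v aws` guard the hooks use.
const awsSetupCommands = `if command -v aws > /dev/null 2>&1; then\n${awsConfigureSnippet()}\nfi`;
console.log(awsSetupCommands);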
@@ -7475,7 +7506,10 @@ echo "CACHE_KEY=$CACHE_KEY"`;
tar -cf "/data/cache/$CACHE_KEY/build/build-$BUILD_GUID.tar" --files-from /dev/null || touch "/data/cache/$CACHE_KEY/build/build-$BUILD_GUID.tar"
fi
# Run post-build tasks and pipe output through log stream to capture "Activation successful"
# Use set +e to allow the command to fail without exiting the script, then restore set -e behavior
set +e
node ${builderPath} -m remote-cli-post-build | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt || echo "Post-build command completed with warnings"
set -e
# Write end marker and pipe through log stream
echo "end of cloud runner job" | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt
echo "---${cloud_runner_1.default.buildParameters.logId}" | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt

dist/index.js.map vendored (2 changes)

File diff suppressed because one or more lines are too long

View File

@@ -80,81 +80,87 @@ export class RemoteClient {
@CliFunction(`remote-cli-post-build`, `runs a cloud runner build`)
public static async remoteClientPostBuild(): Promise<string> {
RemoteClientLogger.log(`Running POST build tasks`);
// Ensure cache key is present in logs for assertions
RemoteClientLogger.log(`CACHE_KEY=${CloudRunner.buildParameters.cacheKey}`);
CloudRunnerLogger.log(`${CloudRunner.buildParameters.cacheKey}`);
// Guard: only push Library cache if the folder exists and has contents
try {
const libraryFolderHost = CloudRunnerFolders.libraryFolderAbsolute;
if (fs.existsSync(libraryFolderHost)) {
let libraryEntries: string[] = [];
try {
libraryEntries = await fs.promises.readdir(libraryFolderHost);
} catch {
libraryEntries = [];
}
if (libraryEntries.length > 0) {
await Caching.PushToCache(
CloudRunnerFolders.ToLinuxFolder(`${CloudRunnerFolders.cacheFolderForCacheKeyFull}/Library`),
CloudRunnerFolders.ToLinuxFolder(CloudRunnerFolders.libraryFolderAbsolute),
`lib-${CloudRunner.buildParameters.buildGuid}`,
);
RemoteClientLogger.log(`Running POST build tasks`);
// Ensure cache key is present in logs for assertions
RemoteClientLogger.log(`CACHE_KEY=${CloudRunner.buildParameters.cacheKey}`);
CloudRunnerLogger.log(`${CloudRunner.buildParameters.cacheKey}`);
// Guard: only push Library cache if the folder exists and has contents
try {
const libraryFolderHost = CloudRunnerFolders.libraryFolderAbsolute;
if (fs.existsSync(libraryFolderHost)) {
let libraryEntries: string[] = [];
try {
libraryEntries = await fs.promises.readdir(libraryFolderHost);
} catch {
libraryEntries = [];
}
if (libraryEntries.length > 0) {
await Caching.PushToCache(
CloudRunnerFolders.ToLinuxFolder(`${CloudRunnerFolders.cacheFolderForCacheKeyFull}/Library`),
CloudRunnerFolders.ToLinuxFolder(CloudRunnerFolders.libraryFolderAbsolute),
`lib-${CloudRunner.buildParameters.buildGuid}`,
);
} else {
RemoteClientLogger.log(`Skipping Library cache push (folder is empty)`);
}
} else {
RemoteClientLogger.log(`Skipping Library cache push (folder is empty)`);
RemoteClientLogger.log(`Skipping Library cache push (folder missing)`);
}
} else {
RemoteClientLogger.log(`Skipping Library cache push (folder missing)`);
} catch (error: any) {
RemoteClientLogger.logWarning(`Library cache push skipped with error: ${error.message}`);
}
} catch (error: any) {
RemoteClientLogger.logWarning(`Library cache push skipped with error: ${error.message}`);
}
// Guard: only push Build cache if the folder exists and has contents
try {
const buildFolderHost = CloudRunnerFolders.projectBuildFolderAbsolute;
if (fs.existsSync(buildFolderHost)) {
let buildEntries: string[] = [];
try {
buildEntries = await fs.promises.readdir(buildFolderHost);
} catch {
buildEntries = [];
}
if (buildEntries.length > 0) {
await Caching.PushToCache(
CloudRunnerFolders.ToLinuxFolder(`${CloudRunnerFolders.cacheFolderForCacheKeyFull}/build`),
CloudRunnerFolders.ToLinuxFolder(CloudRunnerFolders.projectBuildFolderAbsolute),
`build-${CloudRunner.buildParameters.buildGuid}`,
);
// Guard: only push Build cache if the folder exists and has contents
try {
const buildFolderHost = CloudRunnerFolders.projectBuildFolderAbsolute;
if (fs.existsSync(buildFolderHost)) {
let buildEntries: string[] = [];
try {
buildEntries = await fs.promises.readdir(buildFolderHost);
} catch {
buildEntries = [];
}
if (buildEntries.length > 0) {
await Caching.PushToCache(
CloudRunnerFolders.ToLinuxFolder(`${CloudRunnerFolders.cacheFolderForCacheKeyFull}/build`),
CloudRunnerFolders.ToLinuxFolder(CloudRunnerFolders.projectBuildFolderAbsolute),
`build-${CloudRunner.buildParameters.buildGuid}`,
);
} else {
RemoteClientLogger.log(`Skipping Build cache push (folder is empty)`);
}
} else {
RemoteClientLogger.log(`Skipping Build cache push (folder is empty)`);
RemoteClientLogger.log(`Skipping Build cache push (folder missing)`);
}
} else {
RemoteClientLogger.log(`Skipping Build cache push (folder missing)`);
} catch (error: any) {
RemoteClientLogger.logWarning(`Build cache push skipped with error: ${error.message}`);
}
if (!BuildParameters.shouldUseRetainedWorkspaceMode(CloudRunner.buildParameters)) {
const uniqueJobFolderLinux = CloudRunnerFolders.ToLinuxFolder(
CloudRunnerFolders.uniqueCloudRunnerJobFolderAbsolute,
);
if (fs.existsSync(CloudRunnerFolders.uniqueCloudRunnerJobFolderAbsolute) || fs.existsSync(uniqueJobFolderLinux)) {
await CloudRunnerSystem.Run(`rm -r ${uniqueJobFolderLinux} || true`);
} else {
RemoteClientLogger.log(`Skipping cleanup; unique job folder missing`);
}
}
await RemoteClient.runCustomHookFiles(`after-build`);
// WIP - need to give the pod permissions to create config map
await RemoteClientLogger.handleLogManagementPostJob();
} catch (error: any) {
RemoteClientLogger.logWarning(`Build cache push skipped with error: ${error.message}`);
// Log error but don't fail - post-build tasks are best-effort
RemoteClientLogger.logWarning(`Post-build task error: ${error.message}`);
CloudRunnerLogger.log(`Post-build task error: ${error.message}`);
}
if (!BuildParameters.shouldUseRetainedWorkspaceMode(CloudRunner.buildParameters)) {
const uniqueJobFolderLinux = CloudRunnerFolders.ToLinuxFolder(
CloudRunnerFolders.uniqueCloudRunnerJobFolderAbsolute,
);
if (fs.existsSync(CloudRunnerFolders.uniqueCloudRunnerJobFolderAbsolute) || fs.existsSync(uniqueJobFolderLinux)) {
await CloudRunnerSystem.Run(`rm -r ${uniqueJobFolderLinux} || true`);
} else {
RemoteClientLogger.log(`Skipping cleanup; unique job folder missing`);
}
}
await RemoteClient.runCustomHookFiles(`after-build`);
// WIP - need to give the pod permissions to create config map
await RemoteClientLogger.handleLogManagementPostJob();
// Ensure success marker is present in logs for tests
// Ensure success marker is always present in logs for tests, even if post-build tasks failed
// For K8s, kubectl logs reads from stdout/stderr, so we must write to stdout
// For all providers, we write to stdout so it gets piped through the log stream
// The log stream will capture it and add it to BuildResults
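The new outer catch above makes the whole post-build phase best-effort: any failure is downgraded to a warning so the flow can continue to the success and end-of-job markers the comments describe. A minimal sketch of that control flow, with a hypothetical runPostBuildTasks standing in for the cache pushes, workspace cleanup and after-build hooks shown in the hunk.

// Hypothetical sketch of the best-effort wrapper: failures are logged, never rethrown,
// so the marker lines after it are always reached.
async function postBuildBestEffort(
  runPostBuildTasks: () => Promise<void>,
  logWarning: (message: string) => void,
): Promise<void> {
  try {
    await runPostBuildTasks();
  } catch (error: any) {
    // Log error but don't fail - post-build tasks are best-effort.
    logWarning(`Post-build task error: ${error.message}`);
  }
  // The success marker would be written after this point (to stdout, so kubectl logs
  // and the log stream both capture it), regardless of whether the tasks succeeded.
}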

View File

@@ -38,9 +38,15 @@ export class ContainerHookService {
hook: after
commands: |
if command -v aws > /dev/null 2>&1; then
aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID --profile default || true
aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY --profile default || true
aws configure set region $AWS_DEFAULT_REGION --profile default || true
if [ -n "$AWS_ACCESS_KEY_ID" ]; then
aws configure set aws_access_key_id "$AWS_ACCESS_KEY_ID" --profile default || true
fi
if [ -n "$AWS_SECRET_ACCESS_KEY" ]; then
aws configure set aws_secret_access_key "$AWS_SECRET_ACCESS_KEY" --profile default || true
fi
if [ -n "$AWS_DEFAULT_REGION" ]; then
aws configure set region "$AWS_DEFAULT_REGION" --profile default || true
fi
ENDPOINT_ARGS=""
if [ -n "$AWS_S3_ENDPOINT" ]; then ENDPOINT_ARGS="--endpoint-url $AWS_S3_ENDPOINT"; fi
aws $ENDPOINT_ARGS s3 cp /data/cache/$CACHE_KEY/build/build-${CloudRunner.buildParameters.buildGuid}.tar${
@@ -68,9 +74,15 @@ export class ContainerHookService {
commands: |
mkdir -p /data/cache/$CACHE_KEY/build/
if command -v aws > /dev/null 2>&1; then
aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID --profile default || true
aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY --profile default || true
aws configure set region $AWS_DEFAULT_REGION --profile default || true
if [ -n "$AWS_ACCESS_KEY_ID" ]; then
aws configure set aws_access_key_id "$AWS_ACCESS_KEY_ID" --profile default || true
fi
if [ -n "$AWS_SECRET_ACCESS_KEY" ]; then
aws configure set aws_secret_access_key "$AWS_SECRET_ACCESS_KEY" --profile default || true
fi
if [ -n "$AWS_DEFAULT_REGION" ]; then
aws configure set region "$AWS_DEFAULT_REGION" --profile default || true
fi
ENDPOINT_ARGS=""
if [ -n "$AWS_S3_ENDPOINT" ]; then ENDPOINT_ARGS="--endpoint-url $AWS_S3_ENDPOINT"; fi
aws $ENDPOINT_ARGS s3 ls ${CloudRunner.buildParameters.awsStackName}/cloud-runner-cache/ || true
@@ -132,9 +144,15 @@ export class ContainerHookService {
hook: after
commands: |
if command -v aws > /dev/null 2>&1; then
aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID --profile default || true
aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY --profile default || true
aws configure set region $AWS_DEFAULT_REGION --profile default || true
if [ -n "$AWS_ACCESS_KEY_ID" ]; then
aws configure set aws_access_key_id "$AWS_ACCESS_KEY_ID" --profile default || true
fi
if [ -n "$AWS_SECRET_ACCESS_KEY" ]; then
aws configure set aws_secret_access_key "$AWS_SECRET_ACCESS_KEY" --profile default || true
fi
if [ -n "$AWS_DEFAULT_REGION" ]; then
aws configure set region "$AWS_DEFAULT_REGION" --profile default || true
fi
ENDPOINT_ARGS=""
if [ -n "$AWS_S3_ENDPOINT" ]; then ENDPOINT_ARGS="--endpoint-url $AWS_S3_ENDPOINT"; fi
aws $ENDPOINT_ARGS s3 cp --recursive /data/cache/$CACHE_KEY/lfs s3://${
@@ -164,9 +182,15 @@ export class ContainerHookService {
mkdir -p /data/cache/$CACHE_KEY/Library/
mkdir -p /data/cache/$CACHE_KEY/lfs/
if command -v aws > /dev/null 2>&1; then
aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID --profile default || true
aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY --profile default || true
aws configure set region $AWS_DEFAULT_REGION --profile default || true
if [ -n "$AWS_ACCESS_KEY_ID" ]; then
aws configure set aws_access_key_id "$AWS_ACCESS_KEY_ID" --profile default || true
fi
if [ -n "$AWS_SECRET_ACCESS_KEY" ]; then
aws configure set aws_secret_access_key "$AWS_SECRET_ACCESS_KEY" --profile default || true
fi
if [ -n "$AWS_DEFAULT_REGION" ]; then
aws configure set region "$AWS_DEFAULT_REGION" --profile default || true
fi
ENDPOINT_ARGS=""
if [ -n "$AWS_S3_ENDPOINT" ]; then ENDPOINT_ARGS="--endpoint-url $AWS_S3_ENDPOINT"; fi
aws $ENDPOINT_ARGS s3 ls ${CloudRunner.buildParameters.awsStackName}/cloud-runner-cache/ || true
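Each hook above also builds ENDPOINT_ARGS so the same aws s3 commands work against the default AWS endpoint or an S3-compatible endpoint supplied via AWS_S3_ENDPOINT. A minimal TypeScript sketch of the equivalent logic, assuming a hypothetical s3EndpointArgs helper; the real templates keep it as inline shell.

// Hypothetical helper mirroring the ENDPOINT_ARGS pattern above: add
// --endpoint-url only when an S3-compatible endpoint is configured.
function s3EndpointArgs(endpoint: string | undefined): string {
  return endpoint ? `--endpoint-url ${endpoint}` : '';
}

// Example: the stack name here is a placeholder; the real template interpolates
// CloudRunner.buildParameters.awsStackName.
const listCacheCommand = `aws ${s3EndpointArgs(process.env.AWS_S3_ENDPOINT)} s3 ls my-stack/cloud-runner-cache/ || true`;
console.log(listCacheCommand);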

View File

@@ -178,7 +178,10 @@ echo "CACHE_KEY=$CACHE_KEY"`;
tar -cf "/data/cache/$CACHE_KEY/build/build-$BUILD_GUID.tar" --files-from /dev/null || touch "/data/cache/$CACHE_KEY/build/build-$BUILD_GUID.tar"
fi
# Run post-build tasks and pipe output through log stream to capture "Activation successful"
# Use set +e to allow the command to fail without exiting the script, then restore set -e behavior
set +e
node ${builderPath} -m remote-cli-post-build | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt || echo "Post-build command completed with warnings"
set -e
# Write end marker and pipe through log stream
echo "end of cloud runner job" | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt
echo "---${CloudRunner.buildParameters.logId}" | node ${builderPath} -m remote-cli-log-stream --logFile /home/job-log.txt