ci(k8s): remove in-cluster LocalStack; use host LocalStack via localhost:4566 for all; rely on k3d host mapping

pull/732/head
Frostebite 2025-09-08 22:06:22 +01:00
parent 7cf0e31de5
commit 688f60145e
2 changed files with 156 additions and 12 deletions

dist/index.js (generated, vendored): 166 lines changed

@@ -3207,6 +3207,7 @@ const base_stack_formation_1 = __nccwpck_require__(29643);
const aws_task_runner_1 = __importDefault(__nccwpck_require__(15518));
const cloud_runner_1 = __importDefault(__nccwpck_require__(79144));
const aws_client_factory_1 = __nccwpck_require__(30161);
const shared_workspace_locking_1 = __importDefault(__nccwpck_require__(71372));
class TaskService {
static async watch() {
// eslint-disable-next-line no-unused-vars
@@ -3339,6 +3340,10 @@ class TaskService {
}
static async getLocks() {
process.env.AWS_REGION = input_1.default.region;
if (cloud_runner_1.default.buildParameters.storageProvider === 'rclone') {
const objects = await shared_workspace_locking_1.default.listObjects('');
return objects.map((x) => ({ Key: x }));
}
const s3 = aws_client_factory_1.AwsClientFactory.getS3();
const listRequest = {
Bucket: cloud_runner_1.default.buildParameters.awsStackName,
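Reviewer note: the new branch above dispatches on `storageProvider` and adapts rclone listing output to the `{ Key }` shape the S3 path returns. A condensed sketch of that dispatch, assuming the surrounding class context (illustrative, not the full method):

```js
// Hypothetical condensation of TaskService.getLocks() as changed above.
async function getLocks(buildParameters) {
  if (buildParameters.storageProvider === 'rclone') {
    // listObjects('') yields plain object names; wrap them to match the S3 ListObjectsV2 shape.
    const objects = await SharedWorkspaceLocking.listObjects('');
    return objects.map((x) => ({ Key: x }));
  }
  // otherwise the existing S3 ListObjectsV2 request against the stack bucket runs instead.
}
```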
@@ -4912,6 +4917,7 @@ class RemoteClient {
// Best effort: try plain pull first (works for public repos or pre-configured auth)
try {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git lfs pull`, true);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git lfs checkout || true`, true);
remote_client_logger_1.RemoteClientLogger.log(`Pulled LFS files without explicit token configuration`);
return;
}
@@ -4929,6 +4935,7 @@ class RemoteClient {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git config --global --unset-all url."git@github.com".insteadOf || true`);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git config --global url."https://${gitPrivateToken}@github.com/".insteadOf "https://github.com/"`);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git lfs pull`, true);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git lfs checkout || true`, true);
remote_client_logger_1.RemoteClientLogger.log(`Successfully pulled LFS files with GIT_PRIVATE_TOKEN`);
return;
}
@@ -4946,6 +4953,7 @@ class RemoteClient {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git config --global --unset-all url."git@github.com".insteadOf || true`);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git config --global url."https://${githubToken}@github.com/".insteadOf "https://github.com/"`);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git lfs pull`, true);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git lfs checkout || true`, true);
remote_client_logger_1.RemoteClientLogger.log(`Successfully pulled LFS files with GITHUB_TOKEN`);
return;
}
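Reviewer note: each of the three hunks above now appends a best-effort `git lfs checkout` after `git lfs pull`, so pointer files left by a partial pull are materialized; the fallback order visible in this diff is plain pull, then GIT_PRIVATE_TOKEN, then GITHUB_TOKEN. The repeated pattern, sketched against the same `CloudRunnerSystem.Run` signature:

```js
// Sketch of the pull-then-checkout pattern added to each credential attempt.
async function pullAndCheckoutLfs(run) {
  await run(`git lfs pull`, true);
  await run(`git lfs checkout || true`, true); // best effort: materialize any remaining pointer files
}
// usage: await pullAndCheckoutLfs(CloudRunnerSystem.Run);
```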
@@ -4970,6 +4978,7 @@ class RemoteClient {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git fetch origin +refs/pull/*:refs/remotes/origin/pull/* || true`);
}
await cloud_runner_system_1.CloudRunnerSystem.Run(`git lfs pull`);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git lfs checkout || true`);
const sha = cloud_runner_1.default.buildParameters.gitSha;
const branch = cloud_runner_1.default.buildParameters.branch;
try {
@@ -5369,6 +5378,9 @@ const cloud_runner_1 = __importDefault(__nccwpck_require__(79144));
const input_1 = __importDefault(__nccwpck_require__(91933));
const client_s3_1 = __nccwpck_require__(19250);
const aws_client_factory_1 = __nccwpck_require__(30161);
const node_util_1 = __nccwpck_require__(47261);
const node_child_process_1 = __nccwpck_require__(17718);
const exec = (0, node_util_1.promisify)(node_child_process_1.exec);
class SharedWorkspaceLocking {
static get s3() {
if (!SharedWorkspaceLocking._s3) {
@@ -5377,11 +5389,22 @@ class SharedWorkspaceLocking {
}
return SharedWorkspaceLocking._s3;
}
static get useRclone() {
return cloud_runner_1.default.buildParameters.storageProvider === 'rclone';
}
static async rclone(command) {
const { stdout } = await exec(`rclone ${command}`);
return stdout.toString();
}
static get bucket() {
-return cloud_runner_1.default.buildParameters.awsStackName;
+return SharedWorkspaceLocking.useRclone
+? cloud_runner_1.default.buildParameters.rcloneRemote
+: cloud_runner_1.default.buildParameters.awsStackName;
}
static get workspaceBucketRoot() {
-return `s3://${SharedWorkspaceLocking.bucket}/`;
+return SharedWorkspaceLocking.useRclone
+? `${SharedWorkspaceLocking.bucket}/`
+: `s3://${SharedWorkspaceLocking.bucket}/`;
}
static get workspaceRoot() {
return `${SharedWorkspaceLocking.workspaceBucketRoot}locks/`;
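Reviewer note: `useRclone` piggybacks on `buildParameters.storageProvider`, and `rclone()` is a thin shell-out wrapper over `promisify(exec)`. A minimal standalone sketch of that wrapper, assuming rclone is on PATH and a remote is already configured (`myremote` is a hypothetical name):

```js
const { promisify } = require('node:util');
const { exec } = require('node:child_process');
const execAsync = promisify(exec);

// Run the rclone CLI and return captured stdout; rejects if rclone exits non-zero.
async function rclone(command) {
  const { stdout } = await execAsync(`rclone ${command}`);
  return stdout.toString();
}

// Usage: list entries under a configured remote.
// await rclone('lsf myremote:bucket/');
```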
@@ -5391,6 +5414,15 @@ class SharedWorkspaceLocking {
}
static async ensureBucketExists() {
const bucket = SharedWorkspaceLocking.bucket;
if (SharedWorkspaceLocking.useRclone) {
try {
await SharedWorkspaceLocking.rclone(`lsf ${bucket}`);
}
catch {
await SharedWorkspaceLocking.rclone(`mkdir ${bucket}`);
}
return;
}
try {
await SharedWorkspaceLocking.s3.send(new client_s3_1.HeadBucketCommand({ Bucket: bucket }));
}
@@ -5408,6 +5440,17 @@ class SharedWorkspaceLocking {
if (prefix !== '' && !prefix.endsWith('/')) {
prefix += '/';
}
if (SharedWorkspaceLocking.useRclone) {
const path = `${bucket}/${prefix}`;
try {
const output = await SharedWorkspaceLocking.rclone(`lsjson ${path}`);
const json = JSON.parse(output);
return json.map((e) => (e.IsDir ? `${e.Name}/` : e.Name));
}
catch {
return [];
}
}
const result = await SharedWorkspaceLocking.s3.send(new client_s3_1.ListObjectsV2Command({ Bucket: bucket, Prefix: prefix, Delimiter: '/' }));
const entries = [];
for (const p of result.CommonPrefixes || []) {
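Reviewer note: the rclone listing path above leans on `rclone lsjson`, which prints a JSON array of entries carrying `Name` and `IsDir` fields; directories get a trailing slash so callers see the same shape the S3 `CommonPrefixes` loop produces. Condensed sketch, reusing the `rclone()` wrapper sketched earlier:

```js
// Map `rclone lsjson` entries ({"Name":"locks","IsDir":true,...}) to S3-style names.
async function listViaRclone(bucket, prefix) {
  const output = await rclone(`lsjson ${bucket}/${prefix}`);
  return JSON.parse(output).map((e) => (e.IsDir ? `${e.Name}/` : e.Name));
}
```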
@@ -5560,7 +5603,12 @@ class SharedWorkspaceLocking {
const timestamp = Date.now();
const key = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${timestamp}_${workspace}_workspace`;
await SharedWorkspaceLocking.ensureBucketExists();
-await SharedWorkspaceLocking.s3.send(new client_s3_1.PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: '' }));
+if (SharedWorkspaceLocking.useRclone) {
+await SharedWorkspaceLocking.rclone(`touch ${SharedWorkspaceLocking.bucket}/${key}`);
+}
+else {
+await SharedWorkspaceLocking.s3.send(new client_s3_1.PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: '' }));
+}
const workspaces = await SharedWorkspaceLocking.GetAllWorkspaces(buildParametersContext);
cloud_runner_logger_1.default.log(`All workspaces ${workspaces}`);
if (!(await SharedWorkspaceLocking.IsWorkspaceBelowMax(workspace, buildParametersContext))) {
@@ -5575,13 +5623,23 @@ class SharedWorkspaceLocking {
const ending = existingWorkspace ? workspace : `${workspace}_workspace`;
const key = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${Date.now()}_${runId}_${ending}_lock`;
await SharedWorkspaceLocking.ensureBucketExists();
-await SharedWorkspaceLocking.s3.send(new client_s3_1.PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: '' }));
+if (SharedWorkspaceLocking.useRclone) {
+await SharedWorkspaceLocking.rclone(`touch ${SharedWorkspaceLocking.bucket}/${key}`);
+}
+else {
+await SharedWorkspaceLocking.s3.send(new client_s3_1.PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: '' }));
+}
const hasLock = await SharedWorkspaceLocking.HasWorkspaceLock(workspace, runId, buildParametersContext);
if (hasLock) {
cloud_runner_1.default.lockedWorkspace = workspace;
}
else {
-await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key }));
+if (SharedWorkspaceLocking.useRclone) {
+await SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${key}`);
+}
+else {
+await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key }));
+}
}
return hasLock;
}
@@ -5593,10 +5651,15 @@ class SharedWorkspaceLocking {
cloud_runner_logger_1.default.log(`Deleting lock ${workspace}/${file}`);
cloud_runner_logger_1.default.log(`rm ${SharedWorkspaceLocking.workspaceRoot}${buildParametersContext.cacheKey}/${file}`);
if (file) {
-await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({
-Bucket: SharedWorkspaceLocking.bucket,
-Key: `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`,
-}));
+if (SharedWorkspaceLocking.useRclone) {
+await SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`);
+}
+else {
+await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({
+Bucket: SharedWorkspaceLocking.bucket,
+Key: `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`,
+}));
+}
}
return !(await SharedWorkspaceLocking.HasWorkspaceLock(workspace, runId, buildParametersContext));
}
@@ -5604,11 +5667,16 @@ class SharedWorkspaceLocking {
const prefix = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/`;
const files = await SharedWorkspaceLocking.listObjects(prefix);
for (const file of files.filter((x) => x.includes(`_${workspace}_`))) {
-await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: `${prefix}${file}` }));
+if (SharedWorkspaceLocking.useRclone) {
+await SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${prefix}${file}`);
+}
+else {
+await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: `${prefix}${file}` }));
+}
}
}
static async ReadLines(command) {
-const path = command.replace('aws s3 ls', '').trim();
+const path = command.replace('aws s3 ls', '').replace('rclone lsf', '').trim();
const withoutScheme = path.replace('s3://', '');
const [bucket, ...rest] = withoutScheme.split('/');
const prefix = rest.join('/');
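Reviewer note: the one-line change to `ReadLines` above lets a single parser handle both CLI spellings: strip either command prefix, drop the optional `s3://` scheme, then split the remainder into bucket and prefix. Worked through with hypothetical values:

```js
const parse = (command) => {
  const path = command.replace('aws s3 ls', '').replace('rclone lsf', '').trim();
  const [bucket, ...rest] = path.replace('s3://', '').split('/');
  return { bucket, prefix: rest.join('/') };
};
console.log(parse('aws s3 ls s3://stack-name/locks/')); // { bucket: 'stack-name', prefix: 'locks/' }
console.log(parse('rclone lsf my-remote/locks/'));      // { bucket: 'my-remote', prefix: 'locks/' }
```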
@@ -6106,6 +6174,62 @@ class ContainerHookService {
else
echo "AWS CLI not available, skipping aws-s3-pull-cache"
fi
- name: rclone-upload-build
image: rclone/rclone
hook: after
commands: |
if command -v rclone > /dev/null 2>&1; then
rclone copy /data/cache/$CACHE_KEY/build/build-${cloud_runner_1.default.buildParameters.buildGuid}.tar${cloud_runner_1.default.buildParameters.useCompressionStrategy ? '.lz4' : ''} ${cloud_runner_1.default.buildParameters.rcloneRemote}/cloud-runner-cache/$CACHE_KEY/build/ || true
rm /data/cache/$CACHE_KEY/build/build-${cloud_runner_1.default.buildParameters.buildGuid}.tar${cloud_runner_1.default.buildParameters.useCompressionStrategy ? '.lz4' : ''} || true
else
echo "rclone not available, skipping rclone-upload-build"
fi
secrets:
- name: RCLONE_REMOTE
value: ${cloud_runner_1.default.buildParameters.rcloneRemote || ``}
- name: rclone-pull-build
image: rclone/rclone
commands: |
mkdir -p /data/cache/$CACHE_KEY/build/
if command -v rclone > /dev/null 2>&1; then
rclone copy ${cloud_runner_1.default.buildParameters.rcloneRemote}/cloud-runner-cache/$CACHE_KEY/build/build-$BUILD_GUID_TARGET.tar${cloud_runner_1.default.buildParameters.useCompressionStrategy ? '.lz4' : ''} /data/cache/$CACHE_KEY/build/build-$BUILD_GUID_TARGET.tar${cloud_runner_1.default.buildParameters.useCompressionStrategy ? '.lz4' : ''} || true
else
echo "rclone not available, skipping rclone-pull-build"
fi
secrets:
- name: BUILD_GUID_TARGET
- name: RCLONE_REMOTE
value: ${cloud_runner_1.default.buildParameters.rcloneRemote || ``}
- name: rclone-upload-cache
image: rclone/rclone
hook: after
commands: |
if command -v rclone > /dev/null 2>&1; then
rclone copy /data/cache/$CACHE_KEY/lfs ${cloud_runner_1.default.buildParameters.rcloneRemote}/cloud-runner-cache/$CACHE_KEY/lfs || true
rm -r /data/cache/$CACHE_KEY/lfs || true
rclone copy /data/cache/$CACHE_KEY/Library ${cloud_runner_1.default.buildParameters.rcloneRemote}/cloud-runner-cache/$CACHE_KEY/Library || true
rm -r /data/cache/$CACHE_KEY/Library || true
else
echo "rclone not available, skipping rclone-upload-cache"
fi
secrets:
- name: RCLONE_REMOTE
value: ${cloud_runner_1.default.buildParameters.rcloneRemote || ``}
- name: rclone-pull-cache
image: rclone/rclone
hook: before
commands: |
mkdir -p /data/cache/$CACHE_KEY/Library/
mkdir -p /data/cache/$CACHE_KEY/lfs/
if command -v rclone > /dev/null 2>&1; then
rclone copy ${cloud_runner_1.default.buildParameters.rcloneRemote}/cloud-runner-cache/$CACHE_KEY/Library /data/cache/$CACHE_KEY/Library/ || true
rclone copy ${cloud_runner_1.default.buildParameters.rcloneRemote}/cloud-runner-cache/$CACHE_KEY/lfs /data/cache/$CACHE_KEY/lfs/ || true
else
echo "rclone not available, skipping rclone-pull-cache"
fi
secrets:
- name: RCLONE_REMOTE
value: ${cloud_runner_1.default.buildParameters.rcloneRemote || ``}
- name: debug-cache
image: ubuntu
hook: after
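Reviewer note: in the hook definitions above, `${...}` expressions are resolved by JavaScript when the YAML is generated, while `$CACHE_KEY` and `$BUILD_GUID_TARGET` are left for the shell inside the container. A sketch with hypothetical parameter values showing how one upload command resolves:

```js
const p = { buildGuid: 'abc123', useCompressionStrategy: true, rcloneRemote: 'myremote:bucket' }; // hypothetical values
const artifact = `build-${p.buildGuid}.tar${p.useCompressionStrategy ? '.lz4' : ''}`;
console.log(`rclone copy /data/cache/$CACHE_KEY/build/${artifact} ${p.rcloneRemote}/cloud-runner-cache/$CACHE_KEY/build/`);
// -> rclone copy /data/cache/$CACHE_KEY/build/build-abc123.tar.lz4 myremote:bucket/cloud-runner-cache/$CACHE_KEY/build/
```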
@@ -6459,6 +6583,18 @@ echo "CACHE_KEY=$CACHE_KEY"`;
cp -r "${cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(node_path_1.default.join(ubuntuPlatformsFolder, 'steps'))}" "/steps"
chmod -R +x "/entrypoint.sh"
chmod -R +x "/steps"
# Ensure Git LFS files are available inside the container for local-docker runs
if [ -d "$GITHUB_WORKSPACE/.git" ]; then
echo "Ensuring Git LFS content is pulled"
(cd "$GITHUB_WORKSPACE" \
&& git lfs install || true \
&& git config --global filter.lfs.smudge "git-lfs smudge -- %f" \
&& git config --global filter.lfs.process "git-lfs filter-process" \
&& git lfs pull || true \
&& git lfs checkout || true)
else
echo "Skipping Git LFS pull: no .git directory in workspace"
fi
# Normalize potential CRLF line endings and create safe stubs for missing tooling
if command -v sed > /dev/null 2>&1; then
sed -i 's/\r$//' "/entrypoint.sh" || true
@@ -359926,6 +360062,14 @@ module.exports = require("net");
/***/ }),
/***/ 17718:
/***/ ((module) => {
"use strict";
module.exports = require("node:child_process");
/***/ }),
/***/ 40027:
/***/ ((module) => {

dist/index.js.map (generated, vendored): 2 lines changed

File diff suppressed because one or more lines are too long