Cloud runner develop rclone (#732)

* ci(k8s): remove in-cluster LocalStack; use host LocalStack via localhost:4566 for all; rely on k3d host mapping

Frostebite 2025-09-09 21:25:12 +01:00 committed by GitHub
parent 98963da430
commit bd1be2e474
12 changed files with 482 additions and 33 deletions

View File

@ -36,6 +36,25 @@ When these variables are set, Unity Builder will direct its CloudFormation, ECS,
to the emulator instead of the real AWS services. See `.github/workflows/cloud-runner-integrity-localstack.yml` for an
example configuration.
## Rclone storage provider (experimental)
Unity Builder can use rclone as an alternative storage provider for cache and build artifacts in Cloud Runner.
- Inputs:
- `storageProvider`: set to `rclone` to enable rclone-backed storage (default is `s3`).
- `rcloneRemote`: rclone remote to use, e.g. `s3:my-bucket`, `gcs:my-bucket`, `local:./path`.
When `storageProvider=rclone`:
- Retained workspace locking uses rclone operations instead of S3 (touch/delete/ls).
- Built-in container hooks are available:
- `rclone-pull-cache`, `rclone-upload-cache`
- `rclone-pull-build`, `rclone-upload-build`
The `rclone` CLI must be available wherever it is invoked: the built-in hooks run in the `rclone/rclone` image, and retained workspace locking shells out to `rclone` on the runner itself.
Example (local testing): set `RCLONE_REMOTE=local:./temp/rclone-remote` and include the rclone hooks via `containerHookFiles`.
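A minimal workflow sketch tying these inputs together; the action reference and version are illustrative, while the input names and hook names are the ones introduced in this change:

```yaml
- uses: game-ci/unity-builder@v4   # action reference/version illustrative
  with:
    targetPlatform: StandaloneLinux64
    storageProvider: rclone        # defaults to s3 when omitted
    rcloneRemote: s3:my-bucket     # any rclone remote: gcs:my-bucket, local:./path, ...
    containerHookFiles: rclone-pull-cache,rclone-upload-cache,rclone-pull-build,rclone-upload-build
```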
## Community
Feel free to join us on

dist/index.js generated vendored (188 lines changed)
View File

@ -349,6 +349,8 @@ class BuildParameters {
awsKinesisEndpoint: cloud_runner_options_1.default.awsKinesisEndpoint,
awsCloudWatchLogsEndpoint: cloud_runner_options_1.default.awsCloudWatchLogsEndpoint,
awsS3Endpoint: cloud_runner_options_1.default.awsS3Endpoint,
storageProvider: cloud_runner_options_1.default.storageProvider,
rcloneRemote: cloud_runner_options_1.default.rcloneRemote,
gitSha: input_1.default.gitSha,
logId: (0, nanoid_1.customAlphabet)(cloud_runner_constants_1.default.alphabet, 9)(),
buildGuid: cloud_runner_guid_1.default.generateGuid(input_1.default.runNumber, input_1.default.targetPlatform),
@ -1319,6 +1321,15 @@ class CloudRunnerOptions {
return CloudRunnerOptions.getInput('awsS3Endpoint') || CloudRunnerOptions.awsEndpoint;
}
// ### ### ###
// Storage
// ### ### ###
static get storageProvider() {
return CloudRunnerOptions.getInput('storageProvider') || 's3';
}
static get rcloneRemote() {
return CloudRunnerOptions.getInput('rcloneRemote') || '';
}
// ### ### ###
// K8s
// ### ### ###
static get kubeConfig() {
@ -3196,6 +3207,7 @@ const base_stack_formation_1 = __nccwpck_require__(29643);
const aws_task_runner_1 = __importDefault(__nccwpck_require__(15518));
const cloud_runner_1 = __importDefault(__nccwpck_require__(79144));
const aws_client_factory_1 = __nccwpck_require__(30161);
const shared_workspace_locking_1 = __importDefault(__nccwpck_require__(71372));
class TaskService {
static async watch() {
// eslint-disable-next-line no-unused-vars
@ -3328,6 +3340,10 @@ class TaskService {
}
static async getLocks() {
process.env.AWS_REGION = input_1.default.region;
if (cloud_runner_1.default.buildParameters.storageProvider === 'rclone') {
const objects = await shared_workspace_locking_1.default.listObjects('');
return objects.map((x) => ({ Key: x }));
}
const s3 = aws_client_factory_1.AwsClientFactory.getS3();
const listRequest = {
Bucket: cloud_runner_1.default.buildParameters.awsStackName,
@ -4392,6 +4408,17 @@ class LocalCloudRunner {
cloud_runner_logger_1.default.log(image);
cloud_runner_logger_1.default.log(buildGuid);
cloud_runner_logger_1.default.log(commands);
// On Windows, many built-in hooks use POSIX shell syntax. Execute via bash if available.
if (process.platform === 'win32') {
const inline = commands
.replace(/"/g, '\\"')
.replace(/\r/g, '')
.split('\n')
.filter((x) => x.trim().length > 0)
.join(' ; ');
const bashWrapped = `bash -lc "${inline}"`;
return await cloud_runner_system_1.CloudRunnerSystem.Run(bashWrapped);
}
return await cloud_runner_system_1.CloudRunnerSystem.Run(commands);
}
}
@ -4901,6 +4928,7 @@ class RemoteClient {
// Best effort: try plain pull first (works for public repos or pre-configured auth)
try {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git lfs pull`, true);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git lfs checkout || true`, true);
remote_client_logger_1.RemoteClientLogger.log(`Pulled LFS files without explicit token configuration`);
return;
}
@ -4918,6 +4946,7 @@ class RemoteClient {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git config --global --unset-all url."git@github.com".insteadOf || true`);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git config --global url."https://${gitPrivateToken}@github.com/".insteadOf "https://github.com/"`);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git lfs pull`, true);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git lfs checkout || true`, true);
remote_client_logger_1.RemoteClientLogger.log(`Successfully pulled LFS files with GIT_PRIVATE_TOKEN`);
return;
}
@ -4935,6 +4964,7 @@ class RemoteClient {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git config --global --unset-all url."git@github.com".insteadOf || true`);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git config --global url."https://${githubToken}@github.com/".insteadOf "https://github.com/"`);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git lfs pull`, true);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git lfs checkout || true`, true);
remote_client_logger_1.RemoteClientLogger.log(`Successfully pulled LFS files with GITHUB_TOKEN`);
return;
}
@ -4959,6 +4989,7 @@ class RemoteClient {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git fetch origin +refs/pull/*:refs/remotes/origin/pull/* || true`);
}
await cloud_runner_system_1.CloudRunnerSystem.Run(`git lfs pull`);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git lfs checkout || true`);
const sha = cloud_runner_1.default.buildParameters.gitSha;
const branch = cloud_runner_1.default.buildParameters.branch;
try {
@ -5358,6 +5389,9 @@ const cloud_runner_1 = __importDefault(__nccwpck_require__(79144));
const input_1 = __importDefault(__nccwpck_require__(91933));
const client_s3_1 = __nccwpck_require__(19250);
const aws_client_factory_1 = __nccwpck_require__(30161);
const node_util_1 = __nccwpck_require__(47261);
const node_child_process_1 = __nccwpck_require__(17718);
const exec = (0, node_util_1.promisify)(node_child_process_1.exec);
class SharedWorkspaceLocking {
static get s3() {
if (!SharedWorkspaceLocking._s3) {
@ -5366,11 +5400,22 @@ class SharedWorkspaceLocking {
}
return SharedWorkspaceLocking._s3;
}
static get useRclone() {
return cloud_runner_1.default.buildParameters.storageProvider === 'rclone';
}
static async rclone(command) {
const { stdout } = await exec(`rclone ${command}`);
return stdout.toString();
}
static get bucket() {
return cloud_runner_1.default.buildParameters.awsStackName;
return SharedWorkspaceLocking.useRclone
? cloud_runner_1.default.buildParameters.rcloneRemote
: cloud_runner_1.default.buildParameters.awsStackName;
}
static get workspaceBucketRoot() {
return `s3://${SharedWorkspaceLocking.bucket}/`;
return SharedWorkspaceLocking.useRclone
? `${SharedWorkspaceLocking.bucket}/`
: `s3://${SharedWorkspaceLocking.bucket}/`;
}
static get workspaceRoot() {
return `${SharedWorkspaceLocking.workspaceBucketRoot}locks/`;
@ -5380,6 +5425,15 @@ class SharedWorkspaceLocking {
}
static async ensureBucketExists() {
const bucket = SharedWorkspaceLocking.bucket;
if (SharedWorkspaceLocking.useRclone) {
try {
await SharedWorkspaceLocking.rclone(`lsf ${bucket}`);
}
catch {
await SharedWorkspaceLocking.rclone(`mkdir ${bucket}`);
}
return;
}
try {
await SharedWorkspaceLocking.s3.send(new client_s3_1.HeadBucketCommand({ Bucket: bucket }));
}
@ -5397,6 +5451,17 @@ class SharedWorkspaceLocking {
if (prefix !== '' && !prefix.endsWith('/')) {
prefix += '/';
}
if (SharedWorkspaceLocking.useRclone) {
const path = `${bucket}/${prefix}`;
try {
const output = await SharedWorkspaceLocking.rclone(`lsjson ${path}`);
const json = JSON.parse(output);
return json.map((e) => (e.IsDir ? `${e.Name}/` : e.Name));
}
catch {
return [];
}
}
const result = await SharedWorkspaceLocking.s3.send(new client_s3_1.ListObjectsV2Command({ Bucket: bucket, Prefix: prefix, Delimiter: '/' }));
const entries = [];
for (const p of result.CommonPrefixes || []) {
@ -5549,7 +5614,12 @@ class SharedWorkspaceLocking {
const timestamp = Date.now();
const key = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${timestamp}_${workspace}_workspace`;
await SharedWorkspaceLocking.ensureBucketExists();
await SharedWorkspaceLocking.s3.send(new client_s3_1.PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: '' }));
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(`touch ${SharedWorkspaceLocking.bucket}/${key}`);
}
else {
await SharedWorkspaceLocking.s3.send(new client_s3_1.PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: '' }));
}
const workspaces = await SharedWorkspaceLocking.GetAllWorkspaces(buildParametersContext);
cloud_runner_logger_1.default.log(`All workspaces ${workspaces}`);
if (!(await SharedWorkspaceLocking.IsWorkspaceBelowMax(workspace, buildParametersContext))) {
@ -5564,13 +5634,23 @@ class SharedWorkspaceLocking {
const ending = existingWorkspace ? workspace : `${workspace}_workspace`;
const key = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${Date.now()}_${runId}_${ending}_lock`;
await SharedWorkspaceLocking.ensureBucketExists();
await SharedWorkspaceLocking.s3.send(new client_s3_1.PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: '' }));
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(`touch ${SharedWorkspaceLocking.bucket}/${key}`);
}
else {
await SharedWorkspaceLocking.s3.send(new client_s3_1.PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: '' }));
}
const hasLock = await SharedWorkspaceLocking.HasWorkspaceLock(workspace, runId, buildParametersContext);
if (hasLock) {
cloud_runner_1.default.lockedWorkspace = workspace;
}
else {
await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key }));
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${key}`);
}
else {
await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key }));
}
}
return hasLock;
}
@ -5582,10 +5662,15 @@ class SharedWorkspaceLocking {
cloud_runner_logger_1.default.log(`Deleting lock ${workspace}/${file}`);
cloud_runner_logger_1.default.log(`rm ${SharedWorkspaceLocking.workspaceRoot}${buildParametersContext.cacheKey}/${file}`);
if (file) {
await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({
Bucket: SharedWorkspaceLocking.bucket,
Key: `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`,
}));
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`);
}
else {
await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({
Bucket: SharedWorkspaceLocking.bucket,
Key: `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`,
}));
}
}
return !(await SharedWorkspaceLocking.HasWorkspaceLock(workspace, runId, buildParametersContext));
}
@ -5593,11 +5678,16 @@ class SharedWorkspaceLocking {
const prefix = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/`;
const files = await SharedWorkspaceLocking.listObjects(prefix);
for (const file of files.filter((x) => x.includes(`_${workspace}_`))) {
await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: `${prefix}${file}` }));
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${prefix}${file}`);
}
else {
await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: `${prefix}${file}` }));
}
}
}
static async ReadLines(command) {
const path = command.replace('aws s3 ls', '').trim();
const path = command.replace('aws s3 ls', '').replace('rclone lsf', '').trim();
const withoutScheme = path.replace('s3://', '');
const [bucket, ...rest] = withoutScheme.split('/');
const prefix = rest.join('/');
@ -6095,6 +6185,62 @@ class ContainerHookService {
else
echo "AWS CLI not available, skipping aws-s3-pull-cache"
fi
- name: rclone-upload-build
image: rclone/rclone
hook: after
commands: |
if command -v rclone > /dev/null 2>&1; then
rclone copy /data/cache/$CACHE_KEY/build/build-${cloud_runner_1.default.buildParameters.buildGuid}.tar${cloud_runner_1.default.buildParameters.useCompressionStrategy ? '.lz4' : ''} ${cloud_runner_1.default.buildParameters.rcloneRemote}/cloud-runner-cache/$CACHE_KEY/build/ || true
rm /data/cache/$CACHE_KEY/build/build-${cloud_runner_1.default.buildParameters.buildGuid}.tar${cloud_runner_1.default.buildParameters.useCompressionStrategy ? '.lz4' : ''} || true
else
echo "rclone not available, skipping rclone-upload-build"
fi
secrets:
- name: RCLONE_REMOTE
value: ${cloud_runner_1.default.buildParameters.rcloneRemote || ``}
- name: rclone-pull-build
image: rclone/rclone
commands: |
mkdir -p /data/cache/$CACHE_KEY/build/
if command -v rclone > /dev/null 2>&1; then
rclone copy ${cloud_runner_1.default.buildParameters.rcloneRemote}/cloud-runner-cache/$CACHE_KEY/build/build-$BUILD_GUID_TARGET.tar${cloud_runner_1.default.buildParameters.useCompressionStrategy ? '.lz4' : ''} /data/cache/$CACHE_KEY/build/build-$BUILD_GUID_TARGET.tar${cloud_runner_1.default.buildParameters.useCompressionStrategy ? '.lz4' : ''} || true
else
echo "rclone not available, skipping rclone-pull-build"
fi
secrets:
- name: BUILD_GUID_TARGET
- name: RCLONE_REMOTE
value: ${cloud_runner_1.default.buildParameters.rcloneRemote || ``}
- name: rclone-upload-cache
image: rclone/rclone
hook: after
commands: |
if command -v rclone > /dev/null 2>&1; then
rclone copy /data/cache/$CACHE_KEY/lfs ${cloud_runner_1.default.buildParameters.rcloneRemote}/cloud-runner-cache/$CACHE_KEY/lfs || true
rm -r /data/cache/$CACHE_KEY/lfs || true
rclone copy /data/cache/$CACHE_KEY/Library ${cloud_runner_1.default.buildParameters.rcloneRemote}/cloud-runner-cache/$CACHE_KEY/Library || true
rm -r /data/cache/$CACHE_KEY/Library || true
else
echo "rclone not available, skipping rclone-upload-cache"
fi
secrets:
- name: RCLONE_REMOTE
value: ${cloud_runner_1.default.buildParameters.rcloneRemote || ``}
- name: rclone-pull-cache
image: rclone/rclone
hook: before
commands: |
mkdir -p /data/cache/$CACHE_KEY/Library/
mkdir -p /data/cache/$CACHE_KEY/lfs/
if command -v rclone > /dev/null 2>&1; then
rclone copy ${cloud_runner_1.default.buildParameters.rcloneRemote}/cloud-runner-cache/$CACHE_KEY/Library /data/cache/$CACHE_KEY/Library/ || true
rclone copy ${cloud_runner_1.default.buildParameters.rcloneRemote}/cloud-runner-cache/$CACHE_KEY/lfs /data/cache/$CACHE_KEY/lfs/ || true
else
echo "rclone not available, skipping rclone-pull-cache"
fi
secrets:
- name: RCLONE_REMOTE
value: ${cloud_runner_1.default.buildParameters.rcloneRemote || ``}
- name: debug-cache
image: ubuntu
hook: after
@ -6448,6 +6594,18 @@ echo "CACHE_KEY=$CACHE_KEY"`;
cp -r "${cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(node_path_1.default.join(ubuntuPlatformsFolder, 'steps'))}" "/steps"
chmod -R +x "/entrypoint.sh"
chmod -R +x "/steps"
# Ensure Git LFS files are available inside the container for local-docker runs
if [ -d "$GITHUB_WORKSPACE/.git" ]; then
echo "Ensuring Git LFS content is pulled"
(cd "$GITHUB_WORKSPACE" \
&& git lfs install || true \
&& git config --global filter.lfs.smudge "git-lfs smudge -- %f" \
&& git config --global filter.lfs.process "git-lfs filter-process" \
&& git lfs pull || true \
&& git lfs checkout || true)
else
echo "Skipping Git LFS pull: no .git directory in workspace"
fi
# Normalize potential CRLF line endings and create safe stubs for missing tooling
if command -v sed > /dev/null 2>&1; then
sed -i 's/\r$//' "/entrypoint.sh" || true
@ -359915,6 +360073,14 @@ module.exports = require("net");
/***/ }),
/***/ 17718:
/***/ ((module) => {
"use strict";
module.exports = require("node:child_process");
/***/ }),
/***/ 40027:
/***/ ((module) => {

dist/index.js.map generated vendored (2 lines changed)

File diff suppressed because one or more lines are too long

View File

@ -62,6 +62,8 @@ class BuildParameters {
public awsKinesisEndpoint?: string;
public awsCloudWatchLogsEndpoint?: string;
public awsS3Endpoint?: string;
public storageProvider!: string;
public rcloneRemote!: string;
public kubeConfig!: string;
public containerMemory!: string;
public containerCpu!: string;
@ -211,6 +213,8 @@ class BuildParameters {
awsKinesisEndpoint: CloudRunnerOptions.awsKinesisEndpoint,
awsCloudWatchLogsEndpoint: CloudRunnerOptions.awsCloudWatchLogsEndpoint,
awsS3Endpoint: CloudRunnerOptions.awsS3Endpoint,
storageProvider: CloudRunnerOptions.storageProvider,
rcloneRemote: CloudRunnerOptions.rcloneRemote,
gitSha: Input.gitSha,
logId: customAlphabet(CloudRunnerConstants.alphabet, 9)(),
buildGuid: CloudRunnerBuildGuid.generateGuid(Input.runNumber, Input.targetPlatform),

View File

@ -219,6 +219,18 @@ class CloudRunnerOptions {
return CloudRunnerOptions.getInput('awsS3Endpoint') || CloudRunnerOptions.awsEndpoint;
}
// ### ### ###
// Storage
// ### ### ###
static get storageProvider(): string {
return CloudRunnerOptions.getInput('storageProvider') || 's3';
}
static get rcloneRemote(): string {
return CloudRunnerOptions.getInput('rcloneRemote') || '';
}
// ### ### ###
// K8s
// ### ### ###

View File

@ -12,6 +12,7 @@ import { BaseStackFormation } from '../cloud-formations/base-stack-formation';
import AwsTaskRunner from '../aws-task-runner';
import CloudRunner from '../../../cloud-runner';
import { AwsClientFactory } from '../aws-client-factory';
import SharedWorkspaceLocking from '../../../services/core/shared-workspace-locking';
export class TaskService {
static async watch() {
@ -182,6 +183,10 @@ export class TaskService {
}
public static async getLocks() {
process.env.AWS_REGION = Input.region;
if (CloudRunner.buildParameters.storageProvider === 'rclone') {
const objects = await (SharedWorkspaceLocking as any).listObjects('');
return objects.map((x: string) => ({ Key: x }));
}
const s3 = AwsClientFactory.getS3();
const listRequest = {
Bucket: CloudRunner.buildParameters.awsStackName,
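The rclone branch above maps plain object names into `{ Key }` records, so `getLocks` keeps the shape callers already expect from the S3 `ListObjects` response. A standalone sketch of that equivalence, with invented names:

```ts
// Sketch: both storage providers yield entries exposing a string Key.
type LockEntry = { Key: string };
const rcloneNames = ['locks/1700000000000_run1_w1_lock']; // invented example listing
const fromRclone: LockEntry[] = rcloneNames.map((x) => ({ Key: x }));
// The S3 branch returns ListObjectsV2 Contents, whose items likewise carry Key: string.
```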

View File

@ -66,6 +66,18 @@ class LocalCloudRunner implements ProviderInterface {
CloudRunnerLogger.log(buildGuid);
CloudRunnerLogger.log(commands);
// On Windows, many built-in hooks use POSIX shell syntax. Execute via bash if available.
if (process.platform === 'win32') {
const inline = commands
.replace(/"/g, '\\"')
.replace(/\r/g, '')
.split('\n')
.filter((x) => x.trim().length > 0)
.join(' ; ');
const bashWrapped = `bash -lc "${inline}"`;
return await CloudRunnerSystem.Run(bashWrapped);
}
return await CloudRunnerSystem.Run(commands);
}
}
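As a standalone illustration of what the win32 branch produces (the sample script is invented for the sketch):

```ts
// A two-line POSIX hook script with CRLF endings, as a hook file might provide it.
const commands = 'echo "hello"\r\nrclone version\r\n';
const inline = commands
  .replace(/"/g, '\\"') // escape double quotes so they survive the bash -lc wrapper
  .replace(/\r/g, '') // strip CR from CRLF line endings
  .split('\n')
  .filter((x) => x.trim().length > 0) // drop blank lines
  .join(' ; '); // chain the remaining lines in one bash invocation
console.log(`bash -lc "${inline}"`);
// Prints: bash -lc "echo \"hello\" ; rclone version"
```

Joining with ` ; ` means a failing line does not abort the rest, which matches how a plain multi-line script behaves without `set -e`.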

View File

@ -304,6 +304,7 @@ export class RemoteClient {
// Best effort: try plain pull first (works for public repos or pre-configured auth)
try {
await CloudRunnerSystem.Run(`git lfs pull`, true);
await CloudRunnerSystem.Run(`git lfs checkout || true`, true);
RemoteClientLogger.log(`Pulled LFS files without explicit token configuration`);
return;
@ -324,6 +325,7 @@ export class RemoteClient {
`git config --global url."https://${gitPrivateToken}@github.com/".insteadOf "https://github.com/"`,
);
await CloudRunnerSystem.Run(`git lfs pull`, true);
await CloudRunnerSystem.Run(`git lfs checkout || true`, true);
RemoteClientLogger.log(`Successfully pulled LFS files with GIT_PRIVATE_TOKEN`);
return;
@ -344,6 +346,7 @@ export class RemoteClient {
`git config --global url."https://${githubToken}@github.com/".insteadOf "https://github.com/"`,
);
await CloudRunnerSystem.Run(`git lfs pull`, true);
await CloudRunnerSystem.Run(`git lfs checkout || true`, true);
RemoteClientLogger.log(`Successfully pulled LFS files with GITHUB_TOKEN`);
return;
@ -374,6 +377,7 @@ export class RemoteClient {
await CloudRunnerSystem.Run(`git fetch origin +refs/pull/*:refs/remotes/origin/pull/* || true`);
}
await CloudRunnerSystem.Run(`git lfs pull`);
await CloudRunnerSystem.Run(`git lfs checkout || true`);
const sha = CloudRunner.buildParameters.gitSha;
const branch = CloudRunner.buildParameters.branch;
try {

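The `git lfs checkout || true` added after each `git lfs pull` is a belt-and-braces step: pull fetches any missing LFS objects and updates the working tree, while checkout re-materializes files still left as pointer stubs (for example after an earlier failed smudge). The bare pattern, shown outside the action's token handling:

```bash
git lfs pull || true       # fetch LFS objects for the current ref; tolerate transient failures
git lfs checkout || true   # replace any remaining pointer files with their real content
```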
View File

@ -11,6 +11,9 @@ import {
S3,
} from '@aws-sdk/client-s3';
import { AwsClientFactory } from '../../providers/aws/aws-client-factory';
import { promisify } from 'node:util';
import { exec as execCb } from 'node:child_process';
const exec = promisify(execCb);
export class SharedWorkspaceLocking {
private static _s3: S3;
private static get s3(): S3 {
@ -20,11 +23,22 @@ export class SharedWorkspaceLocking {
}
return SharedWorkspaceLocking._s3;
}
private static get useRclone() {
return CloudRunner.buildParameters.storageProvider === 'rclone';
}
private static async rclone(command: string): Promise<string> {
const { stdout } = await exec(`rclone ${command}`);
return stdout.toString();
}
private static get bucket() {
return CloudRunner.buildParameters.awsStackName;
return SharedWorkspaceLocking.useRclone
? CloudRunner.buildParameters.rcloneRemote
: CloudRunner.buildParameters.awsStackName;
}
public static get workspaceBucketRoot() {
return `s3://${SharedWorkspaceLocking.bucket}/`;
return SharedWorkspaceLocking.useRclone
? `${SharedWorkspaceLocking.bucket}/`
: `s3://${SharedWorkspaceLocking.bucket}/`;
}
public static get workspaceRoot() {
return `${SharedWorkspaceLocking.workspaceBucketRoot}locks/`;
@ -34,6 +48,14 @@ export class SharedWorkspaceLocking {
}
private static async ensureBucketExists(): Promise<void> {
const bucket = SharedWorkspaceLocking.bucket;
if (SharedWorkspaceLocking.useRclone) {
try {
await SharedWorkspaceLocking.rclone(`lsf ${bucket}`);
} catch {
await SharedWorkspaceLocking.rclone(`mkdir ${bucket}`);
}
return;
}
try {
await SharedWorkspaceLocking.s3.send(new HeadBucketCommand({ Bucket: bucket }));
} catch {
@ -50,6 +72,16 @@ export class SharedWorkspaceLocking {
if (prefix !== '' && !prefix.endsWith('/')) {
prefix += '/';
}
if (SharedWorkspaceLocking.useRclone) {
const path = `${bucket}/${prefix}`;
try {
const output = await SharedWorkspaceLocking.rclone(`lsjson ${path}`);
const json = JSON.parse(output) as { Name: string; IsDir: boolean }[];
return json.map((e) => (e.IsDir ? `${e.Name}/` : e.Name));
} catch {
return [];
}
}
const result = await SharedWorkspaceLocking.s3.send(
new ListObjectsV2Command({ Bucket: bucket, Prefix: prefix, Delimiter: '/' }),
);
@ -264,9 +296,13 @@ export class SharedWorkspaceLocking {
const timestamp = Date.now();
const key = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${timestamp}_${workspace}_workspace`;
await SharedWorkspaceLocking.ensureBucketExists();
await SharedWorkspaceLocking.s3.send(
new PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: '' }),
);
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(`touch ${SharedWorkspaceLocking.bucket}/${key}`);
} else {
await SharedWorkspaceLocking.s3.send(
new PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: '' }),
);
}
const workspaces = await SharedWorkspaceLocking.GetAllWorkspaces(buildParametersContext);
@ -292,18 +328,26 @@ export class SharedWorkspaceLocking {
buildParametersContext.cacheKey
}/${Date.now()}_${runId}_${ending}_lock`;
await SharedWorkspaceLocking.ensureBucketExists();
await SharedWorkspaceLocking.s3.send(
new PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: '' }),
);
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(`touch ${SharedWorkspaceLocking.bucket}/${key}`);
} else {
await SharedWorkspaceLocking.s3.send(
new PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: '' }),
);
}
const hasLock = await SharedWorkspaceLocking.HasWorkspaceLock(workspace, runId, buildParametersContext);
if (hasLock) {
CloudRunner.lockedWorkspace = workspace;
} else {
await SharedWorkspaceLocking.s3.send(
new DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key }),
);
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${key}`);
} else {
await SharedWorkspaceLocking.s3.send(
new DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key }),
);
}
}
return hasLock;
@ -321,12 +365,18 @@ export class SharedWorkspaceLocking {
CloudRunnerLogger.log(`Deleting lock ${workspace}/${file}`);
CloudRunnerLogger.log(`rm ${SharedWorkspaceLocking.workspaceRoot}${buildParametersContext.cacheKey}/${file}`);
if (file) {
await SharedWorkspaceLocking.s3.send(
new DeleteObjectCommand({
Bucket: SharedWorkspaceLocking.bucket,
Key: `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`,
}),
);
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(
`delete ${SharedWorkspaceLocking.bucket}/${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`,
);
} else {
await SharedWorkspaceLocking.s3.send(
new DeleteObjectCommand({
Bucket: SharedWorkspaceLocking.bucket,
Key: `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`,
}),
);
}
}
return !(await SharedWorkspaceLocking.HasWorkspaceLock(workspace, runId, buildParametersContext));
@ -336,14 +386,18 @@ export class SharedWorkspaceLocking {
const prefix = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/`;
const files = await SharedWorkspaceLocking.listObjects(prefix);
for (const file of files.filter((x) => x.includes(`_${workspace}_`))) {
await SharedWorkspaceLocking.s3.send(
new DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: `${prefix}${file}` }),
);
if (SharedWorkspaceLocking.useRclone) {
await SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${prefix}${file}`);
} else {
await SharedWorkspaceLocking.s3.send(
new DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: `${prefix}${file}` }),
);
}
}
}
public static async ReadLines(command: string): Promise<string[]> {
const path = command.replace('aws s3 ls', '').trim();
const path = command.replace('aws s3 ls', '').replace('rclone lsf', '').trim();
const withoutScheme = path.replace('s3://', '');
const [bucket, ...rest] = withoutScheme.split('/');
const prefix = rest.join('/');
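For reference, the `useRclone` path of `listObjects` above relies on `rclone lsjson`, which prints a JSON array whose entries include `Name` and `IsDir` fields. A standalone sketch of the mapping, with sample output invented for illustration:

```ts
// Invented lsjson output; the Name/IsDir fields match rclone's lsjson schema.
const stdout = '[{"Name":"my-cache-key","IsDir":true},{"Name":"1700000000000_w1_workspace","IsDir":false}]';
const entries = (JSON.parse(stdout) as { Name: string; IsDir: boolean }[]).map((e) =>
  e.IsDir ? `${e.Name}/` : e.Name,
);
// entries: ['my-cache-key/', '1700000000000_w1_workspace'], mirroring the trailing-slash
// convention the S3 branch applies to CommonPrefixes.
```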

View File

@ -182,6 +182,80 @@ export class ContainerHookService {
else
echo "AWS CLI not available, skipping aws-s3-pull-cache"
fi
- name: rclone-upload-build
image: rclone/rclone
hook: after
commands: |
if command -v rclone > /dev/null 2>&1; then
rclone copy /data/cache/$CACHE_KEY/build/build-${CloudRunner.buildParameters.buildGuid}.tar${
CloudRunner.buildParameters.useCompressionStrategy ? '.lz4' : ''
} ${CloudRunner.buildParameters.rcloneRemote}/cloud-runner-cache/$CACHE_KEY/build/ || true
rm /data/cache/$CACHE_KEY/build/build-${CloudRunner.buildParameters.buildGuid}.tar${
CloudRunner.buildParameters.useCompressionStrategy ? '.lz4' : ''
} || true
else
echo "rclone not available, skipping rclone-upload-build"
fi
secrets:
- name: RCLONE_REMOTE
value: ${CloudRunner.buildParameters.rcloneRemote || ``}
- name: rclone-pull-build
image: rclone/rclone
commands: |
mkdir -p /data/cache/$CACHE_KEY/build/
if command -v rclone > /dev/null 2>&1; then
rclone copy ${
CloudRunner.buildParameters.rcloneRemote
}/cloud-runner-cache/$CACHE_KEY/build/build-$BUILD_GUID_TARGET.tar${
CloudRunner.buildParameters.useCompressionStrategy ? '.lz4' : ''
} /data/cache/$CACHE_KEY/build/build-$BUILD_GUID_TARGET.tar${
CloudRunner.buildParameters.useCompressionStrategy ? '.lz4' : ''
} || true
else
echo "rclone not available, skipping rclone-pull-build"
fi
secrets:
- name: BUILD_GUID_TARGET
- name: RCLONE_REMOTE
value: ${CloudRunner.buildParameters.rcloneRemote || ``}
- name: rclone-upload-cache
image: rclone/rclone
hook: after
commands: |
if command -v rclone > /dev/null 2>&1; then
rclone copy /data/cache/$CACHE_KEY/lfs ${
CloudRunner.buildParameters.rcloneRemote
}/cloud-runner-cache/$CACHE_KEY/lfs || true
rm -r /data/cache/$CACHE_KEY/lfs || true
rclone copy /data/cache/$CACHE_KEY/Library ${
CloudRunner.buildParameters.rcloneRemote
}/cloud-runner-cache/$CACHE_KEY/Library || true
rm -r /data/cache/$CACHE_KEY/Library || true
else
echo "rclone not available, skipping rclone-upload-cache"
fi
secrets:
- name: RCLONE_REMOTE
value: ${CloudRunner.buildParameters.rcloneRemote || ``}
- name: rclone-pull-cache
image: rclone/rclone
hook: before
commands: |
mkdir -p /data/cache/$CACHE_KEY/Library/
mkdir -p /data/cache/$CACHE_KEY/lfs/
if command -v rclone > /dev/null 2>&1; then
rclone copy ${
CloudRunner.buildParameters.rcloneRemote
}/cloud-runner-cache/$CACHE_KEY/Library /data/cache/$CACHE_KEY/Library/ || true
rclone copy ${
CloudRunner.buildParameters.rcloneRemote
}/cloud-runner-cache/$CACHE_KEY/lfs /data/cache/$CACHE_KEY/lfs/ || true
else
echo "rclone not available, skipping rclone-pull-cache"
fi
secrets:
- name: RCLONE_REMOTE
value: ${CloudRunner.buildParameters.rcloneRemote || ``}
- name: debug-cache
image: ubuntu
hook: after
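Taken together, the four rclone hooks read and write a conventional layout under the configured remote; roughly, as reconstructed from the hook commands above:

```
${rcloneRemote}/cloud-runner-cache/
  <cacheKey>/
    build/build-<buildGuid>.tar[.lz4]   # written by rclone-upload-build, read by rclone-pull-build
    lfs/                                # written by rclone-upload-cache, read by rclone-pull-cache
    Library/                            # Unity Library cache, handled by the same pair of hooks
```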

View File

@ -0,0 +1,87 @@
import CloudRunner from '../cloud-runner';
import { BuildParameters, ImageTag } from '../..';
import UnityVersioning from '../../unity-versioning';
import { Cli } from '../../cli/cli';
import CloudRunnerLogger from '../services/core/cloud-runner-logger';
import { v4 as uuidv4 } from 'uuid';
import setups from './cloud-runner-suite.test';
import { CloudRunnerSystem } from '../services/core/cloud-runner-system';
import { OptionValues } from 'commander';
async function CreateParameters(overrides: OptionValues | undefined) {
if (overrides) {
Cli.options = overrides;
}
return await BuildParameters.create();
}
describe('Cloud Runner pre-built rclone steps', () => {
it('Responds', () => {});
it('Simple test to check if file is loaded', () => {
expect(true).toBe(true);
});
setups();
(() => {
// Determine environment capability to run rclone operations
const isCI = process.env.GITHUB_ACTIONS === 'true';
const isWindows = process.platform === 'win32';
let rcloneAvailable = false;
let bashAvailable = !isWindows; // assume available on non-Windows
if (!isCI) {
try {
const { execSync } = require('child_process');
execSync('rclone version', { stdio: 'ignore' });
rcloneAvailable = true;
} catch {
rcloneAvailable = false;
}
if (isWindows) {
try {
const { execSync } = require('child_process');
execSync('bash --version', { stdio: 'ignore' });
bashAvailable = true;
} catch {
bashAvailable = false;
}
}
}
const hasRcloneRemote = Boolean(process.env.RCLONE_REMOTE || process.env.rcloneRemote);
const shouldRunRclone = (isCI && hasRcloneRemote) || (rcloneAvailable && (!isWindows || bashAvailable));
if (shouldRunRclone) {
it('Run build and prebuilt rclone cache pull, cache push and upload build', async () => {
const remote = process.env.RCLONE_REMOTE || process.env.rcloneRemote || 'local:./temp/rclone-remote';
const overrides = {
versioning: 'None',
projectPath: 'test-project',
unityVersion: UnityVersioning.determineUnityVersion('test-project', UnityVersioning.read('test-project')),
targetPlatform: 'StandaloneLinux64',
cacheKey: `test-case-${uuidv4()}`,
containerHookFiles: `rclone-pull-cache,rclone-upload-cache,rclone-upload-build`,
storageProvider: 'rclone',
rcloneRemote: remote,
cloudRunnerDebug: true,
} as unknown as OptionValues;
const buildParams = await CreateParameters(overrides);
const baseImage = new ImageTag(buildParams);
const results = await CloudRunner.run(buildParams, baseImage.toString());
CloudRunnerLogger.log(`rclone run succeeded`);
expect(results.BuildSucceeded).toBe(true);
// List remote root to validate the remote is accessible (best-effort)
try {
const lines = await CloudRunnerSystem.RunAndReadLines(`rclone lsf ${remote}`);
CloudRunnerLogger.log(lines.join(','));
} catch {}
}, 1_000_000_000);
} else {
it.skip('Run build and prebuilt rclone steps - rclone not configured', () => {
CloudRunnerLogger.log('rclone not configured (no CLI/remote); skipping rclone test');
});
}
})();
});
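To run this suite outside CI, the capability checks above want a working `rclone` binary (plus bash on Windows) and an `RCLONE_REMOTE` environment variable; something like the following, where the jest filter is illustrative and should be adapted to the repository's actual test script:

```bash
export RCLONE_REMOTE=local:./temp/rclone-remote   # a local: remote needs no credentials
rclone mkdir "$RCLONE_REMOTE" || true             # ensure the remote root exists
yarn test --testPathPattern rclone                # illustrative; use the repo's real test invocation
```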

View File

@ -147,6 +147,18 @@ echo "CACHE_KEY=$CACHE_KEY"`;
cp -r "${CloudRunnerFolders.ToLinuxFolder(path.join(ubuntuPlatformsFolder, 'steps'))}" "/steps"
chmod -R +x "/entrypoint.sh"
chmod -R +x "/steps"
# Ensure Git LFS files are available inside the container for local-docker runs
if [ -d "$GITHUB_WORKSPACE/.git" ]; then
echo "Ensuring Git LFS content is pulled"
(cd "$GITHUB_WORKSPACE" \
&& git lfs install || true \
&& git config --global filter.lfs.smudge "git-lfs smudge -- %f" \
&& git config --global filter.lfs.process "git-lfs filter-process" \
&& git lfs pull || true \
&& git lfs checkout || true)
else
echo "Skipping Git LFS pull: no .git directory in workspace"
fi
# Normalize potential CRLF line endings and create safe stubs for missing tooling
if command -v sed > /dev/null 2>&1; then
sed -i 's/\r$//' "/entrypoint.sh" || true