style: format aws-task-runner.ts to satisfy Prettier
parent d3e23a8c70
commit 8f66ff2893
@@ -5248,32 +5248,61 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
 };
 Object.defineProperty(exports, "__esModule", ({ value: true }));
 exports.SharedWorkspaceLocking = void 0;
 const cloud_runner_system_1 = __nccwpck_require__(4197);
 const node_fs_1 = __importDefault(__nccwpck_require__(87561));
 const cloud_runner_logger_1 = __importDefault(__nccwpck_require__(42864));
 const cloud_runner_1 = __importDefault(__nccwpck_require__(79144));
 const input_1 = __importDefault(__nccwpck_require__(91933));
+const client_s3_1 = __nccwpck_require__(19250);
 class SharedWorkspaceLocking {
+    static get s3() {
+        if (!SharedWorkspaceLocking._s3) {
+            const region = input_1.default.region || process.env.AWS_REGION || process.env.AWS_DEFAULT_REGION || 'us-east-1';
+            SharedWorkspaceLocking._s3 = new client_s3_1.S3({ region });
+        }
+        return SharedWorkspaceLocking._s3;
+    }
+    static get bucket() {
+        return cloud_runner_1.default.buildParameters.awsStackName;
+    }
     static get workspaceBucketRoot() {
-        return `s3://${cloud_runner_1.default.buildParameters.awsStackName}/`;
+        return `s3://${SharedWorkspaceLocking.bucket}/`;
     }
     static get workspaceRoot() {
         return `${SharedWorkspaceLocking.workspaceBucketRoot}locks/`;
     }
+    static get workspacePrefix() {
+        return `locks/`;
+    }
+    static async listObjects(prefix, bucket = SharedWorkspaceLocking.bucket) {
+        if (prefix !== '' && !prefix.endsWith('/')) {
+            prefix += '/';
+        }
+        const result = await SharedWorkspaceLocking.s3.send(new client_s3_1.ListObjectsV2Command({ Bucket: bucket, Prefix: prefix, Delimiter: '/' }));
+        const entries = [];
+        for (const p of result.CommonPrefixes || []) {
+            if (p.Prefix)
+                entries.push(p.Prefix.slice(prefix.length));
+        }
+        for (const c of result.Contents || []) {
+            if (c.Key && c.Key !== prefix)
+                entries.push(c.Key.slice(prefix.length));
+        }
+        return entries;
+    }
     static async GetAllWorkspaces(buildParametersContext) {
         if (!(await SharedWorkspaceLocking.DoesCacheKeyTopLevelExist(buildParametersContext))) {
             return [];
         }
-        return (await SharedWorkspaceLocking.ReadLines(`aws s3 ls ${SharedWorkspaceLocking.workspaceRoot}${buildParametersContext.cacheKey}/`))
+        return (await SharedWorkspaceLocking.listObjects(`${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/`))
             .map((x) => x.replace(`/`, ``))
             .filter((x) => x.endsWith(`_workspace`))
             .map((x) => x.split(`_`)[1]);
     }
     static async DoesCacheKeyTopLevelExist(buildParametersContext) {
         try {
-            const rootLines = await SharedWorkspaceLocking.ReadLines(`aws s3 ls ${SharedWorkspaceLocking.workspaceBucketRoot}`);
+            const rootLines = await SharedWorkspaceLocking.listObjects('');
             const lockFolderExists = rootLines.map((x) => x.replace(`/`, ``)).includes(`locks`);
             if (lockFolderExists) {
-                const lines = await SharedWorkspaceLocking.ReadLines(`aws s3 ls ${SharedWorkspaceLocking.workspaceRoot}`);
+                const lines = await SharedWorkspaceLocking.listObjects(SharedWorkspaceLocking.workspacePrefix);
                 return lines.map((x) => x.replace(`/`, ``)).includes(buildParametersContext.cacheKey);
             }
             else {
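Review note: the hunk above swaps `aws s3 ls` subprocess calls for the bundled AWS SDK v3 client (`client_s3_1`). The `Delimiter: '/'` argument makes ListObjectsV2 fold deeper keys into `CommonPrefixes`, so the helper returns one entry per "subfolder" plus the objects directly under the prefix, mirroring the CLI's listing. A minimal standalone sketch of the same technique, assuming `@aws-sdk/client-s3` is installed; the function name and bucket/prefix values are placeholders, not values from this repo:

import { S3Client, ListObjectsV2Command } from '@aws-sdk/client-s3';

// List the immediate children of a prefix, the way `aws s3 ls s3://bucket/prefix/` does.
async function listChildren(bucket: string, prefix: string): Promise<string[]> {
  const s3 = new S3Client({ region: process.env.AWS_REGION ?? 'us-east-1' });
  // Delimiter '/' groups deeper keys into CommonPrefixes, one per "subfolder".
  const result = await s3.send(
    new ListObjectsV2Command({ Bucket: bucket, Prefix: prefix, Delimiter: '/' }),
  );
  const entries: string[] = [];
  for (const p of result.CommonPrefixes ?? []) {
    if (p.Prefix) entries.push(p.Prefix.slice(prefix.length)); // e.g. 'someCacheKey/'
  }
  for (const c of result.Contents ?? []) {
    if (c.Key && c.Key !== prefix) entries.push(c.Key.slice(prefix.length));
  }
  return entries;
}

// Hypothetical usage: await listChildren('my-stack-bucket', 'locks/');

One caveat that carries over to the helper in this diff: ListObjectsV2 returns at most 1,000 keys per response, and neither the sketch nor the new `listObjects` paginates with ContinuationToken; that is fine for small lock folders but worth knowing.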
@@ -5291,7 +5320,7 @@ class SharedWorkspaceLocking {
         if (!(await SharedWorkspaceLocking.DoesWorkspaceExist(workspace, buildParametersContext))) {
             return [];
         }
-        return (await SharedWorkspaceLocking.ReadLines(`aws s3 ls ${SharedWorkspaceLocking.workspaceRoot}${buildParametersContext.cacheKey}/`))
+        return (await SharedWorkspaceLocking.listObjects(`${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/`))
             .map((x) => x.replace(`/`, ``))
             .filter((x) => x.includes(workspace) && x.endsWith(`_lock`));
     }
@@ -5377,7 +5406,7 @@ class SharedWorkspaceLocking {
         if (!(await SharedWorkspaceLocking.DoesWorkspaceExist(workspace, buildParametersContext))) {
             throw new Error("Workspace doesn't exist, can't call get all locks");
         }
-        return (await SharedWorkspaceLocking.ReadLines(`aws s3 ls ${SharedWorkspaceLocking.workspaceRoot}${buildParametersContext.cacheKey}/`))
+        return (await SharedWorkspaceLocking.listObjects(`${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/`))
             .map((x) => x.replace(`/`, ``))
             .filter((x) => x.includes(workspace) && x.endsWith(`_workspace`))
             .map((x) => Number(x))[0];
@@ -5386,7 +5415,7 @@ class SharedWorkspaceLocking {
         if (!(await SharedWorkspaceLocking.DoesWorkspaceExist(workspace, buildParametersContext))) {
             throw new Error(`workspace doesn't exist ${workspace}`);
         }
-        const files = await SharedWorkspaceLocking.ReadLines(`aws s3 ls ${SharedWorkspaceLocking.workspaceRoot}${buildParametersContext.cacheKey}/`);
+        const files = await SharedWorkspaceLocking.listObjects(`${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/`);
         const lockFilesExist = files.filter((x) => {
             return x.includes(workspace) && x.endsWith(`_lock`);
         }).length > 0;
@@ -5397,10 +5426,8 @@ class SharedWorkspaceLocking {
             throw new Error(`${workspace} already exists`);
         }
         const timestamp = Date.now();
-        const file = `${timestamp}_${workspace}_workspace`;
-        node_fs_1.default.writeFileSync(file, '');
-        await cloud_runner_system_1.CloudRunnerSystem.Run(`aws s3 cp ./${file} ${SharedWorkspaceLocking.workspaceRoot}${buildParametersContext.cacheKey}/${file}`, false, true);
-        node_fs_1.default.rmSync(file);
+        const key = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${timestamp}_${workspace}_workspace`;
+        await SharedWorkspaceLocking.s3.send(new client_s3_1.PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: '' }));
         const workspaces = await SharedWorkspaceLocking.GetAllWorkspaces(buildParametersContext);
         cloud_runner_logger_1.default.log(`All workspaces ${workspaces}`);
         if (!(await SharedWorkspaceLocking.IsWorkspaceBelowMax(workspace, buildParametersContext))) {
@@ -5413,16 +5440,14 @@ class SharedWorkspaceLocking {
     static async LockWorkspace(workspace, runId, buildParametersContext) {
         const existingWorkspace = workspace.endsWith(`_workspace`);
         const ending = existingWorkspace ? workspace : `${workspace}_workspace`;
-        const file = `${Date.now()}_${runId}_${ending}_lock`;
-        node_fs_1.default.writeFileSync(file, '');
-        await cloud_runner_system_1.CloudRunnerSystem.Run(`aws s3 cp ./${file} ${SharedWorkspaceLocking.workspaceRoot}${buildParametersContext.cacheKey}/${file}`, false, true);
-        node_fs_1.default.rmSync(file);
+        const key = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${Date.now()}_${runId}_${ending}_lock`;
+        await SharedWorkspaceLocking.s3.send(new client_s3_1.PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: '' }));
         const hasLock = await SharedWorkspaceLocking.HasWorkspaceLock(workspace, runId, buildParametersContext);
         if (hasLock) {
             cloud_runner_1.default.lockedWorkspace = workspace;
         }
         else {
-            await cloud_runner_system_1.CloudRunnerSystem.Run(`aws s3 rm ${SharedWorkspaceLocking.workspaceRoot}${buildParametersContext.cacheKey}/${file}`, false, true);
+            await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key }));
         }
         return hasLock;
     }
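Review note: LockWorkspace now performs the lock handshake directly against S3. It PUTs a zero-byte marker object named `<timestamp>_<runId>_<workspace>_lock`, re-checks via HasWorkspaceLock whether this run actually won the workspace, and deletes its marker if it lost. A sketch of that create-verify-rollback pattern under the same assumptions, with the hypothetical `verifyLockHeld` callback standing in for HasWorkspaceLock:

import { S3Client, PutObjectCommand, DeleteObjectCommand } from '@aws-sdk/client-s3';

async function tryLock(
  s3: S3Client,
  bucket: string,
  key: string,
  verifyLockHeld: () => Promise<boolean>, // e.g. re-list lock markers and compare timestamps
): Promise<boolean> {
  // Create the marker first; contention is resolved after the fact by
  // inspecting all markers rather than by a conditional write.
  await s3.send(new PutObjectCommand({ Bucket: bucket, Key: key, Body: '' }));
  const won = await verifyLockHeld();
  if (!won) {
    // Lost the race: remove our marker so the workspace stays claimable.
    await s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: key }));
  }
  return won;
}

As a side effect, the change also drops the temp-file shuffle (`writeFileSync` / `aws s3 cp` / `rmSync`), since PutObjectCommand accepts the empty body inline.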
@@ -5432,14 +5457,27 @@ class SharedWorkspaceLocking {
         cloud_runner_logger_1.default.log(`All Locks ${files} ${workspace} ${runId}`);
         cloud_runner_logger_1.default.log(`Deleting lock ${workspace}/${file}`);
-        cloud_runner_logger_1.default.log(`rm ${SharedWorkspaceLocking.workspaceRoot}${buildParametersContext.cacheKey}/${file}`);
-        await cloud_runner_system_1.CloudRunnerSystem.Run(`aws s3 rm ${SharedWorkspaceLocking.workspaceRoot}${buildParametersContext.cacheKey}/${file}`, false, true);
+        if (file) {
+            await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({
+                Bucket: SharedWorkspaceLocking.bucket,
+                Key: `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`,
+            }));
+        }
         return !(await SharedWorkspaceLocking.HasWorkspaceLock(workspace, runId, buildParametersContext));
     }
     static async CleanupWorkspace(workspace, buildParametersContext) {
-        await cloud_runner_system_1.CloudRunnerSystem.Run(`aws s3 rm ${SharedWorkspaceLocking.workspaceRoot}${buildParametersContext.cacheKey} --exclude "*" --include "*_${workspace}_*"`, false, true);
+        const prefix = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/`;
+        const files = await SharedWorkspaceLocking.listObjects(prefix);
+        for (const file of files.filter((x) => x.includes(`_${workspace}_`))) {
+            await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: `${prefix}${file}` }));
+        }
     }
     static async ReadLines(command) {
-        return cloud_runner_system_1.CloudRunnerSystem.RunAndReadLines(command);
+        const path = command.replace('aws s3 ls', '').trim();
+        const withoutScheme = path.replace('s3://', '');
+        const [bucket, ...rest] = withoutScheme.split('/');
+        const prefix = rest.join('/');
+        return SharedWorkspaceLocking.listObjects(prefix, bucket);
     }
 }
 exports.SharedWorkspaceLocking = SharedWorkspaceLocking;
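Review note: ReadLines survives as a compatibility shim so any remaining `aws s3 ls <url>` call sites keep working; it parses the bucket and key prefix out of the command string and forwards to `listObjects`. The parsing step in isolation, wrapped in a hypothetical standalone helper with the same logic as the diff:

// Turn an `aws s3 ls s3://bucket/some/prefix/` command string into the
// (bucket, prefix) pair the SDK-based lister needs.
function parseS3LsCommand(command: string): { bucket: string; prefix: string } {
  const path = command.replace('aws s3 ls', '').trim(); // 's3://bucket/some/prefix/'
  const withoutScheme = path.replace('s3://', ''); // 'bucket/some/prefix/'
  const [bucket, ...rest] = withoutScheme.split('/');
  return { bucket, prefix: rest.join('/') }; // prefix keeps its trailing '/'
}

// parseS3LsCommand('aws s3 ls s3://my-bucket/locks/abc/')
//   → { bucket: 'my-bucket', prefix: 'locks/abc/' }

Likewise, CleanupWorkspace trades the CLI's `--exclude "*" --include "*_<workspace>_*"` glob for a list-filter-delete loop over the same prefix, which keeps the deletion scoped to that cache key's lock folder.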
@@ -6195,7 +6233,19 @@ class BuildAutomationWorkflow {
 ${buildHooks.filter((x) => x.hook.includes(`after`)).map((x) => x.commands) || ' '}`;
     }
     static setupCommands(builderPath, isContainerized) {
-        const commands = `mkdir -p ${cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(cloud_runner_folders_1.CloudRunnerFolders.builderPathAbsolute)} && git clone -q -b ${cloud_runner_1.default.buildParameters.cloudRunnerBranch} ${cloud_runner_folders_1.CloudRunnerFolders.unityBuilderRepoUrl} "${cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(cloud_runner_folders_1.CloudRunnerFolders.builderPathAbsolute)}" && chmod +x ${builderPath}`;
+        const commands = `mkdir -p ${cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(cloud_runner_folders_1.CloudRunnerFolders.builderPathAbsolute)}
+BRANCH="${cloud_runner_1.default.buildParameters.cloudRunnerBranch}"
+REPO="${cloud_runner_folders_1.CloudRunnerFolders.unityBuilderRepoUrl}"
+DEST="${cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(cloud_runner_folders_1.CloudRunnerFolders.builderPathAbsolute)}"
+if git ls-remote --heads "$REPO" "$BRANCH" >/dev/null 2>&1; then
+  git clone -q -b "$BRANCH" "$REPO" "$DEST"
+else
+  echo "Remote branch $BRANCH not found in $REPO; falling back to a known branch"
+  git clone -q -b cloud-runner-develop "$REPO" "$DEST" \
+    || git clone -q -b main "$REPO" "$DEST" \
+    || git clone -q "$REPO" "$DEST"
+fi
+chmod +x ${builderPath}`;
         if (isContainerized) {
             const cloneBuilderCommands = `if [ -e "${cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(cloud_runner_folders_1.CloudRunnerFolders.uniqueCloudRunnerJobFolderAbsolute)}" ] && [ -e "${cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(node_path_1.default.join(cloud_runner_folders_1.CloudRunnerFolders.builderPathAbsolute, `.git`))}" ] ; then echo "Builder Already Exists!" && (command -v tree > /dev/null 2>&1 && tree ${cloud_runner_folders_1.CloudRunnerFolders.builderPathAbsolute} || ls -la ${cloud_runner_folders_1.CloudRunnerFolders.builderPathAbsolute}); else ${commands} ; fi`;
             return `export GIT_DISCOVERY_ACROSS_FILESYSTEM=1
File diff suppressed because one or more lines are too long
@@ -89,9 +89,19 @@ export class BuildAutomationWorkflow implements WorkflowInterface {
   private static setupCommands(builderPath: string, isContainerized: boolean) {
     const commands = `mkdir -p ${CloudRunnerFolders.ToLinuxFolder(
       CloudRunnerFolders.builderPathAbsolute,
-    )} && git clone -q -b ${CloudRunner.buildParameters.cloudRunnerBranch} ${
-      CloudRunnerFolders.unityBuilderRepoUrl
-    } "${CloudRunnerFolders.ToLinuxFolder(CloudRunnerFolders.builderPathAbsolute)}" && chmod +x ${builderPath}`;
+    )}
+BRANCH="${CloudRunner.buildParameters.cloudRunnerBranch}"
+REPO="${CloudRunnerFolders.unityBuilderRepoUrl}"
+DEST="${CloudRunnerFolders.ToLinuxFolder(CloudRunnerFolders.builderPathAbsolute)}"
+if git ls-remote --heads "$REPO" "$BRANCH" >/dev/null 2>&1; then
+  git clone -q -b "$BRANCH" "$REPO" "$DEST"
+else
+  echo "Remote branch $BRANCH not found in $REPO; falling back to a known branch"
+  git clone -q -b cloud-runner-develop "$REPO" "$DEST" \
+    || git clone -q -b main "$REPO" "$DEST" \
+    || git clone -q "$REPO" "$DEST"
+fi
+chmod +x ${builderPath}`;

     if (isContainerized) {
       const cloneBuilderCommands = `if [ -e "${CloudRunnerFolders.ToLinuxFolder(
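Review note: both the bundled and source versions of setupCommands now guard the clone with `git ls-remote --heads "$REPO" "$BRANCH"` and fall back through cloud-runner-develop, main, and the remote's default branch when the configured branch is missing. One subtlety if you port this guard elsewhere: `git ls-remote --heads <repo> <branch>` exits 0 whenever the remote is reachable and simply prints nothing for a missing branch, so a robust check inspects the output (or passes `--exit-code`) rather than the exit status alone. A sketch of the same probe from Node; the helper name and repository URL are placeholders:

import { execFileSync } from 'node:child_process';

// True when the remote advertises the branch; mirrors the shell guard above.
function remoteBranchExists(repo: string, branch: string): boolean {
  try {
    // ls-remote prints `<sha>\trefs/heads/<branch>` for a match, or nothing.
    const out = execFileSync('git', ['ls-remote', '--heads', repo, branch], {
      encoding: 'utf8',
    });
    return out.trim().length > 0;
  } catch {
    return false; // unreachable remote counts as "branch not found"
  }
}

// Hypothetical usage: remoteBranchExists('https://github.com/example/repo.git', 'main');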