merge remote

pull/310/head
Frostebite 2021-09-15 04:20:04 +01:00
parent 33896798fd
commit 7706f45feb
7 changed files with 269 additions and 295 deletions

dist/index.js vendored
View File

@@ -640,6 +640,7 @@ class AWSBuildEnvironment {
return fs.readFileSync(`${__dirname}/cloud-formations/task-def-formation.yml`, 'utf8');
}
cleanupResources(CF, taskDef) {
var _a;
return __awaiter(this, void 0, void 0, function* () {
core.info('Cleanup starting');
yield CF.deleteStack({
@@ -651,10 +652,12 @@ class AWSBuildEnvironment {
yield CF.waitFor('stackDeleteComplete', {
StackName: taskDef.taskDefStackName,
}).promise();
// Currently too slow and causes too much waiting
yield CF.waitFor('stackDeleteComplete', {
StackName: taskDef.taskDefStackNameTTL,
}).promise();
const stacks = (_a = (yield CF.listStacks().promise()).StackSummaries) === null || _a === void 0 ? void 0 : _a.filter((x) => x.StackStatus !== 'DELETE_COMPLETE');
core.info(`Deleted Stacks: ${taskDef.taskDefStackName}, ${taskDef.taskDefStackNameTTL}`);
core.info(`Stacks: ${JSON.stringify(stacks, undefined, 4)}`);
core.info('Cleanup complete');
});
}
@@ -776,58 +779,89 @@ class AWSBuildRunner {
static streamLogsUntilTaskStops(ECS, CF, taskDef, clusterName, taskArn, kinesisStreamName) {
var _a;
return __awaiter(this, void 0, void 0, function* () {
// watching logs
const kinesis = new AWS.Kinesis();
const stream = yield kinesis
const stream = yield AWSBuildRunner.getLogStream(kinesis, kinesisStreamName);
let iterator = yield AWSBuildRunner.getLogIterator(kinesis, stream);
core.info(`Cloud runner job status is ${(_a = (yield AWSBuildRunner.describeTasks(ECS, clusterName, taskArn))) === null || _a === void 0 ? void 0 : _a.lastStatus}`);
const logBaseUrl = `https://${AWS.config.region}.console.aws.amazon.com/cloudwatch/home?region=${AWS.config.region}#logsV2:log-groups/log-group/${taskDef.taskDefStackName}`;
core.info(`You can also see the logs at AWS Cloud Watch: ${logBaseUrl}`);
let readingLogs = true;
let timestamp = 0;
while (readingLogs) {
yield new Promise((resolve) => setTimeout(resolve, 1500));
const taskData = yield AWSBuildRunner.describeTasks(ECS, clusterName, taskArn);
({ timestamp, readingLogs } = AWSBuildRunner.checkStreamingShouldContinue(taskData, timestamp, readingLogs));
({ iterator, readingLogs } = yield AWSBuildRunner.handleLogStreamIteration(kinesis, iterator, readingLogs, taskDef));
}
});
}
static handleLogStreamIteration(kinesis, iterator, readingLogs, taskDef) {
return __awaiter(this, void 0, void 0, function* () {
const records = yield kinesis
.getRecords({
ShardIterator: iterator,
})
.promise();
iterator = records.NextShardIterator || '';
readingLogs = AWSBuildRunner.logRecords(records, iterator, taskDef, readingLogs);
return { iterator, readingLogs };
});
}
static checkStreamingShouldContinue(taskData, timestamp, readingLogs) {
if ((taskData === null || taskData === void 0 ? void 0 : taskData.lastStatus) !== 'RUNNING') {
if (timestamp === 0) {
core.info('Cloud runner job stopped, streaming end of logs');
timestamp = Date.now();
}
if (timestamp !== 0 && Date.now() - timestamp < 30000) {
core.info('Cloud runner status is not RUNNING for 30 seconds, last query for logs');
readingLogs = false;
}
core.info(`Status of job: ${taskData.lastStatus}`);
}
return { timestamp, readingLogs };
}
static logRecords(records, iterator, taskDef, readingLogs) {
if (records.Records.length > 0 && iterator) {
for (let index = 0; index < records.Records.length; index++) {
const json = JSON.parse(zlib.gunzipSync(Buffer.from(records.Records[index].Data, 'base64')).toString('utf8'));
if (json.messageType === 'DATA_MESSAGE') {
for (let logEventsIndex = 0; logEventsIndex < json.logEvents.length; logEventsIndex++) {
if (json.logEvents[logEventsIndex].message.includes(taskDef.logid)) {
core.info('End of cloud runner job logs');
readingLogs = false;
}
else {
const message = json.logEvents[logEventsIndex].message;
if (message.includes('Rebuilding Library because the asset database could not be found!')) {
core.warning('LIBRARY NOT FOUND!');
}
core.info(message);
}
}
}
}
}
return readingLogs;
}
static getLogStream(kinesis, kinesisStreamName) {
return __awaiter(this, void 0, void 0, function* () {
return yield kinesis
.describeStream({
StreamName: kinesisStreamName,
})
.promise();
let iterator = (yield kinesis
});
}
static getLogIterator(kinesis, stream) {
return __awaiter(this, void 0, void 0, function* () {
return ((yield kinesis
.getShardIterator({
ShardIteratorType: 'TRIM_HORIZON',
StreamName: stream.StreamDescription.StreamName,
ShardId: stream.StreamDescription.Shards[0].ShardId,
})
.promise()).ShardIterator || '';
core.info(`Cloud runner job status is ${(_a = (yield AWSBuildRunner.describeTasks(ECS, clusterName, taskArn))) === null || _a === void 0 ? void 0 : _a.lastStatus}`);
const logBaseUrl = `https://${AWS.config.region}.console.aws.amazon.com/cloudwatch/home?region=${AWS.config.region}#logsV2:log-groups/log-group/${taskDef.taskDefStackName}`;
core.info(`You can also see the logs at AWS Cloud Watch: ${logBaseUrl}`);
let readingLogs = true;
while (readingLogs) {
yield new Promise((resolve) => setTimeout(resolve, 1500));
const taskData = yield AWSBuildRunner.describeTasks(ECS, clusterName, taskArn);
if ((taskData === null || taskData === void 0 ? void 0 : taskData.lastStatus) !== 'RUNNING') {
core.info('Task not runner, job ended');
readingLogs = false;
}
const records = yield kinesis
.getRecords({
ShardIterator: iterator,
})
.promise();
iterator = records.NextShardIterator || '';
if (records.Records.length > 0 && iterator) {
for (let index = 0; index < records.Records.length; index++) {
const json = JSON.parse(zlib.gunzipSync(Buffer.from(records.Records[index].Data, 'base64')).toString('utf8'));
if (json.messageType === 'DATA_MESSAGE') {
for (let logEventsIndex = 0; logEventsIndex < json.logEvents.length; logEventsIndex++) {
if (json.logEvents[logEventsIndex].message.includes(taskDef.logid)) {
core.info('End of cloud runner job logs');
readingLogs = false;
}
else {
const message = json.logEvents[logEventsIndex].message;
if (message.includes('Rebuilding Library because the asset database could not be found!')) {
core.warning('LIBRARY NOT FOUND!');
}
core.info(message);
}
}
}
}
}
}
.promise()).ShardIterator || '');
});
}
}
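A behavioral note on the refactored streaming loop above: checkStreamingShouldContinue records the moment the task leaves RUNNING, and its log message announces a 30-second window, yet the guard (Date.now() - timestamp < 30000) ends streaming on the first poll after the status change rather than after 30 seconds have elapsed. A minimal sketch of the apparently intended grace-window logic, in hypothetical standalone form rather than the committed code:

// Hypothetical sketch: keep streaming for a 30 s grace window after the task stops.
function shouldKeepStreaming(lastStatus: string | undefined, stoppedAt: number) {
    const GRACE_MS = 30000;
    if (lastStatus === 'RUNNING') {
        return { stoppedAt: 0, keepReading: true };
    }
    if (stoppedAt === 0) {
        stoppedAt = Date.now(); // remember when the task left RUNNING
    }
    // Keep draining trailing logs until the grace window has elapsed.
    return { stoppedAt, keepReading: Date.now() - stoppedAt < GRACE_MS };
}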
@@ -1201,32 +1235,20 @@ class CloudRunner {
return __awaiter(this, void 0, void 0, function* () {
core.info('Starting step 1/4 (clone and restore cache)');
yield this.CloudRunnerProviderPlatform.runBuildTask(this.buildGuid, 'alpine/git', [
` printenv
apk update -q
apk add unzip zip git-lfs jq tree -q
mkdir -p ${this.buildPathFull}
mkdir -p ${this.builderPathFull}
mkdir -p ${this.repoPathFull}
${this.getCloneBuilder()}
echo ' '
echo 'Initializing source repository for cloning with caching of LFS files'
${this.getCloneNoLFSCommand()}
echo 'Source repository initialized'
echo ' '
${process.env.DEBUG ? '' : '#'}echo $LFS_ASSETS_HASH
${process.env.DEBUG ? '' : '#'}echo 'Large File before LFS caching and pull'
${process.env.DEBUG ? '' : '#'}ls -alh "${this.lfsDirectory}"
${process.env.DEBUG ? '' : '#'}echo ' '
echo 'Starting checks of cache for the Unity project Library and git LFS files'
${this.getHandleCachingCommand()}
${process.env.DEBUG ? '' : '#'}echo 'Caching complete'
${process.env.DEBUG ? '' : '#'}echo ' '
${process.env.DEBUG ? '' : '#'}echo 'Large File after LFS caching and pull'
${process.env.DEBUG ? '' : '#'}ls -alh "${this.lfsDirectory}"
${process.env.DEBUG ? '' : '#'}echo ' '
${process.env.DEBUG ? '' : '#'}tree -L 4 "${this.buildPathFull}"
${process.env.DEBUG ? '' : '#'}ls -lh "/${buildVolumeFolder}"
${process.env.DEBUG ? '' : '#'}echo ' '
` printenv
apk update -q
apk add unzip zip git-lfs jq tree -q
mkdir -p ${this.buildPathFull}
mkdir -p ${this.builderPathFull}
mkdir -p ${this.repoPathFull}
${this.getCloneBuilder()}
echo ' '
echo 'Initializing source repository for cloning with caching of LFS files'
${this.getCloneNoLFSCommand()}
echo 'Source repository initialized'
echo ' '
echo 'Starting checks of cache for the Unity project Library and git LFS files'
${this.getHandleCachingCommand()}
`,
], `/${buildVolumeFolder}`, `/${buildVolumeFolder}/`, CloudRunner.defaultGitShaEnvironmentVariable, this.defaultSecrets);
});
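Both the removed and the retained lines in this template rely on the same gating trick: the interpolation ${process.env.DEBUG ? '' : '#'} prefixes a shell line with '#' unless DEBUG is set, so the diagnostic commands are emitted commented out in normal runs. A minimal sketch of the pattern:

// Sketch of the DEBUG gate used by these shell templates: when DEBUG is
// unset, the diagnostic line becomes a shell comment and never runs.
const gate = process.env.DEBUG ? '' : '#';
const script = `
echo 'always runs'
${gate}ls -alh "/build"
${gate}tree -L 4 "/build"
`;
console.log(script);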
@@ -1235,17 +1257,17 @@ class CloudRunner {
return __awaiter(this, void 0, void 0, function* () {
core.info('Starting part 2/4 (build unity project)');
yield this.CloudRunnerProviderPlatform.runBuildTask(this.buildGuid, baseImage.toString(), [
`
printenv
export GITHUB_WORKSPACE="${this.repoPathFull}"
cp -r "${this.builderPathFull}/dist/default-build-script/" "/UnityBuilderAction"
cp -r "${this.builderPathFull}/dist/entrypoint.sh" "/entrypoint.sh"
cp -r "${this.builderPathFull}/dist/steps/" "/steps"
chmod -R +x "/entrypoint.sh"
chmod -R +x "/steps"
/entrypoint.sh
${process.env.DEBUG ? '' : '#'}tree -L 4 "${this.buildPathFull}"
${process.env.DEBUG ? '' : '#'}ls -lh "/${buildVolumeFolder}"
`
printenv
export GITHUB_WORKSPACE="${this.repoPathFull}"
cp -r "${this.builderPathFull}/dist/default-build-script/" "/UnityBuilderAction"
cp -r "${this.builderPathFull}/dist/entrypoint.sh" "/entrypoint.sh"
cp -r "${this.builderPathFull}/dist/steps/" "/steps"
chmod -R +x "/entrypoint.sh"
chmod -R +x "/steps"
/entrypoint.sh
${process.env.DEBUG ? '' : '#'}tree -L 4 "${this.buildPathFull}"
${process.env.DEBUG ? '' : '#'}ls -lh "/${buildVolumeFolder}"
`,
], `/${buildVolumeFolder}`, `/${this.projectPathFull}`, CloudRunner.readBuildEnvironmentVariables(), this.defaultSecrets);
});
@@ -1255,23 +1277,23 @@ class CloudRunner {
core.info('Starting step 3/4 build compression');
// Cleanup
yield this.CloudRunnerProviderPlatform.runBuildTask(this.buildGuid, 'alpine', [
`
printenv
apk update -q
apk add zip tree -q
${process.env.DEBUG ? '' : '#'}tree -L 4 "$repoPathFull"
${process.env.DEBUG ? '' : '#'}ls -lh "$repoPathFull"
cd "$libraryFolderFull/.."
zip -r "lib-$BUILDID.zip" "./Library"
mv "lib-$BUILDID.zip" "/$cacheFolderFull/lib"
cd "$repoPathFull"
ls -lh "$repoPathFull"
zip -r "build-$BUILDID.zip" "./${CloudRunner.buildParams.buildPath}"
mv "build-$BUILDID.zip" "/$cacheFolderFull/build-$BUILDID.zip"
${process.env.DEBUG ? '' : '#'}tree -L 4 "/$cacheFolderFull"
${process.env.DEBUG ? '' : '#'}tree -L 4 "/$cacheFolderFull/.."
${process.env.DEBUG ? '' : '#'}tree -L 4 "$repoPathFull"
${process.env.DEBUG ? '' : '#'}ls -lh "$repoPathFull"
`
printenv
apk update -q
apk add zip tree -q
${process.env.DEBUG ? '' : '#'}tree -L 4 "$repoPathFull"
${process.env.DEBUG ? '' : '#'}ls -lh "$repoPathFull"
cd "$libraryFolderFull/.."
zip -r "lib-$BUILDID.zip" "./Library"
mv "lib-$BUILDID.zip" "/$cacheFolderFull/lib"
cd "$repoPathFull"
ls -lh "$repoPathFull"
zip -r "build-$BUILDID.zip" "./${CloudRunner.buildParams.buildPath}"
mv "build-$BUILDID.zip" "/$cacheFolderFull/build-$BUILDID.zip"
${process.env.DEBUG ? '' : '#'}tree -L 4 "/$cacheFolderFull"
${process.env.DEBUG ? '' : '#'}tree -L 4 "/$cacheFolderFull/.."
${process.env.DEBUG ? '' : '#'}tree -L 4 "$repoPathFull"
${process.env.DEBUG ? '' : '#'}ls -lh "$repoPathFull"
`,
], `/${buildVolumeFolder}`, `/${buildVolumeFolder}`, [
...CloudRunner.defaultGitShaEnvironmentVariable,
@@ -1892,17 +1914,35 @@ class KubernetesStorage {
static getPVCPhase(kubeClient, name, namespace) {
var _a;
return __awaiter(this, void 0, void 0, function* () {
return (_a = (yield kubeClient.readNamespacedPersistentVolumeClaim(name, namespace)).body.status) === null || _a === void 0 ? void 0 : _a.phase;
try {
return (_a = (yield kubeClient.readNamespacedPersistentVolumeClaim(name, namespace)).body.status) === null || _a === void 0 ? void 0 : _a.phase;
}
catch (error) {
core.error('Failed to get PVC phase');
core.error(JSON.stringify(error, undefined, 4));
throw error;
}
});
}
static watchUntilPVCNotPending(kubeClient, name, namespace) {
return __awaiter(this, void 0, void 0, function* () {
core.info(`watch Until PVC Not Pending ${name} ${namespace}`);
core.info(`${yield this.getPVCPhase(kubeClient, name, namespace)}`);
yield async_wait_until_1.default(() => __awaiter(this, void 0, void 0, function* () { return (yield this.getPVCPhase(kubeClient, name, namespace)) !== 'Pending'; }), {
timeout: 500000,
intervalBetweenAttempts: 15000,
});
try {
core.info(`watch Until PVC Not Pending ${name} ${namespace}`);
core.info(`${yield this.getPVCPhase(kubeClient, name, namespace)}`);
yield async_wait_until_1.default(() => __awaiter(this, void 0, void 0, function* () {
return (yield this.getPVCPhase(kubeClient, name, namespace)) !== 'Pending';
}), {
timeout: 500000,
intervalBetweenAttempts: 15000,
});
}
catch (error) {
core.error('Failed to watch PVC');
core.error(error);
core.error(JSON.stringify(error, undefined, 4));
core.error(`PVC Body: ${JSON.stringify((yield kubeClient.readNamespacedPersistentVolumeClaim(name, namespace)).body, undefined, 4)}`);
throw error;
}
});
}
static createPVC(pvcName, buildParameters, kubeClient, namespace) {
@@ -1915,7 +1955,7 @@ class KubernetesStorage {
};
pvc.spec = {
accessModes: ['ReadWriteOnce'],
storageClassName: process.env.K8s_STORAGE_CLASS || 'fileserver',
storageClassName: process.env.K8s_STORAGE_CLASS || 'standard',
resources: {
requests: {
storage: buildParameters.kubeVolumeSize,
@@ -1941,66 +1981,6 @@ class KubernetesStorage {
}
}
exports.default = KubernetesStorage;
/*
It's possible now with Cloud Filestore.
First create a Filestore instance.
gcloud filestore instances create nfs-server
--project=[PROJECT_ID]
--zone=us-central1-c
--tier=STANDARD
--file-share=name="vol1",capacity=1TB
--network=name="default",reserved-ip-range="10.0.0.0/29"
Then create a persistent volume in GKE.
apiVersion: v1
kind: PersistentVolume
metadata:
name: fileserver
spec:
capacity:
storage: 1T
accessModes:
- ReadWriteMany
nfs:
path: /vol1
server: [IP_ADDRESS]
[IP_ADDRESS] is available in filestore instance details.
You can now request a persistent volume claim.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: fileserver-claim
spec:
accessModes:
- ReadWriteMany
storageClassName: "fileserver"
resources:
requests:
storage: 100G
Finally, mount the volume in your pod.
apiVersion: v1
kind: Pod
metadata:
name: my-pod
spec:
containers:
- name: my-container
image: nginx:latest
volumeMounts:
- mountPath: /workdir
name: mypvc
volumes:
- name: mypvc
persistentVolumeClaim:
claimName: fileserver-claim
readOnly: false
Solution is detailed here : https://cloud.google.com/filestore/docs/accessing-fileshares
*/
/***/ }),

dist/index.js.map vendored

File diff suppressed because one or more lines are too long

View File

@@ -362,7 +362,6 @@ class AWSBuildEnvironment implements CloudRunnerProviderInterface {
await CF.deleteStack({
StackName: taskDef.taskDefStackName,
}).promise();
await CF.deleteStack({
StackName: taskDef.taskDefStackNameTTL,
}).promise();
@@ -370,12 +369,15 @@ class AWSBuildEnvironment implements CloudRunnerProviderInterface {
await CF.waitFor('stackDeleteComplete', {
StackName: taskDef.taskDefStackName,
}).promise();
// Currently too slow and causes too much waiting
await CF.waitFor('stackDeleteComplete', {
StackName: taskDef.taskDefStackNameTTL,
}).promise();
const stacks = (await CF.listStacks().promise()).StackSummaries?.filter((x) => x.StackStatus !== 'DELETE_COMPLETE');
core.info(`Deleted Stacks: ${taskDef.taskDefStackName}, ${taskDef.taskDefStackNameTTL}`);
core.info(`Stacks: ${JSON.stringify(stacks, undefined, 4)}`);
core.info('Cleanup complete');
}
}
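The added diagnostic lists any stacks that have not finished deleting after cleanup. A minimal sketch of that query in isolation, assuming the AWS SDK v2 CloudFormation client used throughout this file:

import * as AWS from 'aws-sdk';

// Return summaries of stacks that are not yet fully deleted.
async function listUndeletedStacks(CF: AWS.CloudFormation) {
    const summaries = (await CF.listStacks().promise()).StackSummaries;
    return summaries?.filter((x) => x.StackStatus !== 'DELETE_COMPLETE') ?? [];
}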

View File

@@ -103,25 +103,9 @@ class AWSBuildRunner {
taskArn: string,
kinesisStreamName: string,
) {
// watching logs
const kinesis = new AWS.Kinesis();
const stream = await kinesis
.describeStream({
StreamName: kinesisStreamName,
})
.promise();
let iterator =
(
await kinesis
.getShardIterator({
ShardIteratorType: 'TRIM_HORIZON',
StreamName: stream.StreamDescription.StreamName,
ShardId: stream.StreamDescription.Shards[0].ShardId,
})
.promise()
).ShardIterator || '';
const stream = await AWSBuildRunner.getLogStream(kinesis, kinesisStreamName);
let iterator = await AWSBuildRunner.getLogIterator(kinesis, stream);
core.info(
`Cloud runner job status is ${(await AWSBuildRunner.describeTasks(ECS, clusterName, taskArn))?.lastStatus}`,
@@ -130,41 +114,96 @@ class AWSBuildRunner {
const logBaseUrl = `https://${AWS.config.region}.console.aws.amazon.com/cloudwatch/home?region=${AWS.config.region}#logsV2:log-groups/log-group/${taskDef.taskDefStackName}`;
core.info(`You can also see the logs at AWS Cloud Watch: ${logBaseUrl}`);
let readingLogs = true;
let timestamp: number = 0;
while (readingLogs) {
await new Promise((resolve) => setTimeout(resolve, 1500));
const taskData = await AWSBuildRunner.describeTasks(ECS, clusterName, taskArn);
if (taskData?.lastStatus !== 'RUNNING') {
core.info('Task not runner, job ended');
({ timestamp, readingLogs } = AWSBuildRunner.checkStreamingShouldContinue(taskData, timestamp, readingLogs));
({ iterator, readingLogs } = await AWSBuildRunner.handleLogStreamIteration(
kinesis,
iterator,
readingLogs,
taskDef,
));
}
}
private static async handleLogStreamIteration(
kinesis: AWS.Kinesis,
iterator: string,
readingLogs: boolean,
taskDef: CloudRunnerTaskDef,
) {
const records = await kinesis
.getRecords({
ShardIterator: iterator,
})
.promise();
iterator = records.NextShardIterator || '';
readingLogs = AWSBuildRunner.logRecords(records, iterator, taskDef, readingLogs);
return { iterator, readingLogs };
}
private static checkStreamingShouldContinue(taskData: AWS.ECS.Task, timestamp: number, readingLogs: boolean) {
if (taskData?.lastStatus !== 'RUNNING') {
if (timestamp === 0) {
core.info('Cloud runner job stopped, streaming end of logs');
timestamp = Date.now();
}
if (timestamp !== 0 && Date.now() - timestamp < 30000) {
core.info('Cloud runner status is not RUNNING for 30 seconds, last query for logs');
readingLogs = false;
}
const records = await kinesis
.getRecords({
ShardIterator: iterator,
})
.promise();
iterator = records.NextShardIterator || '';
if (records.Records.length > 0 && iterator) {
for (let index = 0; index < records.Records.length; index++) {
const json = JSON.parse(
zlib.gunzipSync(Buffer.from(records.Records[index].Data as string, 'base64')).toString('utf8'),
);
if (json.messageType === 'DATA_MESSAGE') {
for (let logEventsIndex = 0; logEventsIndex < json.logEvents.length; logEventsIndex++) {
if (json.logEvents[logEventsIndex].message.includes(taskDef.logid)) {
core.info('End of cloud runner job logs');
readingLogs = false;
} else {
const message = json.logEvents[logEventsIndex].message;
if (message.includes('Rebuilding Library because the asset database could not be found!')) {
core.warning('LIBRARY NOT FOUND!');
}
core.info(message);
core.info(`Status of job: ${taskData.lastStatus}`);
}
return { timestamp, readingLogs };
}
private static logRecords(records, iterator: string, taskDef: CloudRunnerTaskDef, readingLogs: boolean) {
if (records.Records.length > 0 && iterator) {
for (let index = 0; index < records.Records.length; index++) {
const json = JSON.parse(
zlib.gunzipSync(Buffer.from(records.Records[index].Data as string, 'base64')).toString('utf8'),
);
if (json.messageType === 'DATA_MESSAGE') {
for (let logEventsIndex = 0; logEventsIndex < json.logEvents.length; logEventsIndex++) {
if (json.logEvents[logEventsIndex].message.includes(taskDef.logid)) {
core.info('End of cloud runner job logs');
readingLogs = false;
} else {
const message = json.logEvents[logEventsIndex].message;
if (message.includes('Rebuilding Library because the asset database could not be found!')) {
core.warning('LIBRARY NOT FOUND!');
}
core.info(message);
}
}
}
}
}
return readingLogs;
}
private static async getLogStream(kinesis: AWS.Kinesis, kinesisStreamName: string) {
return await kinesis
.describeStream({
StreamName: kinesisStreamName,
})
.promise();
}
private static async getLogIterator(kinesis: AWS.Kinesis, stream) {
return (
(
await kinesis
.getShardIterator({
ShardIteratorType: 'TRIM_HORIZON',
StreamName: stream.StreamDescription.StreamName,
ShardId: stream.StreamDescription.Shards[0].ShardId,
})
.promise()
).ShardIterator || ''
);
}
}
export default AWSBuildRunner;
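The two extracted helpers wrap the standard Kinesis tailing pattern: describe the stream, open a TRIM_HORIZON shard iterator on its first shard, then call getRecords repeatedly and follow NextShardIterator. A condensed sketch of one read cycle, assuming the AWS SDK v2 and gzipped, base64-encoded CloudWatch Logs subscription payloads as in the class above:

import * as AWS from 'aws-sdk';
import * as zlib from 'zlib';

async function readLogBatch(kinesis: AWS.Kinesis, streamName: string) {
    const stream = await kinesis.describeStream({ StreamName: streamName }).promise();
    const iterator =
        (
            await kinesis
                .getShardIterator({
                    ShardIteratorType: 'TRIM_HORIZON', // start from the oldest record
                    StreamName: stream.StreamDescription.StreamName,
                    ShardId: stream.StreamDescription.Shards[0].ShardId,
                })
                .promise()
        ).ShardIterator || '';
    const records = await kinesis.getRecords({ ShardIterator: iterator }).promise();
    for (const record of records.Records) {
        // CloudWatch Logs subscriptions deliver gzipped JSON payloads.
        const json = JSON.parse(zlib.gunzipSync(Buffer.from(record.Data as string, 'base64')).toString('utf8'));
        if (json.messageType === 'DATA_MESSAGE') {
            for (const event of json.logEvents) console.log(event.message);
        }
    }
    return records.NextShardIterator; // feed back in to keep tailing
}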

View File

@@ -308,20 +308,8 @@ class CloudRunner {
${this.getCloneNoLFSCommand()}
echo 'Source repository initialized'
echo ' '
${process.env.DEBUG ? '' : '#'}echo $LFS_ASSETS_HASH
${process.env.DEBUG ? '' : '#'}echo 'Large File before LFS caching and pull'
${process.env.DEBUG ? '' : '#'}ls -alh "${this.lfsDirectory}"
${process.env.DEBUG ? '' : '#'}echo ' '
echo 'Starting checks of cache for the Unity project Library and git LFS files'
${this.getHandleCachingCommand()}
${process.env.DEBUG ? '' : '#'}echo 'Caching complete'
${process.env.DEBUG ? '' : '#'}echo ' '
${process.env.DEBUG ? '' : '#'}echo 'Large File after LFS caching and pull'
${process.env.DEBUG ? '' : '#'}ls -alh "${this.lfsDirectory}"
${process.env.DEBUG ? '' : '#'}echo ' '
${process.env.DEBUG ? '' : '#'}tree -L 4 "${this.buildPathFull}"
${process.env.DEBUG ? '' : '#'}ls -lh "/${buildVolumeFolder}"
${process.env.DEBUG ? '' : '#'}echo ' '
`,
],
`/${buildVolumeFolder}`,

View File

@@ -31,16 +31,41 @@ class KubernetesStorage {
}
public static async getPVCPhase(kubeClient: k8s.CoreV1Api, name: string, namespace: string) {
return (await kubeClient.readNamespacedPersistentVolumeClaim(name, namespace)).body.status?.phase;
try {
return (await kubeClient.readNamespacedPersistentVolumeClaim(name, namespace)).body.status?.phase;
} catch (error) {
core.error('Failed to get PVC phase');
core.error(JSON.stringify(error, undefined, 4));
throw error;
}
}
public static async watchUntilPVCNotPending(kubeClient: k8s.CoreV1Api, name: string, namespace: string) {
core.info(`watch Until PVC Not Pending ${name} ${namespace}`);
core.info(`${await this.getPVCPhase(kubeClient, name, namespace)}`);
await waitUntil(async () => (await this.getPVCPhase(kubeClient, name, namespace)) !== 'Pending', {
timeout: 500000,
intervalBetweenAttempts: 15000,
});
try {
core.info(`watch Until PVC Not Pending ${name} ${namespace}`);
core.info(`${await this.getPVCPhase(kubeClient, name, namespace)}`);
await waitUntil(
async () => {
return (await this.getPVCPhase(kubeClient, name, namespace)) !== 'Pending';
},
{
timeout: 500000,
intervalBetweenAttempts: 15000,
},
);
} catch (error) {
core.error('Failed to watch PVC');
core.error(error);
core.error(JSON.stringify(error, undefined, 4));
core.error(
`PVC Body: ${JSON.stringify(
(await kubeClient.readNamespacedPersistentVolumeClaim(name, namespace)).body,
undefined,
4,
)}`,
);
throw error;
}
}
private static async createPVC(
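For scale: the watch polls every 15 seconds (intervalBetweenAttempts: 15000) and gives up after 500000 ms, roughly 8.3 minutes (timeout: 500000), at which point waitUntil rejects and the new catch block dumps the PVC body. A minimal usage sketch of async-wait-until with those settings, where getPhase is a hypothetical stand-in for getPVCPhase:

import waitUntil from 'async-wait-until';

// Hypothetical stand-in for KubernetesStorage.getPVCPhase.
declare function getPhase(): Promise<string | undefined>;

async function waitForBound() {
    // Poll every 15 s, for up to ~8.3 min, until the claim leaves 'Pending'.
    await waitUntil(async () => (await getPhase()) !== 'Pending', {
        timeout: 500000,
        intervalBetweenAttempts: 15000,
    });
}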
@@ -57,7 +82,7 @@ class KubernetesStorage {
};
pvc.spec = {
accessModes: ['ReadWriteOnce'],
storageClassName: process.env.K8s_STORAGE_CLASS || 'fileserver',
storageClassName: process.env.K8s_STORAGE_CLASS || 'standard',
resources: {
requests: {
storage: buildParameters.kubeVolumeSize,
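The default storage class changes from 'fileserver' (the Filestore-backed NFS class described in the comment block removed below) to 'standard', the stock class on most clusters; setting K8s_STORAGE_CLASS still overrides it. A minimal sketch of the claim this spec produces, assuming @kubernetes/client-node and an illustrative name:

import * as k8s from '@kubernetes/client-node';

// Illustrative PVC matching the spec built here ('unity-builder-pvc' is a made-up name).
const pvc = new k8s.V1PersistentVolumeClaim();
pvc.apiVersion = 'v1';
pvc.kind = 'PersistentVolumeClaim';
pvc.metadata = { name: 'unity-builder-pvc' };
pvc.spec = {
    accessModes: ['ReadWriteOnce'],
    storageClassName: process.env.K8s_STORAGE_CLASS || 'standard', // previously 'fileserver'
    resources: { requests: { storage: '5Gi' } },
};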
@@ -85,64 +110,3 @@ class KubernetesStorage {
}
export default KubernetesStorage;
/*
It's possible now with Cloud Filestore.
First create a Filestore instance.
gcloud filestore instances create nfs-server
--project=[PROJECT_ID]
--zone=us-central1-c
--tier=STANDARD
--file-share=name="vol1",capacity=1TB
--network=name="default",reserved-ip-range="10.0.0.0/29"
Then create a persistent volume in GKE.
apiVersion: v1
kind: PersistentVolume
metadata:
name: fileserver
spec:
capacity:
storage: 1T
accessModes:
- ReadWriteMany
nfs:
path: /vol1
server: [IP_ADDRESS]
[IP_ADDRESS] is available in filestore instance details.
You can now request a persistent volume claim.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: fileserver-claim
spec:
accessModes:
- ReadWriteMany
storageClassName: "fileserver"
resources:
requests:
storage: 100G
Finally, mount the volume in your pod.
apiVersion: v1
kind: Pod
metadata:
name: my-pod
spec:
containers:
- name: my-container
image: nginx:latest
volumeMounts:
- mountPath: /workdir
name: mypvc
volumes:
- name: mypvc
persistentVolumeClaim:
claimName: fileserver-claim
readOnly: false
Solution is detailed here : https://cloud.google.com/filestore/docs/accessing-fileshares
*/

View File

@@ -2,7 +2,7 @@ Arguments:
C:\Program Files\nodejs\node.exe C:\Program Files (x86)\Yarn\bin\yarn.js
PATH:
C:\Users\Mark\AppData\Local\Temp\yarn--1628884715167-0.3847568910300898;C:\Users\Mark\Documents\GitHub\unity-builder\node_modules\.bin;C:\Users\Mark\AppData\Local\Yarn\Data\link\node_modules\.bin;C:\Program Files\libexec\lib\node_modules\npm\bin\node-gyp-bin;C:\Program Files\lib\node_modules\npm\bin\node-gyp-bin;C:\Program Files\nodejs\node_modules\npm\bin\node-gyp-bin;C:\Users\Mark\AppData\Local\cloud-code\installer\google-cloud-sdk\bin;c:\Users\Mark\AppData\Roaming\Code\User\globalStorage\microsoft-isvexptools.powerplatform-vscode\pac\tools;C:\Program Files (x86)\Common Files\Oracle\Java\javapath;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Windows\System32\OpenSSH\;C:\Program Files\NVIDIA Corporation\NVIDIA NvDLISR;C:\Program Files (x86)\NVIDIA Corporation\PhysX\Common;C:\Program Files\Git LFS;C:\ProgramData\chocolatey\bin;C:\Program Files\dotnet\;C:\Program Files (x86)\Yarn\bin\;C:\Program Files\nodejs\;C:\Program Files\Git\cmd;C:\Program Files\Docker\Docker\resources\bin;C:\ProgramData\DockerDesktop\version-bin;C:\Program Files\Amazon\AWSCLIV2\;C:\Program Files (x86)\Google\Cloud SDK\google-cloud-sdk\bin;C:\Users\Mark\AppData\Local\Microsoft\PowerAppsCLI\;C:\Users\Mark\AppData\Local\Microsoft\WindowsApps;C:\Users\Mark\AppData\Local\Programs\Microsoft VS Code\bin;C:\Users\Mark\AppData\Local\GitHubDesktop\bin;C:\Users\Mark\.dotnet\tools;C:\Users\Mark\AppData\Local\Yarn\bin;C:\Users\Mark\AppData\Roaming\npm;C:\Program Files (x86)\GitHub CLI\
C:\Users\Mark\AppData\Local\Temp\yarn--1631471515116-0.29510230599108156;C:\Users\Mark\Documents\GitHub\unity-builder\node_modules\.bin;C:\Users\Mark\AppData\Local\Yarn\Data\link\node_modules\.bin;C:\Program Files\libexec\lib\node_modules\npm\bin\node-gyp-bin;C:\Program Files\lib\node_modules\npm\bin\node-gyp-bin;C:\Program Files\nodejs\node_modules\npm\bin\node-gyp-bin;C:\Users\Mark\AppData\Local\cloud-code\installer\google-cloud-sdk\bin;c:\Users\Mark\AppData\Roaming\Code\User\globalStorage\microsoft-isvexptools.powerplatform-vscode\pac\tools;C:\Program Files (x86)\Common Files\Oracle\Java\javapath;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Windows\System32\OpenSSH\;C:\Program Files\NVIDIA Corporation\NVIDIA NvDLISR;C:\Program Files (x86)\NVIDIA Corporation\PhysX\Common;C:\Program Files\Git LFS;C:\ProgramData\chocolatey\bin;C:\Program Files\dotnet\;C:\Program Files (x86)\Yarn\bin\;C:\Program Files\nodejs\;C:\Program Files\Git\cmd;C:\Program Files\Docker\Docker\resources\bin;C:\ProgramData\DockerDesktop\version-bin;C:\Program Files\Amazon\AWSCLIV2\;C:\Program Files (x86)\Google\Cloud SDK\google-cloud-sdk\bin;C:\Users\Mark\AppData\Local\Microsoft\PowerAppsCLI\;C:\Users\Mark\AppData\Local\Microsoft\WindowsApps;C:\Users\Mark\AppData\Local\Programs\Microsoft VS Code\bin;C:\Users\Mark\AppData\Local\GitHubDesktop\bin;C:\Users\Mark\.dotnet\tools;C:\Users\Mark\AppData\Local\Yarn\bin;C:\Users\Mark\AppData\Roaming\npm;C:\Program Files (x86)\GitHub CLI\
Yarn version:
1.22.5
@@ -41,7 +41,8 @@ npm manifest:
"aws-sdk": "^2.812.0",
"base-64": "^1.0.0",
"nanoid": "3.1.20",
"semver": "^7.3.2"
"semver": "^7.3.2",
"yaml": "^1.10.2"
},
"devDependencies": {
"@types/jest": "^26.0.15",
@@ -5857,7 +5858,7 @@ Lockfile:
resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72"
integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==
yaml@^1.10.0, yaml@^1.7.2:
yaml@^1.10.0, yaml@^1.10.2, yaml@^1.7.2:
version "1.10.2"
resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b"
integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==