ls active directory before lfs install
parent fd4aa8b7e9
commit e2e84372e3
@@ -961,9 +961,10 @@ class SetupCloudRunnerRepository {
remote_client_logger_1.RemoteClientLogger.log(`Cloning the repository being built:`);
yield cloud_runner_system_1.CloudRunnerSystem.Run(`git config --global filter.lfs.smudge "git-lfs smudge --skip -- %f"`);
yield cloud_runner_system_1.CloudRunnerSystem.Run(`git config --global filter.lfs.process "git-lfs filter-process --skip"`);
yield cloud_runner_system_1.CloudRunnerSystem.Run(`git lfs install`);
yield cloud_runner_system_1.CloudRunnerSystem.Run(`ls`);
yield cloud_runner_system_1.CloudRunnerSystem.Run(`git clone ${cloud_runner_folders_1.CloudRunnerFolders.targetBuildRepoUrl} ${path_1.default.resolve(`..`, path_1.default.basename(cloud_runner_folders_1.CloudRunnerFolders.repoPathFull))}`);
console_1.assert(fs_1.default.existsSync(`.git`));
yield cloud_runner_system_1.CloudRunnerSystem.Run(`git lfs install`);
console_1.assert(fs_1.default.existsSync(`.git`), 'git folder exists');
remote_client_logger_1.RemoteClientLogger.log(`${cloud_runner_1.default.buildParameters.branch}`);
yield cloud_runner_system_1.CloudRunnerSystem.Run(`git checkout ${cloud_runner_1.default.buildParameters.branch}`);
console_1.assert(fs_1.default.existsSync(path_1.default.join(`.git`, `lfs`)), 'LFS folder should not exist before caching');
@@ -1969,7 +1970,6 @@ const async_wait_until_1 = __importDefault(__nccwpck_require__(41299));
const kubernetes_job_spec_factory_1 = __importDefault(__nccwpck_require__(1739));
const kubernetes_service_account_1 = __importDefault(__nccwpck_require__(42915));
const cloud_runner_logger_1 = __importDefault(__nccwpck_require__(22855));
const kubernetes_rook_1 = __importDefault(__nccwpck_require__(20859));
const depdency_override_service_1 = __importDefault(__nccwpck_require__(69862));
class Kubernetes {
constructor(buildParameters) {
@@ -1999,7 +1999,6 @@ class Kubernetes {
this.pvcName = `unity-builder-pvc-${buildGuid}`;
this.cleanupCronJobName = `unity-builder-cronjob-${buildGuid}`;
this.serviceAccountName = `service-account-${buildGuid}`;
yield kubernetes_rook_1.default.InitRook(buildParameters.kubeStorageClass);
if (yield depdency_override_service_1.default.CheckHealth()) {
yield depdency_override_service_1.default.TryStartDependencies();
}
@@ -2273,103 +2272,6 @@ class KubernetesJobSpecFactory {
exports["default"] = KubernetesJobSpecFactory;


/***/ }),

/***/ 20859:
/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) {

"use strict";

var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", ({ value: true }));
const fs_1 = __importDefault(__nccwpck_require__(57147));
const cloud_runner_system_1 = __nccwpck_require__(66879);
const cloud_runner_logger_1 = __importDefault(__nccwpck_require__(22855));
class KubernetesRook {
static InitRook(storageName) {
return __awaiter(this, void 0, void 0, function* () {
if (storageName === '' && (yield cloud_runner_system_1.CloudRunnerSystem.Run(`kubectl`))) {
storageName = KubernetesRook.rookStorageName;
cloud_runner_logger_1.default.log('Using rook storage as no kubeStorageClass provided');
yield cloud_runner_system_1.CloudRunnerSystem.Run(`
git clone --single-branch --branch v1.8.6 https://github.com/rook/rook.git
cd rook/deploy/examples
kubectl apply -f crds.yaml -f common.yaml -f operator.yaml
kubectl apply -f cluster.yaml
`);
fs_1.default.writeFileSync('filesystem.yaml', `
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
name: myfs
namespace: rook-ceph
spec:
metadataPool:
replicated:
size: 3
dataPools:
- name: replicated
replicated:
size: 3
preserveFilesystemOnDelete: true
metadataServer:
activeCount: 1
activeStandby: true
`);
fs_1.default.writeFileSync('storageclass.yaml', `
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: ${storageName}
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
# If you change this namespace, also change the namespace below where the secret namespaces are defined
clusterID: rook-ceph

# CephFS filesystem name into which the volume shall be created
fsName: myfs

# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: myfs-replicated

# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
`);
yield cloud_runner_system_1.CloudRunnerSystem.Run(`
kubectl apply -f storageclass.yaml -f filesystem.yaml
`);
}
else {
cloud_runner_logger_1.default.log(`Using kubeStorageClass ${storageName}`);
}
});
}
}
KubernetesRook.rookStorageName = 'rook-cephfs-game-ci';
exports["default"] = KubernetesRook;


/***/ }),

/***/ 71586:
@@ -2527,7 +2429,6 @@ const core = __importStar(__nccwpck_require__(42186));
const k8s = __importStar(__nccwpck_require__(89679));
const cloud_runner_logger_1 = __importDefault(__nccwpck_require__(22855));
const yaml_1 = __importDefault(__nccwpck_require__(44603));
const kubernetes_rook_1 = __importDefault(__nccwpck_require__(20859));
class KubernetesStorage {
static createPersistentVolumeClaim(buildParameters, pvcName, kubeClient, namespace) {
return __awaiter(this, void 0, void 0, function* () {
@@ -2594,7 +2495,7 @@ class KubernetesStorage {
};
pvc.spec = {
accessModes: ['ReadWriteOnce'],
storageClassName: buildParameters.kubeStorageClass === '' ? kubernetes_rook_1.default.rookStorageName : buildParameters.kubeStorageClass,
storageClassName: buildParameters.kubeStorageClass === '' ? 'standard' : buildParameters.kubeStorageClass,
resources: {
requests: {
storage: buildParameters.kubeVolumeSize,
@@ -3040,15 +2941,15 @@ class TaskParameterSerializer {
},
{
name: 'UNITY_SERIAL',
value: __1.Input.unitySerial,
value: __1.Input.queryOverrides['UNITY_SERIAL'],
},
{
name: 'UNITY_USERNAME',
value: __1.Input.unityUsername,
value: __1.Input.queryOverrides['UNITY_EMAIL'],
},
{
name: 'UNITY_PASSWORD',
value: __1.Input.unityPassword,
value: __1.Input.queryOverrides['UNITY_PASSWORD'],
},
...TaskParameterSerializer.serializeBuildParamsAndInput,
];
@@ -4059,15 +3960,6 @@ class Input {
static get region() {
return Input.getInput('region') || 'eu-west-2';
}
static get unitySerial() {
return Input.getInput('UNITY_SERIAL') || false;
}
static get unityUsername() {
return Input.getInput('UNITY_USERNAME') || false;
}
static get unityPassword() {
return Input.getInput('UNITY_PASSWORD') || false;
}
static get githubRepo() {
return Input.getInput('GITHUB_REPOSITORY') || Input.getInput('GITHUB_REPO') || false;
}
File diff suppressed because one or more lines are too long
@@ -47,14 +47,15 @@ export class SetupCloudRunnerRepository {
RemoteClientLogger.log(`Cloning the repository being built:`);
await CloudRunnerSystem.Run(`git config --global filter.lfs.smudge "git-lfs smudge --skip -- %f"`);
await CloudRunnerSystem.Run(`git config --global filter.lfs.process "git-lfs filter-process --skip"`);
await CloudRunnerSystem.Run(`git lfs install`);
await CloudRunnerSystem.Run(`ls`);
await CloudRunnerSystem.Run(
`git clone ${CloudRunnerFolders.targetBuildRepoUrl} ${path.resolve(
`..`,
path.basename(CloudRunnerFolders.repoPathFull),
)}`,
);
assert(fs.existsSync(`.git`));
await CloudRunnerSystem.Run(`git lfs install`);
assert(fs.existsSync(`.git`), 'git folder exists');
RemoteClientLogger.log(`${CloudRunner.buildParameters.branch}`);
await CloudRunnerSystem.Run(`git checkout ${CloudRunner.buildParameters.branch}`);
assert(fs.existsSync(path.join(`.git`, `lfs`)), 'LFS folder should not exist before caching');
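For reference, the clone sequence in this hunk reduces to the following standalone sketch, using Node's child_process in place of CloudRunnerSystem.Run; repoUrl, branch and targetDir are hypothetical stand-ins for CloudRunnerFolders.targetBuildRepoUrl, CloudRunner.buildParameters.branch and the resolved repository path:

import { execSync } from 'child_process';
import path from 'path';

// Minimal sketch of the flow shown in the diff above, assuming a plain shell environment.
function cloneWithoutLfsSmudge(repoUrl: string, branch: string, targetDir: string) {
  const run = (command: string) => execSync(command, { stdio: 'inherit' });

  // Disable LFS smudging so the clone only downloads pointer files, not the binaries.
  run(`git config --global filter.lfs.smudge "git-lfs smudge --skip -- %f"`);
  run(`git config --global filter.lfs.process "git-lfs filter-process --skip"`);
  run(`git lfs install`);
  run(`ls`); // log the contents of the active directory for debugging

  run(`git clone ${repoUrl} ${path.resolve(targetDir)}`);
  run(`git -C ${path.resolve(targetDir)} checkout ${branch}`);
  // .git/lfs is expected to be absent at this point; LFS content is pulled separately
  // later, which is what the final assert in the diff checks.
}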
@@ -12,7 +12,6 @@ import KubernetesJobSpecFactory from './kubernetes-job-spec-factory';
import KubernetesServiceAccount from './kubernetes-service-account';
import CloudRunnerLogger from '../services/cloud-runner-logger';
import { CoreV1Api } from '@kubernetes/client-node';
import KubernetesRook from './kubernetes-rook';
import DependencyOverrideService from '../services/depdency-override-service';

class Kubernetes implements CloudRunnerProviderInterface {
@@ -52,7 +51,6 @@ class Kubernetes implements CloudRunnerProviderInterface {
this.pvcName = `unity-builder-pvc-${buildGuid}`;
this.cleanupCronJobName = `unity-builder-cronjob-${buildGuid}`;
this.serviceAccountName = `service-account-${buildGuid}`;
await KubernetesRook.InitRook(buildParameters.kubeStorageClass);
if (await DependencyOverrideService.CheckHealth()) {
await DependencyOverrideService.TryStartDependencies();
}
@@ -1,80 +0,0 @@
import fs from 'fs';
import { CloudRunnerSystem } from '../../cli/remote-client/remote-client-services/cloud-runner-system';
import CloudRunnerLogger from '../services/cloud-runner-logger';

class KubernetesRook {
public static readonly rookStorageName = 'rook-cephfs-game-ci';
public static async InitRook(storageName) {
if (storageName === '' && (await CloudRunnerSystem.Run(`kubectl`))) {
storageName = KubernetesRook.rookStorageName;
CloudRunnerLogger.log('Using rook storage as no kubeStorageClass provided');
await CloudRunnerSystem.Run(`
git clone --single-branch --branch v1.8.6 https://github.com/rook/rook.git
cd rook/deploy/examples
kubectl apply -f crds.yaml -f common.yaml -f operator.yaml
kubectl apply -f cluster.yaml
`);
fs.writeFileSync(
'filesystem.yaml',
`
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
name: myfs
namespace: rook-ceph
spec:
metadataPool:
replicated:
size: 3
dataPools:
- name: replicated
replicated:
size: 3
preserveFilesystemOnDelete: true
metadataServer:
activeCount: 1
activeStandby: true
`,
);
fs.writeFileSync(
'storageclass.yaml',
`
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: ${storageName}
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
# clusterID is the namespace where the rook cluster is running
# If you change this namespace, also change the namespace below where the secret namespaces are defined
clusterID: rook-ceph

# CephFS filesystem name into which the volume shall be created
fsName: myfs

# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: myfs-replicated

# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
`,
);
await CloudRunnerSystem.Run(`
kubectl apply -f storageclass.yaml -f filesystem.yaml
`);
} else {
CloudRunnerLogger.log(`Using kubeStorageClass ${storageName}`);
}
}
}

export default KubernetesRook;
@@ -4,7 +4,6 @@ import * as k8s from '@kubernetes/client-node';
import BuildParameters from '../../build-parameters';
import CloudRunnerLogger from '../services/cloud-runner-logger';
import YAML from 'yaml';
import KubernetesRook from './kubernetes-rook';

class KubernetesStorage {
public static async createPersistentVolumeClaim(
@@ -86,8 +85,7 @@ class KubernetesStorage {
};
pvc.spec = {
accessModes: ['ReadWriteOnce'],
storageClassName:
buildParameters.kubeStorageClass === '' ? KubernetesRook.rookStorageName : buildParameters.kubeStorageClass,
storageClassName: buildParameters.kubeStorageClass === '' ? 'standard' : buildParameters.kubeStorageClass,
resources: {
requests: {
storage: buildParameters.kubeVolumeSize,
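As a rough illustration of what the adjusted claim looks like end to end, the following sketch builds and submits an equivalent PVC with @kubernetes/client-node; createPvc and its parameters are hypothetical, and the positional createNamespacedPersistentVolumeClaim call assumes an older client release:

import * as k8s from '@kubernetes/client-node';

// Hypothetical helper mirroring the spec above: with rook support removed, an empty
// kubeStorageClass now falls back to the cluster's 'standard' storage class instead
// of rook-cephfs-game-ci.
async function createPvc(
  kubeClient: k8s.CoreV1Api,
  namespace: string,
  pvcName: string,
  kubeStorageClass: string,
  kubeVolumeSize: string,
) {
  const pvc = new k8s.V1PersistentVolumeClaim();
  pvc.apiVersion = 'v1';
  pvc.kind = 'PersistentVolumeClaim';
  pvc.metadata = { name: pvcName };
  pvc.spec = {
    accessModes: ['ReadWriteOnce'],
    storageClassName: kubeStorageClass === '' ? 'standard' : kubeStorageClass,
    resources: { requests: { storage: kubeVolumeSize } },
  };
  // Older @kubernetes/client-node releases take (namespace, body) positionally.
  await kubeClient.createNamespacedPersistentVolumeClaim(namespace, pvc);
}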
@@ -21,15 +21,15 @@ export class TaskParameterSerializer {
},
{
name: 'UNITY_SERIAL',
value: Input.unitySerial,
value: Input.queryOverrides['UNITY_SERIAL'],
},
{
name: 'UNITY_USERNAME',
value: Input.unityUsername,
value: Input.queryOverrides['UNITY_EMAIL'],
},
{
name: 'UNITY_PASSWORD',
value: Input.unityPassword,
value: Input.queryOverrides['UNITY_PASSWORD'],
},
...TaskParameterSerializer.serializeBuildParamsAndInput,
];
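The same substitution in plain form — a hypothetical helper showing how the credential environment variables are now read from the queryOverrides map rather than the Input getters (note that UNITY_USERNAME is populated from the UNITY_EMAIL override key):

// Sketch only; queryOverrides is assumed to be a plain string map built elsewhere
// from the override parameters passed to the cloud runner.
function credentialEnvironmentVariables(queryOverrides: { [key: string]: string }) {
  return [
    { name: 'UNITY_SERIAL', value: queryOverrides['UNITY_SERIAL'] },
    // The username variable is sourced from the UNITY_EMAIL override.
    { name: 'UNITY_USERNAME', value: queryOverrides['UNITY_EMAIL'] },
    { name: 'UNITY_PASSWORD', value: queryOverrides['UNITY_PASSWORD'] },
  ];
}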
@@ -94,15 +94,6 @@ class Input {
static get region(): string {
return Input.getInput('region') || 'eu-west-2';
}
static get unitySerial(): string {
return Input.getInput('UNITY_SERIAL') || false;
}
static get unityUsername(): string {
return Input.getInput('UNITY_USERNAME') || false;
}
static get unityPassword(): string {
return Input.getInput('UNITY_PASSWORD') || false;
}

static get githubRepo() {
return Input.getInput('GITHUB_REPOSITORY') || Input.getInput('GITHUB_REPO') || false;
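For orientation, the removed getters followed the same lookup pattern still visible in githubRepo; a rough sketch of that pattern, assuming Input.getInput tries the GitHub Actions input first and then falls back to the raw environment variable:

import * as core from '@actions/core';

// Assumed shape of Input.getInput: prefer the action input, fall back to the environment.
function getInput(query: string): string | undefined {
  return core.getInput(query) || process.env[query] || undefined;
}

// The surviving getter keeps the same "first match or false" shape:
const githubRepo = getInput('GITHUB_REPOSITORY') || getInput('GITHUB_REPO') || false;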