unity-builder/src/model/cloud-runner/providers/k8s/kubernetes-task-runner.ts

import { CoreV1Api, KubeConfig } from '@kubernetes/client-node';
import CloudRunnerLogger from '../../services/cloud-runner-logger';
import * as core from '@actions/core';
import waitUntil from 'async-wait-until';
import { FollowLogStreamService } from '../../services/follow-log-stream-service';
import { CloudRunnerSystem } from '../../services/cloud-runner-system';
import CloudRunner from '../../cloud-runner';
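/**
 * Runs and monitors a cloud-runner task inside a Kubernetes pod: streams the
 * container's logs via kubectl and waits for pods to start. The last received
 * timestamp and message are cached so an interrupted stream can be resumed
 * without losing or duplicating output.
 *
 * @example
 * // a minimal sketch; assumes a reachable cluster, and the resource names are placeholders
 * const kubeConfig = new KubeConfig();
 * kubeConfig.loadFromDefault();
 * const kubeClient = kubeConfig.makeApiClient(CoreV1Api);
 * await KubernetesTaskRunner.watchUntilPodRunning(kubeClient, 'unity-builder-pod', 'default');
 * const output = await KubernetesTaskRunner.runTask(kubeConfig, kubeClient, 'unity-builder-job', 'unity-builder-pod', 'main', 'default');
 */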
class KubernetesTaskRunner {
static lastReceivedTimestamp: number = 0;
static lastReceivedMessage: string = ``;
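/**
 * Follows the container's logs until FollowLogStreamService reports end of
 * transmission, then returns the accumulated output. Throws if no logs could
 * be streamed at all.
 */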
static async runTask(
kubeConfig: KubeConfig,
kubeClient: CoreV1Api,
jobName: string,
podName: string,
containerName: string,
namespace: string,
) {
const lastReceivedMessage =
KubernetesTaskRunner.lastReceivedTimestamp > 0
? `\nLast Log Message "${KubernetesTaskRunner.lastReceivedMessage}" ${KubernetesTaskRunner.lastReceivedTimestamp}`
: ``;
CloudRunnerLogger.log(
`Streaming logs from pod: ${podName} container: ${containerName} namespace: ${namespace} ${CloudRunner.buildParameters.kubeVolumeSize}/${CloudRunner.buildParameters.containerCpu}/${CloudRunner.buildParameters.containerMemory}\n${lastReceivedMessage}`,
);
let output = '';
let didStreamAnyLogs: boolean = false;
let shouldReadLogs = true;
let shouldCleanup = true;
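// poll kubectl in a loop, resuming from the last received timestamp each pass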
// eslint-disable-next-line no-constant-condition
while (true) {
let sinceTime = ``;
if (KubernetesTaskRunner.lastReceivedTimestamp > 0) {
const currentDate = new Date(KubernetesTaskRunner.lastReceivedTimestamp);
const dateTimeIsoString = currentDate.toISOString();
sinceTime = ` --since-time="${dateTimeIsoString}"`;
}
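// kubectl can replay the line we already handled (--since-time granularity),
// so track whether that line shows up in this batch and skip duplicates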
let lastMessageSeenIncludedInChunk = false;
let lastMessageSeen = false;
// stream logs by shelling out to kubectl instead of using the Kubernetes client log API
try {
const logs = await CloudRunnerSystem.Run(
`kubectl logs ${podName} -f -c ${containerName} --timestamps${sinceTime}`,
false,
true,
);
const splitLogs = logs.split(`\n`);
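// first pass: check whether the previously processed line appears in this batch,
// so the second pass can skip everything up to and including it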
for (const chunk of splitLogs) {
if (
chunk.replace(/\s/g, ``) === KubernetesTaskRunner.lastReceivedMessage.replace(/\s/g, ``) &&
KubernetesTaskRunner.lastReceivedMessage.replace(/\s/g, ``) !== ``
) {
CloudRunnerLogger.log(`Previous log message found ${chunk}`);
lastMessageSeenIncludedInChunk = true;
}
}
for (const chunk of splitLogs) {
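// with --timestamps, each line is prefixed "<RFC3339 timestamp> <message>"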
// skip the trailing empty element produced by the final newline
if (chunk.trim() === ``) continue;
const newDate = Date.parse(`${chunk.split(`Z `)[0]}Z`);
if (chunk.replace(/\s/g, ``) === KubernetesTaskRunner.lastReceivedMessage.replace(/\s/g, ``)) {
lastMessageSeen = true;
// this exact line was already processed during the previous iteration
if (lastMessageSeenIncludedInChunk) continue;
}
if (lastMessageSeenIncludedInChunk && !lastMessageSeen) {
continue;
}
didStreamAnyLogs = true;
const message = CloudRunner.buildParameters.cloudRunnerDebug ? chunk : chunk.split(`Z `)[1];
KubernetesTaskRunner.lastReceivedMessage = chunk;
KubernetesTaskRunner.lastReceivedTimestamp = newDate;
({ shouldReadLogs, shouldCleanup, output } = FollowLogStreamService.handleIteration(
message,
shouldReadLogs,
shouldCleanup,
output,
));
}
} catch (error: any) {
// a kubectl failure (connection drop, pod completed) lands here; coerce to a
// string first, since the thrown value may not support .includes
// NOTE: matching on `s` effectively treats any error as the end of the stream
if (`${error}`.includes(`s`)) {
break;
}
}
if (!didStreamAnyLogs) {
core.error('Failed to stream any logs; listing namespace events to check for a container error');
core.error(
JSON.stringify(
{
events: (await kubeClient.listNamespacedEvent(namespace)).body.items
.filter((x) => {
return x.involvedObject.name === podName || x.involvedObject.name === jobName;
})
.map((x) => {
return {
type: x.involvedObject.kind,
name: x.involvedObject.name,
message: x.message,
};
}),
},
undefined,
4,
),
);
throw new Error(`No logs streamed from k8s`);
}
if (FollowLogStreamService.DidReceiveEndOfTransmission) {
CloudRunnerLogger.log('end of log stream');
break;
}
}
return output;
}
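/**
 * Polls the pod's status until it leaves the Pending phase (PVC binding can hold
 * pods in Pending for a while). Returns true once the pod reports Running, false
 * if it reaches any other phase first.
 */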
static async watchUntilPodRunning(kubeClient: CoreV1Api, podName: string, namespace: string) {
let success: boolean = false;
let message = ``;
CloudRunnerLogger.log(`Watching pod ${podName} in namespace ${namespace}`);
await waitUntil(
async () => {
const status = await kubeClient.readNamespacedPodStatus(podName, namespace);
const phase = status?.body.status?.phase;
success = phase === 'Running';
message = `Phase:${phase} \n Reason:${status.body.status?.conditions?.[0]?.reason || ''} \n Message:${status.body.status?.conditions?.[0]?.message || ''}`;
// (debug) dump namespace events filtered by pod name here when diagnosing scheduling issues
if (success || phase !== 'Pending') return true;
return false;
},
{
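// poll every 15 seconds for up to ~33 minutes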
timeout: 2000000,
intervalBetweenAttempts: 15000,
},
);
if (!success) {
CloudRunnerLogger.log(message);
}
return success;
}
}
export default KubernetesTaskRunner;