fix logstream error k8s
parent d570e8bac6
commit 239c747b47
@@ -2967,6 +2967,7 @@ const kubernetes_job_spec_factory_1 = __importDefault(__nccwpck_require__(3610))
 const kubernetes_service_account_1 = __importDefault(__nccwpck_require__(47319));
 const cloud_runner_logger_1 = __importDefault(__nccwpck_require__(22855));
 const cloud_runner_1 = __importDefault(__nccwpck_require__(79144));
+const kubernetes_pods_1 = __importDefault(__nccwpck_require__(90740));
 class Kubernetes {
     // eslint-disable-next-line no-unused-vars
     constructor(buildParameters) {
@@ -2978,6 +2979,7 @@ class Kubernetes {
         this.containerName = '';
         this.cleanupCronJobName = '';
         this.serviceAccountName = '';
+        Kubernetes.Instance = this;
         this.kubeConfig = new k8s.KubeConfig();
         this.kubeConfig.loadFromDefault();
         this.kubeClient = this.kubeConfig.makeApiClient(k8s.CoreV1Api);
@@ -3057,7 +3059,7 @@ class Kubernetes {
         });
     }
     runTaskInWorkflow(buildGuid, image, commands, mountdir, workingdir, environment, secrets) {
-        var _a, _b, _c, _d, _e;
+        var _a, _b;
         return __awaiter(this, void 0, void 0, function* () {
             try {
                 cloud_runner_logger_1.default.log('Cloud Runner K8s workflow!');
@@ -3077,10 +3079,9 @@ class Kubernetes {
                         yield kubernetes_task_runner_1.default.watchUntilPodRunning(this.kubeClient, this.podName, this.namespace);
                         cloud_runner_logger_1.default.log('Pod running, streaming logs');
                         output = yield kubernetes_task_runner_1.default.runTask(this.kubeConfig, this.kubeClient, this.jobName, this.podName, 'main', this.namespace);
-                        const pods = (yield this.kubeClient.listNamespacedPod(this.namespace)).body.items.filter((x) => { var _a; return this.podName === ((_a = x.metadata) === null || _a === void 0 ? void 0 : _a.name); });
-                        const running = pods.length > 0 && (((_a = pods[0].status) === null || _a === void 0 ? void 0 : _a.phase) === `Running` || ((_b = pods[0].status) === null || _b === void 0 ? void 0 : _b.phase) === `Pending`);
+                        const running = yield kubernetes_pods_1.default.IsPodRunning(this.podName, this.namespace, this.kubeClient);
                         if (!running) {
-                            cloud_runner_logger_1.default.log(`Pod not found, assumed ended! ${((_c = pods[0].status) === null || _c === void 0 ? void 0 : _c.phase) || 'undefined status'}`);
+                            cloud_runner_logger_1.default.log(`Pod not found, assumed ended!`);
                             break;
                         }
                         else {
@@ -3088,8 +3089,10 @@ class Kubernetes {
                         }
                     }
                     catch (error) {
-                        const reason = ((_e = (_d = error.response) === null || _d === void 0 ? void 0 : _d.body) === null || _e === void 0 ? void 0 : _e.reason) || ``;
-                        if (reason === `NotFound`) {
+                        const reason = ((_b = (_a = error.response) === null || _a === void 0 ? void 0 : _a.body) === null || _b === void 0 ? void 0 : _b.reason) || ``;
+                        const errorMessage = error.message || ``;
+                        const continueStreaming = reason === `NotFound` || errorMessage.includes(`dial timeout, backstop`);
+                        if (continueStreaming) {
                             cloud_runner_logger_1.default.log('Log Stream Container Not Found');
                             yield new Promise((resolve) => resolve(5000));
                             continue;
@@ -3349,6 +3352,42 @@ class KubernetesJobSpecFactory {
 exports["default"] = KubernetesJobSpecFactory;


 /***/ }),
+
+/***/ 90740:
+/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) {
+
+"use strict";
+
+var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+    return new (P || (P = Promise))(function (resolve, reject) {
+        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+        step((generator = generator.apply(thisArg, _arguments || [])).next());
+    });
+};
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", ({ value: true }));
+const cloud_runner_logger_1 = __importDefault(__nccwpck_require__(22855));
+class KubernetesPods {
+    static IsPodRunning(podName, namespace, kubeClient) {
+        var _a, _b, _c;
+        return __awaiter(this, void 0, void 0, function* () {
+            const pods = (yield kubeClient.listNamespacedPod(namespace)).body.items.filter((x) => { var _a; return podName === ((_a = x.metadata) === null || _a === void 0 ? void 0 : _a.name); });
+            const running = pods.length > 0 && (((_a = pods[0].status) === null || _a === void 0 ? void 0 : _a.phase) === `Running` || ((_b = pods[0].status) === null || _b === void 0 ? void 0 : _b.phase) === `Pending`);
+            const phase = ((_c = pods[0].status) === null || _c === void 0 ? void 0 : _c.phase) || 'undefined status';
+            cloud_runner_logger_1.default.log(`Getting pod status: ${phase}`);
+            return running;
+        });
+    }
+}
+exports["default"] = KubernetesPods;
+
+
+/***/ }),

 /***/ 95875:
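
The bundle wires the new source file in as numeric module 90740, resolved through __nccwpck_require__. Roughly, the ncc/webpack runtime keeps a table of module factories keyed by those IDs; a simplified sketch of that mechanism (caching and ESM interop omitted, names illustrative):

// Simplified shape of the ncc/webpack module runtime; the real one also
// caches module.exports per ID and handles interop flags.
type ModuleFactory = (module: { exports: any }, exports: any, requireFn: (id: number) => any) => void;

const moduleFactories: Record<number, ModuleFactory> = {
  // e.g. 90740 would map to the KubernetesPods factory shown in the hunk above
};

function nccRequire(id: number): any {
  const module = { exports: {} };
  moduleFactories[id](module, module.exports, nccRequire); // factory populates exports

  return module.exports;
}
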
File diff suppressed because one or more lines are too long
@@ -14,24 +14,27 @@ import { CoreV1Api } from '@kubernetes/client-node';
 import CloudRunner from '../../cloud-runner';
 import { ProviderResource } from '../provider-resource';
 import { ProviderWorkflow } from '../provider-workflow';
+import KubernetesPods from './kubernetes-pods';

 class Kubernetes implements ProviderInterface {
-  private kubeConfig!: k8s.KubeConfig;
-  private kubeClient!: k8s.CoreV1Api;
-  private kubeClientBatch!: k8s.BatchV1Api;
-  private buildGuid: string = '';
-  private buildParameters!: BuildParameters;
-  private pvcName: string = '';
-  private secretName: string = '';
-  private jobName: string = '';
-  private namespace!: string;
-  private podName: string = '';
-  private containerName: string = '';
-  private cleanupCronJobName: string = '';
-  private serviceAccountName: string = '';
+  public static Instance: Kubernetes;
+  public kubeConfig!: k8s.KubeConfig;
+  public kubeClient!: k8s.CoreV1Api;
+  public kubeClientBatch!: k8s.BatchV1Api;
+  public buildGuid: string = '';
+  public buildParameters!: BuildParameters;
+  public pvcName: string = '';
+  public secretName: string = '';
+  public jobName: string = '';
+  public namespace!: string;
+  public podName: string = '';
+  public containerName: string = '';
+  public cleanupCronJobName: string = '';
+  public serviceAccountName: string = '';

   // eslint-disable-next-line no-unused-vars
   constructor(buildParameters: BuildParameters) {
+    Kubernetes.Instance = this;
     this.kubeConfig = new k8s.KubeConfig();
     this.kubeConfig.loadFromDefault();
     this.kubeClient = this.kubeConfig.makeApiClient(k8s.CoreV1Api);
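
The fields switch from private to public and the constructor registers a static Instance handle, so other cloud-runner modules can reach the live kube config and API clients without rebuilding them. A minimal sketch of how a consumer might use that handle (the import path and function are illustrative, not part of this commit):

// Hypothetical consumer of the now-public singleton; assumes the provider
// was already constructed by the cloud runner bootstrap.
import Kubernetes from './index'; // assumed path to the provider class above

async function listPodNames(): Promise<string[]> {
  const provider = Kubernetes.Instance; // assigned in the constructor
  const podList = await provider.kubeClient.listNamespacedPod(provider.namespace);

  return podList.body.items.map((pod) => pod.metadata?.name || '');
}
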
@@ -149,21 +152,20 @@ class Kubernetes implements ProviderInterface {
             'main',
             this.namespace,
           );
-          const pods = (await this.kubeClient.listNamespacedPod(this.namespace)).body.items.filter(
-            (x) => this.podName === x.metadata?.name,
-          );
-          const running =
-            pods.length > 0 && (pods[0].status?.phase === `Running` || pods[0].status?.phase === `Pending`);
+          const running = await KubernetesPods.IsPodRunning(this.podName, this.namespace, this.kubeClient);

           if (!running) {
-            CloudRunnerLogger.log(`Pod not found, assumed ended! ${pods[0].status?.phase || 'undefined status'}`);
+            CloudRunnerLogger.log(`Pod not found, assumed ended!`);
             break;
           } else {
             CloudRunnerLogger.log('Pod still running, recovering stream...');
           }
         } catch (error: any) {
           const reason = error.response?.body?.reason || ``;
-          if (reason === `NotFound`) {
+          const errorMessage = error.message || ``;
+
+          const continueStreaming = reason === `NotFound` || errorMessage.includes(`dial timeout, backstop`);
+          if (continueStreaming) {
             CloudRunnerLogger.log('Log Stream Container Not Found');
             await new Promise((resolve) => resolve(5000));
             continue;
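
Both error shapes now funnel into one continueStreaming check: a NotFound API reason or a `dial timeout, backstop` message means the log stream dropped while the pod may still be alive, so the loop retries instead of failing the build. One caveat: `await new Promise((resolve) => resolve(5000))` resolves immediately with the value 5000 and does not pause. A standalone sketch of the same loop shape with a real delay (streamLogsOnce and the error shape are stand-ins, not this repo's API):

// Minimal sketch of the reconnect loop, assuming a streamLogsOnce callback.
const waitMs = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

async function streamWithRecovery(streamLogsOnce: () => Promise<void>): Promise<void> {
  while (true) {
    try {
      await streamLogsOnce();
      break; // stream completed normally
    } catch (error: any) {
      const reason = error.response?.body?.reason || ``;
      const message = error.message || ``;
      const continueStreaming = reason === `NotFound` || message.includes(`dial timeout, backstop`);
      if (!continueStreaming) {
        throw error; // unrecoverable: surface it
      }
      await waitMs(5000); // back off before re-attaching to the log stream
    }
  }
}
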
@@ -0,0 +1,14 @@
+import CloudRunnerLogger from '../../services/cloud-runner-logger';
+import { CoreV1Api } from '@kubernetes/client-node';
+class KubernetesPods {
+  public static async IsPodRunning(podName: string, namespace: string, kubeClient: CoreV1Api) {
+    const pods = (await kubeClient.listNamespacedPod(namespace)).body.items.filter((x) => podName === x.metadata?.name);
+    const running = pods.length > 0 && (pods[0].status?.phase === `Running` || pods[0].status?.phase === `Pending`);
+    const phase = pods[0].status?.phase || 'undefined status';
+    CloudRunnerLogger.log(`Getting pod status: ${phase}`);
+
+    return running;
+  }
+}
+
+export default KubernetesPods;
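
Note that the helper reads `pods[0].status?.phase` unconditionally, so when the filter matches no pods, `pods[0]` is undefined and the property access throws before running can be returned as false. A defensive variant might look like this (a sketch with a hypothetical class name, not the committed code):

import CloudRunnerLogger from '../../services/cloud-runner-logger';
import { CoreV1Api } from '@kubernetes/client-node';

class KubernetesPodsSafe {
  public static async IsPodRunning(podName: string, namespace: string, kubeClient: CoreV1Api): Promise<boolean> {
    const pods = (await kubeClient.listNamespacedPod(namespace)).body.items.filter((x) => podName === x.metadata?.name);
    // Guard the empty case before touching pods[0].
    const phase = pods[0]?.status?.phase || 'undefined status';
    CloudRunnerLogger.log(`Getting pod status: ${phase}`);

    return phase === `Running` || phase === `Pending`;
  }
}

export default KubernetesPodsSafe;
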