pull/496/head
Frostebite 2023-03-07 16:07:01 +00:00
parent 6bd5906a52
commit f386a54503
3 changed files with 45 additions and 150 deletions

dist/index.js generated vendored (88 changed lines)

@@ -4032,7 +4032,6 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
 return (mod && mod.__esModule) ? mod : { "default": mod };
 };
 Object.defineProperty(exports, "__esModule", ({ value: true }));
-const stream_1 = __nccwpck_require__(12781);
 const cloud_runner_logger_1 = __importDefault(__nccwpck_require__(22855));
 const core = __importStar(__nccwpck_require__(42186));
 const async_wait_until_1 = __importDefault(__nccwpck_require__(41299));
@@ -4043,82 +4042,32 @@ class KubernetesTaskRunner {
 var _a;
 return __awaiter(this, void 0, void 0, function* () {
 cloud_runner_logger_1.default.log(`Streaming logs from pod: ${podName} container: ${containerName} namespace: ${namespace} finished ${alreadyFinished}`);
-const stream = new stream_1.Writable();
 let output = '';
 let didStreamAnyLogs = false;
 let shouldReadLogs = true;
 let shouldCleanup = true;
-stream._write = (chunk, encoding, next) => {
-didStreamAnyLogs = true;
-try {
-const dateString = `${chunk.toString().split(`Z `)[0]}Z`;
-const newDate = Date.parse(dateString);
-new Date(newDate).toISOString();
-KubernetesTaskRunner.lastReceivedTimestamp = newDate;
-}
-catch (_a) {
-/* */
-}
-const message = chunk.toString().split(`Z `)[1].trimRight(`\n`);
-({ shouldReadLogs, shouldCleanup, output } = follow_log_stream_service_1.FollowLogStreamService.handleIteration(message, shouldReadLogs, shouldCleanup, output));
-next();
-};
-// export interface LogOptions {
-/**
-* Follow the log stream of the pod. Defaults to false.
-*/
-// follow?: boolean;
-/**
-* If set, the number of bytes to read from the server before terminating the log output. This may not display a
-* complete final line of logging, and may return slightly more or slightly less than the specified limit.
-*/
-// limitBytes?: number;
-/**
-* If true, then the output is pretty printed.
-*/
-// pretty?: boolean;
-/**
-* Return previous terminated container logs. Defaults to false.
-*/
-// previous?: boolean;
-/**
-* A relative time in seconds before the current time from which to show logs. If this value precedes the time a
-* pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will
-* be returned. Only one of sinceSeconds or sinceTime may be specified.
-*/
-// sinceSeconds?: number;
-/**
-* If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation
-* of the container or sinceSeconds or sinceTime
-*/
-// tailLines?: number;
-/**
-* If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.
-*/
-// timestamps?: boolean;
-// }
-// const logOptions = {
-// follow: !alreadyFinished,
-// pretty: false,
-// previous: alreadyFinished,
-// timestamps: true,
-// sinceSeconds: KubernetesTaskRunner.lastReceivedTimestamp,
-// };
 try {
-// const resultError = await new Log(kubeConfig).log(namespace, podName, containerName, stream, logOptions);
 const sinceTime = KubernetesTaskRunner.lastReceivedTimestamp
 ? `--since-time="${new Date(KubernetesTaskRunner.lastReceivedTimestamp).toISOString()}" `
 : ` `;
-yield cloud_runner_system_1.CloudRunnerSystem.Run(`kubectl logs ${podName} -c ${containerName} --timestamps ${sinceTime}> app.log`, false, true); // using this instead of Kube
-const logs = yield cloud_runner_system_1.CloudRunnerSystem.Run(`cat app.log`, false, true);
+const logs = yield cloud_runner_system_1.CloudRunnerSystem.Run(`kubectl logs ${podName} -c ${containerName} --timestamps ${sinceTime}`, false, true);
 const splitLogs = logs.split(`\n`);
 for (const element of splitLogs) {
-stream.write(element);
+didStreamAnyLogs = true;
+const chunk = element;
+try {
+const dateString = `${chunk.toString().split(`Z `)[0]}Z`;
+const newDate = Date.parse(dateString);
+new Date(newDate).toISOString();
+KubernetesTaskRunner.lastReceivedTimestamp = newDate;
+}
+catch (_b) {
+/* */
+}
+const message = chunk.split(`Z `)[1];
+({ shouldReadLogs, shouldCleanup, output } = follow_log_stream_service_1.FollowLogStreamService.handleIteration(message, shouldReadLogs, shouldCleanup, output));
 }
-stream.destroy();
-// if (resultError) {
-// throw resultError;
-// }
 if (!didStreamAnyLogs) {
 core.error('Failed to stream any logs, listing namespace events, check for an error with the container');
 core.error(JSON.stringify({
@@ -4138,9 +4087,6 @@ class KubernetesTaskRunner {
 }
 }
 catch (error) {
-if (stream) {
-stream.destroy();
-}
 cloud_runner_logger_1.default.log('k8s task runner failed');
 cloud_runner_logger_1.default.log(JSON.stringify((_a = error === null || error === void 0 ? void 0 : error.response) === null || _a === void 0 ? void 0 : _a.body, undefined, 4));
 cloud_runner_logger_1.default.log(JSON.stringify(error, undefined, 4));
@@ -4176,7 +4122,9 @@ class KubernetesTaskRunner {
 timeout: 2000000,
 intervalBetweenAttempts: 15000,
 });
-cloud_runner_logger_1.default.log(message);
+if (!success) {
+cloud_runner_logger_1.default.log(message);
+}
 return success;
 });
 }
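Note: the hunks above drop the Writable stream fed by the Kubernetes client log API and instead run `kubectl logs --timestamps` on an interval, parsing each returned line. A minimal sketch of that per-line handling and of the `--since-time` flag construction, in standalone TypeScript; `handleLine` and `sinceTimeFlag` are illustrative names, not functions from this repository:

// Sketch: parse one line of `kubectl logs --timestamps` output, e.g.
// "2023-03-07T16:07:01.123456789Z Build started".
function handleLine(line: string, lastReceivedTimestamp: number): { timestamp: number; message: string } {
  let timestamp = lastReceivedTimestamp;
  try {
    const dateString = `${line.split(`Z `)[0]}Z`; // RFC3339Nano prefix up to the first "Z "
    const parsed = Date.parse(dateString);
    new Date(parsed).toISOString(); // throws if the prefix was not a valid date, so the old timestamp is kept
    timestamp = parsed;
  } catch {
    /* line had no parsable timestamp prefix */
  }
  const message = line.split(`Z `)[1]; // remainder after the timestamp; assumes the prefix is present
  return { timestamp, message };
}

// Sketch: the next poll only requests lines newer than the last timestamp seen.
function sinceTimeFlag(lastReceivedTimestamp: number | undefined): string {
  return lastReceivedTimestamp ? `--since-time="${new Date(lastReceivedTimestamp).toISOString()}" ` : ` `;
}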

dist/index.js.map generated vendored (2 changed lines)

File diff suppressed because one or more lines are too long


@@ -1,5 +1,4 @@
 import { CoreV1Api, KubeConfig } from '@kubernetes/client-node';
-import { Writable } from 'stream';
 import CloudRunnerLogger from '../../services/cloud-runner-logger';
 import * as core from '@actions/core';
 import waitUntil from 'async-wait-until';
@@ -20,95 +19,44 @@ class KubernetesTaskRunner {
 CloudRunnerLogger.log(
 `Streaming logs from pod: ${podName} container: ${containerName} namespace: ${namespace} finished ${alreadyFinished}`,
 );
-const stream = new Writable();
 let output = '';
 let didStreamAnyLogs: boolean = false;
 let shouldReadLogs = true;
 let shouldCleanup = true;
-stream._write = (chunk, encoding, next) => {
-didStreamAnyLogs = true;
-try {
-const dateString = `${chunk.toString().split(`Z `)[0]}Z`;
-const newDate = Date.parse(dateString);
-new Date(newDate).toISOString();
-KubernetesTaskRunner.lastReceivedTimestamp = newDate;
-} catch {
-/* */
-}
-const message = chunk.toString().split(`Z `)[1].trimRight(`\n`);
-({ shouldReadLogs, shouldCleanup, output } = FollowLogStreamService.handleIteration(
-message,
-shouldReadLogs,
-shouldCleanup,
-output,
-));
-next();
-};
-// export interface LogOptions {
-/**
-* Follow the log stream of the pod. Defaults to false.
-*/
-// follow?: boolean;
-/**
-* If set, the number of bytes to read from the server before terminating the log output. This may not display a
-* complete final line of logging, and may return slightly more or slightly less than the specified limit.
-*/
-// limitBytes?: number;
-/**
-* If true, then the output is pretty printed.
-*/
-// pretty?: boolean;
-/**
-* Return previous terminated container logs. Defaults to false.
-*/
-// previous?: boolean;
-/**
-* A relative time in seconds before the current time from which to show logs. If this value precedes the time a
-* pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will
-* be returned. Only one of sinceSeconds or sinceTime may be specified.
-*/
-// sinceSeconds?: number;
-/**
-* If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation
-* of the container or sinceSeconds or sinceTime
-*/
-// tailLines?: number;
-/**
-* If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.
-*/
-// timestamps?: boolean;
-// }
-// const logOptions = {
-// follow: !alreadyFinished,
-// pretty: false,
-// previous: alreadyFinished,
-// timestamps: true,
-// sinceSeconds: KubernetesTaskRunner.lastReceivedTimestamp,
-// };
 try {
-// const resultError = await new Log(kubeConfig).log(namespace, podName, containerName, stream, logOptions);
 const sinceTime = KubernetesTaskRunner.lastReceivedTimestamp
 ? `--since-time="${new Date(KubernetesTaskRunner.lastReceivedTimestamp).toISOString()}" `
 : ` `;
-await CloudRunnerSystem.Run(
-`kubectl logs ${podName} -c ${containerName} --timestamps ${sinceTime}> app.log`, // using this instead of Kube
+const logs = await CloudRunnerSystem.Run(
+`kubectl logs ${podName} -c ${containerName} --timestamps ${sinceTime}`,
 false,
 true,
 );
-const logs = await CloudRunnerSystem.Run(`cat app.log`, false, true);
 const splitLogs = logs.split(`\n`);
 for (const element of splitLogs) {
-stream.write(element);
-}
-stream.destroy();
-// if (resultError) {
-// throw resultError;
-// }
+didStreamAnyLogs = true;
+const chunk = element;
+try {
+const dateString = `${chunk.toString().split(`Z `)[0]}Z`;
+const newDate = Date.parse(dateString);
+new Date(newDate).toISOString();
+KubernetesTaskRunner.lastReceivedTimestamp = newDate;
+} catch {
+/* */
+}
+const message = chunk.split(`Z `)[1];
+({ shouldReadLogs, shouldCleanup, output } = FollowLogStreamService.handleIteration(
+message,
+shouldReadLogs,
+shouldCleanup,
+output,
+));
+}
 if (!didStreamAnyLogs) {
 core.error('Failed to stream any logs, listing namespace events, check for an error with the container');
 core.error(
@@ -133,9 +81,6 @@ class KubernetesTaskRunner {
 throw new Error(`No logs streamed from k8s`);
 }
 } catch (error: any) {
-if (stream) {
-stream.destroy();
-}
 CloudRunnerLogger.log('k8s task runner failed');
 CloudRunnerLogger.log(JSON.stringify(error?.response?.body, undefined, 4));
 CloudRunnerLogger.log(JSON.stringify(error, undefined, 4));
@@ -181,7 +126,9 @@ class KubernetesTaskRunner {
 intervalBetweenAttempts: 15000,
 },
 );
-CloudRunnerLogger.log(message);
+if (!success) {
+CloudRunnerLogger.log(message);
+}
 return success;
 }
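The final hunk changes the pod-status wait so the diagnostic message is only logged when the wait does not succeed. Roughly, the pattern around async-wait-until looks like the sketch below; `checkPodStatus` and `message` are placeholders standing in for the repository's actual predicate and log text, and the real code logs through CloudRunnerLogger rather than console:

import waitUntil from 'async-wait-until';

async function waitForPod(checkPodStatus: () => Promise<boolean>, message: string): Promise<boolean> {
  let success = false;
  try {
    // Poll every 15000 ms for up to 2000000 ms (~33 minutes), matching the options in the diff above.
    await waitUntil(checkPodStatus, { timeout: 2000000, intervalBetweenAttempts: 15000 });
    success = true;
  } catch {
    success = false; // async-wait-until rejects when the timeout elapses
  }
  if (!success) {
    console.log(message); // after this change the message is only emitted on failure
  }
  return success;
}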