PR feedback
parent 2ecc14a8c8
commit adcdf1b77a
@@ -76,6 +76,15 @@ jobs:
         with:
           node-version: 20
           cache: 'yarn'
+      - name: Clean up disk space before tests
+        run: |
+          # Clean up any leftover cache files from previous runs
+          rm -rf ./cloud-runner-cache/* || true
+          # Clean up system caches and temporary files
+          sudo apt-get clean || true
+          docker system prune -f || true
+          # Show available disk space
+          df -h
       - run: yarn install --frozen-lockfile
       - run: yarn run test "${{ matrix.test }}" --detectOpenHandles --forceExit --runInBand
         timeout-minutes: 60
@@ -131,6 +140,15 @@ jobs:
         with:
           node-version: 20
           cache: 'yarn'
+      - name: Clean up disk space before tests
+        run: |
+          # Clean up any leftover cache files from previous runs
+          rm -rf ./cloud-runner-cache/* || true
+          # Clean up system caches and temporary files
+          sudo apt-get clean || true
+          docker system prune -f || true
+          # Show available disk space
+          df -h
       - run: yarn install --frozen-lockfile
       - run: yarn run test "${{ matrix.test }}" --detectOpenHandles --forceExit --runInBand
         timeout-minutes: 60
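
Note on the cleanup step added above (to both jobs): every command is best-effort (`|| true`), so the job continues even if cleanup fails. If a hard minimum of free space were ever wanted before the tests start, a minimal sketch along these lines could gate the run; `assertFreeDiskSpaceGb` is a hypothetical helper, not part of this PR:

```typescript
// Hypothetical pre-test guard (not in this PR): fail fast when the runner is low
// on disk instead of discovering it mid-test. Uses only Node built-ins.
import { execSync } from 'node:child_process';

export function assertFreeDiskSpaceGb(minGb: number, mountPath = '.'): void {
  // `df -Pk` prints POSIX-formatted output in 1K blocks; column 4 is "Available".
  const dataLine = execSync(`df -Pk ${mountPath}`).toString().trim().split('\n')[1];
  const availableGb = Number(dataLine.split(/\s+/)[3]) / (1024 * 1024);
  if (availableGb < minGb) {
    throw new Error(`Only ${availableGb.toFixed(1)} GB free on ${mountPath}; need at least ${minGb} GB`);
  }
}
```
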
@@ -3500,7 +3500,10 @@ mkdir -p /github/workspace/cloud-runner-cache
 mkdir -p /data/cache
 cp -a /github/workspace/cloud-runner-cache/. ${sharedFolder}
 ${command_hook_service_1.CommandHookService.ApplyHooksToCommands(commands, this.buildParameters)}
-cp -a ${sharedFolder}. /github/workspace/cloud-runner-cache/
+# Only copy cache directory, exclude retained workspaces to avoid running out of disk space
+if [ -d "${sharedFolder}cache" ]; then
+  cp -a ${sharedFolder}cache/. /github/workspace/cloud-runner-cache/cache/ || true
+fi
 `;
 (0, node_fs_1.writeFileSync)(`${workspace}/${entrypointFilePath}`, fileContents, {
     flag: 'w',
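
A possible readability follow-up (hypothetical, not something this change does): the conditional copy-back could be produced by one small helper in the project's TypeScript, so the shell snippet lives in a single place and can be unit-tested. The sketch assumes `sharedFolder` keeps its trailing slash, as in the generated script above:

```typescript
// Hypothetical helper that builds the cache-only copy-back snippet for the
// generated entrypoint script; `sharedFolder` is assumed to end with '/'.
export function cacheCopyBackSnippet(sharedFolder: string): string {
  return [
    `# Only copy cache directory, exclude retained workspaces to avoid running out of disk space`,
    `if [ -d "${sharedFolder}cache" ]; then`,
    `  cp -a ${sharedFolder}cache/. /github/workspace/cloud-runner-cache/cache/ || true`,
    `fi`,
  ].join('\n');
}
```

A unit test could then assert the generated snippet never touches the retained-workspace folders.
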
@@ -3960,7 +3963,38 @@ class KubernetesPods {
         const phase = pods[0]?.status?.phase || 'undefined status';
         cloud_runner_logger_1.default.log(`Getting pod status: ${phase}`);
         if (phase === `Failed`) {
-            throw new Error(`K8s pod failed`);
+            const pod = pods[0];
+            const containerStatuses = pod.status?.containerStatuses || [];
+            const conditions = pod.status?.conditions || [];
+            const events = (await kubeClient.listNamespacedEvent(namespace)).body.items
+                .filter((x) => x.involvedObject?.name === podName)
+                .map((x) => ({
+                message: x.message || '',
+                reason: x.reason || '',
+                type: x.type || '',
+            }));
+            const errorDetails = [];
+            errorDetails.push(`Pod: ${podName}`);
+            errorDetails.push(`Phase: ${phase}`);
+            if (conditions.length > 0) {
+                errorDetails.push(`Conditions: ${JSON.stringify(conditions.map((c) => ({ type: c.type, status: c.status, reason: c.reason, message: c.message })), undefined, 2)}`);
+            }
+            if (containerStatuses.length > 0) {
+                containerStatuses.forEach((cs, idx) => {
+                    if (cs.state?.waiting) {
+                        errorDetails.push(`Container ${idx} (${cs.name}) waiting: ${cs.state.waiting.reason} - ${cs.state.waiting.message || ''}`);
+                    }
+                    if (cs.state?.terminated) {
+                        errorDetails.push(`Container ${idx} (${cs.name}) terminated: ${cs.state.terminated.reason} - ${cs.state.terminated.message || ''} (exit code: ${cs.state.terminated.exitCode})`);
+                    }
+                });
+            }
+            if (events.length > 0) {
+                errorDetails.push(`Recent events: ${JSON.stringify(events.slice(-5), undefined, 2)}`);
+            }
+            const errorMessage = `K8s pod failed\n${errorDetails.join('\n')}`;
+            cloud_runner_logger_1.default.log(errorMessage);
+            throw new Error(errorMessage);
         }
         return running;
     }

File diff suppressed because one or more lines are too long

@@ -7,7 +7,43 @@ class KubernetesPods {
     const phase = pods[0]?.status?.phase || 'undefined status';
     CloudRunnerLogger.log(`Getting pod status: ${phase}`);
     if (phase === `Failed`) {
-      throw new Error(`K8s pod failed`);
+      const pod = pods[0];
+      const containerStatuses = pod.status?.containerStatuses || [];
+      const conditions = pod.status?.conditions || [];
+      const events = (await kubeClient.listNamespacedEvent(namespace)).body.items
+        .filter((x) => x.involvedObject?.name === podName)
+        .map((x) => ({
+          message: x.message || '',
+          reason: x.reason || '',
+          type: x.type || '',
+        }));
+
+      const errorDetails: string[] = [];
+      errorDetails.push(`Pod: ${podName}`);
+      errorDetails.push(`Phase: ${phase}`);
+
+      if (conditions.length > 0) {
+        errorDetails.push(`Conditions: ${JSON.stringify(conditions.map(c => ({ type: c.type, status: c.status, reason: c.reason, message: c.message })), undefined, 2)}`);
+      }
+
+      if (containerStatuses.length > 0) {
+        containerStatuses.forEach((cs, idx) => {
+          if (cs.state?.waiting) {
+            errorDetails.push(`Container ${idx} (${cs.name}) waiting: ${cs.state.waiting.reason} - ${cs.state.waiting.message || ''}`);
+          }
+          if (cs.state?.terminated) {
+            errorDetails.push(`Container ${idx} (${cs.name}) terminated: ${cs.state.terminated.reason} - ${cs.state.terminated.message || ''} (exit code: ${cs.state.terminated.exitCode})`);
+          }
+        });
+      }
+
+      if (events.length > 0) {
+        errorDetails.push(`Recent events: ${JSON.stringify(events.slice(-5), undefined, 2)}`);
+      }
+
+      const errorMessage = `K8s pod failed\n${errorDetails.join('\n')}`;
+      CloudRunnerLogger.log(errorMessage);
+      throw new Error(errorMessage);
     }
 
     return running;
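
A possible follow-up (a sketch, not part of this change): the detail-building block is pure string assembly over data already in hand, so it could live in a standalone function and be unit-tested without a cluster. `describePodFailure` and `PodEvent` are hypothetical names; the `V1*` types come from `@kubernetes/client-node`, which this code already uses:

```typescript
import { V1ContainerStatus, V1PodCondition } from '@kubernetes/client-node';

// Hypothetical event shape, matching the mapped events above.
interface PodEvent {
  message: string;
  reason: string;
  type: string;
}

// Pure formatter mirroring the logic above; no cluster access needed to test it.
export function describePodFailure(
  podName: string,
  phase: string,
  conditions: V1PodCondition[],
  containerStatuses: V1ContainerStatus[],
  events: PodEvent[],
): string {
  const details = [`Pod: ${podName}`, `Phase: ${phase}`];
  if (conditions.length > 0) {
    const summary = conditions.map((c) => ({ type: c.type, status: c.status, reason: c.reason, message: c.message }));
    details.push(`Conditions: ${JSON.stringify(summary, undefined, 2)}`);
  }
  for (const [idx, cs] of containerStatuses.entries()) {
    if (cs.state?.waiting) {
      details.push(`Container ${idx} (${cs.name}) waiting: ${cs.state.waiting.reason} - ${cs.state.waiting.message || ''}`);
    }
    if (cs.state?.terminated) {
      details.push(
        `Container ${idx} (${cs.name}) terminated: ${cs.state.terminated.reason} - ` +
          `${cs.state.terminated.message || ''} (exit code: ${cs.state.terminated.exitCode})`,
      );
    }
  }
  if (events.length > 0) {
    details.push(`Recent events: ${JSON.stringify(events.slice(-5), undefined, 2)}`);
  }
  return `K8s pod failed\n${details.join('\n')}`;
}
```
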
@@ -87,5 +87,19 @@ describe('Cloud Runner Caching', () => {
       expect(build2NotContainsZeroLibraryCacheFilesMessage).toBeTruthy();
       expect(build2NotContainsZeroLFSCacheFilesMessage).toBeTruthy();
     }, 1_000_000_000);
+    afterAll(async () => {
+      // Clean up cache files to prevent disk space issues
+      if (CloudRunnerOptions.providerStrategy === `local-docker` || CloudRunnerOptions.providerStrategy === `aws`) {
+        const cachePath = `./cloud-runner-cache`;
+        if (fs.existsSync(cachePath)) {
+          try {
+            CloudRunnerLogger.log(`Cleaning up cache directory: ${cachePath}`);
+            await CloudRunnerSystem.Run(`rm -rf ${cachePath}/* || true`);
+          } catch (error: any) {
+            CloudRunnerLogger.log(`Failed to cleanup cache: ${error.message}`);
+          }
+        }
+      }
+    });
   }
 });
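
A variation on the shell-based cleanup above (an assumption, not what this PR does): Node's built-in `fs.rm` avoids spawning a shell and is robust to paths containing spaces. `emptyDirectory` is a hypothetical helper:

```typescript
import { promises as fs } from 'node:fs';
import path from 'node:path';

// Hypothetical cleanup: remove a directory's contents without deleting the directory.
async function emptyDirectory(dir: string): Promise<void> {
  let entries: string[];
  try {
    entries = await fs.readdir(dir);
  } catch {
    return; // Directory does not exist; nothing to clean.
  }
  await Promise.all(entries.map((entry) => fs.rm(path.join(dir, entry), { recursive: true, force: true })));
}
```
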
@@ -86,6 +86,23 @@ describe('Cloud Runner Retain Workspace', () => {
       CloudRunnerLogger.log(
         `Cleaning up ./cloud-runner-cache/${path.basename(CloudRunnerFolders.uniqueCloudRunnerJobFolderAbsolute)}`,
       );
+      try {
+        await CloudRunnerSystem.Run(
+          `rm -rf ./cloud-runner-cache/${path.basename(CloudRunnerFolders.uniqueCloudRunnerJobFolderAbsolute)} || true`,
+        );
+      } catch (error: any) {
+        CloudRunnerLogger.log(`Failed to cleanup workspace: ${error.message}`);
+      }
     }
+    // Clean up cache files to prevent disk space issues
+    const cachePath = `./cloud-runner-cache`;
+    if (fs.existsSync(cachePath)) {
+      try {
+        CloudRunnerLogger.log(`Cleaning up cache directory: ${cachePath}`);
+        await CloudRunnerSystem.Run(`rm -rf ${cachePath}/* || true`);
+      } catch (error: any) {
+        CloudRunnerLogger.log(`Failed to cleanup cache: ${error.message}`);
+      }
+    }
   });
 }
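
This afterAll mirrors the one added to the caching suite above. If the duplication grows, a shared helper is one option; a minimal sketch, assuming a common test-utilities module is acceptable (`cleanupCloudRunnerCache` is hypothetical, and it logs via `console.log` where the suites use `CloudRunnerLogger`):

```typescript
import { existsSync } from 'node:fs';
import { execSync } from 'node:child_process';

// Hypothetical shared test utility: best-effort cache cleanup, never fails the suite.
export function cleanupCloudRunnerCache(cachePath = './cloud-runner-cache'): void {
  if (!existsSync(cachePath)) return;
  try {
    console.log(`Cleaning up cache directory: ${cachePath}`);
    execSync(`rm -rf ${cachePath}/* || true`);
  } catch (error) {
    console.log(`Failed to cleanup cache: ${(error as Error).message}`);
  }
}
```

Each suite's afterAll could then reduce to `afterAll(() => cleanupCloudRunnerCache());`.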