better defaults

pull/1662/head
Cyril Rohr 2024-02-15 16:00:07 +00:00
parent 8f9e839eda
commit ede15a2f5d
13 changed files with 86 additions and 254 deletions


@@ -1,19 +0,0 @@
name: Check dist/
on:
push:
branches:
- main
paths-ignore:
- '**.md'
pull_request:
paths-ignore:
- '**.md'
workflow_dispatch:
jobs:
call-check-dist:
name: Check dist/
uses: actions/reusable-workflows/.github/workflows/check-dist.yml@main
with:
node-version: "20.x"


@@ -1,22 +0,0 @@
name: Close inactive issues
on:
schedule:
- cron: "30 8 * * *"
jobs:
close-issues:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v3
with:
days-before-issue-stale: 200
days-before-issue-close: 5
stale-issue-label: "stale"
stale-issue-message: "This issue is stale because it has been open for 200 days with no activity. Leave a comment to avoid closing this issue in 5 days."
close-issue-message: "This issue was closed because it has been inactive for 5 days since being marked as stale."
days-before-pr-stale: -1
days-before-pr-close: -1
repo-token: ${{ secrets.GITHUB_TOKEN }}


@@ -1,46 +0,0 @@
name: "Code scanning - action"
on:
push:
pull_request:
schedule:
- cron: '0 19 * * 0'
jobs:
CodeQL-Build:
# CodeQL runs on ubuntu-latest, windows-latest, and macos-latest
runs-on: ubuntu-latest
permissions:
# required for all workflows
security-events: write
steps:
- name: Checkout repository
uses: actions/checkout@v3
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
# Override language selection by uncommenting this and choosing your languages
# with:
# languages: go, javascript, csharp, python, cpp, java, ruby
# Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java).
# If this step fails, then you should remove it and run the build manually (see below).
- name: Autobuild
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
# ✏️ If the Autobuild fails above, remove it and uncomment the following
# three lines and modify them (or add more) to build your code if your
# project uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2


@@ -1,16 +0,0 @@
name: Assign issue
on:
issues:
types: [opened]
jobs:
run-action:
runs-on: ubuntu-latest
steps:
- name: Get current oncall
id: oncall
run: |
echo "CURRENT=$(curl --request GET 'https://api.pagerduty.com/oncalls?include[]=users&schedule_ids[]=P5VG2BX&earliest=true' --header 'Authorization: Token token=${{ secrets.PAGERDUTY_TOKEN }}' --header 'Accept: application/vnd.pagerduty+json;version=2' --header 'Content-Type: application/json' | jq -r '.oncalls[].user.name')" >> $GITHUB_OUTPUT
- name: add_assignees
run: |
curl -X POST -H "Accept: application/vnd.github+json" -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN}}" https://api.github.com/repos/${{github.repository}}/issues/${{ github.event.issue.number}}/assignees -d '{"assignees":["${{steps.oncall.outputs.CURRENT}}"]}'


@@ -1,15 +0,0 @@
name: Licensed
on:
push:
branches:
- main
pull_request:
branches:
- main
workflow_dispatch:
jobs:
call-licensed:
name: Licensed
uses: actions/reusable-workflows/.github/workflows/licensed.yml@main


@@ -1,20 +0,0 @@
name: Add Reviewer PR
on:
pull_request_target:
types: [opened]
jobs:
run-action:
runs-on: ubuntu-latest
steps:
- name: Get current oncall
id: oncall
run: |
echo "CURRENT=$(curl --request GET 'https://api.pagerduty.com/oncalls?include[]=users&schedule_ids[]=P5VG2BX&earliest=true' --header 'Authorization: Token token=${{ secrets.PAGERDUTY_TOKEN }}' --header 'Accept: application/vnd.pagerduty+json;version=2' --header 'Content-Type: application/json' | jq -r '.oncalls[].user.name')" >> $GITHUB_OUTPUT
- name: Request Review
run: |
curl -X POST -H "Accept: application/vnd.github+json" -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN}}" https://api.github.com/repos/${{github.repository}}/pulls/${{ github.event.pull_request.number}}/requested_reviewers -d '{"reviewers":["${{steps.oncall.outputs.CURRENT}}"]}'
- name: Add Assignee
run: |
curl -X POST -H "Accept: application/vnd.github+json" -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN}}" https://api.github.com/repos/${{github.repository}}/issues/${{ github.event.pull_request.number}}/assignees -d '{"assignees":["${{steps.oncall.outputs.CURRENT}}"]}'


@@ -9,51 +9,39 @@ on:
branches:
- main
- releases/**
- fix/**
- v4
jobs:
# Build and unit test
build:
strategy:
matrix:
os: [ubuntu-latest, windows-latest, macOS-latest]
fail-fast: false
runs-on: ${{ matrix.os }}
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Setup Node.js 20.x
uses: actions/setup-node@v3
with:
node-version: 20.x
cache: npm
- run: npm ci
- name: Prettier Format Check
run: npm run format-check
- name: ESLint Check
run: npm run lint
- name: Build & Test
run: npm run test
# End to end save and restore
test-save:
runs-on: runs-on,env=dev,image=ubuntu22-full-arm64
strategy:
matrix:
os: [ubuntu-latest, windows-latest, macOS-latest]
part_size: [32]
queue_size: [4, 8]
fail-fast: false
runs-on: ${{ matrix.os }}
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Generate files in working directory
shell: bash
run: __tests__/create-cache-files.sh ${{ runner.os }} test-cache
run: |
__tests__/create-cache-files.sh ${{ runner.os }} test-cache
# 5GB
# curl -o test-cache/ubuntu.iso https://releases.ubuntu.com/22.04.3/ubuntu-22.04.3-desktop-amd64.iso
# 2GB
curl -o test-cache/ubuntu.iso https://releases.ubuntu.com/jammy/ubuntu-22.04.3-live-server-amd64.iso
- name: Generate files outside working directory
shell: bash
run: __tests__/create-cache-files.sh ${{ runner.os }} ~/test-cache
- name: Save cache
uses: ./
env:
UPLOAD_PART_SIZE: ${{ matrix.part_size }}
UPLOAD_QUEUE_SIZE: ${{ matrix.queue_size }}
with:
key: test-${{ runner.os }}-${{ github.run_id }}
key: test-${{ runner.os }}-${{ github.run_id }}-${{ matrix.part_size }}-${{ matrix.queue_size }}
path: |
test-cache
~/test-cache
@@ -61,16 +49,23 @@ jobs:
needs: test-save
strategy:
matrix:
os: [ubuntu-latest, windows-latest, macOS-latest]
part_size: [8, 16]
queue_size: [8, 12]
fail-fast: false
runs-on: ${{ matrix.os }}
runs-on: runs-on,env=dev,image=ubuntu22-full-arm64
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Restore cache
uses: ./
env:
DOWNLOAD_PART_SIZE: ${{ matrix.part_size }}
DOWNLOAD_QUEUE_SIZE: ${{ matrix.queue_size }}
with:
key: test-${{ runner.os }}-${{ github.run_id }}
key: test-${{ runner.os }}-${{ github.run_id }}-${{ matrix.part_size }}-${{ matrix.queue_size }}
restore-keys: |
test-${{ runner.os }}-${{ github.run_id }}-${{ matrix.part_size }}-
test-${{ runner.os }}-${{ github.run_id }}-
path: |
test-cache
~/test-cache
@@ -80,50 +75,3 @@ jobs:
- name: Verify cache files outside working directory
shell: bash
run: __tests__/verify-cache-files.sh ${{ runner.os }} ~/test-cache
# End to end with proxy
test-proxy-save:
runs-on: ubuntu-latest
container:
image: ubuntu:latest
options: --dns 127.0.0.1
services:
squid-proxy:
image: ubuntu/squid:latest
ports:
- 3128:3128
env:
https_proxy: http://squid-proxy:3128
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Generate files
run: __tests__/create-cache-files.sh proxy test-cache
- name: Save cache
uses: ./
with:
key: test-proxy-${{ github.run_id }}
path: test-cache
test-proxy-restore:
needs: test-proxy-save
runs-on: ubuntu-latest
container:
image: ubuntu:latest
options: --dns 127.0.0.1
services:
squid-proxy:
image: ubuntu/squid:latest
ports:
- 3128:3128
env:
https_proxy: http://squid-proxy:3128
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Restore cache
uses: ./
with:
key: test-proxy-${{ github.run_id }}
path: test-cache
- name: Verify cache
run: __tests__/verify-cache-files.sh proxy test-cache
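For reference, the save matrix above uploads with 32 MB parts and a queue size of 4 or 8, while the restore matrix downloads with 8 or 16 MB parts and a queue size of 8 or 12, all against a roughly 2 GB Ubuntu live-server ISO. A minimal TypeScript sketch (standalone, not part of the action) of how those part sizes translate into multipart part counts for an archive of that size:

// Standalone illustration: part counts for a ~2 GB archive at the matrix part sizes.
const MB = 1024 * 1024;
const archiveBytes = 2 * 1024 * MB; // ~2 GB, matching the ISO fetched above

for (const partSizeMb of [8, 16, 32]) {
  const parts = Math.ceil(archiveBytes / (partSizeMb * MB));
  console.log(`${partSizeMb} MB parts -> ${parts} parts`);
}
// 8 MB -> 256 parts, 16 MB -> 128 parts, 32 MB -> 64 parts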


@@ -93769,7 +93769,6 @@ const client_s3_1 = __nccwpck_require__(9250);
const { getSignedUrl } = __nccwpck_require__(5052);
const fs_1 = __nccwpck_require__(7147);
const crypto = __importStar(__nccwpck_require__(6113));
const options_1 = __nccwpck_require__(6215);
const core = __importStar(__nccwpck_require__(2186));
const utils = __importStar(__nccwpck_require__(1518));
const lib_storage_1 = __nccwpck_require__(3087);
@@ -93779,6 +93778,10 @@ const bucketName = process.env.RUNS_ON_S3_BUCKET_CACHE;
const region = process.env.RUNS_ON_AWS_REGION ||
process.env.AWS_REGION ||
process.env.AWS_DEFAULT_REGION;
const uploadQueueSize = Number(process.env.UPLOAD_QUEUE_SIZE || "4");
const uploadPartSize = Number(process.env.UPLOAD_PART_SIZE || "32") * 1024 * 1024;
const downloadQueueSize = Number(process.env.DOWNLOAD_QUEUE_SIZE || "8");
const downloadPartSize = Number(process.env.DOWNLOAD_PART_SIZE || "16") * 1024 * 1024;
function getCacheVersion(paths, compressionMethod, enableCrossOsArchive = false) {
// don't pass changes upstream
const components = paths.slice();
@@ -93855,8 +93858,7 @@ function downloadCache(archiveLocation, archivePath, options) {
const url = yield getSignedUrl(s3Client, command, {
expiresIn: 3600
});
const downloadOptions = (0, options_1.getDownloadOptions)(Object.assign(Object.assign({}, options), { downloadConcurrency: 14, concurrentBlobDownloads: true }));
yield (0, downloadUtils_1.downloadCacheHttpClientConcurrent)(url, archivePath, downloadOptions);
yield (0, downloadUtils_1.downloadCacheHttpClientConcurrent)(url, archivePath, Object.assign(Object.assign({}, options), { downloadConcurrency: downloadQueueSize, concurrentBlobDownloads: true, partSize: downloadPartSize }));
});
}
exports.downloadCache = downloadCache;
@@ -93882,16 +93884,17 @@ function saveCache(key, paths, archivePath, { compressionMethod, enableCrossOsAr
Body: (0, fs_1.createReadStream)(archivePath)
},
// Part size in bytes
partSize: 32 * 1024 * 1024,
partSize: uploadPartSize,
// Max concurrency
queueSize: 14
queueSize: uploadQueueSize
});
// Commit Cache
const cacheSize = utils.getArchiveFileSizeInBytes(archivePath);
core.info(`Cache Size: ~${Math.round(cacheSize / (1024 * 1024))} MB (${cacheSize} B)`);
const totalParts = Math.ceil(cacheSize / uploadPartSize);
core.info(`Uploading cache from ${archivePath} to ${bucketName}/${s3Key}`);
multipartUpload.on("httpUploadProgress", progress => {
core.info(`Uploaded ${progress.part}/${progress.total}.`);
core.info(`Uploaded part ${progress.part}/${totalParts}.`);
});
yield multipartUpload.done();
core.info(`Cache saved successfully.`);
@@ -94308,7 +94311,7 @@ function downloadCacheHttpClientConcurrent(archiveLocation, archivePath, options
throw new Error(`Could not interpret Content-Length: ${length}`);
}
const downloads = [];
const blockSize = 32 * 1024 * 1024;
const blockSize = options.partSize;
for (let offset = 0; offset < length; offset += blockSize) {
const count = Math.min(blockSize, length - offset);
downloads.push({
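The four new constants above follow one pattern: read a tunable from an environment variable, fall back to a default, and convert sizes from megabytes to bytes. A minimal standalone sketch of that pattern; the sizeFromEnv helper and the invalid-value guard are illustrative additions, not part of the bundled code:

// Hypothetical helper mirroring the env-driven defaults introduced in the diff.
function sizeFromEnv(name: string, defaultMb: number): number {
  const mb = Number(process.env[name] || String(defaultMb));
  // Fall back to the default when the variable is unset, empty, or not a positive number.
  return (Number.isFinite(mb) && mb > 0 ? mb : defaultMb) * 1024 * 1024;
}

const uploadPartSize = sizeFromEnv("UPLOAD_PART_SIZE", 32); // 32 MB default
const downloadPartSize = sizeFromEnv("DOWNLOAD_PART_SIZE", 16); // 16 MB default
const uploadQueueSize = Number(process.env.UPLOAD_QUEUE_SIZE || "4");
const downloadQueueSize = Number(process.env.DOWNLOAD_QUEUE_SIZE || "8");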

dist/restore/index.js vendored

@@ -93769,7 +93769,6 @@ const client_s3_1 = __nccwpck_require__(9250);
const { getSignedUrl } = __nccwpck_require__(5052);
const fs_1 = __nccwpck_require__(7147);
const crypto = __importStar(__nccwpck_require__(6113));
const options_1 = __nccwpck_require__(6215);
const core = __importStar(__nccwpck_require__(2186));
const utils = __importStar(__nccwpck_require__(1518));
const lib_storage_1 = __nccwpck_require__(3087);
@@ -93779,6 +93778,10 @@ const bucketName = process.env.RUNS_ON_S3_BUCKET_CACHE;
const region = process.env.RUNS_ON_AWS_REGION ||
process.env.AWS_REGION ||
process.env.AWS_DEFAULT_REGION;
const uploadQueueSize = Number(process.env.UPLOAD_QUEUE_SIZE || "4");
const uploadPartSize = Number(process.env.UPLOAD_PART_SIZE || "32") * 1024 * 1024;
const downloadQueueSize = Number(process.env.DOWNLOAD_QUEUE_SIZE || "8");
const downloadPartSize = Number(process.env.DOWNLOAD_PART_SIZE || "16") * 1024 * 1024;
function getCacheVersion(paths, compressionMethod, enableCrossOsArchive = false) {
// don't pass changes upstream
const components = paths.slice();
@@ -93855,8 +93858,7 @@ function downloadCache(archiveLocation, archivePath, options) {
const url = yield getSignedUrl(s3Client, command, {
expiresIn: 3600
});
const downloadOptions = (0, options_1.getDownloadOptions)(Object.assign(Object.assign({}, options), { downloadConcurrency: 14, concurrentBlobDownloads: true }));
yield (0, downloadUtils_1.downloadCacheHttpClientConcurrent)(url, archivePath, downloadOptions);
yield (0, downloadUtils_1.downloadCacheHttpClientConcurrent)(url, archivePath, Object.assign(Object.assign({}, options), { downloadConcurrency: downloadQueueSize, concurrentBlobDownloads: true, partSize: downloadPartSize }));
});
}
exports.downloadCache = downloadCache;
@@ -93882,16 +93884,17 @@ function saveCache(key, paths, archivePath, { compressionMethod, enableCrossOsAr
Body: (0, fs_1.createReadStream)(archivePath)
},
// Part size in bytes
partSize: 32 * 1024 * 1024,
partSize: uploadPartSize,
// Max concurrency
queueSize: 14
queueSize: uploadQueueSize
});
// Commit Cache
const cacheSize = utils.getArchiveFileSizeInBytes(archivePath);
core.info(`Cache Size: ~${Math.round(cacheSize / (1024 * 1024))} MB (${cacheSize} B)`);
const totalParts = Math.ceil(cacheSize / uploadPartSize);
core.info(`Uploading cache from ${archivePath} to ${bucketName}/${s3Key}`);
multipartUpload.on("httpUploadProgress", progress => {
core.info(`Uploaded ${progress.part}/${progress.total}.`);
core.info(`Uploaded part ${progress.part}/${totalParts}.`);
});
yield multipartUpload.done();
core.info(`Cache saved successfully.`);
@@ -94308,7 +94311,7 @@ function downloadCacheHttpClientConcurrent(archiveLocation, archivePath, options
throw new Error(`Could not interpret Content-Length: ${length}`);
}
const downloads = [];
const blockSize = 32 * 1024 * 1024;
const blockSize = options.partSize;
for (let offset = 0; offset < length; offset += blockSize) {
const count = Math.min(blockSize, length - offset);
downloads.push({


@@ -93769,7 +93769,6 @@ const client_s3_1 = __nccwpck_require__(9250);
const { getSignedUrl } = __nccwpck_require__(5052);
const fs_1 = __nccwpck_require__(7147);
const crypto = __importStar(__nccwpck_require__(6113));
const options_1 = __nccwpck_require__(6215);
const core = __importStar(__nccwpck_require__(2186));
const utils = __importStar(__nccwpck_require__(1518));
const lib_storage_1 = __nccwpck_require__(3087);
@@ -93779,6 +93778,10 @@ const bucketName = process.env.RUNS_ON_S3_BUCKET_CACHE;
const region = process.env.RUNS_ON_AWS_REGION ||
process.env.AWS_REGION ||
process.env.AWS_DEFAULT_REGION;
const uploadQueueSize = Number(process.env.UPLOAD_QUEUE_SIZE || "4");
const uploadPartSize = Number(process.env.UPLOAD_PART_SIZE || "32") * 1024 * 1024;
const downloadQueueSize = Number(process.env.DOWNLOAD_QUEUE_SIZE || "8");
const downloadPartSize = Number(process.env.DOWNLOAD_PART_SIZE || "16") * 1024 * 1024;
function getCacheVersion(paths, compressionMethod, enableCrossOsArchive = false) {
// don't pass changes upstream
const components = paths.slice();
@@ -93855,8 +93858,7 @@ function downloadCache(archiveLocation, archivePath, options) {
const url = yield getSignedUrl(s3Client, command, {
expiresIn: 3600
});
const downloadOptions = (0, options_1.getDownloadOptions)(Object.assign(Object.assign({}, options), { downloadConcurrency: 14, concurrentBlobDownloads: true }));
yield (0, downloadUtils_1.downloadCacheHttpClientConcurrent)(url, archivePath, downloadOptions);
yield (0, downloadUtils_1.downloadCacheHttpClientConcurrent)(url, archivePath, Object.assign(Object.assign({}, options), { downloadConcurrency: downloadQueueSize, concurrentBlobDownloads: true, partSize: downloadPartSize }));
});
}
exports.downloadCache = downloadCache;
@@ -93882,16 +93884,17 @@ function saveCache(key, paths, archivePath, { compressionMethod, enableCrossOsAr
Body: (0, fs_1.createReadStream)(archivePath)
},
// Part size in bytes
partSize: 32 * 1024 * 1024,
partSize: uploadPartSize,
// Max concurrency
queueSize: 14
queueSize: uploadQueueSize
});
// Commit Cache
const cacheSize = utils.getArchiveFileSizeInBytes(archivePath);
core.info(`Cache Size: ~${Math.round(cacheSize / (1024 * 1024))} MB (${cacheSize} B)`);
const totalParts = Math.ceil(cacheSize / uploadPartSize);
core.info(`Uploading cache from ${archivePath} to ${bucketName}/${s3Key}`);
multipartUpload.on("httpUploadProgress", progress => {
core.info(`Uploaded ${progress.part}/${progress.total}.`);
core.info(`Uploaded part ${progress.part}/${totalParts}.`);
});
yield multipartUpload.done();
core.info(`Cache saved successfully.`);
@@ -94308,7 +94311,7 @@ function downloadCacheHttpClientConcurrent(archiveLocation, archivePath, options
throw new Error(`Could not interpret Content-Length: ${length}`);
}
const downloads = [];
const blockSize = 32 * 1024 * 1024;
const blockSize = options.partSize;
for (let offset = 0; offset < length; offset += blockSize) {
const count = Math.min(blockSize, length - offset);
downloads.push({

dist/save/index.js vendored

@@ -93769,7 +93769,6 @@ const client_s3_1 = __nccwpck_require__(9250);
const { getSignedUrl } = __nccwpck_require__(5052);
const fs_1 = __nccwpck_require__(7147);
const crypto = __importStar(__nccwpck_require__(6113));
const options_1 = __nccwpck_require__(6215);
const core = __importStar(__nccwpck_require__(2186));
const utils = __importStar(__nccwpck_require__(1518));
const lib_storage_1 = __nccwpck_require__(3087);
@@ -93779,6 +93778,10 @@ const bucketName = process.env.RUNS_ON_S3_BUCKET_CACHE;
const region = process.env.RUNS_ON_AWS_REGION ||
process.env.AWS_REGION ||
process.env.AWS_DEFAULT_REGION;
const uploadQueueSize = Number(process.env.UPLOAD_QUEUE_SIZE || "4");
const uploadPartSize = Number(process.env.UPLOAD_PART_SIZE || "32") * 1024 * 1024;
const downloadQueueSize = Number(process.env.DOWNLOAD_QUEUE_SIZE || "8");
const downloadPartSize = Number(process.env.DOWNLOAD_PART_SIZE || "16") * 1024 * 1024;
function getCacheVersion(paths, compressionMethod, enableCrossOsArchive = false) {
// don't pass changes upstream
const components = paths.slice();
@@ -93855,8 +93858,7 @@ function downloadCache(archiveLocation, archivePath, options) {
const url = yield getSignedUrl(s3Client, command, {
expiresIn: 3600
});
const downloadOptions = (0, options_1.getDownloadOptions)(Object.assign(Object.assign({}, options), { downloadConcurrency: 14, concurrentBlobDownloads: true }));
yield (0, downloadUtils_1.downloadCacheHttpClientConcurrent)(url, archivePath, downloadOptions);
yield (0, downloadUtils_1.downloadCacheHttpClientConcurrent)(url, archivePath, Object.assign(Object.assign({}, options), { downloadConcurrency: downloadQueueSize, concurrentBlobDownloads: true, partSize: downloadPartSize }));
});
}
exports.downloadCache = downloadCache;
@@ -93882,16 +93884,17 @@ function saveCache(key, paths, archivePath, { compressionMethod, enableCrossOsAr
Body: (0, fs_1.createReadStream)(archivePath)
},
// Part size in bytes
partSize: 32 * 1024 * 1024,
partSize: uploadPartSize,
// Max concurrency
queueSize: 14
queueSize: uploadQueueSize
});
// Commit Cache
const cacheSize = utils.getArchiveFileSizeInBytes(archivePath);
core.info(`Cache Size: ~${Math.round(cacheSize / (1024 * 1024))} MB (${cacheSize} B)`);
const totalParts = Math.ceil(cacheSize / uploadPartSize);
core.info(`Uploading cache from ${archivePath} to ${bucketName}/${s3Key}`);
multipartUpload.on("httpUploadProgress", progress => {
core.info(`Uploaded ${progress.part}/${progress.total}.`);
core.info(`Uploaded part ${progress.part}/${totalParts}.`);
});
yield multipartUpload.done();
core.info(`Cache saved successfully.`);
@@ -94308,7 +94311,7 @@ function downloadCacheHttpClientConcurrent(archiveLocation, archivePath, options
throw new Error(`Could not interpret Content-Length: ${length}`);
}
const downloads = [];
const blockSize = 32 * 1024 * 1024;
const blockSize = options.partSize;
for (let offset = 0; offset < length; offset += blockSize) {
const count = Math.min(blockSize, length - offset);
downloads.push({


@@ -31,6 +31,13 @@ const region =
process.env.AWS_REGION ||
process.env.AWS_DEFAULT_REGION;
const uploadQueueSize = Number(process.env.UPLOAD_QUEUE_SIZE || "4");
const uploadPartSize =
Number(process.env.UPLOAD_PART_SIZE || "32") * 1024 * 1024;
const downloadQueueSize = Number(process.env.DOWNLOAD_QUEUE_SIZE || "8");
const downloadPartSize =
Number(process.env.DOWNLOAD_PART_SIZE || "16") * 1024 * 1024;
export function getCacheVersion(
paths: string[],
compressionMethod?: CompressionMethod,
@@ -140,12 +147,12 @@ export async function downloadCache(
const url = await getSignedUrl(s3Client, command, {
expiresIn: 3600
});
const downloadOptions = getDownloadOptions({
await downloadCacheHttpClientConcurrent(url, archivePath, {
...options,
downloadConcurrency: 14,
concurrentBlobDownloads: true
downloadConcurrency: downloadQueueSize,
concurrentBlobDownloads: true,
partSize: downloadPartSize
});
await downloadCacheHttpClientConcurrent(url, archivePath, downloadOptions);
}
export async function saveCache(
@@ -176,12 +183,10 @@ export async function saveCache(
Key: s3Key,
Body: createReadStream(archivePath)
},
// Part size in bytes
partSize: 32 * 1024 * 1024,
partSize: uploadPartSize,
// Max concurrency
queueSize: 14
queueSize: uploadQueueSize
});
// Commit Cache
@@ -192,9 +197,10 @@ export async function saveCache(
)} MB (${cacheSize} B)`
);
const totalParts = Math.ceil(cacheSize / uploadPartSize);
core.info(`Uploading cache from ${archivePath} to ${bucketName}/${s3Key}`);
multipartUpload.on("httpUploadProgress", progress => {
core.info(`Uploaded ${progress.part}/${progress.total}.`);
core.info(`Uploaded part ${progress.part}/${totalParts}.`);
});
await multipartUpload.done();
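A self-contained sketch of the upload path as it now stands, assuming the same @aws-sdk/lib-storage Upload API the action bundles; the uploadArchive wrapper and its bucket, key, and archive-path arguments are placeholders for illustration:

import { S3Client } from "@aws-sdk/client-s3";
import { Upload } from "@aws-sdk/lib-storage";
import { createReadStream, statSync } from "fs";

const uploadPartSize =
  Number(process.env.UPLOAD_PART_SIZE || "32") * 1024 * 1024;
const uploadQueueSize = Number(process.env.UPLOAD_QUEUE_SIZE || "4");

async function uploadArchive(
  archivePath: string,
  bucket: string,
  key: string
): Promise<void> {
  // The progress event exposes the current part number but not the total,
  // so the total is derived from the archive size and the configured part size.
  const totalParts = Math.ceil(statSync(archivePath).size / uploadPartSize);
  const upload = new Upload({
    client: new S3Client({}),
    params: { Bucket: bucket, Key: key, Body: createReadStream(archivePath) },
    partSize: uploadPartSize, // bytes per multipart part
    queueSize: uploadQueueSize // concurrent part uploads
  });
  upload.on("httpUploadProgress", progress => {
    console.log(`Uploaded part ${progress.part}/${totalParts}.`);
  });
  await upload.done();
}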


@@ -6,6 +6,10 @@ import * as fs from "fs";
import { DownloadOptions } from "@actions/cache/lib/options";
import { retryHttpClientResponse } from "@actions/cache/lib/internal/requestUtils";
export interface RunsOnDownloadOptions extends DownloadOptions {
partSize: number;
}
/**
* Class for tracking the download state and displaying stats.
*/
@@ -149,7 +153,7 @@ export class DownloadProgress {
export async function downloadCacheHttpClientConcurrent(
archiveLocation: string,
archivePath: fs.PathLike,
options: DownloadOptions
options: RunsOnDownloadOptions
): Promise<void> {
const archiveDescriptor = await fs.promises.open(archivePath, "w");
const httpClient = new HttpClient("actions/cache", undefined, {
@@ -185,7 +189,7 @@ export async function downloadCacheHttpClientConcurrent(
promiseGetter: () => Promise<DownloadSegment>;
}[] = [];
const blockSize = 32 * 1024 * 1024;
const blockSize = options.partSize;
for (let offset = 0; offset < length; offset += blockSize) {
const count = Math.min(blockSize, length - offset);
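The loop above splits the signed-URL download into ranged requests of options.partSize bytes, which is what RunsOnDownloadOptions adds on top of the stock DownloadOptions. A standalone sketch of that segmentation; planSegments is an illustrative helper, not something the action exports:

interface Segment {
  offset: number;
  count: number;
}

// Plan the byte ranges a concurrent download of `length` bytes would request.
function planSegments(length: number, partSize: number): Segment[] {
  const segments: Segment[] = [];
  for (let offset = 0; offset < length; offset += partSize) {
    // The final segment is shorter when length is not a multiple of partSize.
    segments.push({ offset, count: Math.min(partSize, length - offset) });
  }
  return segments;
}

// Example: a 100 MB archive with the 16 MB default yields 7 ranges, the last covering 4 MB.
console.log(planSegments(100 * 1024 * 1024, 16 * 1024 * 1024).length); // 7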