Merge pull request #30 from brndnmtthws/v4

Add support for S3 endpoints
Cyril Rohr 2025-06-24 15:26:00 +02:00 committed by GitHub
commit 3a15256b35
13 changed files with 614 additions and 87 deletions


@@ -42,6 +42,7 @@ Be aware of S3 transfer costs if your runners are not in the same AWS region as
## Special environment variables
* `RUNS_ON_S3_BUCKET_CACHE`: if set, the action will use this bucket to store the cache.
* `RUNS_ON_S3_BUCKET_ENDPOINT`: if set, the action will use this endpoint to connect to the bucket. This is useful with AWS S3 Transfer Acceleration or a non-AWS S3-compatible service.
* `RUNS_ON_RUNNER_NAME`: on RunsOn runners this environment variable is non-empty, and when it is, any existing AWS credentials in the environment are discarded. To preserve existing AWS environment variables, set it to the empty string `""`.
* `RUNS_ON_S3_FORCE_PATH_STYLE` or `AWS_S3_FORCE_PATH_STYLE`: if either of these environment variables equals the string `"true"`, the S3 client is configured to use path-style addressing (see the sketch below).
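
To make the interaction between these variables concrete, here is a minimal sketch of the resulting S3 client configuration. The `endpoint` and `region` resolution mirror the code added in `src/custom/backend.ts` in this PR; the `forcePathStyle` derivation is an assumption inferred from the documented behavior above, not copied from the diff.

```typescript
import { S3Client } from "@aws-sdk/client-s3";

// Custom endpoint (e.g. an S3 Transfer Acceleration or MinIO URL).
// Left undefined, the SDK uses the default AWS endpoint for the region.
const endpoint = process.env.RUNS_ON_S3_BUCKET_ENDPOINT;

// Region resolution order used by the action.
const region =
    process.env.RUNS_ON_AWS_REGION ||
    process.env.AWS_REGION ||
    process.env.AWS_DEFAULT_REGION;

// Assumed derivation: path-style addressing is enabled only when either
// variable is exactly the string "true".
const forcePathStyle =
    process.env.RUNS_ON_S3_FORCE_PATH_STYLE === "true" ||
    process.env.AWS_S3_FORCE_PATH_STYLE === "true";

const s3Client = new S3Client({ region, forcePathStyle, endpoint });
```

Path-style addressing (`https://endpoint/bucket/key` instead of `https://bucket.endpoint/key`) is typically required by S3-compatible services such as MinIO that do not support virtual-hosted bucket URLs.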


@@ -0,0 +1,232 @@
import * as core from "@actions/core";
import * as fs from "fs";
import nock from "nock";
import * as path from "path";
import { DownloadValidationError, restoreCache } from "../src/custom/cache";
import { downloadCacheHttpClientConcurrent } from "../src/custom/downloadUtils";
// Mock the core module
jest.mock("@actions/core");
// Mock fs for file size checks
jest.mock("fs", () => ({
...jest.requireActual("fs"),
promises: {
...jest.requireActual("fs").promises,
open: jest.fn()
}
}));
describe("Download Validation", () => {
const testArchivePath = "/tmp/test-cache.tar.gz";
const testUrl = "https://example.com/cache.tar.gz";
beforeEach(() => {
jest.clearAllMocks();
nock.cleanAll();
});
afterEach(() => {
nock.cleanAll();
});
describe("downloadCacheHttpClientConcurrent", () => {
it("should validate downloaded size matches expected content-length", async () => {
const expectedSize = 1024;
const mockFileDescriptor = {
write: jest.fn().mockResolvedValue(undefined),
close: jest.fn().mockResolvedValue(undefined)
};
(fs.promises.open as jest.Mock).mockResolvedValue(
mockFileDescriptor
);
// Mock the initial range request to get content length
nock("https://example.com")
.get("/cache.tar.gz")
.reply(206, "partial content", {
"content-range": `bytes 0-1/${expectedSize}`
});
// Mock the actual content download with wrong size
nock("https://example.com")
.get("/cache.tar.gz")
.reply(206, Buffer.alloc(512), {
// Return only 512 bytes instead of 1024
"content-range": "bytes 0-511/1024"
});
await expect(
downloadCacheHttpClientConcurrent(testUrl, testArchivePath, {
timeoutInMs: 30000,
partSize: 1024
})
).rejects.toThrow(
"Download validation failed: Expected 1024 bytes but downloaded 512 bytes"
);
});
it("should succeed when downloaded size matches expected", async () => {
const expectedSize = 1024;
const testContent = Buffer.alloc(expectedSize);
const mockFileDescriptor = {
write: jest.fn().mockResolvedValue(undefined),
close: jest.fn().mockResolvedValue(undefined)
};
(fs.promises.open as jest.Mock).mockResolvedValue(
mockFileDescriptor
);
// Mock the initial range request
nock("https://example.com")
.get("/cache.tar.gz")
.reply(206, "partial content", {
"content-range": `bytes 0-1/${expectedSize}`
});
// Mock the actual content download with correct size
nock("https://example.com")
.get("/cache.tar.gz")
.reply(206, testContent, {
"content-range": `bytes 0-${
expectedSize - 1
}/${expectedSize}`
});
await expect(
downloadCacheHttpClientConcurrent(testUrl, testArchivePath, {
timeoutInMs: 30000,
partSize: expectedSize
})
).resolves.not.toThrow();
});
});
describe("restoreCache validation", () => {
beforeEach(() => {
// Mock environment variables for S3 backend
process.env.RUNS_ON_S3_BUCKET_CACHE = "test-bucket";
process.env.RUNS_ON_AWS_REGION = "us-east-1";
});
afterEach(() => {
delete process.env.RUNS_ON_S3_BUCKET_CACHE;
delete process.env.RUNS_ON_AWS_REGION;
});
it("should throw DownloadValidationError for empty files", async () => {
// Mock the cache lookup to return a valid cache entry
const mockCacheHttpClient = require("../src/custom/backend");
jest.spyOn(mockCacheHttpClient, "getCacheEntry").mockResolvedValue({
cacheKey: "test-key",
archiveLocation: "https://s3.example.com/cache.tar.gz"
});
// Mock the download to succeed
jest.spyOn(mockCacheHttpClient, "downloadCache").mockResolvedValue(
undefined
);
// Mock utils to return 0 file size (empty file)
const mockUtils = require("@actions/cache/lib/internal/cacheUtils");
jest.spyOn(mockUtils, "getArchiveFileSizeInBytes").mockReturnValue(
0
);
jest.spyOn(mockUtils, "createTempDirectory").mockResolvedValue(
"/tmp"
);
jest.spyOn(mockUtils, "getCacheFileName").mockReturnValue(
"cache.tar.gz"
);
const coreSpy = jest.spyOn(core, "warning");
const result = await restoreCache(["/test/path"], "test-key");
expect(result).toBeUndefined(); // Should return undefined on validation failure
expect(coreSpy).toHaveBeenCalledWith(
expect.stringContaining(
"Cache download validation failed: Downloaded cache archive is empty"
)
);
});
it("should throw DownloadValidationError for files too small", async () => {
// Mock the cache lookup to return a valid cache entry
const mockCacheHttpClient = require("../src/custom/backend");
jest.spyOn(mockCacheHttpClient, "getCacheEntry").mockResolvedValue({
cacheKey: "test-key",
archiveLocation: "https://s3.example.com/cache.tar.gz"
});
// Mock the download to succeed
jest.spyOn(mockCacheHttpClient, "downloadCache").mockResolvedValue(
undefined
);
// Mock utils to return small file size (less than 512 bytes)
const mockUtils = require("@actions/cache/lib/internal/cacheUtils");
jest.spyOn(mockUtils, "getArchiveFileSizeInBytes").mockReturnValue(
100
);
jest.spyOn(mockUtils, "createTempDirectory").mockResolvedValue(
"/tmp"
);
jest.spyOn(mockUtils, "getCacheFileName").mockReturnValue(
"cache.tar.gz"
);
const coreSpy = jest.spyOn(core, "warning");
const result = await restoreCache(["/test/path"], "test-key");
expect(result).toBeUndefined(); // Should return undefined on validation failure
expect(coreSpy).toHaveBeenCalledWith(
expect.stringContaining(
"Cache download validation failed: Downloaded cache archive is too small (100 bytes)"
)
);
});
it("should succeed with valid file size", async () => {
// Mock the cache lookup to return a valid cache entry
const mockCacheHttpClient = require("../src/custom/backend");
jest.spyOn(mockCacheHttpClient, "getCacheEntry").mockResolvedValue({
cacheKey: "test-key",
archiveLocation: "https://s3.example.com/cache.tar.gz"
});
// Mock the download to succeed
jest.spyOn(mockCacheHttpClient, "downloadCache").mockResolvedValue(
undefined
);
// Mock utils to return valid file size (>= 512 bytes)
const mockUtils = require("@actions/cache/lib/internal/cacheUtils");
jest.spyOn(mockUtils, "getArchiveFileSizeInBytes").mockReturnValue(
1024
);
jest.spyOn(mockUtils, "createTempDirectory").mockResolvedValue(
"/tmp"
);
jest.spyOn(mockUtils, "getCacheFileName").mockReturnValue(
"cache.tar.gz"
);
jest.spyOn(mockUtils, "getCompressionMethod").mockResolvedValue(
"gzip"
);
// Mock tar operations
const mockTar = require("@actions/cache/lib/internal/tar");
jest.spyOn(mockTar, "extractTar").mockResolvedValue(undefined);
jest.spyOn(mockTar, "listTar").mockResolvedValue(undefined);
const result = await restoreCache(["/test/path"], "test-key");
expect(result).toBe("test-key"); // Should return the cache key on success
});
});
});


@@ -99371,6 +99371,7 @@ if (process.env.RUNS_ON_RUNNER_NAME && process.env.RUNS_ON_RUNNER_NAME !== "") {
}
const versionSalt = "1.0";
const bucketName = process.env.RUNS_ON_S3_BUCKET_CACHE;
const endpoint = process.env.RUNS_ON_S3_BUCKET_ENDPOINT;
const region = process.env.RUNS_ON_AWS_REGION ||
process.env.AWS_REGION ||
process.env.AWS_DEFAULT_REGION;
@@ -99380,7 +99381,7 @@ const uploadQueueSize = Number(process.env.UPLOAD_QUEUE_SIZE || "4");
const uploadPartSize = Number(process.env.UPLOAD_PART_SIZE || "32") * 1024 * 1024;
const downloadQueueSize = Number(process.env.DOWNLOAD_QUEUE_SIZE || "8");
const downloadPartSize = Number(process.env.DOWNLOAD_PART_SIZE || "16") * 1024 * 1024;
const s3Client = new client_s3_1.S3Client({ region, forcePathStyle });
const s3Client = new client_s3_1.S3Client({ region, forcePathStyle, endpoint });
function getCacheVersion(paths, compressionMethod, enableCrossOsArchive = false) {
// don't pass changes upstream
const components = paths.slice();
@@ -99448,14 +99449,42 @@ function downloadCache(archiveLocation, archivePath, options) {
}
const archiveUrl = new URL(archiveLocation);
const objectKey = archiveUrl.pathname.slice(1);
const command = new client_s3_1.GetObjectCommand({
Bucket: bucketName,
Key: objectKey
});
const url = yield getSignedUrl(s3Client, command, {
expiresIn: 3600
});
yield (0, downloadUtils_1.downloadCacheHttpClientConcurrent)(url, archivePath, Object.assign(Object.assign({}, options), { downloadConcurrency: downloadQueueSize, concurrentBlobDownloads: true, partSize: downloadPartSize }));
// Retry logic for download validation failures
const maxRetries = 3;
let lastError;
for (let attempt = 1; attempt <= maxRetries; attempt++) {
try {
const command = new client_s3_1.GetObjectCommand({
Bucket: bucketName,
Key: objectKey
});
const url = yield getSignedUrl(s3Client, command, {
expiresIn: 3600
});
yield (0, downloadUtils_1.downloadCacheHttpClientConcurrent)(url, archivePath, Object.assign(Object.assign({}, options), { downloadConcurrency: downloadQueueSize, concurrentBlobDownloads: true, partSize: downloadPartSize }));
// If we get here, download succeeded
return;
}
catch (error) {
const errorMessage = error.message;
lastError = error;
// Only retry on validation failures, not on other errors
if (errorMessage.includes("Download validation failed") ||
errorMessage.includes("Range request not supported") ||
errorMessage.includes("Content-Range header")) {
if (attempt < maxRetries) {
const delayMs = Math.pow(2, attempt - 1) * 1000; // exponential backoff
core.warning(`Download attempt ${attempt} failed: ${errorMessage}. Retrying in ${delayMs}ms...`);
yield new Promise(resolve => setTimeout(resolve, delayMs));
continue;
}
}
// For non-retryable errors or max retries reached, throw the error
throw error;
}
}
// This should never be reached, but just in case
throw lastError || new Error("Download failed after all retry attempts");
});
}
exports.downloadCache = downloadCache;
@@ -99540,7 +99569,7 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
});
};
Object.defineProperty(exports, "__esModule", ({ value: true }));
exports.saveCache = exports.restoreCache = exports.isFeatureAvailable = exports.ReserveCacheError = exports.ValidationError = void 0;
exports.saveCache = exports.restoreCache = exports.isFeatureAvailable = exports.DownloadValidationError = exports.ReserveCacheError = exports.ValidationError = void 0;
const core = __importStar(__nccwpck_require__(7484));
const path = __importStar(__nccwpck_require__(6928));
const utils = __importStar(__nccwpck_require__(8299));
@@ -99562,6 +99591,14 @@ class ReserveCacheError extends Error {
}
}
exports.ReserveCacheError = ReserveCacheError;
class DownloadValidationError extends Error {
constructor(message) {
super(message);
this.name = "DownloadValidationError";
Object.setPrototypeOf(this, DownloadValidationError.prototype);
}
}
exports.DownloadValidationError = DownloadValidationError;
function checkPaths(paths) {
if (!paths || paths.length === 0) {
throw new ValidationError(`Path Validation Error: At least one directory or file path is required`);
@@ -99633,6 +99670,15 @@ function restoreCache(paths, primaryKey, restoreKeys, options, enableCrossOsArch
}
const archiveFileSize = utils.getArchiveFileSizeInBytes(archivePath);
core.info(`Cache Size: ~${Math.round(archiveFileSize / (1024 * 1024))} MB (${archiveFileSize} B)`);
// Validate downloaded archive
if (archiveFileSize === 0) {
throw new DownloadValidationError("Downloaded cache archive is empty (0 bytes). This may indicate a failed download or corrupted cache.");
}
// Minimum size check - a valid tar archive needs at least 512 bytes for header
const MIN_ARCHIVE_SIZE = 512;
if (archiveFileSize < MIN_ARCHIVE_SIZE) {
throw new DownloadValidationError(`Downloaded cache archive is too small (${archiveFileSize} bytes). Expected at least ${MIN_ARCHIVE_SIZE} bytes for a valid archive.`);
}
yield (0, tar_1.extractTar)(archivePath, compressionMethod);
core.info("Cache restored successfully");
return cacheEntry.cacheKey;
@@ -99642,6 +99688,10 @@ function restoreCache(paths, primaryKey, restoreKeys, options, enableCrossOsArch
if (typedError.name === ValidationError.name) {
throw error;
}
else if (typedError.name === DownloadValidationError.name) {
// Log download validation errors as warnings but don't fail the workflow
core.warning(`Cache download validation failed: ${typedError.message}`);
}
else {
// Suppress all non-validation cache related errors because caching should be optional
core.warning(`Failed to restore: ${error.message}`);
@@ -99888,6 +99938,7 @@ function downloadCacheHttpClientConcurrent(archiveLocation, archivePath, options
socketTimeout: options.timeoutInMs,
keepAlive: true
});
let progress;
try {
const res = yield (0, requestUtils_1.retryHttpClientResponse)("downloadCacheMetadata", () => __awaiter(this, void 0, void 0, function* () {
return yield httpClient.request("GET", archiveLocation, null, {
@@ -99921,7 +99972,7 @@ function downloadCacheHttpClientConcurrent(archiveLocation, archivePath, options
downloads.reverse();
let actives = 0;
let bytesDownloaded = 0;
const progress = new DownloadProgress(length);
progress = new DownloadProgress(length);
progress.startDisplayTimer();
const progressFn = progress.onProgress();
const activeDownloads = [];
@@ -99944,8 +99995,14 @@ function downloadCacheHttpClientConcurrent(archiveLocation, archivePath, options
while (actives > 0) {
yield waitAndWrite();
}
// Validate that we downloaded the expected amount of data
if (bytesDownloaded !== length) {
throw new Error(`Download validation failed: Expected ${length} bytes but downloaded ${bytesDownloaded} bytes`);
}
progress.stopDisplayTimer();
}
finally {
progress === null || progress === void 0 ? void 0 : progress.stopDisplayTimer();
httpClient.dispose();
yield archiveDescriptor.close();
}
@@ -100047,9 +100104,9 @@ exports.restoreRun = exports.restoreOnlyRun = exports.restoreImpl = void 0;
const cache = __importStar(__nccwpck_require__(5116));
const core = __importStar(__nccwpck_require__(7484));
const constants_1 = __nccwpck_require__(7242);
const custom = __importStar(__nccwpck_require__(897));
const stateProvider_1 = __nccwpck_require__(2879);
const utils = __importStar(__nccwpck_require__(8270));
const custom = __importStar(__nccwpck_require__(897));
const canSaveToS3 = process.env["RUNS_ON_S3_BUCKET_CACHE"] !== undefined;
function restoreImpl(stateProvider, earlyExit) {
return __awaiter(this, void 0, void 0, function* () {

dist/restore/index.js (vendored)

@@ -99371,6 +99371,7 @@ if (process.env.RUNS_ON_RUNNER_NAME && process.env.RUNS_ON_RUNNER_NAME !== "") {
}
const versionSalt = "1.0";
const bucketName = process.env.RUNS_ON_S3_BUCKET_CACHE;
const endpoint = process.env.RUNS_ON_S3_BUCKET_ENDPOINT;
const region = process.env.RUNS_ON_AWS_REGION ||
process.env.AWS_REGION ||
process.env.AWS_DEFAULT_REGION;
@@ -99380,7 +99381,7 @@ const uploadQueueSize = Number(process.env.UPLOAD_QUEUE_SIZE || "4");
const uploadPartSize = Number(process.env.UPLOAD_PART_SIZE || "32") * 1024 * 1024;
const downloadQueueSize = Number(process.env.DOWNLOAD_QUEUE_SIZE || "8");
const downloadPartSize = Number(process.env.DOWNLOAD_PART_SIZE || "16") * 1024 * 1024;
const s3Client = new client_s3_1.S3Client({ region, forcePathStyle });
const s3Client = new client_s3_1.S3Client({ region, forcePathStyle, endpoint });
function getCacheVersion(paths, compressionMethod, enableCrossOsArchive = false) {
// don't pass changes upstream
const components = paths.slice();
@@ -99448,14 +99449,42 @@ function downloadCache(archiveLocation, archivePath, options) {
}
const archiveUrl = new URL(archiveLocation);
const objectKey = archiveUrl.pathname.slice(1);
const command = new client_s3_1.GetObjectCommand({
Bucket: bucketName,
Key: objectKey
});
const url = yield getSignedUrl(s3Client, command, {
expiresIn: 3600
});
yield (0, downloadUtils_1.downloadCacheHttpClientConcurrent)(url, archivePath, Object.assign(Object.assign({}, options), { downloadConcurrency: downloadQueueSize, concurrentBlobDownloads: true, partSize: downloadPartSize }));
// Retry logic for download validation failures
const maxRetries = 3;
let lastError;
for (let attempt = 1; attempt <= maxRetries; attempt++) {
try {
const command = new client_s3_1.GetObjectCommand({
Bucket: bucketName,
Key: objectKey
});
const url = yield getSignedUrl(s3Client, command, {
expiresIn: 3600
});
yield (0, downloadUtils_1.downloadCacheHttpClientConcurrent)(url, archivePath, Object.assign(Object.assign({}, options), { downloadConcurrency: downloadQueueSize, concurrentBlobDownloads: true, partSize: downloadPartSize }));
// If we get here, download succeeded
return;
}
catch (error) {
const errorMessage = error.message;
lastError = error;
// Only retry on validation failures, not on other errors
if (errorMessage.includes("Download validation failed") ||
errorMessage.includes("Range request not supported") ||
errorMessage.includes("Content-Range header")) {
if (attempt < maxRetries) {
const delayMs = Math.pow(2, attempt - 1) * 1000; // exponential backoff
core.warning(`Download attempt ${attempt} failed: ${errorMessage}. Retrying in ${delayMs}ms...`);
yield new Promise(resolve => setTimeout(resolve, delayMs));
continue;
}
}
// For non-retryable errors or max retries reached, throw the error
throw error;
}
}
// This should never be reached, but just in case
throw lastError || new Error("Download failed after all retry attempts");
});
}
exports.downloadCache = downloadCache;
@@ -99540,7 +99569,7 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
});
};
Object.defineProperty(exports, "__esModule", ({ value: true }));
exports.saveCache = exports.restoreCache = exports.isFeatureAvailable = exports.ReserveCacheError = exports.ValidationError = void 0;
exports.saveCache = exports.restoreCache = exports.isFeatureAvailable = exports.DownloadValidationError = exports.ReserveCacheError = exports.ValidationError = void 0;
const core = __importStar(__nccwpck_require__(7484));
const path = __importStar(__nccwpck_require__(6928));
const utils = __importStar(__nccwpck_require__(8299));
@@ -99562,6 +99591,14 @@ class ReserveCacheError extends Error {
}
}
exports.ReserveCacheError = ReserveCacheError;
class DownloadValidationError extends Error {
constructor(message) {
super(message);
this.name = "DownloadValidationError";
Object.setPrototypeOf(this, DownloadValidationError.prototype);
}
}
exports.DownloadValidationError = DownloadValidationError;
function checkPaths(paths) {
if (!paths || paths.length === 0) {
throw new ValidationError(`Path Validation Error: At least one directory or file path is required`);
@@ -99633,6 +99670,15 @@ function restoreCache(paths, primaryKey, restoreKeys, options, enableCrossOsArch
}
const archiveFileSize = utils.getArchiveFileSizeInBytes(archivePath);
core.info(`Cache Size: ~${Math.round(archiveFileSize / (1024 * 1024))} MB (${archiveFileSize} B)`);
// Validate downloaded archive
if (archiveFileSize === 0) {
throw new DownloadValidationError("Downloaded cache archive is empty (0 bytes). This may indicate a failed download or corrupted cache.");
}
// Minimum size check - a valid tar archive needs at least 512 bytes for header
const MIN_ARCHIVE_SIZE = 512;
if (archiveFileSize < MIN_ARCHIVE_SIZE) {
throw new DownloadValidationError(`Downloaded cache archive is too small (${archiveFileSize} bytes). Expected at least ${MIN_ARCHIVE_SIZE} bytes for a valid archive.`);
}
yield (0, tar_1.extractTar)(archivePath, compressionMethod);
core.info("Cache restored successfully");
return cacheEntry.cacheKey;
@@ -99642,6 +99688,10 @@ function restoreCache(paths, primaryKey, restoreKeys, options, enableCrossOsArch
if (typedError.name === ValidationError.name) {
throw error;
}
else if (typedError.name === DownloadValidationError.name) {
// Log download validation errors as warnings but don't fail the workflow
core.warning(`Cache download validation failed: ${typedError.message}`);
}
else {
// Suppress all non-validation cache related errors because caching should be optional
core.warning(`Failed to restore: ${error.message}`);
@@ -99888,6 +99938,7 @@ function downloadCacheHttpClientConcurrent(archiveLocation, archivePath, options
socketTimeout: options.timeoutInMs,
keepAlive: true
});
let progress;
try {
const res = yield (0, requestUtils_1.retryHttpClientResponse)("downloadCacheMetadata", () => __awaiter(this, void 0, void 0, function* () {
return yield httpClient.request("GET", archiveLocation, null, {
@@ -99921,7 +99972,7 @@ function downloadCacheHttpClientConcurrent(archiveLocation, archivePath, options
downloads.reverse();
let actives = 0;
let bytesDownloaded = 0;
const progress = new DownloadProgress(length);
progress = new DownloadProgress(length);
progress.startDisplayTimer();
const progressFn = progress.onProgress();
const activeDownloads = [];
@@ -99944,8 +99995,14 @@ function downloadCacheHttpClientConcurrent(archiveLocation, archivePath, options
while (actives > 0) {
yield waitAndWrite();
}
// Validate that we downloaded the expected amount of data
if (bytesDownloaded !== length) {
throw new Error(`Download validation failed: Expected ${length} bytes but downloaded ${bytesDownloaded} bytes`);
}
progress.stopDisplayTimer();
}
finally {
progress === null || progress === void 0 ? void 0 : progress.stopDisplayTimer();
httpClient.dispose();
yield archiveDescriptor.close();
}
@@ -100047,9 +100104,9 @@ exports.restoreRun = exports.restoreOnlyRun = exports.restoreImpl = void 0;
const cache = __importStar(__nccwpck_require__(5116));
const core = __importStar(__nccwpck_require__(7484));
const constants_1 = __nccwpck_require__(7242);
const custom = __importStar(__nccwpck_require__(897));
const stateProvider_1 = __nccwpck_require__(2879);
const utils = __importStar(__nccwpck_require__(8270));
const custom = __importStar(__nccwpck_require__(897));
const canSaveToS3 = process.env["RUNS_ON_S3_BUCKET_CACHE"] !== undefined;
function restoreImpl(stateProvider, earlyExit) {
return __awaiter(this, void 0, void 0, function* () {


@@ -99371,6 +99371,7 @@ if (process.env.RUNS_ON_RUNNER_NAME && process.env.RUNS_ON_RUNNER_NAME !== "") {
}
const versionSalt = "1.0";
const bucketName = process.env.RUNS_ON_S3_BUCKET_CACHE;
const endpoint = process.env.RUNS_ON_S3_BUCKET_ENDPOINT;
const region = process.env.RUNS_ON_AWS_REGION ||
process.env.AWS_REGION ||
process.env.AWS_DEFAULT_REGION;
@@ -99380,7 +99381,7 @@ const uploadQueueSize = Number(process.env.UPLOAD_QUEUE_SIZE || "4");
const uploadPartSize = Number(process.env.UPLOAD_PART_SIZE || "32") * 1024 * 1024;
const downloadQueueSize = Number(process.env.DOWNLOAD_QUEUE_SIZE || "8");
const downloadPartSize = Number(process.env.DOWNLOAD_PART_SIZE || "16") * 1024 * 1024;
const s3Client = new client_s3_1.S3Client({ region, forcePathStyle });
const s3Client = new client_s3_1.S3Client({ region, forcePathStyle, endpoint });
function getCacheVersion(paths, compressionMethod, enableCrossOsArchive = false) {
// don't pass changes upstream
const components = paths.slice();
@@ -99448,14 +99449,42 @@ function downloadCache(archiveLocation, archivePath, options) {
}
const archiveUrl = new URL(archiveLocation);
const objectKey = archiveUrl.pathname.slice(1);
const command = new client_s3_1.GetObjectCommand({
Bucket: bucketName,
Key: objectKey
});
const url = yield getSignedUrl(s3Client, command, {
expiresIn: 3600
});
yield (0, downloadUtils_1.downloadCacheHttpClientConcurrent)(url, archivePath, Object.assign(Object.assign({}, options), { downloadConcurrency: downloadQueueSize, concurrentBlobDownloads: true, partSize: downloadPartSize }));
// Retry logic for download validation failures
const maxRetries = 3;
let lastError;
for (let attempt = 1; attempt <= maxRetries; attempt++) {
try {
const command = new client_s3_1.GetObjectCommand({
Bucket: bucketName,
Key: objectKey
});
const url = yield getSignedUrl(s3Client, command, {
expiresIn: 3600
});
yield (0, downloadUtils_1.downloadCacheHttpClientConcurrent)(url, archivePath, Object.assign(Object.assign({}, options), { downloadConcurrency: downloadQueueSize, concurrentBlobDownloads: true, partSize: downloadPartSize }));
// If we get here, download succeeded
return;
}
catch (error) {
const errorMessage = error.message;
lastError = error;
// Only retry on validation failures, not on other errors
if (errorMessage.includes("Download validation failed") ||
errorMessage.includes("Range request not supported") ||
errorMessage.includes("Content-Range header")) {
if (attempt < maxRetries) {
const delayMs = Math.pow(2, attempt - 1) * 1000; // exponential backoff
core.warning(`Download attempt ${attempt} failed: ${errorMessage}. Retrying in ${delayMs}ms...`);
yield new Promise(resolve => setTimeout(resolve, delayMs));
continue;
}
}
// For non-retryable errors or max retries reached, throw the error
throw error;
}
}
// This should never be reached, but just in case
throw lastError || new Error("Download failed after all retry attempts");
});
}
exports.downloadCache = downloadCache;
@@ -99540,7 +99569,7 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
});
};
Object.defineProperty(exports, "__esModule", ({ value: true }));
exports.saveCache = exports.restoreCache = exports.isFeatureAvailable = exports.ReserveCacheError = exports.ValidationError = void 0;
exports.saveCache = exports.restoreCache = exports.isFeatureAvailable = exports.DownloadValidationError = exports.ReserveCacheError = exports.ValidationError = void 0;
const core = __importStar(__nccwpck_require__(7484));
const path = __importStar(__nccwpck_require__(6928));
const utils = __importStar(__nccwpck_require__(8299));
@@ -99562,6 +99591,14 @@ class ReserveCacheError extends Error {
}
}
exports.ReserveCacheError = ReserveCacheError;
class DownloadValidationError extends Error {
constructor(message) {
super(message);
this.name = "DownloadValidationError";
Object.setPrototypeOf(this, DownloadValidationError.prototype);
}
}
exports.DownloadValidationError = DownloadValidationError;
function checkPaths(paths) {
if (!paths || paths.length === 0) {
throw new ValidationError(`Path Validation Error: At least one directory or file path is required`);
@@ -99633,6 +99670,15 @@ function restoreCache(paths, primaryKey, restoreKeys, options, enableCrossOsArch
}
const archiveFileSize = utils.getArchiveFileSizeInBytes(archivePath);
core.info(`Cache Size: ~${Math.round(archiveFileSize / (1024 * 1024))} MB (${archiveFileSize} B)`);
// Validate downloaded archive
if (archiveFileSize === 0) {
throw new DownloadValidationError("Downloaded cache archive is empty (0 bytes). This may indicate a failed download or corrupted cache.");
}
// Minimum size check - a valid tar archive needs at least 512 bytes for header
const MIN_ARCHIVE_SIZE = 512;
if (archiveFileSize < MIN_ARCHIVE_SIZE) {
throw new DownloadValidationError(`Downloaded cache archive is too small (${archiveFileSize} bytes). Expected at least ${MIN_ARCHIVE_SIZE} bytes for a valid archive.`);
}
yield (0, tar_1.extractTar)(archivePath, compressionMethod);
core.info("Cache restored successfully");
return cacheEntry.cacheKey;
@@ -99642,6 +99688,10 @@ function restoreCache(paths, primaryKey, restoreKeys, options, enableCrossOsArch
if (typedError.name === ValidationError.name) {
throw error;
}
else if (typedError.name === DownloadValidationError.name) {
// Log download validation errors as warnings but don't fail the workflow
core.warning(`Cache download validation failed: ${typedError.message}`);
}
else {
// Suppress all non-validation cache related errors because caching should be optional
core.warning(`Failed to restore: ${error.message}`);
@@ -99888,6 +99938,7 @@ function downloadCacheHttpClientConcurrent(archiveLocation, archivePath, options
socketTimeout: options.timeoutInMs,
keepAlive: true
});
let progress;
try {
const res = yield (0, requestUtils_1.retryHttpClientResponse)("downloadCacheMetadata", () => __awaiter(this, void 0, void 0, function* () {
return yield httpClient.request("GET", archiveLocation, null, {
@@ -99921,7 +99972,7 @@ function downloadCacheHttpClientConcurrent(archiveLocation, archivePath, options
downloads.reverse();
let actives = 0;
let bytesDownloaded = 0;
const progress = new DownloadProgress(length);
progress = new DownloadProgress(length);
progress.startDisplayTimer();
const progressFn = progress.onProgress();
const activeDownloads = [];
@@ -99944,8 +99995,14 @@ function downloadCacheHttpClientConcurrent(archiveLocation, archivePath, options
while (actives > 0) {
yield waitAndWrite();
}
// Validate that we downloaded the expected amount of data
if (bytesDownloaded !== length) {
throw new Error(`Download validation failed: Expected ${length} bytes but downloaded ${bytesDownloaded} bytes`);
}
progress.stopDisplayTimer();
}
finally {
progress === null || progress === void 0 ? void 0 : progress.stopDisplayTimer();
httpClient.dispose();
yield archiveDescriptor.close();
}
@@ -100047,9 +100104,9 @@ exports.saveRun = exports.saveOnlyRun = exports.saveImpl = void 0;
const cache = __importStar(__nccwpck_require__(5116));
const core = __importStar(__nccwpck_require__(7484));
const constants_1 = __nccwpck_require__(7242);
const custom = __importStar(__nccwpck_require__(897));
const stateProvider_1 = __nccwpck_require__(2879);
const utils = __importStar(__nccwpck_require__(8270));
const custom = __importStar(__nccwpck_require__(897));
const canSaveToS3 = process.env["RUNS_ON_S3_BUCKET_CACHE"] !== undefined;
// Catch and log any unhandled exceptions. These exceptions can leak out of the uploadChunk method in
// @actions/toolkit when a failed upload closes the file descriptor causing any in-process reads to

dist/save/index.js (vendored)

@@ -99371,6 +99371,7 @@ if (process.env.RUNS_ON_RUNNER_NAME && process.env.RUNS_ON_RUNNER_NAME !== "") {
}
const versionSalt = "1.0";
const bucketName = process.env.RUNS_ON_S3_BUCKET_CACHE;
const endpoint = process.env.RUNS_ON_S3_BUCKET_ENDPOINT;
const region = process.env.RUNS_ON_AWS_REGION ||
process.env.AWS_REGION ||
process.env.AWS_DEFAULT_REGION;
@@ -99380,7 +99381,7 @@ const uploadQueueSize = Number(process.env.UPLOAD_QUEUE_SIZE || "4");
const uploadPartSize = Number(process.env.UPLOAD_PART_SIZE || "32") * 1024 * 1024;
const downloadQueueSize = Number(process.env.DOWNLOAD_QUEUE_SIZE || "8");
const downloadPartSize = Number(process.env.DOWNLOAD_PART_SIZE || "16") * 1024 * 1024;
const s3Client = new client_s3_1.S3Client({ region, forcePathStyle });
const s3Client = new client_s3_1.S3Client({ region, forcePathStyle, endpoint });
function getCacheVersion(paths, compressionMethod, enableCrossOsArchive = false) {
// don't pass changes upstream
const components = paths.slice();
@@ -99448,14 +99449,42 @@ function downloadCache(archiveLocation, archivePath, options) {
}
const archiveUrl = new URL(archiveLocation);
const objectKey = archiveUrl.pathname.slice(1);
const command = new client_s3_1.GetObjectCommand({
Bucket: bucketName,
Key: objectKey
});
const url = yield getSignedUrl(s3Client, command, {
expiresIn: 3600
});
yield (0, downloadUtils_1.downloadCacheHttpClientConcurrent)(url, archivePath, Object.assign(Object.assign({}, options), { downloadConcurrency: downloadQueueSize, concurrentBlobDownloads: true, partSize: downloadPartSize }));
// Retry logic for download validation failures
const maxRetries = 3;
let lastError;
for (let attempt = 1; attempt <= maxRetries; attempt++) {
try {
const command = new client_s3_1.GetObjectCommand({
Bucket: bucketName,
Key: objectKey
});
const url = yield getSignedUrl(s3Client, command, {
expiresIn: 3600
});
yield (0, downloadUtils_1.downloadCacheHttpClientConcurrent)(url, archivePath, Object.assign(Object.assign({}, options), { downloadConcurrency: downloadQueueSize, concurrentBlobDownloads: true, partSize: downloadPartSize }));
// If we get here, download succeeded
return;
}
catch (error) {
const errorMessage = error.message;
lastError = error;
// Only retry on validation failures, not on other errors
if (errorMessage.includes("Download validation failed") ||
errorMessage.includes("Range request not supported") ||
errorMessage.includes("Content-Range header")) {
if (attempt < maxRetries) {
const delayMs = Math.pow(2, attempt - 1) * 1000; // exponential backoff
core.warning(`Download attempt ${attempt} failed: ${errorMessage}. Retrying in ${delayMs}ms...`);
yield new Promise(resolve => setTimeout(resolve, delayMs));
continue;
}
}
// For non-retryable errors or max retries reached, throw the error
throw error;
}
}
// This should never be reached, but just in case
throw lastError || new Error("Download failed after all retry attempts");
});
}
exports.downloadCache = downloadCache;
@@ -99540,7 +99569,7 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
});
};
Object.defineProperty(exports, "__esModule", ({ value: true }));
exports.saveCache = exports.restoreCache = exports.isFeatureAvailable = exports.ReserveCacheError = exports.ValidationError = void 0;
exports.saveCache = exports.restoreCache = exports.isFeatureAvailable = exports.DownloadValidationError = exports.ReserveCacheError = exports.ValidationError = void 0;
const core = __importStar(__nccwpck_require__(7484));
const path = __importStar(__nccwpck_require__(6928));
const utils = __importStar(__nccwpck_require__(8299));
@@ -99562,6 +99591,14 @@ class ReserveCacheError extends Error {
}
}
exports.ReserveCacheError = ReserveCacheError;
class DownloadValidationError extends Error {
constructor(message) {
super(message);
this.name = "DownloadValidationError";
Object.setPrototypeOf(this, DownloadValidationError.prototype);
}
}
exports.DownloadValidationError = DownloadValidationError;
function checkPaths(paths) {
if (!paths || paths.length === 0) {
throw new ValidationError(`Path Validation Error: At least one directory or file path is required`);
@@ -99633,6 +99670,15 @@ function restoreCache(paths, primaryKey, restoreKeys, options, enableCrossOsArch
}
const archiveFileSize = utils.getArchiveFileSizeInBytes(archivePath);
core.info(`Cache Size: ~${Math.round(archiveFileSize / (1024 * 1024))} MB (${archiveFileSize} B)`);
// Validate downloaded archive
if (archiveFileSize === 0) {
throw new DownloadValidationError("Downloaded cache archive is empty (0 bytes). This may indicate a failed download or corrupted cache.");
}
// Minimum size check - a valid tar archive needs at least 512 bytes for header
const MIN_ARCHIVE_SIZE = 512;
if (archiveFileSize < MIN_ARCHIVE_SIZE) {
throw new DownloadValidationError(`Downloaded cache archive is too small (${archiveFileSize} bytes). Expected at least ${MIN_ARCHIVE_SIZE} bytes for a valid archive.`);
}
yield (0, tar_1.extractTar)(archivePath, compressionMethod);
core.info("Cache restored successfully");
return cacheEntry.cacheKey;
@@ -99642,6 +99688,10 @@ function restoreCache(paths, primaryKey, restoreKeys, options, enableCrossOsArch
if (typedError.name === ValidationError.name) {
throw error;
}
else if (typedError.name === DownloadValidationError.name) {
// Log download validation errors as warnings but don't fail the workflow
core.warning(`Cache download validation failed: ${typedError.message}`);
}
else {
// Suppress all non-validation cache related errors because caching should be optional
core.warning(`Failed to restore: ${error.message}`);
@@ -99888,6 +99938,7 @@ function downloadCacheHttpClientConcurrent(archiveLocation, archivePath, options
socketTimeout: options.timeoutInMs,
keepAlive: true
});
let progress;
try {
const res = yield (0, requestUtils_1.retryHttpClientResponse)("downloadCacheMetadata", () => __awaiter(this, void 0, void 0, function* () {
return yield httpClient.request("GET", archiveLocation, null, {
@@ -99921,7 +99972,7 @@ function downloadCacheHttpClientConcurrent(archiveLocation, archivePath, options
downloads.reverse();
let actives = 0;
let bytesDownloaded = 0;
const progress = new DownloadProgress(length);
progress = new DownloadProgress(length);
progress.startDisplayTimer();
const progressFn = progress.onProgress();
const activeDownloads = [];
@@ -99944,8 +99995,14 @@ function downloadCacheHttpClientConcurrent(archiveLocation, archivePath, options
while (actives > 0) {
yield waitAndWrite();
}
// Validate that we downloaded the expected amount of data
if (bytesDownloaded !== length) {
throw new Error(`Download validation failed: Expected ${length} bytes but downloaded ${bytesDownloaded} bytes`);
}
progress.stopDisplayTimer();
}
finally {
progress === null || progress === void 0 ? void 0 : progress.stopDisplayTimer();
httpClient.dispose();
yield archiveDescriptor.close();
}
@@ -100047,9 +100104,9 @@ exports.saveRun = exports.saveOnlyRun = exports.saveImpl = void 0;
const cache = __importStar(__nccwpck_require__(5116));
const core = __importStar(__nccwpck_require__(7484));
const constants_1 = __nccwpck_require__(7242);
const custom = __importStar(__nccwpck_require__(897));
const stateProvider_1 = __nccwpck_require__(2879);
const utils = __importStar(__nccwpck_require__(8270));
const custom = __importStar(__nccwpck_require__(897));
const canSaveToS3 = process.env["RUNS_ON_S3_BUCKET_CACHE"] !== undefined;
// Catch and log any unhandled exceptions. These exceptions can leak out of the uploadChunk method in
// @actions/toolkit when a failed upload closes the file descriptor causing any in-process reads to

package-lock.json (generated)

@@ -32,7 +32,7 @@
"eslint-plugin-simple-import-sort": "^7.0.0",
"jest": "^28.1.3",
"jest-circus": "^27.5.1",
"nock": "^13.2.9",
"nock": "^13.5.6",
"prettier": "^2.8.8",
"ts-jest": "^28.0.8",
"typescript": "^4.9.3"
@@ -4792,6 +4792,7 @@
"integrity": "sha512-jI/ewavBQ7X5178262JQR0ewicPAcJhXS/iFaNJl0VHLfyosZ/kwSrsa6VNQNSO8i9d8SqdRgOtZSOKJ/+iNMw==",
"deprecated": "This is a stub types definition. nock provides its own type definitions, so you do not need this installed.",
"dev": true,
"license": "MIT",
"dependencies": {
"nock": "*"
}
@@ -10078,12 +10079,6 @@
"node": ">=4"
}
},
"node_modules/lodash": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
"integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
"dev": true
},
"node_modules/lodash.memoize": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz",
@@ -10225,14 +10220,14 @@
"dev": true
},
"node_modules/nock": {
"version": "13.2.9",
"resolved": "https://registry.npmjs.org/nock/-/nock-13.2.9.tgz",
"integrity": "sha512-1+XfJNYF1cjGB+TKMWi29eZ0b82QOvQs2YoLNzbpWGqFMtRQHTa57osqdGj4FrFPgkO4D4AZinzUJR9VvW3QUA==",
"version": "13.5.6",
"resolved": "https://registry.npmjs.org/nock/-/nock-13.5.6.tgz",
"integrity": "sha512-o2zOYiCpzRqSzPj0Zt/dQ/DqZeYoaQ7TUonc/xUPjCGl9WeHpNbxgVvOquXYAaJzI0M9BXV3HTzG0p8IUAbBTQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"debug": "^4.1.0",
"json-stringify-safe": "^5.0.1",
"lodash": "^4.17.21",
"propagate": "^2.0.0"
},
"engines": {
@@ -19767,12 +19762,6 @@
"path-exists": "^3.0.0"
}
},
"lodash": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
"integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
"dev": true
},
"lodash.memoize": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz",
@@ -19886,14 +19875,13 @@
"dev": true
},
"nock": {
"version": "13.2.9",
"resolved": "https://registry.npmjs.org/nock/-/nock-13.2.9.tgz",
"integrity": "sha512-1+XfJNYF1cjGB+TKMWi29eZ0b82QOvQs2YoLNzbpWGqFMtRQHTa57osqdGj4FrFPgkO4D4AZinzUJR9VvW3QUA==",
"version": "13.5.6",
"resolved": "https://registry.npmjs.org/nock/-/nock-13.5.6.tgz",
"integrity": "sha512-o2zOYiCpzRqSzPj0Zt/dQ/DqZeYoaQ7TUonc/xUPjCGl9WeHpNbxgVvOquXYAaJzI0M9BXV3HTzG0p8IUAbBTQ==",
"dev": true,
"requires": {
"debug": "^4.1.0",
"json-stringify-safe": "^5.0.1",
"lodash": "^4.17.21",
"propagate": "^2.0.0"
}
},


@@ -46,7 +46,7 @@
"eslint-plugin-simple-import-sort": "^7.0.0",
"jest": "^28.1.3",
"jest-circus": "^27.5.1",
"nock": "^13.2.9",
"nock": "^13.5.6",
"prettier": "^2.8.8",
"ts-jest": "^28.0.8",
"typescript": "^4.9.3"


@@ -36,6 +36,7 @@ if (process.env.RUNS_ON_RUNNER_NAME && process.env.RUNS_ON_RUNNER_NAME !== "") {
const versionSalt = "1.0";
const bucketName = process.env.RUNS_ON_S3_BUCKET_CACHE;
const endpoint = process.env.RUNS_ON_S3_BUCKET_ENDPOINT;
const region =
process.env.RUNS_ON_AWS_REGION ||
process.env.AWS_REGION ||
@@ -51,7 +52,7 @@ const downloadQueueSize = Number(process.env.DOWNLOAD_QUEUE_SIZE || "8");
const downloadPartSize =
Number(process.env.DOWNLOAD_PART_SIZE || "16") * 1024 * 1024;
const s3Client = new S3Client({ region, forcePathStyle });
const s3Client = new S3Client({ region, forcePathStyle, endpoint });
export function getCacheVersion(
paths: string[],
@@ -153,19 +154,57 @@ export async function downloadCache(
const archiveUrl = new URL(archiveLocation);
const objectKey = archiveUrl.pathname.slice(1);
const command = new GetObjectCommand({
Bucket: bucketName,
Key: objectKey
});
const url = await getSignedUrl(s3Client, command, {
expiresIn: 3600
});
await downloadCacheHttpClientConcurrent(url, archivePath, {
...options,
downloadConcurrency: downloadQueueSize,
concurrentBlobDownloads: true,
partSize: downloadPartSize
});
// Retry logic for download validation failures
const maxRetries = 3;
let lastError: Error | undefined;
for (let attempt = 1; attempt <= maxRetries; attempt++) {
try {
const command = new GetObjectCommand({
Bucket: bucketName,
Key: objectKey
});
const url = await getSignedUrl(s3Client, command, {
expiresIn: 3600
});
await downloadCacheHttpClientConcurrent(url, archivePath, {
...options,
downloadConcurrency: downloadQueueSize,
concurrentBlobDownloads: true,
partSize: downloadPartSize
});
// If we get here, download succeeded
return;
} catch (error) {
const errorMessage = (error as Error).message;
lastError = error as Error;
// Only retry on validation failures, not on other errors
if (
errorMessage.includes("Download validation failed") ||
errorMessage.includes("Range request not supported") ||
errorMessage.includes("Content-Range header")
) {
if (attempt < maxRetries) {
const delayMs = Math.pow(2, attempt - 1) * 1000; // exponential backoff
core.warning(
`Download attempt ${attempt} failed: ${errorMessage}. Retrying in ${delayMs}ms...`
);
await new Promise(resolve => setTimeout(resolve, delayMs));
continue;
}
}
// For non-retryable errors or max retries reached, throw the error
throw error;
}
}
// This should never be reached, but just in case
throw lastError || new Error("Download failed after all retry attempts");
}
export async function saveCache(


@@ -27,6 +27,14 @@ export class ReserveCacheError extends Error {
}
}
export class DownloadValidationError extends Error {
constructor(message: string) {
super(message);
this.name = "DownloadValidationError";
Object.setPrototypeOf(this, DownloadValidationError.prototype);
}
}
function checkPaths(paths: string[]): void {
if (!paths || paths.length === 0) {
throw new ValidationError(
@@ -135,6 +143,21 @@ export async function restoreCache(
)} MB (${archiveFileSize} B)`
);
// Validate downloaded archive
if (archiveFileSize === 0) {
throw new DownloadValidationError(
"Downloaded cache archive is empty (0 bytes). This may indicate a failed download or corrupted cache."
);
}
// Minimum size check - a valid tar archive needs at least 512 bytes for header
const MIN_ARCHIVE_SIZE = 512;
if (archiveFileSize < MIN_ARCHIVE_SIZE) {
throw new DownloadValidationError(
`Downloaded cache archive is too small (${archiveFileSize} bytes). Expected at least ${MIN_ARCHIVE_SIZE} bytes for a valid archive.`
);
}
await extractTar(archivePath, compressionMethod);
core.info("Cache restored successfully");
@@ -143,6 +166,11 @@ export async function restoreCache(
const typedError = error as Error;
if (typedError.name === ValidationError.name) {
throw error;
} else if (typedError.name === DownloadValidationError.name) {
// Log download validation errors as warnings but don't fail the workflow
core.warning(
`Cache download validation failed: ${typedError.message}`
);
} else {
// Suppress all non-validation cache related errors because caching should be optional
core.warning(`Failed to restore: ${(error as Error).message}`);


@@ -160,6 +160,7 @@ export async function downloadCacheHttpClientConcurrent(
socketTimeout: options.timeoutInMs,
keepAlive: true
});
let progress: DownloadProgress | undefined;
try {
const res = await retryHttpClientResponse(
"downloadCacheMetadata",
@@ -210,7 +211,7 @@ export async function downloadCacheHttpClientConcurrent(
downloads.reverse();
let actives = 0;
let bytesDownloaded = 0;
const progress = new DownloadProgress(length);
progress = new DownloadProgress(length);
progress.startDisplayTimer();
const progressFn = progress.onProgress();
@@ -246,7 +247,17 @@ export async function downloadCacheHttpClientConcurrent(
while (actives > 0) {
await waitAndWrite();
}
// Validate that we downloaded the expected amount of data
if (bytesDownloaded !== length) {
throw new Error(
`Download validation failed: Expected ${length} bytes but downloaded ${bytesDownloaded} bytes`
);
}
progress.stopDisplayTimer();
} finally {
progress?.stopDisplayTimer();
httpClient.dispose();
await archiveDescriptor.close();
}


@@ -2,6 +2,7 @@ import * as cache from "@actions/cache";
import * as core from "@actions/core";
import { Events, Inputs, Outputs, State } from "./constants";
import * as custom from "./custom/cache";
import {
IStateProvider,
NullStateProvider,
@@ -9,7 +10,6 @@ } from "./stateProvider";
} from "./stateProvider";
import * as utils from "./utils/actionUtils";
import * as custom from "./custom/cache";
const canSaveToS3 = process.env["RUNS_ON_S3_BUCKET_CACHE"] !== undefined;
export async function restoreImpl(


@@ -2,6 +2,7 @@ import * as cache from "@actions/cache";
import * as core from "@actions/core";
import { Events, Inputs, State } from "./constants";
import * as custom from "./custom/cache";
import {
IStateProvider,
NullStateProvider,
@@ -9,7 +10,6 @@ } from "./stateProvider";
} from "./stateProvider";
import * as utils from "./utils/actionUtils";
import * as custom from "./custom/cache";
const canSaveToS3 = process.env["RUNS_ON_S3_BUCKET_CACHE"] !== undefined;
// Catch and log any unhandled exceptions. These exceptions can leak out of the uploadChunk method in